content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
setwd("~/Titanic Task")
# Titanic survival prediction: impute missing values on the combined data,
# train a random forest on the training rows, write test predictions to CSV.
# Read in train and test data
titanic.train <- read.csv(file = "train.csv", stringsAsFactors = FALSE, header = TRUE)
titanic.test <- read.csv(file = "test.csv", stringsAsFactors = FALSE, header = TRUE)
# Flag each row's origin so the combined data can be split apart again later
titanic.train$IsTrainSet <- TRUE
titanic.test$IsTrainSet <- FALSE
# The test set has no outcome column; add it so rbind() sees matching columns
titanic.test$Survived <- NA
# Merging train and test dataSets
titanic.full <- rbind(titanic.train, titanic.test)
# Clean missing values: impute the most common port and the median age/fare
titanic.full[titanic.full$Embarked == '', "Embarked"] <- 'S'
age.median <- median(titanic.full$Age, na.rm = TRUE)
titanic.full[is.na(titanic.full$Age), "Age"] <- age.median
fare.median <- median(titanic.full$Fare, na.rm = TRUE)
titanic.full[is.na(titanic.full$Fare), "Fare"] <- fare.median
# Categorical casting
titanic.full$Pclass <- as.factor(titanic.full$Pclass)
titanic.full$Sex <- as.factor(titanic.full$Sex)
titanic.full$Embarked <- as.factor(titanic.full$Embarked)
# Split dataSet back into train and test
titanic.train <- titanic.full[titanic.full$IsTrainSet == TRUE, ]
titanic.test <- titanic.full[titanic.full$IsTrainSet == FALSE, ]
# Categorical casting of Survived (train only; the test values are all NA)
titanic.train$Survived <- as.factor(titanic.train$Survived)
# Formula to predict Survived from the given columns
survived.equation <- "Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked"
survived.formula <- as.formula(survived.equation)
# Install randomForest only when it is missing; unconditionally calling
# install.packages() on every run is slow and needs network access.
if (!requireNamespace("randomForest", quietly = TRUE)) {
  install.packages("randomForest")
}
library(randomForest)
# Predictive model.
# NOTE(review): nodesize is scaled by the *test* set size here; presumably
# 0.01 * nrow(titanic.train) was intended -- confirm before changing, since
# changing it alters the fitted model.
titanic.model <- randomForest(formula = survived.formula, data = titanic.train, ntree = 500, mtry = 3, nodesize = 0.01 * nrow(titanic.test))
# Predict Survived for the test dataSet
Survived <- predict(titanic.model, newdata = titanic.test)
# Assemble the submission data frame (PassengerId, Survived)
PassengerId <- titanic.test$PassengerId
output.df <- as.data.frame(PassengerId)
output.df$Survived <- Survived
# Output results as a .csv file
write.csv(output.df, file = "RESULTS.csv", row.names = FALSE)
| /Titanic Task/Model.R | no_license | Mo-Abdalla/BigData | R | false | false | 2,118 | r | setwd("~/Titanic Task")
# Read in train and test data
titanic.train <- read.csv(file = "train.csv", stringsAsFactors = FALSE, header = TRUE)
titanic.test <- read.csv(file = "test.csv", stringsAsFactors = FALSE, header = TRUE)
# Flag each row's origin so the combined data can be split apart again later
titanic.train$IsTrainSet <- TRUE
titanic.test$IsTrainSet <- FALSE
# The test set has no outcome column; add it so rbind() sees matching columns
titanic.test$Survived <- NA
# Merging train and test dataSets
titanic.full <- rbind(titanic.train, titanic.test)
# Clean missing values: impute the most common port and the median age/fare
titanic.full[titanic.full$Embarked == '', "Embarked"] <- 'S'
age.median <- median(titanic.full$Age, na.rm = TRUE)
titanic.full[is.na(titanic.full$Age), "Age"] <- age.median
fare.median <- median(titanic.full$Fare, na.rm = TRUE)
titanic.full[is.na(titanic.full$Fare), "Fare"] <- fare.median
# Categorical casting
titanic.full$Pclass <- as.factor(titanic.full$Pclass)
titanic.full$Sex <- as.factor(titanic.full$Sex)
titanic.full$Embarked <- as.factor(titanic.full$Embarked)
# Split dataSet back into train and test
titanic.train <- titanic.full[titanic.full$IsTrainSet == TRUE, ]
titanic.test <- titanic.full[titanic.full$IsTrainSet == FALSE, ]
# Categorical casting of Survived (train only; the test values are all NA)
titanic.train$Survived <- as.factor(titanic.train$Survived)
# Formula to predict Survived from the given columns
survived.equation <- "Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked"
survived.formula <- as.formula(survived.equation)
# Install randomForest only when it is missing; unconditionally calling
# install.packages() on every run is slow and needs network access.
if (!requireNamespace("randomForest", quietly = TRUE)) {
  install.packages("randomForest")
}
library(randomForest)
# Predictive model.
# NOTE(review): nodesize is scaled by the *test* set size here; presumably
# 0.01 * nrow(titanic.train) was intended -- confirm before changing, since
# changing it alters the fitted model.
titanic.model <- randomForest(formula = survived.formula, data = titanic.train, ntree = 500, mtry = 3, nodesize = 0.01 * nrow(titanic.test))
# Predict Survived for the test dataSet
Survived <- predict(titanic.model, newdata = titanic.test)
# Assemble the submission data frame (PassengerId, Survived)
PassengerId <- titanic.test$PassengerId
output.df <- as.data.frame(PassengerId)
output.df$Survived <- Survived
# Output results as a .csv file
write.csv(output.df, file = "RESULTS.csv", row.names = FALSE)
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Thin R wrappers around the srm package's compiled C++ routines: each
# function forwards its arguments unchanged to the registered native symbol
# via .Call(), so all argument validation happens on the C++ side.
# Regenerate with Rcpp::compileAttributes() instead of editing manually.
SRM_RCPP_SIGMA_Y_INV_WOODBURY_PHI_INV <- function(SIGMA_U_INV, NI) {
    .Call('_srm_SRM_RCPP_SIGMA_Y_INV_WOODBURY_PHI_INV', PACKAGE = 'srm', SIGMA_U_INV, NI)
}
SRM_RCPP_SIGMA_Y_INV_WOODBURY_TMAT <- function(A_inv, Z_ind, Phi_inv) {
    .Call('_srm_SRM_RCPP_SIGMA_Y_INV_WOODBURY_TMAT', PACKAGE = 'srm', A_inv, Z_ind, Phi_inv)
}
SRM_RCPP_SIGMA_Y_INV_WOODBURY_ZA <- function(Z_ind, A_inv, NZ) {
    .Call('_srm_SRM_RCPP_SIGMA_Y_INV_WOODBURY_ZA', PACKAGE = 'srm', Z_ind, A_inv, NZ)
}
SRM_RCPP_SIGMA_Y_INV_WOODBURY_Y_INV <- function(ZA, T_inv, A_inv) {
    .Call('_srm_SRM_RCPP_SIGMA_Y_INV_WOODBURY_Y_INV', PACKAGE = 'srm', ZA, T_inv, A_inv)
}
SRM_RCPP_COLSUMS <- function(x) {
    .Call('_srm_SRM_RCPP_COLSUMS', PACKAGE = 'srm', x)
}
SRM_RCPP_ROWSUMS <- function(x) {
    .Call('_srm_SRM_RCPP_ROWSUMS', PACKAGE = 'srm', x)
}
SRM_ARBSRM_TRACE_PRODUCT_MATRIX <- function(x, y) {
    .Call('_srm_SRM_ARBSRM_TRACE_PRODUCT_MATRIX', PACKAGE = 'srm', x, y)
}
SRM_ARBSRM_TRACE_PRODUCT_MATRIX_TRANSPOSE <- function(x, y) {
    .Call('_srm_SRM_ARBSRM_TRACE_PRODUCT_MATRIX_TRANSPOSE', PACKAGE = 'srm', x, y)
}
SRM_RCPP_SRM_ARBSRM_ONE_GROUP_ESTIMATE <- function(data, data_resp, bivariate) {
    .Call('_srm_SRM_RCPP_SRM_ARBSRM_ONE_GROUP_ESTIMATE', PACKAGE = 'srm', data, data_resp, bivariate)
}
SRM_RCPP_MATRIX_TRACE_PRODUCT <- function(x, y) {
    .Call('_srm_SRM_RCPP_MATRIX_TRACE_PRODUCT', PACKAGE = 'srm', x, y)
}
SRM_RCPP_SRM_MATRIX_MULT_LOGICAL <- function(x, y) {
    .Call('_srm_SRM_RCPP_SRM_MATRIX_MULT_LOGICAL', PACKAGE = 'srm', x, y)
}
SRM_RCPP_SRM_ARBSRM_SE_CREATE_CWU <- function(NF) {
    .Call('_srm_SRM_RCPP_SRM_ARBSRM_SE_CREATE_CWU', PACKAGE = 'srm', NF)
}
SRM_RCPP_SRM_COMPUTE_HESSIAN_RR <- function(hess_list, mu_y_der_list, mu_y_der_bool_list, SIGMA_Y_inv, npar) {
    .Call('_srm_SRM_RCPP_SRM_COMPUTE_HESSIAN_RR', PACKAGE = 'srm', hess_list, mu_y_der_list, mu_y_der_bool_list, SIGMA_Y_inv, npar)
}
SRM_RCPP_SRM_COMPUTE_LOG_LIKELIHOOD_GRADIENT_W0 <- function(sigma_y_inv, sigma_y_der, der_bool) {
    .Call('_srm_SRM_RCPP_SRM_COMPUTE_LOG_LIKELIHOOD_GRADIENT_W0', PACKAGE = 'srm', sigma_y_inv, sigma_y_der, der_bool)
}
SRM_RCPP_SRM_COMPUTE_LOG_LIKELIHOOD_GRADIENT_W1 <- function(sigma_y_inv, sigma_y_der) {
    .Call('_srm_SRM_RCPP_SRM_COMPUTE_LOG_LIKELIHOOD_GRADIENT_W1', PACKAGE = 'srm', sigma_y_inv, sigma_y_der)
}
SRM_RCPP_SRM_COMPUTE_LOG_LIKELIHOOD_GRADIENT_W2 <- function(sigma_y_inv, sigma_y_der, der_bool) {
    .Call('_srm_SRM_RCPP_SRM_COMPUTE_LOG_LIKELIHOOD_GRADIENT_W2', PACKAGE = 'srm', sigma_y_inv, sigma_y_der, der_bool)
}
SRM_RCPP_SRM_COMPUTE_NONZERO_GRADIENT_INDICES <- function(sigma_y_der, eps) {
    .Call('_srm_SRM_RCPP_SRM_COMPUTE_NONZERO_GRADIENT_INDICES', PACKAGE = 'srm', sigma_y_der, eps)
}
SRM_RCPP_SRM_DATA_LIST_CREATE_INSERTION_MATRIX <- function(x) {
    .Call('_srm_SRM_RCPP_SRM_DATA_LIST_CREATE_INSERTION_MATRIX', PACKAGE = 'srm', x)
}
SRM_RCPP_SRM_INSERT_ELEMENTS <- function(sigma_y0, Zis, sigma_u) {
    .Call('_srm_SRM_RCPP_SRM_INSERT_ELEMENTS', PACKAGE = 'srm', sigma_y0, Zis, sigma_u)
}
SRM_RCPP_ORDER <- function(x) {
    .Call('_srm_SRM_RCPP_ORDER', PACKAGE = 'srm', x)
}
SRM_RCPP_SRM_MAKE_DATA_MATRIX_PERSON_ONE_PERSON <- function(tmp_data3, no_person, no_vars, rr, person, pid) {
    .Call('_srm_SRM_RCPP_SRM_MAKE_DATA_MATRIX_PERSON_ONE_PERSON', PACKAGE = 'srm', tmp_data3, no_person, no_vars, rr, person, pid)
}
SRM_RCPP_SRM_MAKE_DATA_MATRIX_PERSON <- function(tmp_data3, no_person, no_vars, rr, persons) {
    .Call('_srm_SRM_RCPP_SRM_MAKE_DATA_MATRIX_PERSON', PACKAGE = 'srm', tmp_data3, no_person, no_vars, rr, persons)
}
SRM_RCPP_SRM_MAKE_DATA_MATRIX_DYAD_ONE_DYAD <- function(tmp_data3, no_vars, rr, dyad, did) {
    .Call('_srm_SRM_RCPP_SRM_MAKE_DATA_MATRIX_DYAD_ONE_DYAD', PACKAGE = 'srm', tmp_data3, no_vars, rr, dyad, did)
}
SRM_RCPP_SRM_MAKE_DATA_MATRIX_DYAD <- function(tmp_data3, no_vars, rr, no_dyads, dyads) {
    .Call('_srm_SRM_RCPP_SRM_MAKE_DATA_MATRIX_DYAD', PACKAGE = 'srm', tmp_data3, no_vars, rr, no_dyads, dyads)
}
SRM_RCPP_SRM_ULS_GRADIENT_SIGMA_PART <- function(cov_resid, SIGMA_Y_der, der_bool) {
    .Call('_srm_SRM_RCPP_SRM_ULS_GRADIENT_SIGMA_PART', PACKAGE = 'srm', cov_resid, SIGMA_Y_der, der_bool)
}
| /srm/R/RcppExports.R | no_license | akhikolla/TestedPackages-NoIssues | R | false | false | 4,329 | r | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Thin R wrappers around the srm package's compiled C++ routines: each
# function forwards its arguments unchanged to the registered native symbol
# via .Call(), so all argument validation happens on the C++ side.
# Regenerate with Rcpp::compileAttributes() instead of editing manually.
SRM_RCPP_SIGMA_Y_INV_WOODBURY_PHI_INV <- function(SIGMA_U_INV, NI) {
    .Call('_srm_SRM_RCPP_SIGMA_Y_INV_WOODBURY_PHI_INV', PACKAGE = 'srm', SIGMA_U_INV, NI)
}
SRM_RCPP_SIGMA_Y_INV_WOODBURY_TMAT <- function(A_inv, Z_ind, Phi_inv) {
    .Call('_srm_SRM_RCPP_SIGMA_Y_INV_WOODBURY_TMAT', PACKAGE = 'srm', A_inv, Z_ind, Phi_inv)
}
SRM_RCPP_SIGMA_Y_INV_WOODBURY_ZA <- function(Z_ind, A_inv, NZ) {
    .Call('_srm_SRM_RCPP_SIGMA_Y_INV_WOODBURY_ZA', PACKAGE = 'srm', Z_ind, A_inv, NZ)
}
SRM_RCPP_SIGMA_Y_INV_WOODBURY_Y_INV <- function(ZA, T_inv, A_inv) {
    .Call('_srm_SRM_RCPP_SIGMA_Y_INV_WOODBURY_Y_INV', PACKAGE = 'srm', ZA, T_inv, A_inv)
}
SRM_RCPP_COLSUMS <- function(x) {
    .Call('_srm_SRM_RCPP_COLSUMS', PACKAGE = 'srm', x)
}
SRM_RCPP_ROWSUMS <- function(x) {
    .Call('_srm_SRM_RCPP_ROWSUMS', PACKAGE = 'srm', x)
}
SRM_ARBSRM_TRACE_PRODUCT_MATRIX <- function(x, y) {
    .Call('_srm_SRM_ARBSRM_TRACE_PRODUCT_MATRIX', PACKAGE = 'srm', x, y)
}
SRM_ARBSRM_TRACE_PRODUCT_MATRIX_TRANSPOSE <- function(x, y) {
    .Call('_srm_SRM_ARBSRM_TRACE_PRODUCT_MATRIX_TRANSPOSE', PACKAGE = 'srm', x, y)
}
SRM_RCPP_SRM_ARBSRM_ONE_GROUP_ESTIMATE <- function(data, data_resp, bivariate) {
    .Call('_srm_SRM_RCPP_SRM_ARBSRM_ONE_GROUP_ESTIMATE', PACKAGE = 'srm', data, data_resp, bivariate)
}
SRM_RCPP_MATRIX_TRACE_PRODUCT <- function(x, y) {
    .Call('_srm_SRM_RCPP_MATRIX_TRACE_PRODUCT', PACKAGE = 'srm', x, y)
}
SRM_RCPP_SRM_MATRIX_MULT_LOGICAL <- function(x, y) {
    .Call('_srm_SRM_RCPP_SRM_MATRIX_MULT_LOGICAL', PACKAGE = 'srm', x, y)
}
SRM_RCPP_SRM_ARBSRM_SE_CREATE_CWU <- function(NF) {
    .Call('_srm_SRM_RCPP_SRM_ARBSRM_SE_CREATE_CWU', PACKAGE = 'srm', NF)
}
SRM_RCPP_SRM_COMPUTE_HESSIAN_RR <- function(hess_list, mu_y_der_list, mu_y_der_bool_list, SIGMA_Y_inv, npar) {
    .Call('_srm_SRM_RCPP_SRM_COMPUTE_HESSIAN_RR', PACKAGE = 'srm', hess_list, mu_y_der_list, mu_y_der_bool_list, SIGMA_Y_inv, npar)
}
SRM_RCPP_SRM_COMPUTE_LOG_LIKELIHOOD_GRADIENT_W0 <- function(sigma_y_inv, sigma_y_der, der_bool) {
    .Call('_srm_SRM_RCPP_SRM_COMPUTE_LOG_LIKELIHOOD_GRADIENT_W0', PACKAGE = 'srm', sigma_y_inv, sigma_y_der, der_bool)
}
SRM_RCPP_SRM_COMPUTE_LOG_LIKELIHOOD_GRADIENT_W1 <- function(sigma_y_inv, sigma_y_der) {
    .Call('_srm_SRM_RCPP_SRM_COMPUTE_LOG_LIKELIHOOD_GRADIENT_W1', PACKAGE = 'srm', sigma_y_inv, sigma_y_der)
}
SRM_RCPP_SRM_COMPUTE_LOG_LIKELIHOOD_GRADIENT_W2 <- function(sigma_y_inv, sigma_y_der, der_bool) {
    .Call('_srm_SRM_RCPP_SRM_COMPUTE_LOG_LIKELIHOOD_GRADIENT_W2', PACKAGE = 'srm', sigma_y_inv, sigma_y_der, der_bool)
}
SRM_RCPP_SRM_COMPUTE_NONZERO_GRADIENT_INDICES <- function(sigma_y_der, eps) {
    .Call('_srm_SRM_RCPP_SRM_COMPUTE_NONZERO_GRADIENT_INDICES', PACKAGE = 'srm', sigma_y_der, eps)
}
SRM_RCPP_SRM_DATA_LIST_CREATE_INSERTION_MATRIX <- function(x) {
    .Call('_srm_SRM_RCPP_SRM_DATA_LIST_CREATE_INSERTION_MATRIX', PACKAGE = 'srm', x)
}
SRM_RCPP_SRM_INSERT_ELEMENTS <- function(sigma_y0, Zis, sigma_u) {
    .Call('_srm_SRM_RCPP_SRM_INSERT_ELEMENTS', PACKAGE = 'srm', sigma_y0, Zis, sigma_u)
}
SRM_RCPP_ORDER <- function(x) {
    .Call('_srm_SRM_RCPP_ORDER', PACKAGE = 'srm', x)
}
SRM_RCPP_SRM_MAKE_DATA_MATRIX_PERSON_ONE_PERSON <- function(tmp_data3, no_person, no_vars, rr, person, pid) {
    .Call('_srm_SRM_RCPP_SRM_MAKE_DATA_MATRIX_PERSON_ONE_PERSON', PACKAGE = 'srm', tmp_data3, no_person, no_vars, rr, person, pid)
}
SRM_RCPP_SRM_MAKE_DATA_MATRIX_PERSON <- function(tmp_data3, no_person, no_vars, rr, persons) {
    .Call('_srm_SRM_RCPP_SRM_MAKE_DATA_MATRIX_PERSON', PACKAGE = 'srm', tmp_data3, no_person, no_vars, rr, persons)
}
SRM_RCPP_SRM_MAKE_DATA_MATRIX_DYAD_ONE_DYAD <- function(tmp_data3, no_vars, rr, dyad, did) {
    .Call('_srm_SRM_RCPP_SRM_MAKE_DATA_MATRIX_DYAD_ONE_DYAD', PACKAGE = 'srm', tmp_data3, no_vars, rr, dyad, did)
}
SRM_RCPP_SRM_MAKE_DATA_MATRIX_DYAD <- function(tmp_data3, no_vars, rr, no_dyads, dyads) {
    .Call('_srm_SRM_RCPP_SRM_MAKE_DATA_MATRIX_DYAD', PACKAGE = 'srm', tmp_data3, no_vars, rr, no_dyads, dyads)
}
SRM_RCPP_SRM_ULS_GRADIENT_SIGMA_PART <- function(cov_resid, SIGMA_Y_der, der_bool) {
    .Call('_srm_SRM_RCPP_SRM_ULS_GRADIENT_SIGMA_PART', PACKAGE = 'srm', cov_resid, SIGMA_Y_der, der_bool)
}
|
library(tidyverse)  # library() (not require()) so a missing dependency fails fast
#{{{ old interproscan GO output (kept for reference only; never executed)
if (FALSE) {
    fg = '/home/springer/zhoux379/data/genome/Zmays_v4/61.interpro/15.tsv'
    tg = read.table(fg, sep = "\t", as.is = TRUE, header = FALSE, quote = '')
    colnames(tg) = c("gid", "goid")
    fd = '/home/springer/zhoux379/data/genome/Zmays_v4/61.interpro/16.go.tsv'
    #td = read.table(fd, sep = "\t", header = T, as.is = T, quote = '')
    fi = '/home/springer/zhoux379/data/genome/Zmays_v4/61.interpro/gids.txt'
    ti = read.table(fi, as.is = TRUE)
    gids.all = ti$V1
}
#}}}
# GO annotation table: one row per (gene id, GO term) pair, with a 'ctag'
# column naming the annotation source.
fgo = '/home/springer/zhoux379/data/genome/B73/GO/10.tsv'
tgo = read_tsv(fgo)
unique(tgo$ctag)  # list the available annotation sources
gids_all = unique(tgo$gid)
# Per-source annotation subsets
tgo_ipr = tgo %>% filter(ctag == 'Interproscan5') %>% select(-ctag)
tgo_uni = tgo %>% filter(ctag == 'uniprot.plants') %>% select(-ctag)
tgo_ath = tgo %>% filter(ctag == 'arabidopsis') %>% select(-ctag)
tgo_arg = tgo %>% filter(ctag == 'argot2.5') %>% select(-ctag)
go_enrich <- function(gids, tg) {
    #{{{
    # Hypergeometric GO-term over-representation test of 'gids' against the
    # annotation background in 'tg' (columns: gid, goid, goname, gotype, level).
    # Returns terms with raw p < 0.05, ordered by BH-adjusted then raw p-value.
    term_info <- tg %>% distinct(goid, goname, gotype, level)
    pop_hits <- tg %>% count(goid) %>% transmute(goid = goid, hitInPop = n)
    background <- tg %>% distinct(gid) %>% pull(gid)
    # restrict the query set to annotated genes so the universe is consistent
    query <- unique(gids)
    query <- query[query %in% background]
    n_query <- length(query)
    sample_hits <- tg %>%
        filter(gid %in% query) %>%
        count(goid) %>%
        transmute(goid = goid, hitInSample = n)
    enriched <- sample_hits %>%
        inner_join(pop_hits, by = 'goid') %>%
        mutate(sampleSize = n_query, popSize = length(background),
            # P(X >= hitInSample) for the hypergeometric null
            pval.raw = phyper(hitInSample - 1, hitInPop, popSize - hitInPop,
                sampleSize, lower.tail = FALSE),
            pval.adj = p.adjust(pval.raw, method = "BH")) %>%
        filter(pval.raw < 0.05) %>%
        arrange(pval.adj) %>%
        transmute(goid = goid,
            ratioInSample = sprintf("%d/%d", hitInSample, sampleSize),
            ratioInPop = sprintf("%d/%d", hitInPop, popSize),
            pval.raw = pval.raw, pval.adj = pval.adj)
    enriched %>% left_join(term_info, by = 'goid') %>% arrange(pval.adj, pval.raw)
    #}}}
}
go_enrich_gosets <- function(gids, tgo. = tgo, pval.cutoff = 0.05,
    srcs = c("uniprot.plants", "arabidopsis", "corncyc", "tfdb", "Interproscan5")) {
    #{{{
    # GO enrichment of 'gids' run separately against each annotation source.
    #   gids:        gene ids to test
    #   tgo.:        annotation table with a 'ctag' source column
    #                (defaults to the global 'tgo')
    #   pval.cutoff: keep terms with BH-adjusted p-value <= this threshold
    #   srcs:        annotation sources (ctag values) to run
    # Returns one row per (source, GO term) passing the cutoff.
    to = tibble()
    for (src in srcs) {
        # bug fix: filter the 'tgo.' argument instead of the global 'tgo',
        # which silently ignored any table passed by the caller
        tgoc = tgo. %>% filter(ctag == src) %>% select(-ctag)
        to1 = go_enrich(gids, tgoc) %>%
            filter(pval.adj <= pval.cutoff) %>%
            mutate(source = src) %>%
            select(source, goid, ratioInSample, ratioInPop, pval.adj, gotype, goname)
        to = rbind(to, to1)
    }
    to
    #}}}
}
go_enrich_genesets <- function(tgs, pval.cutoff = 0.05) {
    #{{{
    # Run go_enrich_gosets() for every gene set in 'tgs'.
    #   tgs:         table with columns 'tag' (set label) and 'gid' (gene id)
    #   pval.cutoff: forwarded to go_enrich_gosets()
    # Returns the stacked per-set results with a leading 'tag' column.
    te = tibble()
    for (tag1 in unique(tgs$tag)) {
        # bug fix: use the 'tgs' argument; the body previously referenced an
        # undefined global 'tge', so the function could not run as written
        gids = tgs %>% filter(tag == tag1) %>% pull(gid)
        te1 = go_enrich_gosets(gids, pval.cutoff = pval.cutoff) %>% mutate(tag = tag1) %>%
            select(tag, everything())
        te = rbind(te, te1)
    }
    te
    #}}}
}
#fisher.test(matrix(c(hitInSample, hitInPop-hitInSample, sampleSize-hitInSample, failInPop-sampleSize +hitInSample), 2, 2), alternative='two.sided')
| /r/enrich.R | no_license | orionzhou/archive_luffy | R | false | false | 3,060 | r | require(tidyverse)
#{{{ old interproscan GO output (kept for reference only; never executed)
if (FALSE) {
    fg = '/home/springer/zhoux379/data/genome/Zmays_v4/61.interpro/15.tsv'
    tg = read.table(fg, sep = "\t", as.is = TRUE, header = FALSE, quote = '')
    colnames(tg) = c("gid", "goid")
    fd = '/home/springer/zhoux379/data/genome/Zmays_v4/61.interpro/16.go.tsv'
    #td = read.table(fd, sep = "\t", header = T, as.is = T, quote = '')
    fi = '/home/springer/zhoux379/data/genome/Zmays_v4/61.interpro/gids.txt'
    ti = read.table(fi, as.is = TRUE)
    gids.all = ti$V1
}
#}}}
# GO annotation table: one row per (gene id, GO term) pair, with a 'ctag'
# column naming the annotation source.
fgo = '/home/springer/zhoux379/data/genome/B73/GO/10.tsv'
tgo = read_tsv(fgo)
unique(tgo$ctag)  # list the available annotation sources
gids_all = unique(tgo$gid)
# Per-source annotation subsets
tgo_ipr = tgo %>% filter(ctag == 'Interproscan5') %>% select(-ctag)
tgo_uni = tgo %>% filter(ctag == 'uniprot.plants') %>% select(-ctag)
tgo_ath = tgo %>% filter(ctag == 'arabidopsis') %>% select(-ctag)
tgo_arg = tgo %>% filter(ctag == 'argot2.5') %>% select(-ctag)
go_enrich <- function(gids, tg) {
    #{{{
    # Hypergeometric GO-term over-representation test of 'gids' against the
    # annotation background in 'tg' (columns: gid, goid, goname, gotype, level).
    # Returns terms with raw p < 0.05, ordered by BH-adjusted then raw p-value.
    term_info <- tg %>% distinct(goid, goname, gotype, level)
    pop_hits <- tg %>% count(goid) %>% transmute(goid = goid, hitInPop = n)
    background <- tg %>% distinct(gid) %>% pull(gid)
    # restrict the query set to annotated genes so the universe is consistent
    query <- unique(gids)
    query <- query[query %in% background]
    n_query <- length(query)
    sample_hits <- tg %>%
        filter(gid %in% query) %>%
        count(goid) %>%
        transmute(goid = goid, hitInSample = n)
    enriched <- sample_hits %>%
        inner_join(pop_hits, by = 'goid') %>%
        mutate(sampleSize = n_query, popSize = length(background),
            # P(X >= hitInSample) for the hypergeometric null
            pval.raw = phyper(hitInSample - 1, hitInPop, popSize - hitInPop,
                sampleSize, lower.tail = FALSE),
            pval.adj = p.adjust(pval.raw, method = "BH")) %>%
        filter(pval.raw < 0.05) %>%
        arrange(pval.adj) %>%
        transmute(goid = goid,
            ratioInSample = sprintf("%d/%d", hitInSample, sampleSize),
            ratioInPop = sprintf("%d/%d", hitInPop, popSize),
            pval.raw = pval.raw, pval.adj = pval.adj)
    enriched %>% left_join(term_info, by = 'goid') %>% arrange(pval.adj, pval.raw)
    #}}}
}
go_enrich_gosets <- function(gids, tgo. = tgo, pval.cutoff = 0.05,
    srcs = c("uniprot.plants", "arabidopsis", "corncyc", "tfdb", "Interproscan5")) {
    #{{{
    # GO enrichment of 'gids' run separately against each annotation source.
    #   gids:        gene ids to test
    #   tgo.:        annotation table with a 'ctag' source column
    #                (defaults to the global 'tgo')
    #   pval.cutoff: keep terms with BH-adjusted p-value <= this threshold
    #   srcs:        annotation sources (ctag values) to run
    # Returns one row per (source, GO term) passing the cutoff.
    to = tibble()
    for (src in srcs) {
        # bug fix: filter the 'tgo.' argument instead of the global 'tgo',
        # which silently ignored any table passed by the caller
        tgoc = tgo. %>% filter(ctag == src) %>% select(-ctag)
        to1 = go_enrich(gids, tgoc) %>%
            filter(pval.adj <= pval.cutoff) %>%
            mutate(source = src) %>%
            select(source, goid, ratioInSample, ratioInPop, pval.adj, gotype, goname)
        to = rbind(to, to1)
    }
    to
    #}}}
}
go_enrich_genesets <- function(tgs, pval.cutoff = 0.05) {
    #{{{
    # Run go_enrich_gosets() for every gene set in 'tgs'.
    #   tgs:         table with columns 'tag' (set label) and 'gid' (gene id)
    #   pval.cutoff: forwarded to go_enrich_gosets()
    # Returns the stacked per-set results with a leading 'tag' column.
    te = tibble()
    for (tag1 in unique(tgs$tag)) {
        # bug fix: use the 'tgs' argument; the body previously referenced an
        # undefined global 'tge', so the function could not run as written
        gids = tgs %>% filter(tag == tag1) %>% pull(gid)
        te1 = go_enrich_gosets(gids, pval.cutoff = pval.cutoff) %>% mutate(tag = tag1) %>%
            select(tag, everything())
        te = rbind(te, te1)
    }
    te
    #}}}
}
#fisher.test(matrix(c(hitInSample, hitInPop-hitInSample, sampleSize-hitInSample, failInPop-sampleSize +hitInSample), 2, 2), alternative='two.sided')
|
# Show VBZ lines
loadAllShp <- function(data_path, shpfiles) {
  # Read the district polygons and the three VBZ layers from disk and return
  # them as a named list. The same CRS is assigned explicitly to the VBZ
  # line, stop and stopping-point layers.
  crs00 <- CRS('+proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 +k_0=1 +x_0=600000 +y_0=200000 +ellps=bessel +towgs84=674.374,15.056,405.346,0,0,0,0 +units=m +no_defs ')
  read_layer <- function(key) shapefile(paste0(data_path, shpfiles[[key]]))
  shp_kreis <- read_layer("Stadtkreis")
  shp_lines <- read_layer("VBZ_ptways")
  crs(shp_lines) <- crs00
  shp_stops <- read_layer("VBZ_stops")
  crs(shp_stops) <- crs00
  shp_points <- read_layer("VBZ_points")
  crs(shp_points) <- crs00
  list(
    shp_kreis = shp_kreis,
    shp_lines = shp_lines,
    shp_stops = shp_stops,
    shp_points = shp_points
  )
}
# Cache the loader so repeated calls with the same arguments reuse the result
loadAllShp_MEM <- memoise(loadAllShp)
show_lines <- function(lines, this.day = ymd('2015-10-04')){
  # Plot the selected VBZ lines on the Zurich district map, with a bubble at
  # each served stop coloured by the total delay on the chosen day.
  #   lines:    vector of VBZ line numbers (matched as character against the
  #             shapefile attribute, passed as integer to the delay query)
  #   this.day: date (lubridate) for which delays are queried
  # Shapefile locations relative to data_path.
  # tibble() replaces the deprecated data_frame().
  shpfiles <- tibble(
    Fussgaengerzone = 'shapefiles/fussgaengerzone/Fussgaengerzone.shp',
    Fahrverbotszone = 'shapefiles/fahrverbotszone/Fahrverbotszone.shp',
    Stadtkreis = 'shapefiles/stadtkreis/Stadtkreis.shp',
    VBZ_ptways = 'shapefiles/vbz/ptways_j17.ptw.shp',
    VBZ_stops = 'shapefiles/vbz/stopareas.stp.shp',
    VBZ_points = 'shapefiles/vbz/stoppingpoints.stp.shp'
  )
  # Memoised loader: the slow shapefile reads happen only once per session
  res <- loadAllShp_MEM(data_path, shpfiles)
  shp_kreis <- res$shp_kreis
  shp_lines <- res$shp_lines
  shp_stops <- res$shp_stops
  shp_points <- res$shp_points
  # Keep only the requested lines
  line_sel <- lines %>% as.character()
  ind <- shp_lines@data$LineEFA %in% line_sel
  shp_lines_sub <- shp_lines[ind, ]
  # Query total delays per stop and attach them to the stop geometries
  dly <- query_delays(as.integer(lines), this.day)
  stat_diva_ids <- dly$halt_diva_von %>% unique()
  shp_stops_sub <- shp_stops[shp_stops$StopID %in% stat_diva_ids, ]
  shp_stops_sub <- sp::merge(shp_stops_sub, dly, by.x = 'StopID', by.y = 'halt_diva_von')
  # Compose the map: district polygons, line geometries, delay bubbles
  tm_shape(shp = shp_kreis, is.master = TRUE) +
    tm_polygons(col = 'KNAME', alpha = 0.3, legend.show = FALSE) +
    tm_shape(shp = shp_lines_sub, is.master = FALSE) +
    tm_lines(col = 'LineEFA', lwd = 5) +
    tm_shape(shp = shp_stops_sub, is.master = TRUE) +
    tm_bubbles(col = 'tot_delay', alpha = 0.5, size = 0.3)
}
show_lines(lines = c(10), this.day = ymd('2015-11-04'))
| /sources/show_lines.R | permissive | CraigWangStat/ODDZurich_Shiny | R | false | false | 2,204 | r | # Show VBZ lines
loadAllShp <- function(data_path, shpfiles) {
  # Read the district polygons and the three VBZ layers from disk and return
  # them as a named list. The same CRS is assigned explicitly to the VBZ
  # line, stop and stopping-point layers.
  crs00 <- CRS('+proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 +k_0=1 +x_0=600000 +y_0=200000 +ellps=bessel +towgs84=674.374,15.056,405.346,0,0,0,0 +units=m +no_defs ')
  read_layer <- function(key) shapefile(paste0(data_path, shpfiles[[key]]))
  shp_kreis <- read_layer("Stadtkreis")
  shp_lines <- read_layer("VBZ_ptways")
  crs(shp_lines) <- crs00
  shp_stops <- read_layer("VBZ_stops")
  crs(shp_stops) <- crs00
  shp_points <- read_layer("VBZ_points")
  crs(shp_points) <- crs00
  list(
    shp_kreis = shp_kreis,
    shp_lines = shp_lines,
    shp_stops = shp_stops,
    shp_points = shp_points
  )
}
# Cache the loader so repeated calls with the same arguments reuse the result
loadAllShp_MEM <- memoise(loadAllShp)
show_lines <- function(lines, this.day = ymd('2015-10-04')){
  # Plot the selected VBZ lines on the Zurich district map, with a bubble at
  # each served stop coloured by the total delay on the chosen day.
  #   lines:    vector of VBZ line numbers (matched as character against the
  #             shapefile attribute, passed as integer to the delay query)
  #   this.day: date (lubridate) for which delays are queried
  # Shapefile locations relative to data_path.
  # tibble() replaces the deprecated data_frame().
  shpfiles <- tibble(
    Fussgaengerzone = 'shapefiles/fussgaengerzone/Fussgaengerzone.shp',
    Fahrverbotszone = 'shapefiles/fahrverbotszone/Fahrverbotszone.shp',
    Stadtkreis = 'shapefiles/stadtkreis/Stadtkreis.shp',
    VBZ_ptways = 'shapefiles/vbz/ptways_j17.ptw.shp',
    VBZ_stops = 'shapefiles/vbz/stopareas.stp.shp',
    VBZ_points = 'shapefiles/vbz/stoppingpoints.stp.shp'
  )
  # Memoised loader: the slow shapefile reads happen only once per session
  res <- loadAllShp_MEM(data_path, shpfiles)
  shp_kreis <- res$shp_kreis
  shp_lines <- res$shp_lines
  shp_stops <- res$shp_stops
  shp_points <- res$shp_points
  # Keep only the requested lines
  line_sel <- lines %>% as.character()
  ind <- shp_lines@data$LineEFA %in% line_sel
  shp_lines_sub <- shp_lines[ind, ]
  # Query total delays per stop and attach them to the stop geometries
  dly <- query_delays(as.integer(lines), this.day)
  stat_diva_ids <- dly$halt_diva_von %>% unique()
  shp_stops_sub <- shp_stops[shp_stops$StopID %in% stat_diva_ids, ]
  shp_stops_sub <- sp::merge(shp_stops_sub, dly, by.x = 'StopID', by.y = 'halt_diva_von')
  # Compose the map: district polygons, line geometries, delay bubbles
  tm_shape(shp = shp_kreis, is.master = TRUE) +
    tm_polygons(col = 'KNAME', alpha = 0.3, legend.show = FALSE) +
    tm_shape(shp = shp_lines_sub, is.master = FALSE) +
    tm_lines(col = 'LineEFA', lwd = 5) +
    tm_shape(shp = shp_stops_sub, is.master = TRUE) +
    tm_bubbles(col = 'tot_delay', alpha = 0.5, size = 0.3)
}
show_lines(lines = c(10), this.day = ymd('2015-11-04'))
|
context("Missing values")
# Blank cells must come back as NA from both the xlsx and xls readers.
test_that("blanks read as missing [xlsx]", {
  blanks <- read_excel("blanks.xlsx")
  expect_equal(blanks$x, c(NA, 1))
  expect_equal(blanks$y, c("a", NA))
})
test_that("blanks read as missing [xls]", {
  blanks <- read_excel("blanks.xls")
  expect_equal(blanks$x, c(NA, 1))
  expect_equal(blanks$y, c("a", NA))
})
# Without an na argument the literal string "NA" is kept as text (and the
# expected value shows the whole column coming back as character).
test_that("By default, NA read as text", {
  df <- read_xls("missing-values.xls")
  expect_equal(df$x, c("NA", "1.000000", "1.000000"))
})
# na = "NA" maps that string to a real missing value.
# (typo fix: the descriptions previously read "maps strings to to NA")
test_that("na arg maps strings to NA [xls]", {
  df <- read_excel("missing-values.xls", na = "NA")
  expect_equal(df$x, c(NA, 1, 1))
})
test_that("na arg maps strings to NA [xlsx]", {
  df <- read_excel("missing-values.xlsx", na = "NA")
  expect_equal(df$x, c(NA, 1, 1))
})
# Coercing text cells to numeric should warn and yield NA for both formats;
# the two expected warning regexes differ only in capitalisation.
test_that("text values in numeric column gives warning & NA", {
  expect_warning(
    df <- read_excel("missing-values.xls", col_types = "numeric"),
    "Expecting numeric"
  )
  expect_equal(df$x, c(NA, 1, 1))
  expect_warning(
    df <- read_excel("missing-values.xlsx", col_types = "numeric"),
    "expecting numeric"
  )
  expect_equal(df$x, c(NA, 1, 1))
})
| /tests/testthat/test-missing-values.R | no_license | JakeRuss/readxl | R | false | false | 1,127 | r | context("Missing values")
# Blank cells must come back as NA from both the xlsx and xls readers.
test_that("blanks read as missing [xlsx]", {
  blanks <- read_excel("blanks.xlsx")
  expect_equal(blanks$x, c(NA, 1))
  expect_equal(blanks$y, c("a", NA))
})
test_that("blanks read as missing [xls]", {
  blanks <- read_excel("blanks.xls")
  expect_equal(blanks$x, c(NA, 1))
  expect_equal(blanks$y, c("a", NA))
})
# Without an na argument the literal string "NA" is kept as text (and the
# expected value shows the whole column coming back as character).
test_that("By default, NA read as text", {
  df <- read_xls("missing-values.xls")
  expect_equal(df$x, c("NA", "1.000000", "1.000000"))
})
# NOTE(review): "to to" in the next two descriptions looks like a typo.
test_that("na arg maps strings to to NA [xls]", {
  df <- read_excel("missing-values.xls", na = "NA")
  expect_equal(df$x, c(NA, 1, 1))
})
test_that("na arg maps strings to to NA [xlsx]", {
  df <- read_excel("missing-values.xlsx", na = "NA")
  expect_equal(df$x, c(NA, 1, 1))
})
# Coercing text cells to numeric should warn and yield NA for both formats;
# the two expected warning regexes differ only in capitalisation.
test_that("text values in numeric column gives warning & NA", {
  expect_warning(
    df <- read_excel("missing-values.xls", col_types = "numeric"),
    "Expecting numeric"
  )
  expect_equal(df$x, c(NA, 1, 1))
  expect_warning(
    df <- read_excel("missing-values.xlsx", col_types = "numeric"),
    "expecting numeric"
  )
  expect_equal(df$x, c(NA, 1, 1))
})
|
# Run the lme4 integration test script that ships inside the installed
# visreg package (tests/enhances-lme4.R).
library(visreg)
f <- system.file('tests', 'enhances-lme4.R', package='visreg')
source(f)
| /tests/run/test-enhances-lme4.R | no_license | cran/visreg | R | false | false | 89 | r | library(visreg)
# Locate and run the packaged lme4 integration test script from visreg.
f <- system.file('tests', 'enhances-lme4.R', package='visreg')
source(f)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clustering_functions.R
\name{AP_preferenceRange}
\alias{AP_preferenceRange}
\title{Affinity propagation preference range}
\usage{
AP_preferenceRange(data, method = "bound", threads = 1)
}
\arguments{
\item{data}{a matrix. Either a similarity matrix (where number of rows equal to number of columns) or a 3-dimensional matrix where the 1st, 2nd and 3rd column correspond to (i-index, j-index, value) triplet of a similarity matrix.}
\item{method}{a character string specifying the preference range method to use. One of 'exact', 'bound'. See the details section for more information.}
\item{threads}{an integer specifying the number of cores to run in parallel ( applies only if \emph{method} is set to 'exact' which is more computationally intensive )}
}
\description{
Affinity propagation preference range
}
\details{
Given a set of similarities, \emph{data}, this function computes a lower bound, pmin, on the value for the preference where the optimal number of clusters (exemplars) changes from 1 to 2,
and the exact value of the preference, pmax, where the optimal number of clusters changes from n-1 to n. For N data points, there may be as many as N^2-N pair-wise similarities (note that
the similarity of data point i to k need not be equal to the similarity of data point k to i). These may be passed in an NxN matrix of similarities, \emph{data}, where data(i,k) is the similarity of
point i to point k. In fact, only a smaller number of relevant similarities need to be provided, in which case the others are assumed to be -Inf. If only M similarity values are known, they can be passed
in an Mx3 matrix \emph{data}, where each row of \emph{data} contains a pair of data point indices and a corresponding similarity value: data(j,3) is the similarity of data point data(j,1) to
data point data(j,2).
A single-cluster solution may not exist, in which case pmin is set to NaN. The \emph{AP_preferenceRange} uses one of the methods below to compute pmin and pmax:
\emph{exact} : Computes the exact values for pmin and pmax (Warning: This can be quite slow)
\emph{bound} : Computes the exact value for pmax, but estimates pmin using a bound (default)
}
\examples{
set.seed(1)
dat = matrix(sample(1:255, 2500, replace = TRUE), 100, 25)
smt = 1.0 - distance_matrix(dat, method = 'euclidean', upper = TRUE, diagonal = TRUE)
diag(smt) = 0.0
ap_range = AP_preferenceRange(smt, method = "bound")
}
\references{
https://www.psi.toronto.edu/affinitypropagation/preferenceRange.m
}
| /man/AP_preferenceRange.Rd | no_license | mlampros/ClusterR | R | false | true | 2,553 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clustering_functions.R
\name{AP_preferenceRange}
\alias{AP_preferenceRange}
\title{Affinity propagation preference range}
\usage{
AP_preferenceRange(data, method = "bound", threads = 1)
}
\arguments{
\item{data}{a matrix. Either a similarity matrix (where number of rows equal to number of columns) or a 3-dimensional matrix where the 1st, 2nd and 3rd column correspond to (i-index, j-index, value) triplet of a similarity matrix.}
\item{method}{a character string specifying the preference range method to use. One of 'exact', 'bound'. See the details section for more information.}
\item{threads}{an integer specifying the number of cores to run in parallel ( applies only if \emph{method} is set to 'exact' which is more computationally intensive )}
}
\description{
Affinity propagation preference range
}
\details{
Given a set of similarities, \emph{data}, this function computes a lower bound, pmin, on the value for the preference where the optimal number of clusters (exemplars) changes from 1 to 2,
and the exact value of the preference, pmax, where the optimal number of clusters changes from n-1 to n. For N data points, there may be as many as N^2-N pair-wise similarities (note that
the similarity of data point i to k need not be equal to the similarity of data point k to i). These may be passed in an NxN matrix of similarities, \emph{data}, where data(i,k) is the similarity of
point i to point k. In fact, only a smaller number of relevant similarities need to be provided, in which case the others are assumed to be -Inf. If only M similarity values are known, they can be passed
in an Mx3 matrix \emph{data}, where each row of \emph{data} contains a pair of data point indices and a corresponding similarity value: data(j,3) is the similarity of data point data(j,1) to
data point data(j,2).
A single-cluster solution may not exist, in which case pmin is set to NaN. The \emph{AP_preferenceRange} uses one of the methods below to compute pmin and pmax:
\emph{exact} : Computes the exact values for pmin and pmax (Warning: This can be quite slow)
\emph{bound} : Computes the exact value for pmax, but estimates pmin using a bound (default)
}
\examples{
set.seed(1)
dat = matrix(sample(1:255, 2500, replace = TRUE), 100, 25)
smt = 1.0 - distance_matrix(dat, method = 'euclidean', upper = TRUE, diagonal = TRUE)
diag(smt) = 0.0
ap_range = AP_preferenceRange(smt, method = "bound")
}
\references{
https://www.psi.toronto.edu/affinitypropagation/preferenceRange.m
}
|
#wrapper to call each of the scripts for calculating climate contributions
# Each sourced script estimates the climate contribution for one vital rate
# (colonization / growth); scripts are run for their side effects.
source("estimateClimateContributions_Colonization.R")
source("estimateClimateContributions_Growth.R")
source("estimateClimateContributions_Survival.R") | /analysis/quadBM/vitalRateRegressions/cache/OLD_VERSIONS/finalModels/wrapper_allClimateContributions.R | no_license | atredennick/MicroMesoForecast | R | false | false | 227 | r | #wrapper to call each of the scripts for calculating climate contributions
source("estimateClimateContributions_Colonization.R")
source("estimateClimateContributions_Growth.R")
source("estimateClimateContributions_Survival.R") |
# Auto-generated fuzz-test case: a 10x3 input matrix containing extreme double
# values (and zeros) with repetitions = 0, exercising the package-internal
# sampling routine for robustness. m is deliberately NULL.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 2.76774696985651e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 3L)))
# Call the (unexported) C++ wrapper with the fuzzed argument list.
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615778728-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 348 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 2.76774696985651e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
# Read the exercise data set (log-transformed output Y, labour L, capital K).
data1 <- read.table("ke0702.csv",header = TRUE, sep =",")
# Print the data for inspection.
data1
# Fit a log-log (Cobb-Douglas style) regression: lnY ~ lnL + lnK.
fm <- lm(lnY ~ lnL + lnK, data = data1)
summary(fm)
| /exercises/ke0702.R | no_license | Prunus1350/Econometrics | R | false | false | 116 | r | data1 <- read.table("ke0702.csv",header = TRUE, sep =",")
data1
fm <- lm(lnY ~ lnL + lnK, data = data1)
summary(fm)
|
# + ------------------------------------------------------- +
# |   Draw volatility curves at the end of a selected day   |
# + ------------------------------------------------------- +
# Use library() rather than require(): a missing dependency should fail
# loudly instead of require() silently returning FALSE.
library(dplyr)
library(tidyr)
library(ggplot2)
# +------------------------------------+
# | Prepare data
# +------------------------------------+
# TODO: Add RVI or RTSVX level
# One-off preprocessing kept for reference; the cached .RData files below
# were produced by these commented-out lines.
# rts.data = read.csv('RTSI.txt') %>% select(c(3, 8)) %>% mutate(Dates = as.Date(as.character(X.DATE.), format='%Y%m%d')) %>% select(c(3, 2))
# names(rts.data) = c('Dates', 'Close')
# save(rts.data, file = 'rtsi.RData')
load('rtsi.RData')
# smile.data = read.csv(file = 'ri_smile.csv', sep = ';', header=T, dec=',') %>% select(c(-1))
# smile.data$tms = as.Date(substr(as.character(smile.data$tms), 0, 10))
# save(smile.data, file = 'smile.RData')
load('smile.RData')
# Overall date range covered by the smile data set.
dates.rng = c(min(smile.data$tms), max(smile.data$tms))
# +------------------------------------+
# | For each option series calc smile
# | External variables used: all.data
# +------------------------------------+
# Build the implied-volatility smile for every option series traded on
# `smileDate`.
#
# Fix: the original block contained unresolved Git merge-conflict markers
# (<<<<<<< HEAD / ======= / >>>>>>> origin/master). Resolution keeps the more
# general selection logic from origin/master (arrange by t, optionally keep
# the `nearest` shortest-dated series) together with the HEAD defaults and
# the HEAD join that attaches days-to-expiry to each smile row.
#
# Args:
#   strikeRng : half-width of the strike grid as a fraction of the futures price.
#   smileDate : trading date for which the smiles are computed.
#   nearest   : number of shortest-dated series to keep; 0 keeps all.
#
# Depends on the global `smile.data` loaded above and on vxSmile() below.
# Returns a long data frame with columns Strike, BaseFutures, IV, t, tdays.
CalcSmilesSeries = function(strikeRng = 0.2,
                            smileDate = as.Date('2015-04-30'),
                            nearest = 10){
  # Suppress warnings locally and restore the previous setting on exit
  # (the original set options(warn=-1) globally without restoring it).
  old.warn = options(warn = -1)
  on.exit(options(old.warn), add = TRUE)
  ### Find coefs for the requested date, shortest expiries first
  vx.at.date = smile.data %>% filter(tms == smileDate) %>% arrange(t)
  if (nearest > 0) {
    vx.at.date = vx.at.date[seq_len(min(nearest, nrow(vx.at.date))), ]
  }
  ### Make strikes range, include futures values
  rng = strikeRng
  strikes = seq( min(vx.at.date$fut_price)*(1-rng), max(vx.at.date$fut_price)*(1+rng), length.out = 50 )
  strikes = sort(c(strikes, vx.at.date$fut_price))
  ### Calc smile for every exp.date, strike value
  smiles = lapply( seq_len(nrow(vx.at.date)), function(x){
    x.row = x
    sapply(strikes, function(x){
      strike = x
      fut = vx.at.date[x.row, 'fut_price', drop=T]
      tdays = vx.at.date[x.row, 't', drop=T] * 250
      coef.vector = as.vector(vx.at.date[x.row, c('s', 'a', 'b', 'c', 'd', 'e')])
      vxSmile(strike, fut, tdays, coef.vector, method = 2)
    })
  })
  ### Arrange data for ggplot
  names(smiles) = as.vector(vx.at.date$small_name)
  smiles = gather(data = as.data.frame(c(list(strike = strikes), smiles)), key=strike )
  names(smiles) = c('Strike', 'BaseFutures', 'IV')
  ### Attach days-to-expiry for each base futures
  smiles$BaseFutures = as.character(smiles$BaseFutures)
  fut.days = vx.at.date %>%
    select(small_name, t) %>%
    mutate( tdays = (round(t * 250, 0)) )
  fut.days$small_name = as.character(fut.days$small_name)
  smiles = dplyr::left_join(smiles, fut.days, by = c('BaseFutures' = 'small_name'))
  smiles$tdays = as.character(smiles$tdays)
  return(smiles)
}
# Merge-conflict resolution: keep the (commented-out) example call.
#CalcSmilesSeries()
# +------------------------------------+
# | IV smile functions
# +------------------------------------+
# Parametric implied-volatility smile.
#
# Args:
#   strike      : strike price(s); vectorized over strikes.
#   fut         : underlying futures price.
#   tdays       : time to expiry in calendar days (converted to years via /365).
#   coef.vector : named coefficients s, a, b, c, d, e (plus f, g for method 3).
#   method      : 1/2 - exponential/atan smile families; 3 - degree-6
#                 polynomial in strike.
#
# Returns the smile volatility, or NA when the coefficients are missing or
# the formula cannot be evaluated.
vxSmile = function(strike, fut, tdays, coef.vector=NULL, method=2)
{
  # Coefficient extraction is wrapped in try() so a missing name degrades
  # gracefully instead of aborting the whole smile calculation.
  s = try(as.numeric(coef.vector[['s']]), silent = T)
  a = try(as.numeric(coef.vector[['a']]), silent = T)
  b = try(as.numeric(coef.vector[['b']]), silent = T)
  c = try(as.numeric(coef.vector[['c']]), silent = T)
  d = try(as.numeric(coef.vector[['d']]), silent = T)
  e = try(as.numeric(coef.vector[['e']]), silent = T)
  f = try(as.numeric(coef.vector[['f']]), silent = T)
  g = try(as.numeric(coef.vector[['g']]), silent = T)
  # Fix: initialise the result so that a failed try() block (or an unknown
  # method) returns NA instead of erroring with "object 'vxs' not found".
  vxs = NA_real_
  try({
    # x = log-moneyness log(K/F) scaled by 1/sqrt(t in years), shifted by s.
    if(method==1)
      vxs=a + b*(1 - e ^ ( (-1)*c*( 1/(tdays/365)^0.5 * log(strike / fut)-s )^2 )) + d * atan(e * (1 / (tdays / 365) ^ 0.5 * log(strike / fut) - s)) / e
    if(method==2)
      vxs = a + b*(1 - exp(-c * (1 / (tdays / 365) ^ 0.5 * log(strike / fut) - s) ^ 2)) + d * atan(e * (1 / (tdays / 365) ^ 0.5 * log(strike / fut) - s)) / e
    if(method==3)
      vxs = a + b*strike + c*strike^2 + d*strike^3 + e*strike^4 + f*strike^5 + g*strike^6
  }, silent=T)
  return(as.numeric(vxs))
}
| /history_manipulation.R | no_license | davydovpv/appSmile | R | false | false | 4,563 | r |
# + ------------------------------------------------------- +
# | Draw volatility curves at the end of a selected day |
# + ------------------------------------------------------- +
require(dplyr)
require(tidyr)
require(ggplot2)
# +------------------------------------+
# | Prepare data
# +------------------------------------+
# TODO: Add RVI or RTSVX level
# rts.data = read.csv('RTSI.txt') %>% select(c(3, 8)) %>% mutate(Dates = as.Date(as.character(X.DATE.), format='%Y%m%d')) %>% select(c(3, 2))
# names(rts.data) = c('Dates', 'Close')
# save(rts.data, file = 'rtsi.RData')
load('rtsi.RData')
# smile.data = read.csv(file = 'ri_smile.csv', sep = ';', header=T, dec=',') %>% select(c(-1))
# smile.data$tms = as.Date(substr(as.character(smile.data$tms), 0, 10))
# save(smile.data, file = 'smile.RData')
load('smile.RData')
dates.rng = c(min(smile.data$tms), max(smile.data$tms))
# +------------------------------------+
# | For each option series calc smile
# | External variables used: all.data
# +------------------------------------+
# Build the implied-volatility smile for every option series traded on
# `smileDate`.
#
# Fix: the original block contained unresolved Git merge-conflict markers
# (<<<<<<< HEAD / ======= / >>>>>>> origin/master). Resolution keeps the more
# general selection logic from origin/master (arrange by t, optionally keep
# the `nearest` shortest-dated series) together with the HEAD defaults and
# the HEAD join that attaches days-to-expiry to each smile row.
#
# Args:
#   strikeRng : half-width of the strike grid as a fraction of the futures price.
#   smileDate : trading date for which the smiles are computed.
#   nearest   : number of shortest-dated series to keep; 0 keeps all.
#
# Depends on the global `smile.data` loaded above and on vxSmile() below.
# Returns a long data frame with columns Strike, BaseFutures, IV, t, tdays.
CalcSmilesSeries = function(strikeRng = 0.2,
                            smileDate = as.Date('2015-04-30'),
                            nearest = 10){
  # Suppress warnings locally and restore the previous setting on exit
  # (the original set options(warn=-1) globally without restoring it).
  old.warn = options(warn = -1)
  on.exit(options(old.warn), add = TRUE)
  ### Find coefs for the requested date, shortest expiries first
  vx.at.date = smile.data %>% filter(tms == smileDate) %>% arrange(t)
  if (nearest > 0) {
    vx.at.date = vx.at.date[seq_len(min(nearest, nrow(vx.at.date))), ]
  }
  ### Make strikes range, include futures values
  rng = strikeRng
  strikes = seq( min(vx.at.date$fut_price)*(1-rng), max(vx.at.date$fut_price)*(1+rng), length.out = 50 )
  strikes = sort(c(strikes, vx.at.date$fut_price))
  ### Calc smile for every exp.date, strike value
  smiles = lapply( seq_len(nrow(vx.at.date)), function(x){
    x.row = x
    sapply(strikes, function(x){
      strike = x
      fut = vx.at.date[x.row, 'fut_price', drop=T]
      tdays = vx.at.date[x.row, 't', drop=T] * 250
      coef.vector = as.vector(vx.at.date[x.row, c('s', 'a', 'b', 'c', 'd', 'e')])
      vxSmile(strike, fut, tdays, coef.vector, method = 2)
    })
  })
  ### Arrange data for ggplot
  names(smiles) = as.vector(vx.at.date$small_name)
  smiles = gather(data = as.data.frame(c(list(strike = strikes), smiles)), key=strike )
  names(smiles) = c('Strike', 'BaseFutures', 'IV')
  ### Attach days-to-expiry for each base futures
  smiles$BaseFutures = as.character(smiles$BaseFutures)
  fut.days = vx.at.date %>%
    select(small_name, t) %>%
    mutate( tdays = (round(t * 250, 0)) )
  fut.days$small_name = as.character(fut.days$small_name)
  smiles = dplyr::left_join(smiles, fut.days, by = c('BaseFutures' = 'small_name'))
  smiles$tdays = as.character(smiles$tdays)
  return(smiles)
}
# Merge-conflict resolution: keep the (commented-out) example call.
#CalcSmilesSeries()
# +------------------------------------+
# | IV smile functions
# +------------------------------------+
# Parametric implied-volatility smile.
#
# Args:
#   strike      : strike price(s); vectorized over strikes.
#   fut         : underlying futures price.
#   tdays       : time to expiry in calendar days (converted to years via /365).
#   coef.vector : named coefficients s, a, b, c, d, e (plus f, g for method 3).
#   method      : 1/2 - exponential/atan smile families; 3 - degree-6
#                 polynomial in strike.
#
# Returns the smile volatility, or NA when the coefficients are missing or
# the formula cannot be evaluated.
vxSmile = function(strike, fut, tdays, coef.vector=NULL, method=2)
{
  # Coefficient extraction is wrapped in try() so a missing name degrades
  # gracefully instead of aborting the whole smile calculation.
  s = try(as.numeric(coef.vector[['s']]), silent = T)
  a = try(as.numeric(coef.vector[['a']]), silent = T)
  b = try(as.numeric(coef.vector[['b']]), silent = T)
  c = try(as.numeric(coef.vector[['c']]), silent = T)
  d = try(as.numeric(coef.vector[['d']]), silent = T)
  e = try(as.numeric(coef.vector[['e']]), silent = T)
  f = try(as.numeric(coef.vector[['f']]), silent = T)
  g = try(as.numeric(coef.vector[['g']]), silent = T)
  # Fix: initialise the result so that a failed try() block (or an unknown
  # method) returns NA instead of erroring with "object 'vxs' not found".
  vxs = NA_real_
  try({
    # x = log-moneyness log(K/F) scaled by 1/sqrt(t in years), shifted by s.
    if(method==1)
      vxs=a + b*(1 - e ^ ( (-1)*c*( 1/(tdays/365)^0.5 * log(strike / fut)-s )^2 )) + d * atan(e * (1 / (tdays / 365) ^ 0.5 * log(strike / fut) - s)) / e
    if(method==2)
      vxs = a + b*(1 - exp(-c * (1 / (tdays / 365) ^ 0.5 * log(strike / fut) - s) ^ 2)) + d * atan(e * (1 / (tdays / 365) ^ 0.5 * log(strike / fut) - s)) / e
    if(method==3)
      vxs = a + b*strike + c*strike^2 + d*strike^3 + e*strike^4 + f*strike^5 + g*strike^6
  }, silent=T)
  return(as.numeric(vxs))
}
|
# Package load hook: print a short banner when the package is attached.
# packageStartupMessage() is used (rather than message()) so callers can
# silence the banner with suppressPackageStartupMessages().
.onAttach <- function(libname, pkgname){
  banner <- c(
    "",
    "***********************************************************",
    "",
    "   This is 'GenomicMating' package, v 2.0",
    "",
    "Citation details with citation('GenomicMating')",
    "",
    "Further information with help(GenomicMating)...",
    "",
    "***********************************************************"
  )
  for (line in banner) {
    packageStartupMessage(line)
  }
}
| /GenomicMating/R/init.R | no_license | akhikolla/TestedPackages-NoIssues | R | false | false | 578 | r | .onAttach <- function(libname, pkgname){
packageStartupMessage("")
packageStartupMessage("***********************************************************")
packageStartupMessage("")
packageStartupMessage(" This is 'GenomicMating' package, v 2.0")
packageStartupMessage("")
packageStartupMessage("Citation details with citation('GenomicMating')")
packageStartupMessage("")
packageStartupMessage("Further information with help(GenomicMating)...")
packageStartupMessage("")
packageStartupMessage("***********************************************************")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simData.R
\name{simData}
\alias{simData}
\title{Simulate different scenarios of abundance change in entities}
\usage{
simData(tree = NULL, data = NULL, obj = NULL, scenario = "S1",
from.A = NULL, from.B = NULL, minTip.A = 0, maxTip.A = Inf,
minTip.B = 0, maxTip.B = Inf, minPr.A = 0, maxPr.A = 1,
ratio = 2, adjB = NULL, pct = 0.6, nSam = c(50, 50),
mu = 10000, size = 50, n = 1, fun = sum)
}
\arguments{
\item{tree}{A phylo object. Only use when \code{obj} is NULL.}
\item{data}{A matrix, representing a table of values, such as count, collected
from real data. It has the entities corresponding to tree leaves in the row
and samples in the column. Only use when \code{obj} is NULL.}
\item{obj}{A leafSummarizedExperiment object that includes a list of
matrix-like elements, or a matrix-like element in assays, and a phylo object
in metadata. In other words, \strong{obj} provides the same information
given by \strong{tree} and \strong{data}.}
\item{scenario}{\dQuote{S1}, \dQuote{S2}, or \dQuote{S3} (see \bold{Details}).
Default is \dQuote{S1}.}
\item{from.A, from.B}{The branch node labels of branches A and B for which the
signal is swapped. Default, both are NULL. In simulation, we select two
branches (A & B) to have differential abundance under different conditions.
One could specify these two branches or let \code{simData} choose. (Note: If
\code{from.A} is NULL, \code{from.B} is set to NULL).}
\item{minTip.A}{The minimum number of leaves in branch A}
\item{maxTip.A}{The maximum number of leaves in branch A}
\item{minTip.B}{The minimum number of leaves in branch B}
\item{maxTip.B}{The maximum number of leaves in branch B}
\item{minPr.A}{A numeric value selected from 0 to 1. The minimum abundance
proportion of leaves in branch A}
\item{maxPr.A}{A numeric value selected from 0 to 1. The maximum abundance
proportion of leaves in branch A}
\item{ratio}{A numeric value. The proportion ratio of branch B to branch A.
This value is used to select branches(see \bold{Details}). If there are no
branches having exactly this ratio, the pair with the value closest to
\code{ratio} would be selected.}
\item{adjB}{a numeric value selected from 0 and 1 (only for \code{scenario} is
\dQuote{S3}). Default is NULL. If NULL, branch A and the selected part of
branch B swap their proportions. If a numeric value, e.g. 0.1, then the
selected part of branch B decreases to its one tenth proportion and the
decrease in branch B is added to branch A. For example, assume there are two
experimental conditions (C1 & C2), branch A has 10 and branch B has 40 in
C1. If adjB is set to 0.1, then in C2 branch B becomes 4 and branch A 46 so
that the total proportion stays the same.}
\item{pct}{The percentage of leaves in branch B that have differential
abundance under different conditions (only for scenario \dQuote{S3})}
\item{nSam}{A numeric vector of length 2, containing the sample size for two
different conditions}
\item{mu, size}{The parameters of the Negative Binomial distribution. (see mu
and size in \code{\link[stats]{rnbinom}}). Parameters used to generate the
library size for each simulated sample.}
\item{n}{A numeric value to specify how many count tables would be generated
with the same settings. Default is one and one count table would be obtained
at the end. If above one, the output of \code{simData} is a list of matrices
(count tables). This is useful, when one needs multiple simulations.}
\item{fun}{A function to derive the count at each internal node based on its
descendant leaves, e.g. sum, mean. The argument of the function is a numeric
vector with the counts of an internal node's descendant leaves.}
}
\value{
a list of objects \item{FC}{the fold change of entities corresponding
to the tree leaves.} \item{Count}{a list of count table or a count table.
Entities on the row and samples in the column. Each count table includes
entities corresponding to all nodes on the tree structure.}
\item{Branch}{the information about two selected branches.} \describe{
\item{A}{the branch node label of branch A} \item{B}{the branch node label
of branch B} \item{ratio}{the count proportion ratio of branch B to branch
A} \item{A_tips}{the number of leaves on branch A} \item{B_tips}{the number
of leaves on branch B} \item{A_prop}{the count proportion of branch A (a
value not above 1)} \item{B_prop}{the count proportion of branch B (a
value not above 1)} }
}
\description{
\code{simData} simulates different abundance patterns for entities under
different conditions. These entities have their corresponding nodes on a tree.
More details about the simulated patterns could be found in the vignette via
\code{browseVignettes("treeAGG")}.
}
\details{
\code{simData} simulates a count table for entities which are
corresponding to the nodes of a tree. The entities are in rows and the
samples from different groups or conditions are in columns. The library size
of each sample is sampled from a Negative Binomial distribution with mean
and size specified by the arguments \code{mu} and \code{size}. The counts of
entities, which are located on the tree leaves, in the same sample are
assumed to follow a Dirichlet-Multinomial distribution. The parameters for
the Dirichlet-Multinomial distribution are estimated from a real data set
specified by the argument \code{data} via the function \code{dirmult} (see
\code{\link[dirmult]{dirmult}}). To generate different abundance patterns
under different conditions, we provide three different scenarios,
\dQuote{S1}, \dQuote{S2}, and \dQuote{S3} (specified via \code{scenario}).
Our vignette provides figures to explain these three scenarios (try
\code{browseVignettes("treeAGG")}). \itemize{ \item S1: two branches are
selected to swap their proportions, and leaves on the same branch have the
same fold change. \item S2: two branches are selected to swap their
proportions. Leaves in the same branch have different fold changes but same
direction (either increase or decrease). \item S3: two branches are
selected. One branch has its proportion swapped with the proportion of some
leaves from the other branch.}
}
\examples{
set.seed(1)
y <- matrix(rnbinom(100,size=1,mu=10),nrow=10)
colnames(y) <- paste("S", 1:10, sep = "")
rownames(y) <- tinyTree$tip.label
toy_lse <- leafSummarizedExperiment(tree = tinyTree,
assays = list(y))
res <- parEstimate(data = toy_lse)
set.seed(1122)
dat1 <- simData(obj = res)
}
\author{
Ruizhu Huang
}
| /man/simData.Rd | no_license | fionarhuang/treeAGG | R | false | true | 6,593 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simData.R
\name{simData}
\alias{simData}
\title{Simulate different scenarios of abundance change in entities}
\usage{
simData(tree = NULL, data = NULL, obj = NULL, scenario = "S1",
from.A = NULL, from.B = NULL, minTip.A = 0, maxTip.A = Inf,
minTip.B = 0, maxTip.B = Inf, minPr.A = 0, maxPr.A = 1,
ratio = 2, adjB = NULL, pct = 0.6, nSam = c(50, 50),
mu = 10000, size = 50, n = 1, fun = sum)
}
\arguments{
\item{tree}{A phylo object. Only use when \code{obj} is NULL.}
\item{data}{A matrix, representing a table of values, such as count, collected
from real data. It has the entities corresponding to tree leaves in the row
and samples in the column. Only use when \code{obj} is NULL.}
\item{obj}{A leafSummarizedExperiment object that includes a list of
matrix-like elements, or a matrix-like element in assays, and a phylo object
in metadata. In other words, \strong{obj} provides the same information
given by \strong{tree} and \strong{data}.}
\item{scenario}{\dQuote{S1}, \dQuote{S2}, or \dQuote{S3} (see \bold{Details}).
Default is \dQuote{S1}.}
\item{from.A, from.B}{The branch node labels of branches A and B for which the
signal is swapped. Default, both are NULL. In simulation, we select two
branches (A & B) to have differential abundance under different conditions.
One could specify these two branches or let \code{simData} choose. (Note: If
\code{from.A} is NULL, \code{from.B} is set to NULL).}
\item{minTip.A}{The minimum number of leaves in branch A}
\item{maxTip.A}{The maximum number of leaves in branch A}
\item{minTip.B}{The minimum number of leaves in branch B}
\item{maxTip.B}{The maximum number of leaves in branch B}
\item{minPr.A}{A numeric value selected from 0 to 1. The minimum abundance
proportion of leaves in branch A}
\item{maxPr.A}{A numeric value selected from 0 to 1. The maximum abundance
proportion of leaves in branch A}
\item{ratio}{A numeric value. The proportion ratio of branch B to branch A.
This value is used to select branches(see \bold{Details}). If there are no
branches having exactly this ratio, the pair with the value closest to
\code{ratio} would be selected.}
\item{adjB}{a numeric value selected from 0 and 1 (only for \code{scenario} is
\dQuote{S3}). Default is NULL. If NULL, branch A and the selected part of
branch B swap their proportions. If a numeric value, e.g. 0.1, then the
selected part of branch B decreases to its one tenth proportion and the
decrease in branch B is added to branch A. For example, assume there are two
experimental conditions (C1 & C2), branch A has 10 and branch B has 40 in
C1. If adjB is set to 0.1, then in C2 branch B becomes 4 and branch A 46 so
that the total proportion stays the same.}
\item{pct}{The percentage of leaves in branch B that have differential
abundance under different conditions (only for scenario \dQuote{S3})}
\item{nSam}{A numeric vector of length 2, containing the sample size for two
different conditions}
\item{mu, size}{The parameters of the Negative Binomial distribution. (see mu
and size in \code{\link[stats]{rnbinom}}). Parameters used to generate the
library size for each simulated sample.}
\item{n}{A numeric value to specify how many count tables would be generated
with the same settings. Default is one and one count table would be obtained
at the end. If above one, the output of \code{simData} is a list of matrices
(count tables). This is useful, when one needs multiple simulations.}
\item{fun}{A function to derive the count at each internal node based on its
descendant leaves, e.g. sum, mean. The argument of the function is a numeric
vector with the counts of an internal node's descendant leaves.}
}
\value{
a list of objects \item{FC}{the fold change of entities corresponding
to the tree leaves.} \item{Count}{a list of count table or a count table.
Entities on the row and samples in the column. Each count table includes
entities corresponding to all nodes on the tree structure.}
\item{Branch}{the information about two selected branches.} \describe{
\item{A}{the branch node label of branch A} \item{B}{the branch node label
of branch B} \item{ratio}{the count proportion ratio of branch B to branch
A} \item{A_tips}{the number of leaves on branch A} \item{B_tips}{the number
of leaves on branch B} \item{A_prop}{the count proportion of branch A (a
value not above 1)} \item{B_prop}{the count proportion of branch B (a
value not above 1)} }
}
\description{
\code{simData} simulates different abundance patterns for entities under
different conditions. These entities have their corresponding nodes on a tree.
More details about the simulated patterns could be found in the vignette via
\code{browseVignettes("treeAGG")}.
}
\details{
\code{simData} simulates a count table for entities which are
corresponding to the nodes of a tree. The entities are in rows and the
samples from different groups or conditions are in columns. The library size
of each sample is sampled from a Negative Binomial distribution with mean
and size specified by the arguments \code{mu} and \code{size}. The counts of
entities, which are located on the tree leaves, in the same sample are
assumed to follow a Dirichlet-Multinomial distribution. The parameters for
the Dirichlet-Multinomial distribution are estimated from a real data set
specified by the argument \code{data} via the function \code{dirmult} (see
\code{\link[dirmult]{dirmult}}). To generate different abundance patterns
under different conditions, we provide three different scenarios,
\dQuote{S1}, \dQuote{S2}, and \dQuote{S3} (specified via \code{scenario}).
Our vignette provides figures to explain these three scenarios (try
\code{browseVignettes("treeAGG")}). \itemize{ \item S1: two branches are
selected to swap their proportions, and leaves on the same branch have the
same fold change. \item S2: two branches are selected to swap their
proportions. Leaves in the same branch have different fold changes but same
direction (either increase or decrease). \item S3: two branches are
selected. One branch has its proportion swapped with the proportion of some
leaves from the other branch.}
}
\examples{
set.seed(1)
y <- matrix(rnbinom(100,size=1,mu=10),nrow=10)
colnames(y) <- paste("S", 1:10, sep = "")
rownames(y) <- tinyTree$tip.label
toy_lse <- leafSummarizedExperiment(tree = tinyTree,
assays = list(y))
res <- parEstimate(data = toy_lse)
set.seed(1122)
dat1 <- simData(obj = res)
}
\author{
Ruizhu Huang
}
|
rm (list = ls())  # NOTE(review): clearing the global env inside a script is discouraged
# Read the data
data <- read.csv("data/09.06_COMBINED DATASET REDUCED.csv", header = T, stringsAsFactors = F, skipNul = TRUE)
data$tot_personyrs <- as.numeric(data$tot_personyrs)
# Impute missing total person-years as mean follow-up x baseline n, and
# missing mean follow-up as total person-years / baseline n.
data[(is.na(data$tot_personyrs)),]$tot_personyrs <-
  data[(is.na(data$tot_personyrs)),]$mean_followup * data[(is.na(data$tot_personyrs)),]$n_baseline
data[(is.na(data$mean_followup)),]$mean_followup <-
  data[(is.na(data$mean_followup)),]$tot_personyrs / data[(is.na(data$mean_followup)),]$n_baseline
data$outcome <- trimws(data$outcome)
# Read all the functions
source("all-functions.R")
# Identify unique outcomes
uoutcome <- data.frame(outcome = as.character(unique(data$outcome)))
uoutcome$outcome <- as.character(uoutcome$outcome)
# all/male population - stroke remove 70
# CHD remove 38
# Accumulator: one row per (exposure, outcome, population) combination,
# filled positionally inside the loop below via summary_table[index, k].
summary_table <- data.frame(exposure = character(),
                            outcome = character(),
                            overall = numeric(),
                            gender = numeric(),
                            sample_size = numeric(),
                            total_population = character(),
                            stringsAsFactors = FALSE)
index <- 1
# For every outcome x exposure (LTPA/TPA) x population (g: 0 = overall,
# 1/2 = gender subgroups) combination, run the dose-response meta-analysis
# and record the study count and total population in summary_table.
for (i in 1:nrow(uoutcome)){
  paexpg = c("LTPA", "TPA")
  ov <- 1
  for(paexp in paexpg){
    gg <- c(0, 1, 2)
    for (g in gg){
      # cat(g, "\n")
      # g <- 1
      # cat("Unprocessed - Outcome: ", uoutcome$outcome[i], " and i ", i, "\n")
      # if (is.null(g)){
      if (g == 0){
        acmdata <- getDiseaseSpecificData(data, uoutcome$outcome[i], paexposure = paexp, overall1 = 1)
        # cat("overall")
      }else{
        acmdata <- getDiseaseSpecificData(data, uoutcome$outcome[i], paexposure = paexp, gender = g)
      }
      #acmdata <- subset(acmdata, outcome_type == "mortality")
      acmfdata <- formatData(acmdata, kcases = T, infertotalpersons = T)
      # Remove all cases where both rr and dose are null
      acmfdata <- subset(acmfdata, !is.na(rr) & !is.na(dose))
      # Remove when totalperson is not available for hr, and personsyears for rr/or
      # NOTE(review): `is.na(totalpersons | totalpersons == 0)` looks
      # misparenthesized -- probably meant
      # `is.na(totalpersons) | totalpersons == 0`; as written, rows with
      # totalpersons == 0 are NOT excluded. Confirm intent.
      acmfdata <- subset(acmfdata, !((effect_measure == "hr" & (is.na(personyears) | personyears == 0) ) |
                                       (effect_measure != "hr" & (is.na(totalpersons | totalpersons == 0) ) ) ))
      if (uoutcome$outcome[i] == 'stroke' && paexp == "TPA" && g == 0){
        # Remove study # 70 from stroke
        acmfdata <- subset(acmfdata, !ref_number %in% c(70))
      }
      if(uoutcome$outcome[i] == 'CHD' && paexp == "TPA" && g == 0){
        # Remove study # 38 from stroke
        # NOTE(review): the comment says study 38 (and "stroke") but the code
        # removes 70 for CHD; the g == 2 branch below removes 38. Confirm
        # which study should be dropped here.
        acmfdata <- subset(acmfdata, !ref_number %in% c(70))
      }
      if(uoutcome$outcome[i] == 'CHD' && paexp == "TPA" && g == 2){
        # Remove study # 38 from stroke
        acmfdata <- subset(acmfdata, !ref_number %in% c(38))
      }
      # cat("Studies ", unique(acmfdata$ref_number), "\n")
      # For outcomes 5 and 6, zero out the uncertainty of reference rows
      # (logrr == 0) before the meta-analysis.
      if (i %in% c(5, 6))
        acmfdata[acmfdata$logrr == 0,]$se <- acmfdata[acmfdata$logrr == 0,]$lci <- acmfdata[acmfdata$logrr == 0,]$uci <- 0
      # Outcome 5 / TPA / female uses the full dose range; all other cells
      # truncate the dose distribution at the 85th percentile.
      if (i == 5 && paexp == "TPA" && g == 2){
        plot_data <- data.frame(metaAnalysis(acmfdata, ptitle = "", covMethed = T, returnval = T))
      }else{
        plot_data <- data.frame(metaAnalysis(acmfdata, ptitle = "", covMethed = T, returnval = T, minQuantile = 0, maxQuantile = 0.85))
      }
      summary_table[index, 1] <- paexp
      summary_table[index, 2] <- uoutcome$outcome[i]
      summary_table[index, 3] <- ifelse(g == 0, 1, 0)
      summary_table[index, 4] <- ifelse(g == 0, 0, g)
      if (nrow(plot_data) > 0){
        # cat("Outcome: ", uoutcome$outcome[i], " and i ", i, "\n")
        colnames(plot_data) <- c("dose","RR", "lb", "ub")
        summary_table[index, 5] <- length(unique(acmfdata$id))
        summary_table[index, 6] <- formatC(round(sum(acmfdata$totalpersons, na.rm = T)),
                                           format = "f", big.mark = ",", drop0trailing = TRUE)
      }else{
        # cat("(NOT) Outcome: ", uoutcome$outcome[i], " and i ", i, "\n")
        summary_table[index, 5] <- 0
        summary_table[index, 6] <- 0
      }
      index <- index + 1
    }
  }
}
| /legacy/summary-table.R | no_license | meta-analyses/meta-analysis | R | false | false | 4,228 | r | rm (list = ls())
# Read the data (numeric columns may arrive as text in the raw export).
data <- read.csv("data/09.06_COMBINED DATASET REDUCED.csv", header = TRUE, stringsAsFactors = FALSE, skipNul = TRUE)
data$tot_personyrs <- as.numeric(data$tot_personyrs)
# Impute missing total person-years as mean follow-up x baseline n, then
# missing mean follow-up from the (possibly just imputed) person-years,
# so at most one of the two stays missing per row.
data[(is.na(data$tot_personyrs)),]$tot_personyrs <-
  data[(is.na(data$tot_personyrs)),]$mean_followup * data[(is.na(data$tot_personyrs)),]$n_baseline
data[(is.na(data$mean_followup)),]$mean_followup <-
  data[(is.na(data$mean_followup)),]$tot_personyrs / data[(is.na(data$mean_followup)),]$n_baseline
data$outcome <- trimws(data$outcome)
# Read all the functions (getDiseaseSpecificData, formatData, metaAnalysis, ...)
source("all-functions.R")
# Identify unique outcomes
uoutcome <- data.frame(outcome = as.character(unique(data$outcome)))
uoutcome$outcome <- as.character(uoutcome$outcome)
# Study exclusions applied below:
# all/male population - stroke remove 70
# CHD remove 38
# Summary table: one row per (PA exposure, outcome, population) combination
# holding the number of contributing studies and the pooled sample size.
summary_table <- data.frame(exposure = character(),
                            outcome = character(),
                            overall = numeric(),
                            gender = numeric(),
                            sample_size = numeric(),
                            total_population = character(),
                            stringsAsFactors = FALSE)
index <- 1
for (i in 1:nrow(uoutcome)){
  paexpg = c("LTPA", "TPA")
  ov <- 1
  for(paexp in paexpg){
    # g == 0: overall population; g == 1 / g == 2: gender-specific subsets.
    gg <- c(0, 1, 2)
    for (g in gg){
      # cat(g, "\n")
      # g <- 1
      # cat("Unprocessed - Outcome: ", uoutcome$outcome[i], " and i ", i, "\n")
      # if (is.null(g)){
      if (g == 0){
        acmdata <- getDiseaseSpecificData(data, uoutcome$outcome[i], paexposure = paexp, overall1 = 1)
        # cat("overall")
      }else{
        acmdata <- getDiseaseSpecificData(data, uoutcome$outcome[i], paexposure = paexp, gender = g)
      }
      #acmdata <- subset(acmdata, outcome_type == "mortality")
      acmfdata <- formatData(acmdata, kcases = TRUE, infertotalpersons = TRUE)
      # Remove all cases where both rr and dose are null
      acmfdata <- subset(acmfdata, !is.na(rr) & !is.na(dose))
      # Remove when totalperson is not available for hr, and personsyears for rr/or
      # BUG FIX: the non-hr branch previously read is.na(totalpersons | totalpersons == 0),
      # i.e. is.na() of a logical OR, which never flagged totalpersons == 0;
      # the parentheses now mirror the hr branch above.
      acmfdata <- subset(acmfdata, !((effect_measure == "hr" & (is.na(personyears) | personyears == 0) ) |
                                       (effect_measure != "hr" & (is.na(totalpersons) | totalpersons == 0) ) ))
      if (uoutcome$outcome[i] == 'stroke' && paexp == "TPA" && g == 0){
        # Remove study # 70 from stroke
        acmfdata <- subset(acmfdata, !ref_number %in% c(70))
      }
      if(uoutcome$outcome[i] == 'CHD' && paexp == "TPA" && g == 0){
        # Remove study # 38 from CHD
        # BUG FIX: this branch previously excluded c(70) (copied from the
        # stroke block above) although the comment here and the header note
        # both say study 38 should be dropped for CHD.
        acmfdata <- subset(acmfdata, !ref_number %in% c(38))
      }
      if(uoutcome$outcome[i] == 'CHD' && paexp == "TPA" && g == 2){
        # Remove study # 38 from CHD
        acmfdata <- subset(acmfdata, !ref_number %in% c(38))
      }
      # cat("Studies ", unique(acmfdata$ref_number), "\n")
      # For outcomes 5 and 6 the reference category rows (logrr == 0) carry
      # no uncertainty, so zero out their se / ci before pooling.
      if (i %in% c(5, 6))
        acmfdata[acmfdata$logrr == 0,]$se <- acmfdata[acmfdata$logrr == 0,]$lci <- acmfdata[acmfdata$logrr == 0,]$uci <- 0
      if (i == 5 && paexp == "TPA" && g == 2){
        plot_data <- data.frame(metaAnalysis(acmfdata, ptitle = "", covMethed = TRUE, returnval = TRUE))
      }else{
        plot_data <- data.frame(metaAnalysis(acmfdata, ptitle = "", covMethed = TRUE, returnval = TRUE, minQuantile = 0, maxQuantile = 0.85))
      }
      summary_table[index, 1] <- paexp
      summary_table[index, 2] <- uoutcome$outcome[i]
      summary_table[index, 3] <- ifelse(g == 0, 1, 0)
      summary_table[index, 4] <- ifelse(g == 0, 0, g)
      if (nrow(plot_data) > 0){
        # cat("Outcome: ", uoutcome$outcome[i], " and i ", i, "\n")
        colnames(plot_data) <- c("dose","RR", "lb", "ub")
        summary_table[index, 5] <- length(unique(acmfdata$id))
        summary_table[index, 6] <- formatC(round(sum(acmfdata$totalpersons, na.rm = TRUE)),
                                           format = "f", big.mark = ",", drop0trailing = TRUE)
      }else{
        # cat("(NOT) Outcome: ", uoutcome$outcome[i], " and i ", i, "\n")
        summary_table[index, 5] <- 0
        summary_table[index, 6] <- 0
      }
      index <- index + 1
    }
  }
}
|
## Clear workspace
## NOTE(review): rm(list=ls()) in scripts is generally discouraged; kept here
## for the self-contained-assignment convention this file follows.
rm(list=ls())
library(lubridate)  # NOTE(review): loaded but not used below — confirm before removing
## load data frame
householdPwrData <- read.table(
        "./data/household_power_consumption.txt",
        header=TRUE,        ## sourcefile has header row
        sep=";",            ## data is separated by semicolons
        na.strings = c("?"),  ## missing values are coded as '?'
        colClasses = c(
                ## (variable definitions below)
                'character',  ## Date
                'character',  ## Time
                'numeric',    ## Global_active_power
                'numeric',    ## Global_reactive_power
                'numeric',    ## Voltage
                'numeric',    ## Global_intensity
                'numeric',    ## Sub_metering_1
                'numeric',    ## Sub_metering_2
                'numeric'     ## Sub_metering_3
        ),
        stringsAsFactors=FALSE
)
## combine Date & Time strings into a single timestamp.
## FIX: keep the column as POSIXct — strptime() returns POSIXlt, which
## misbehaves inside data frames and cannot be compared against other
## date-time classes reliably.
householdPwrData$DateTime <- as.POSIXct(
        strptime(paste(householdPwrData$Date, householdPwrData$Time, sep=" "), "%d/%m/%Y %H:%M:%S"))
## select target date range (ie - 1/2/2007 <= t < 3/2/2007)
## BUG FIX: the original compared the date-time column against as.Date()
## bounds; mixing POSIX*t and Date classes triggers "incompatible methods"
## dispatch and an unreliable comparison. Build the bounds as POSIXct with
## the same format instead.
rangeStart <- as.POSIXct(strptime("1/2/2007 00:00:00", "%d/%m/%Y %H:%M:%S"))
rangeEnd   <- as.POSIXct(strptime("3/2/2007 00:00:00", "%d/%m/%Y %H:%M:%S"))
householdPwrTarget <- subset(
        householdPwrData,
        DateTime >= rangeStart & DateTime < rangeEnd
)
## Generate plot: empty frame first, then one line per sub-meter.
with(householdPwrTarget, {
        plot(DateTime, Sub_metering_1, ylab="Energy sub metering", xlab="", type = "n")
        lines(DateTime, Sub_metering_1, col="black")
        lines(DateTime, Sub_metering_2, col="red")
        lines(DateTime, Sub_metering_3, col="blue")
        }
)
## BUG FIX: legend's bty expects "o" or "n"; the original bty=1 silently
## suppressed the box. "n" (no box) reproduces the rendered output.
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col= c("black", "red", "blue"), lty="solid", cex=0.75, bty="n")
## Copy screen plot to .png
dev.copy(png,"plot3.png", width=480, height=480)
dev.off()
## ********************************
## VARIABLE DEFINITIONS
##
## Date:
## Date in format dd/mm/yyyy
## Time:
## time in format hh:mm:ss
## Global_active_power:
## household global minute-averaged active power (in kilowatt)
## Global_reactive_power:
## household global minute-averaged reactive power (in kilowatt)
## Voltage:
## minute-averaged voltage (in volt)
## Global_intensity:
## household global minute-averaged current intensity (in ampere)
## Sub_metering_1:
## energy sub-metering No. 1 (in watt-hour of active energy).
## It corresponds to the kitchen, containing mainly a dishwasher, an
## oven and a microwave (hot plates are not electric but gas powered).
## Sub_metering_2:
## energy sub-metering No. 2 (in watt-hour of active energy).
## It corresponds to the laundry room, containing a washing-machine,
## a tumble-drier, a refrigerator and a light.
## Sub_metering_3:
## energy sub-metering No. 3 (in watt-hour of active energy).
## It corresponds to an electric water-heater and an air-conditioner.
##
| /plot3.R | no_license | gverissimo/ExData_Plotting1 | R | false | false | 3,039 | r | ## Clear workspace
rm(list=ls())
library(lubridate)
## load data frame
householdPwrData <- read.table(
"./data/household_power_consumption.txt",
header=TRUE, ## sourcefile has header row
sep=";", ## data is separeted by semicolons
na.strings = c("?"), ## missing values are coded as '?'
colClasses = c(
## (variable definitions below)
'character', ## Date
'character', ## Time
'numeric', ## Global_active_power
'numeric', ## Global_reactive_power
'numeric', ## Voltage
'numeric', ## Global_intensity
'numeric', ## Sub_metering_1
'numeric', ## Sub_metering_2
'numeric' ## Sub_metering_3
),
stringsAsFactors=FALSE
)
## combine Date & Time strings and convert to new POSIXlt variable DateTime
householdPwrData$DateTime <- strptime(paste(householdPwrData$Date, householdPwrData$Time, sep=" "),"%d/%m/%Y %H:%M:%S")
## select target date range (ie - 1/2/2007 ≤ t ≤ 2/2/2007 )
householdPwrTarget <- subset(
householdPwrData,
DateTime >= as.Date("1/2/2007 00:00:00", "%d/%m/%Y %H:%M:%S")
& DateTime < as.Date("3/2/2007 00:00:00", "%d/%m/%Y %H:%M:%S")
)
## Generate plot
with(householdPwrTarget, {
plot(DateTime, Sub_metering_1, ylab="Energy sub metering", xlab="", type = "n")
lines(DateTime, Sub_metering_1, col="black")
lines(DateTime, Sub_metering_2, col="red")
lines(DateTime, Sub_metering_3, col="blue")
}
)
legend("topright",
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
col= c("black", "red", "blue"), lty="solid", cex=0.75, bty=1)
## Copy screen plot to .png
dev.copy(png,"plot3.png", width=480, height=480)
dev.off()
## ********************************
## VARIABLE DEFINITIONS
##
## Date:
## Date in format dd/mm/yyyy
## Time:
## time in format hh:mm:ss
## Global_active_power:
## household global minute-averaged active power (in kilowatt)
## Global_reactive_power:
## household global minute-averaged reactive power (in kilowatt)
## Voltage:
## minute-averaged voltage (in volt)
## Global_intensity:
## household global minute-averaged current intensity (in ampere)
## Sub_metering_1:
## energy sub-metering No. 1 (in watt-hour of active energy).
## It corresponds to the kitchen, containing mainly a dishwasher, an
## oven and a microwave (hot plates are not electric but gas powered).
## Sub_metering_2:
## energy sub-metering No. 2 (in watt-hour of active energy).
## It corresponds to the laundry room, containing a washing-machine,
## a tumble-drier, a refrigerator and a light.
## Sub_metering_3:
## energy sub-metering No. 3 (in watt-hour of active energy).
## It corresponds to an electric water-heater and an air-conditioner.
##
|
# Register the non-standard-evaluation names used in this package's dplyr /
# ggplot2 pipelines so `R CMD check` does not report them as undefined
# global variables. (Order preserved from the original registration.)
utils::globalVariables(c(
  ".",
  "cumulative_data_fraction", "cumulative_capture_rate", "cumulative_lift",
  "cumulative_response_rate", "model_id", "standardized_coefficients",
  "coefficients", "metalearner", "reorder", "scaled_importance",
  "test_data", "plot", "best", "n_vars", "explain", "save_png", "variable",
  "add_column", "str_split", "drop_na", "n_models", "model_rank"
))
| /R/global_variables.R | no_license | PeerChristensen/autoMLviz | R | false | false | 386 | r |
# Register the non-standard-evaluation names used in this package's dplyr /
# ggplot2 pipelines so `R CMD check` does not report them as undefined
# global variables. (Order preserved from the original registration.)
utils::globalVariables(c(
  ".",
  "cumulative_data_fraction", "cumulative_capture_rate", "cumulative_lift",
  "cumulative_response_rate", "model_id", "standardized_coefficients",
  "coefficients", "metalearner", "reorder", "scaled_importance",
  "test_data", "plot", "best", "n_vars", "explain", "save_png", "variable",
  "add_column", "str_split", "drop_na", "n_models", "model_rank"
))
|
################# LOAD LIBRARIES
library(shiny)
library(shinythemes)
library(DT)
library(dplyr)
library(shinyBS)
library(shinyTime)
library(RPostgreSQL)
library(shinyalert)
################ OPEN THE DATABASE CONNECTION
# A single PostgreSQL connection, opened at app start-up and used both by the
# UI builders below (dbGetQuery at construction time) and by the server.
# NOTE(review): dbname/user/password are empty — presumably stripped before
# publishing; real credentials must be supplied (ideally via environment
# variables rather than hard-coded) for the app to run. The connection is
# never closed: the dbDisconnect() call at the bottom of the file is
# commented out — confirm whether that is intentional.
con<- dbConnect(PostgreSQL(), host="pggeodb-preprod.nancy.inra.fr", dbname="", user="", password="")
############### CHOICE LISTS
# Static choices for the selectInput widgets. These are updated at run time
# when the user picks the "other" option and types a new value (see the
# ajoutBle observer in the server). String values — including their leading
# spaces — are preserved exactly as recorded in the field protocol.
bleGravChoices <- list(
  "superficielle", "légère", "profonde",
  "fracture", "fracture _consolidée",
  " plaie_fermée", " pelade"
)
bleTraitChoices <- list("allumisol", "serflex_allumisol", "points", "euthanasie", "rien")
blelocalisationChoices <- list(" 1" = 1, "2" = 2, "3" = 3, "Autre localisation")
# UI tab "Caract. de l'animal": animal identification (sabot number, animal
# number, site, RFID, ear tags), capture date/time, sex, and morphometrics.
# The animal weight shown next to "Poids Animal" is computed server-side as
# full-box minus empty-box weight (output$value). Antler fields are only
# displayed for males (JS condition "input.sexe == 0").
# NOTE(review): inputId "nSabot" is reused by a selectizeInput in
# contentcapture further down; duplicate Shiny input IDs are invalid and
# break input binding — confirm and rename one of the two.
# NOTE(review): numericInput value = " " is not numeric — presumably meant
# as "start empty"; confirm it behaves as intended in the deployed app.
# The selectizeInput choices are fetched from the database when this UI
# object is built, i.e. once at app start-up.
contentcaractanimal = fluidPage(
  #titlePanel("Caract. de l'animal"),
  fluidRow(
    column(2, numericInput(inputId = "nSabot", value = " ",label = h4("N° Sabot"), min=1, max=28)),
    column(2, numericInput(inputId = "pSabotPlein", value = " ",label = h4("Poids Sabot Plein"),min=0,max=70 )),
    column(2, numericInput(inputId = "pSabotVide", value = " ",label = h4("Poids Sabot Vide"),min=0,max=60 )),
    column(2, h4("Poids Animal"),textOutput("value")),
    column(12,hr()),
    column(2, checkboxInput(inputId = "estNouvelAnimal", value = T,label = h4("Nouvel Animal"))),
    column(2, numericInput(inputId = "nAnimal", value = " ",label = h4("N° Animal"),min=0 )),
    column(2, selectizeInput("idSite", h4("Site"),
    choices = dbGetQuery(con,"select (sit_nom_court) from public.tr_site_capture_sit"),options=list(placeholder='Choisir une valeur :', onInitialize = I('function() { this.setValue(""); }')), selected = NULL)),
    column(12),
    column(2, timeInput("heureDebut",h4("Heure début"),seconds = FALSE)),
    column(2, selectInput("idRFID", h4("Rfid"),
    choices = list("1" = 1, "2" = 2,"3" = 3), selected = 1)),
    column(2, selectInput("idTagOrG", h4("Tag Oreille Gauche"),
    choices = list(" 1" = 1, "2" = 2,"3" = 3), selected = 1) ),
    column(2, selectInput("idTagOrD", h4("Tag Oreille Droite"),
    choices = list("Site 1" = 1, "Site 2" = 2,"Site 3" = 3), selected = 1)),
    column(12,hr()),
    column(2, dateInput('dateCapture',label=h4("Date"),value = Sys.Date())),
    # Sex is encoded 0 = male ("M"), 1 = female ("F"); the antler panel and
    # the server checklist logic both test against these values.
    column(2, radioButtons("sexe",h4("Sexe"),choiceNames = list("M","F"),choiceValues = list(0,1)))
  ),
  hr(),
  fluidRow(
    column(2, numericInput("cirCou", value=" ", h4("Circonférence cou"),min=0)),
    column(2, numericInput("lPattArriere", value=" ", h4("Longueur de la patte arrière"),min=0)),
    column(2, numericInput("tglucose", value="", h4("Taux de Glucose sanguin"), min=0))
  ),
  # Antler measurements: males only.
  conditionalPanel(
    condition = "input.sexe == 0",
    fluidRow(
      column(2, numericInput("lBoisGauche", value=" ", h4("Longueur bois gauche"),min=0)),
      column(2, numericInput("lBoisDroit", value=" ", h4("Longueur bois droit"),min=0)),
      column(2, selectInput("etatBois", h4("État bois"), choices = list("Velours", "tombés", "durs"), selected = 1))
    )
  )
)
################## FORMULAIRE BLESSURES
# UI tab "Blessures": record one wound at a time (location / severity /
# treatment) and append it to a review table. Each selector is paired with a
# free-text input that, when non-empty, overrides the dropdown value (see the
# ajoutBle observer in the server). Choosing "Autre localisation" opens the
# bsModal so a new location can be typed and added to the choice list.
contentblessures = fluidPage(
  # titlePanel("Blessures"),
  fluidRow(
    column(3,
           selectInput("blelocalisation_sel", h4("Localisation"),
                       choices = blelocalisationChoices, selected = 1),
           bsModal("nouvelleLocalization_modal", "Entrer la localisation","", size = "large",wellPanel(
             textInput("nouvelle_localisation_txt",""),
             actionButton("ok_button", "OK"),
             actionButton("quit_button", "Quitter")
           )),
           textInput("blelocalisation_txt","")
    ),
    column(3, selectInput("bleGrav_sel", h4("Gravité"), choices = bleGravChoices, selected = "superficielle"),
           textInput("bleGrav_txt","") ),
    column(3, selectInput("bleTrait_sel", h4("Traitement"), choices = bleTraitChoices, selected = 1),
           textInput("bleTrait_txt","")),
    column(3, actionButton("ajoutBle","Ajouter une blessure"))
  ),
  hr(),
  # Review table of recorded wounds, with a button to delete the selected row.
  fluidRow(
    sidebarLayout(
      mainPanel(
        dataTableOutput("tableblessure")),
      sidebarPanel(actionButton("sup_Bles", "Supprimer blessure"))
    )
  ))
################## SAMPLES FORM
# Placeholder tab: the "Prélèvements" (samples) form has not been
# implemented yet; an empty fluidPage keeps the navbar entry working.
contentprelevement = fluidPage()
################## COLLAR FORM
# UI tab "Caract. du collier": flag whether a new collar is fitted and
# confirm the fitting. NOTE(review): neither "new_collier" nor "ajoutColl"
# has a matching handler in the visible server code — presumably still to be
# implemented; confirm.
contentcollier = fluidPage(
  #titlePanel("Caracteristique du collier"),
  fluidRow(
    #titlePanel("Pose de collier"),
    column(3, checkboxInput(inputId = "new_collier", value = F,label = h4("Nouveau collier"))),
    column(3, actionButton("ajoutColl","Confirmer la nouvelle pose"))
  ))
################## ON-TABLE BEHAVIOUR FORM
# UI tab "Comportement table": time on the handling table, rectal/ambient
# temperatures, behavioural flags (struggling, panting, calls) and the
# Eurodeer behaviour score fetched from the lookup table at app start-up.
# radioButtons start unselected (selected = character(0)) so a missing
# observation is distinguishable from "Non".
contenttable = fluidPage(
  #titlePanel("Comportement sur table"),
  fluidRow(
    column(2,timeInput("time_table", h4("Heure:"),seconds = FALSE),
           actionButton("to_current_time_table", "Afficher l'heure")),
    column(3,numericInput("rectTemp", value=" ", h4("Température rectale"),step = 1)),
    column(3,numericInput("ExtTemp", value=" ", h4("Température extérieure"),step = 1)),
    column(12,hr()),
    column(2,radioButtons("lutte",h4("Lutte"),choiceNames = list("Oui","Non"),choiceValues = list(T,F), selected = character(0))),
    column(2,radioButtons("halete",h4("Halete"),choiceNames = list("Oui","Non"),choiceValues = list(T,F), selected =character(0))),
    column(2,radioButtons("cribague",h4("Cri Bague"), choices = list(NA,"0", "1-2", ">2"))),
    column(2,radioButtons("criautre", h4("Cri Autre"), choices = list("0", "1-2", ">2"), selected = F)),
    column(12,hr()),
    column(2,selectizeInput("Notation_euro_table", h4("Notation Eurodeer"),
    choices = dbGetQuery(con,"select (ect_comportement) from lu_tables.tr_eurodeer_comp_table_ect"),options=list(placeholder='Choisir une valeur :', onInitialize = I('function() { this.setValue(""); }')), selected = NULL))
  ))
################## HISTORY FORM :
# UI tab "Historique captures": pick an existing animal (recapture) from the
# database, or enter a new animal number, then show that animal's capture /
# equipment history (output$historique, built in the server from
# v_aniposi_gpsgsm).
contenthistorique = fluidPage(
  #titlePanel("Historique"),
  fluidRow(
    tabPanel("Caractéristiques de l'animal",
             checkboxInput("recap","recapture ?", 1),
             # Recapture: choose among known animal tags loaded from the DB.
             conditionalPanel(
               condition = "input.recap == 1",
               fluidRow(column(width= 2, selectInput(inputId = "ani_etiq", label = h4("N°Animal"),
               choices = dbGetQuery(con,"Select ani_etiq from public.t_animal_ani order by ani_etiq")), selected = NULL, offset= 0.5))),
             # New animal: free numeric entry instead.
             conditionalPanel(
               condition = "input.recap == 0",
               fluidRow(column(width= 2, numericInput("new_ani_etiq",value="" ,h4("N°Animal"))))),
             tabPanel("Historique de capture", DT::dataTableOutput("historique"))
    )))
################## CHECKLIST 1 FORM :
# UI tab "checklist 1": two read-only recap tables (animal characteristics
# and on-table measurements) refreshed by their respective "Checklist"
# buttons; missing inputs are shown as "NULL" (see the checklist_1 and
# checklist_tab observers in the server).
contentcheck1 = fluidPage(fluidRow(
  titlePanel("Checklist - Caractéristiques"),
  tabPanel("Cheklist 1", DT::dataTableOutput("tablechecklist1")),
  column(12,useShinyalert(),
         actionButton("checklist_1", "Checklist",icon('eye'),width='25%')),
  # titlePanel("Checklist - Prelevements"),
  column(12,hr()),
  #conditionalPanel(
  # condition = "input.new_collier == 1",
  # fluidRow(titlePanel("Checklist - Collier"))) ,
  titlePanel("Checklist - Table"),
  tabPanel("Checklist Table",DT::dataTableOutput("tablechecklist_table")),
  column(12,useShinyalert(),
         actionButton("checklist_tab", "Checklist",icon('eye'),width='25%'))
))
################## RELEASE BEHAVIOUR FORM :
###submitButton(format(Sys.time(), "%X"))
#timeInput("time2", "Heure lâcher:", value = Sys.time(),seconds = FALSE))
# UI tab "Comportement lâcher": release time(s), behaviour flags during
# release (gait, jumps, falls, calls, ...), visibility/habitat context, and
# the Eurodeer release score. Habitat and Eurodeer choices are fetched from
# the database at app start-up.
contentlacher = fluidPage(
  # titlePanel("Comportement au lâcher"),
  fluidRow(
    column(2,timeInput("time", h4("Heure de lâcher:"),seconds = FALSE),
           actionButton("to_current_time", "Afficher l'heure")),
    column(2, timeInput("time2", h4("Heure de 2nd lâcher:"),seconds = FALSE),
           actionButton("to_current_time2", "Afficher l'heure")),
    column(1,numericInput("nbre_stops",value=NULL, h4("Nombre de stops"),min=0)),
    column(1,numericInput("nbre_personnes", value=NULL, h4("Nbre de personnes"),min=1)),
    column(12,hr()),
    column(1,radioButtons("vitesse",h4("Vitesse"),choiceNames = list("Pas","Course"),choiceValues = list(0,1), selected = F)),
    column(1,radioButtons("allure",h4("Allure"),choiceNames = list("Réfléchi","Bolide"),choiceValues = list(0,1), selected = F)),
    column(1,radioButtons("cabriole_saut",h4("Cabriole"), choiceNames = list("Oui","Non"), choiceValues = list(1,0), selected = F)),
    column(1,radioButtons("gratte_collier", h4("Gratte collier"), choiceNames = list("Oui","Non"), choiceValues = list(1,0), selected = F)),
    column(1,radioButtons("tombe", h4("Tombe"), choiceNames = list("Oui","Non"), choiceValues = list(1,0), selected = F)),
    column(1,radioButtons("cri",h4("Cri"),choiceNames = list("Oui","Non"),choiceValues = list(1,0), selected = F)),
    column(1,radioButtons("titube",h4("Titube"),choiceNames = list("Oui","Non"),choiceValues = list(1,0), selected = F)),
    column(1,radioButtons("couche",h4("Couché"), choiceNames = list("Oui","Non"), choiceValues = list(1,0), selected = character(0))),
    column(12,hr()),
    column(2,selectizeInput("visibilite", h4("Visibilité fuite"),
    choices = list("0-10","11-50","51-100",">100","Nuit"), options=list(placeholder='Choisir une valeur :', onInitialize = I('function() { this.setValue(""); }')), selected = NULL)),
    column(2,selectizeInput("habitat", h4("Habitat lâcher"),
    choices = dbGetQuery(con,"select distinct (t_capture_cpt.cpt_lache_habitat_lache) from cmpt.t_capture_cpt"), options=list(placeholder='Choisir une valeur :', onInitialize = I('function() { this.setValue(""); }')), selected = NULL)),
    column(2, selectizeInput("habitat_perte", h4("Habitat perte de vue"),
    choices = dbGetQuery(con,"select distinct (t_capture_cpt.cpt_lache_habitat_pertevue) from cmpt.t_capture_cpt"),options=list(placeholder='Choisir une valeur :', onInitialize = I('function() { this.setValue(""); }')), selected = NULL)),
    column(2,selectizeInput("Notation_euro", h4("Notation Eurodeer"),
    choices = dbGetQuery(con,"select (ecl_comportement_lache) from lu_tables.tr_eurodeer_comp_lache_ecl"),options=list(placeholder='Choisir une valeur :', onInitialize = I('function() { this.setValue(""); }')), selected = NULL))
  ))
################## CHECKLIST 2 FORM :
# UI tab "checklist 2": read-only recap of the release-behaviour inputs,
# refreshed by the "Checklist" button (checklist_2 observers in the server;
# the same click also triggers a completeness shinyalert).
contentcheck2 = fluidPage(fluidRow(
  tabPanel("Checklist 2", DT::dataTableOutput("tablechecklist2")),
  column(12,useShinyalert(),
         actionButton("checklist_2", "Checklist",icon('eye'),width='25%'))))
################## CAPTURE BEHAVIOUR FORM :
# UI tab "Comportement capture": behaviour in the net (time at net, handlers,
# gait/struggle/pant/call flags) plus free-text remarks.
# NOTE(review): inputId "nSabot" duplicates the numericInput of the same id
# in contentcaractanimal, and inputId "Remarques" is reused in contentsabot;
# duplicate Shiny input IDs are invalid and break input binding — confirm
# and rename.
contentcapture = fluidPage(
  #titlePanel("Comportement Capture"),
  fluidRow(
    column(2,dateInput('date',label=h4("Date"),value = Sys.Date())),
    column(2,selectizeInput("nSabot",label = h4("N° Sabot"), choices = dbGetQuery(con,"select distinct cap_num_sabot FROM public.t_capture_cap"),options=list(placeholder='Choisir une valeur :', onInitialize = I('function() { this.setValue(""); }')), selected = NULL)),
    column(2,timeInput("cpt_heure_debut_filet",h4("Heure arrivée filet"),seconds = FALSE)),
    #column(3,
    #selectInput("N° sabot", h4("N°sabot"), choices = list("Velours", "tombés", "durs"), selected = 1),
    #numericInput(inputId = "nSabot", value = " ",label = h4("N° Sabot"), min=1, max=28)
    #), # faut trouver un moyen de récuperer dans la table les différents N°sabot enregistrer dans la rubrique caractéristique de l'animal
    column(12,hr()),
    column(2,timeInput("cpt_temps_filet", h4("Temps passé filet"),seconds = FALSE)),
    column(2,textInput("nom_capteur_txt",label=h4("Nom des capteurs",""))),
    column(2,selectInput("Nbre_pers_experimentes",h4("Nombre de capteurs expérimentés"),choices = list("1"=1,"2"=2,"3"=3,"4"=4,"5"=5),selected = 1)),
    column(12,hr()),
    column(1,radioButtons("cpt_filet_vitesse",h4("Vitesse"),choiceNames = list("Pas","Course"),choiceValues = list(0,1), selected = F)),
    column(1,radioButtons("cpt_filet_allure",h4("Allure"),choiceNames = list("Réfléchi","Bolide"),choiceValues = list(0,1), selected = F)),
    column(1,radioButtons("cpt_filet_lutte", h4("Lutte"), choiceNames = list("Oui","Non"), choiceValues = list(1,0), selected = character(0))),
    column(1,radioButtons("cpt_filet_halete",h4("Halete"), choiceNames = list("Oui","Non"), choiceValues = list(1,0), selected = character(0))),
    column(1,radioButtons("cpt_filet_cri",h4("Cri"),choiceNames = list("Oui","Non"),choiceValues = list(1,0), selected =c("None selected" = ""))),
    column(12,hr()),
    column(2,textInput("Remarques",label=h4("Remarques","")))
  ))
################## TRANSPORT-BOX BEHAVIOUR FORM :
# UI tab "Comportement sabot": behaviour in the transport box (time in box,
# end of surveillance, sedative dose from the DB lookup, posture/agitation
# flags) plus observers and remarks.
# NOTE(review): inputId "Remarques" duplicates the one in contentcapture;
# duplicate Shiny input IDs are invalid — confirm and rename.
contentsabot = fluidPage(
  # titlePanel("Comportement sabot"),
  fluidRow(
    # Time put into the box
    column(3, timeInput("cpt_heure_mise_sabot", h4("Heure de mise en sabot:"),seconds = FALSE)),
    # End of surveillance
    column(3,timeInput("cpt_heure_fin_surv", h4("Fin de surveillance"),seconds = FALSE)),
    column(12,hr()),
    # Acepromazine dose: existing doses from the DB, free entry allowed (create = TRUE)
    column(2,selectizeInput("cpt_dose_acepromazine",h4("Acepromazine"), choices = dbGetQuery(con,"select distinct cpt_dose_acepromazine from cmpt.t_capture_cpt order by cpt_dose_acepromazine"),options = (list(create = TRUE,placeholder='Choisir une valeur :', onInitialize = I('function() { this.setValue(""); }'))), selected = NULL)),
    # On its back
    column(1,radioButtons("cpt_sabot_retournement",h4("Sur le dos"),choiceNames = list("Oui","Non"),choiceValues = list(1,0), selected =c("None selected" = ""))),
    # Lying down
    column(1, radioButtons("cpt_sabot_couche",h4("Couché"),choiceNames = list("Oui","Non"),choiceValues = list(1,0), selected =c("None selected" = ""))),
    # Agitated
    column(1, radioButtons("cpt_sabot_agitation",h4("Agité"),choiceNames = list("Oui","Non"),choiceValues = list(1,0), selected =c("None selected" = ""))),
    column(12,hr()),
    # Observers
    column(3,textInput("Observateur",label=h4("Observateurs",""))),
    # Remarks
    column(3,textInput("Remarques",label=h4("Remarque","")))
  )
)
######## TAB ORGANISATION
# Wrap each form defined above in a tabPanel; these objects are assembled
# into the navbarPage below. (Several names — e.g. checklist1, checklist2 —
# are shadowed by local data frames of the same name inside the server
# function; the UI objects here are unaffected.)
caractanimal = tabPanel("Caract. de l'animal",contentcaractanimal)
blessures = tabPanel("Blessures",contentblessures)
prelevement= tabPanel("Prélèvements",contentprelevement)
caractcollier = tabPanel("Caract. du collier",contentcollier)
comportable = tabPanel("Comportement table",contenttable)
historique = tabPanel("Historique captures",contenthistorique)
checklist1 = tabPanel("checklist 1",contentcheck1)
comporlacher = tabPanel("Comportement lâcher",contentlacher)
checklist2 = tabPanel("checklist 2",contentcheck2)
comporcapture = tabPanel("Comportement capture",contentcapture)
comporsabot = tabPanel("Comportement sabot",contentsabot)
################## UI :
##Lumen or cerulean or sandstone
# Top-level UI: a navbarPage with one entry per form. Note each tab is
# double-wrapped (tabPanel around an object that is already a tabPanel) —
# presumably harmless but worth confirming against the rendered navbar.
ui <- shinyUI(navbarPage("Formulaires",
                         #theme=shinytheme("sandstone"),
                         # Application title
                         # titlePanel("Carnet Electronique"),
                         #tabsetPanel(
                         tabPanel ("Animal", caractanimal),
                         tabPanel ("Blessures", blessures),
                         tabPanel ("Prelevement", prelevement),
                         tabPanel ("Collier",caractcollier),
                         tabPanel ("Table",comportable),
                         tabPanel ("historique",historique),
                         tabPanel ("checklist 1",checklist1),
                         tabPanel ( "Lâcher",comporlacher),
                         tabPanel ("Checklist 2",checklist2),
                         tabPanel ("Capture",comporcapture),
                         tabPanel( "Sabot",comporsabot)
                         #tabPanel("Summary", verbatimTextOutput("summary")),
                         #tabPanel("Table", tableOutput("table"))
)
)
################## SERVER :
# Server logic. Shared mutable state (`blessure` and the checklist data
# frames) lives in the function's environment and is updated with `<<-`;
# this is per-process state shared by ALL connected sessions — acceptable
# for a single-user field notebook, but worth keeping in mind.
server <- function(input, output,session) {
  # Derived animal weight = full transport-box weight minus empty-box weight.
  output$value = renderText({input$pSabotPlein-input$pSabotVide})
  # Wound table, grown one row per "Ajouter une blessure" click.
  blessure = data.frame()
  row.names(blessure) = NULL
  output$tableblessure = DT::renderDT(expr = blessure,server = FALSE)
  observe({
    # if(length(input$sexe)>1) {
    #  updateCheckboxGroupInput(session,"sexe", selected= tail(input$sexe,1))
    # }
  })
  # Delete the wound row(s) currently selected in the table.
  sup_Ligne = observeEvent(input$sup_Bles, {
    if (!is.null(input$tableblessure_rows_selected)) {
      blessure <<- blessure[-as.numeric(input$tableblessure_rows_selected),]
      output$tableblessure = DT::renderDT(blessure,server = FALSE)
    }
  }
  )
  # Open the "new location" modal when the user picks "Autre localisation".
  observeEvent (input$blelocalisation_sel, {
    if (!is.null(input$blelocalisation_sel)) {
      if( input$blelocalisation_sel == "Autre localisation"){
        toggleModal(session,"nouvelleLocalization_modal","open")
        # showModal(modalDialog(
        #   title = "Entrer la localisation",
        #  textInput("nouvelle_localisation_txt",""),
        #
        #  easyClose = TRUE
        #
        # ))
      }
    }
  })
  # Append one wound row. For each field, a non-empty free-text input
  # overrides the dropdown; a new location is also appended to the choice
  # list and selected.
  observeEvent(input$ajoutBle, {
    loca = ""
    grav = ""
    trait = ""
    if (input$blelocalisation_txt != "") {
      loca = input$blelocalisation_txt
      x = input$blelocalisation_sel
      #blelocalisationChoices[[length(blelocalisationChoices)-1]] =
      blelocalisationChoices <<- cbind(blelocalisationChoices,input$blelocalisation_txt)
      updateSelectInput(session,"blelocalisation_sel",
                        choices = blelocalisationChoices,
                        selected = input$blelocalisation_txt
      )
    } else {
      loca = input$blelocalisation_sel
    }
    if (input$bleGrav_txt != "") {
      grav = input$bleGrav_txt
    } else {
      grav = input$bleGrav_sel
    }
    if (input$bleTrait_txt != "") {
      trait = input$bleTrait_txt
    } else {
      trait = input$bleTrait_sel
    }
    blessure <<- rbind(blessure,data.frame("Localisation" = c(loca), "Gravité" =c(grav), "Traitement" = c(trait)))
    output$tableblessure = DT::renderDT(blessure,server = FALSE)
  }
  )
  ######## ON-TABLE SECTION
  # "Afficher l'heure": stamp the table-time widget with the current time.
  observeEvent(input$to_current_time_table, {
    updateTimeInput(session, "time_table", value = Sys.time())
  })
  ######### History section :
  # Capture/equipment history for the selected animal, aggregated per
  # capture from the v_aniposi_gpsgsm view.
  output$historique <- DT::renderDataTable({
    outp <- dbGetQuery(con,paste0("select t.ani_etiq as ani, t.ani_sexe as s, t.cap_date as date, t.cap_poids as poids, t.cap_lpa as lpa, t.cap_age_classe as age, t.sit_nom_court as site,
                                  t.teq_nom_court as teq, t.eqa_date_debut as debut, t.eqa_date_fin as fin, t.cap_annee_suivi as an, round(t.temps_suivi/30.43) as mois, count(t.cpos_id) as locs, t.eqt_id_usuel as equip, t.mar_libelle as marque, t.mod_libelle as modele, array_to_string( array_agg( distinct eqc_sen_id), ', ') as capteurs from (SELECT eqc_sen_id, cpos_id, ani_etiq, ani_sexe, cap_date, cap_poids, cap_lpa, cap_age_classe, sit_nom_court,
                                  teq_nom_court, cap_annee_suivi, eqa_date_debut, eqa_date_fin, eqa_date_fin - eqa_date_debut as temps_suivi, eqt_id_usuel, mar_libelle, mod_libelle
                                  FROM public.v_aniposi_gpsgsm, public.t_equipement_conf_eqc ) as t where t.ani_etiq = '",input$ani_etiq,"' group by t.ani_etiq, t.ani_sexe, t.cap_date, t.cap_poids, t.cap_lpa, t.cap_age_classe, t.sit_nom_court,
                                  t.teq_nom_court, t.cap_annee_suivi, t.eqa_date_debut, t.eqa_date_fin, t.temps_suivi, t.eqt_id_usuel, t.mar_libelle, t.mod_libelle order by cap_date"))
    ret <- DT::datatable(outp)
    return(ret)
  })
  ######### CHECKLIST 1 SECTION
  # Rebuild the "animal characteristics" recap table on demand; missing
  # inputs are rendered as the literal string "NULL".
  checklist1 = data.frame()
  row.names(checklist1) = NULL
  output$tablechecklist1 = DT::renderDT(expr = checklist1,server = FALSE)
  observeEvent(input$checklist_1, {
    #cat(file=stderr(), "test", input$nSabot, "\n")
    if (!is.na(input$nSabot)) {
      checklist1 <<- data.frame("nSabot" = input$nSabot)}
    else {checklist1 <<- data.frame("nSabot"= c("NULL"))}
    if (!is.na(input$nAnimal)) {
      checklist1 <<- cbind(checklist1,data.frame("nAnimal" = input$nAnimal))}
    else {checklist1 <<- cbind(checklist1,data.frame("nAnimal"= c("NULL")))}
    if ((input$idSite)!="") {
      checklist1 <<- cbind(checklist1,data.frame("idSite" = input$idSite))}
    else {checklist1 <<- cbind(checklist1,data.frame("idSite"= c("NULL")))}
    if ((input$idRFID)!="") {
      checklist1 <<- cbind(checklist1,data.frame("idRFID" = input$idRFID))}
    else {checklist1 <<- cbind(checklist1,data.frame("idRFID"= c("NULL")))}
    if ((input$idTagOrG)!="") {
      checklist1 <<- cbind(checklist1,data.frame("Tag_gauche" = input$idTagOrG))}
    else {checklist1 <<- cbind(checklist1,data.frame("Tag_gauche"= c("NULL")))}
    if ((input$idTagOrD)!="") {
      checklist1 <<- cbind(checklist1,data.frame("Tag_droit" = input$idTagOrD))}
    else {checklist1 <<- cbind(checklist1,data.frame("Tag_droit"= c("NULL")))}
    if (!is.na(input$lPattArriere)) {
      checklist1 <<- cbind(checklist1,data.frame("lPattArriere" = input$lPattArriere))}
    else {checklist1 <<- cbind(checklist1,data.frame("lPattArriere"= c("NULL")))}
    if ((input$sexe)!="") {
      checklist1 <<- cbind(checklist1,data.frame("sexe" = input$sexe))}
    else {checklist1 <<- cbind(checklist1,data.frame("sexe"= c("NULL")))}
    # Antler fields: males only (sexe == 0).
    if (!is.na(input$lBoisGauche)& (input$sexe==0)) {
      checklist1 <<- cbind(checklist1,data.frame("lBoisGauche" = input$lBoisGauche))}
    else if (is.na(input$lBoisGauche)& (input$sexe==0)) {checklist1 <<- cbind(checklist1,data.frame("lBoisGauche"= c("NULL")))}
    if (!is.na(input$lBoisDroit) & (input$sexe==0)) {
      checklist1 <<- cbind(checklist1,data.frame("lBoisDroit" = input$lBoisDroit))}
    else if (is.na(input$lBoisDroit)& (input$sexe==0)) {checklist1 <<- cbind(checklist1,data.frame("lBoisDroit"= c("NULL")))}
    if (((input$etatBois)!="") &(input$sexe==0)){
      checklist1 <<- cbind(checklist1,data.frame("etatBois" = input$etatBois))}
    # BUG FIX: the else-if previously repeated the != "" test, so the "NULL"
    # branch for an empty etatBois was unreachable; it now mirrors the other
    # fields' empty-value handling.
    else if (((input$etatBois)=="")& (input$sexe==0)) {checklist1 <<- cbind(checklist1,data.frame("etatBois"= c("NULL")))}
    if (!is.na(input$tglucose)) {
      checklist1 <<- cbind(checklist1,data.frame("Glucose" = input$tglucose))}
    else {checklist1 <<- cbind(checklist1,data.frame("Glucose"= c("NULL")))}
    if (!is.na(input$cirCou)) {
      checklist1 <<- cbind(checklist1,data.frame("cirCou" = input$cirCou))}
    else {checklist1 <<- cbind(checklist1,data.frame("cirCou"= c("NULL")))}
    output$tablechecklist1 = DT::renderDT(checklist1,server = FALSE)
  })
  # ON-TABLE CHECKLIST
  checklist_table = data.frame()
  row.names(checklist_table) = NULL
  output$tablechecklist_table = DT::renderDT(expr = checklist_table,server = FALSE)
  observeEvent(input$checklist_tab, {
    #cat(file=stderr(), "test", input$nSabot, "\n")
    if (!is.na(input$ExtTemp)) {
      checklist_table <<- data.frame("ExtTemp" = input$ExtTemp)}
    else {checklist_table <<- data.frame("ExtTemp"= c("NULL"))}
    # BUG FIX: rectTemp previously re-created checklist_table with
    # data.frame(...), silently discarding the ExtTemp column just added;
    # it must be cbind-ed like every subsequent field.
    if (!is.na(input$rectTemp)) {
      checklist_table <<- cbind(checklist_table,data.frame("rectTemp" = input$rectTemp))}
    else {checklist_table <<- cbind(checklist_table,data.frame("rectTemp"= c("NULL")))}
    if (!is.null(input$lutte)) {
      checklist_table <<- cbind(checklist_table,data.frame("lutte" = input$lutte))}
    else {checklist_table <<- cbind(checklist_table,data.frame("lutte"= c("NULL")))}
    if (!is.null(input$halete)) {
      checklist_table <<- cbind(checklist_table,data.frame("halete" = input$halete))}
    else {checklist_table <<- cbind(checklist_table,data.frame("halete"= c("NULL")))}
    if (!is.null(input$cribague)) {
      checklist_table <<- cbind(checklist_table,data.frame("cribague" = input$cribague))}
    else {checklist_table <<- cbind(checklist_table,data.frame("cribague"= c("NULL")))}
    if (!is.null(input$criautre)) {
      checklist_table <<- cbind(checklist_table,data.frame("criautre" = input$criautre))}
    else {checklist_table <<- cbind(checklist_table,data.frame("criautre"= c("NULL")))}
    if ((input$Notation_euro_table)!="") {
      checklist_table <<- cbind(checklist_table,data.frame("Notation_euro_table" = input$Notation_euro_table))}
    else {checklist_table <<- cbind(checklist_table,data.frame("Notation_euro_table"= c("NULL")))}
    output$tablechecklist_table = DT::renderDT(checklist_table,server = FALSE)
  })
  ####### Release-behaviour section :
  # "Afficher l'heure": stamp the release-time widgets with the current time.
  observeEvent(input$to_current_time, {
    updateTimeInput(session, "time", value = Sys.time())
  })
  observeEvent(input$to_current_time2, {
    updateTimeInput(session, "time2", value = Sys.time())
  })
  # Callback handed to shinyalert below. BUG FIX: the original definition was
  # commented out together with the DB-insert code (see the commented block
  # near the end of this function), leaving both callbackR references
  # pointing at an undefined object and erroring at click time. This stub
  # restores the alert flow; the INSERT itself remains disabled.
  modalCallback2 <- function(value) {
    invisible(value)
  }
  # Completeness check of the release-behaviour form: warn on missing data,
  # congratulate otherwise; either way the user may confirm via the callback.
  observeEvent(input$checklist_2, {
    # cat(file=stderr(), "visi", input$titube, "\n")
    if (is.null(input$vitesse) | is.null(input$titube) | is.null(input$couche) | is.null(input$cabriole_saut)
        | is.null(input$cri) | is.null(input$allure) | is.null(input$gratte_collier) | is.null(input$tombe)
        | (input$habitat)=="" | (input$Notation_euro)=="" | (input$habitat_perte)=="" | is.na(input$nbre_stops) | (input$visibilite)=="" | is.na(input$nbre_personnes))
    {shinyalert("STOP!", "Données manquantes", type = "warning",confirmButtonText="Valider quand même", showCancelButton=T,cancelButtonText="Annuler", callbackR = modalCallback2)}
    else
    {shinyalert("Nice!", "Parfait", type = "success",showCancelButton=T, callbackR = modalCallback2)}
  })
  ######### CHECKLIST 2
  # Rebuild the release-behaviour recap table on demand; missing inputs are
  # rendered as the literal string "NULL".
  checklist2 = data.frame()
  row.names(checklist2) = NULL
  output$tablechecklist2 = DT::renderDT(expr = checklist2,server = FALSE)
  observeEvent(input$checklist_2, {
    if (!is.null(input$vitesse)) {
      checklist2 <<- data.frame("Vitesse" = input$vitesse)}
    else {checklist2 <<- data.frame("Vitesse"= c("NULL"))}
    if (!is.null(input$titube)) {
      checklist2 <<- cbind(checklist2,data.frame("titube" = input$titube))}
    else {checklist2 <<- cbind(checklist2,data.frame("titube"= c("NULL")))}
    if (!is.null(input$couche)) {
      checklist2 <<- cbind(checklist2,data.frame("couche" = input$couche))}
    else {checklist2 <<- cbind(checklist2,data.frame("couche"= c("NULL")))}
    if (!is.null(input$cabriole_saut)) {
      checklist2 <<- cbind(checklist2,data.frame("cabriole_saut" = input$cabriole_saut))}
    else {checklist2 <<- cbind(checklist2,data.frame("cabriole_saut"= c("NULL")))}
    if (!is.null(input$cri)) {
      checklist2 <<- cbind(checklist2,data.frame("cri" = input$cri))}
    else {checklist2 <<- cbind(checklist2,data.frame("cri"= c("NULL")))}
    if (!is.null(input$allure)) {
      checklist2 <<- cbind(checklist2,data.frame("allure" = input$allure))}
    else {checklist2 <<- cbind(checklist2,data.frame("allure"= c("NULL")))}
    if (!is.null(input$gratte_collier)) {
      checklist2 <<- cbind(checklist2,data.frame("Gratte_Collier" = input$gratte_collier))}
    else {checklist2 <<- cbind(checklist2,data.frame("Gratte_Collier"= c("NULL")))}
    if (!is.null(input$tombe)) {
      checklist2 <<- cbind(checklist2,data.frame("tombe" = input$tombe))}
    else {checklist2 <<- cbind(checklist2,data.frame("tombe"= c("NULL")))}
    if ((input$habitat)!="") {
      checklist2 <<- cbind(checklist2,data.frame("habitat" = input$habitat))}
    else {checklist2 <<- cbind(checklist2,data.frame("habitat"= c("NULL")))}
    if ((input$Notation_euro)!="") {
      checklist2 <<- cbind(checklist2,data.frame("Eurodeer" = input$Notation_euro))}
    else {checklist2 <<- cbind(checklist2,data.frame("Eurodeer"= c("NULL")))}
    if ((input$habitat_perte)!="") {
      checklist2 <<- cbind(checklist2,data.frame("Habitat" = input$habitat_perte))}
    else {checklist2 <<- cbind(checklist2,data.frame("Habitat"= c("NULL")))}
    if (!is.na(input$nbre_stops)) {
      checklist2 <<- cbind(checklist2,data.frame("Stops" = input$nbre_stops))}
    else {checklist2 <<- cbind(checklist2,data.frame("Stops"= c("NULL")))}
    if ((input$visibilite)!="") {
      checklist2 <<- cbind(checklist2,data.frame("Visibilite" = input$visibilite))}
    else {checklist2 <<- cbind(checklist2,data.frame("Visibilite"= c("NULL")))}
    if (!is.na(input$nbre_personnes)) {
      checklist2 <<- cbind(checklist2,data.frame("Nbre_personnes" = input$nbre_personnes))}
    else {checklist2 <<- cbind(checklist2,data.frame("Nbre_personnes"= c("NULL")))}
    output$tablechecklist2 = DT::renderDT(checklist2,server = FALSE)
  })
  ######## INSERT CHECKLIST VALUES INTO THE DATABASE (disabled)
  # pour obtenir le cpt_id suivant
  #max_value=dbGetQuery(con,paste0('SELECT cpt_id FROM cmpt.t_capture_cpt order by cpt_id desc limit 1'))
  #max_value=as.integer((max_value[1,1])+1)
  #modalCallback2 <- function(value) {
  # if (value == TRUE) {
  #    dbSendQuery(con,sprintf("INSERT INTO cmpt.t_capture_cpt (cpt_id,cpt_ani_etiq, cpt_date,cpt_annee_suivi, cpt_lache_visibilite, cpt_cap_id)
  #VALUES (%s,100,'1961-06-16',1111,'exemple',000)",max_value))
  # }}
# BUG FIX: this closing brace of server() had been commented out along with
# the modalCallback2 block above, which made the whole file unparseable
# ("unexpected end of input"). It is restored here.
}
################## LAUNCH THE APPLICATION :
#dbDisconnect(con)
shinyApp(ui = ui, server = server)
| /app_220418.R | no_license | Liinwe/Electronic-notebook | R | false | false | 30,055 | r | ################# CHARGEMENT DES LIBRAIRIES
library(shiny)
library(shinythemes)
library(DT)
library(dplyr)
library(shinyBS)
library(shinyTime)
library(RPostgreSQL)
library(shinyalert)
############### CHARGEMENT DE LA BASE DE DONNEES
# Open the PostgreSQL connection used by every dbGetQuery() call below
# (UI choice lists and the capture-history table).
# NOTE(review): dbname/user/password are blank here — presumably filled in
# before deployment; confirm credentials are injected securely rather than
# hard-coded.
con<- dbConnect(PostgreSQL(), host="pggeodb-preprod.nancy.inra.fr", dbname="", user="", password="")
############### CHOICE LISTS
# Option lists for the selectInput widgets. They are extended at runtime when
# the user picks the "other" option (see the ajoutBle observer in the server).
# NOTE: some labels deliberately carry leading spaces (" plaie_fermée",
# " pelade", " 1") — preserved byte-for-byte.
bleGravChoices <- list(
  "superficielle", "légère", "profonde",
  "fracture", "fracture _consolidée",
  " plaie_fermée", " pelade"
)
bleTraitChoices <- list(
  "allumisol", "serflex_allumisol",
  "points", "euthanasie", "rien"
)
blelocalisationChoices <- list(
  " 1" = 1, "2" = 2, "3" = 3,
  "Autre localisation"
)
################## FORMULAIRE CARACTERISTIQUES DE L'ANIMAL
# "Animal characteristics" tab: sabot number/weights, identity (tags, RFID,
# site), biometrics, plus antler fields shown only for males
# (JS condition input.sexe == 0).
# NOTE(review): inputId "nSabot" is re-used by a selectizeInput in the capture
# form further down — Shiny input IDs must be unique across the UI; confirm
# and rename one of them.
# NOTE(review): numericInput value = " " (a space string) looks unintended;
# presumably NA was meant — verify.
contentcaractanimal = fluidPage(
#titlePanel("Caract. de l'animal"),
fluidRow(
column(2, numericInput(inputId = "nSabot", value = " ",label = h4("N° Sabot"), min=1, max=28)),
column(2, numericInput(inputId = "pSabotPlein", value = " ",label = h4("Poids Sabot Plein"),min=0,max=70 )),
column(2, numericInput(inputId = "pSabotVide", value = " ",label = h4("Poids Sabot Vide"),min=0,max=60 )),
# Animal weight is computed server-side (output$value = full - empty sabot).
column(2, h4("Poids Animal"),textOutput("value")),
column(12,hr()),
column(2, checkboxInput(inputId = "estNouvelAnimal", value = T,label = h4("Nouvel Animal"))),
column(2, numericInput(inputId = "nAnimal", value = " ",label = h4("N° Animal"),min=0 )),
# Site list is populated from the database at UI build time.
column(2, selectizeInput("idSite", h4("Site"),
choices = dbGetQuery(con,"select (sit_nom_court) from public.tr_site_capture_sit"),options=list(placeholder='Choisir une valeur :', onInitialize = I('function() { this.setValue(""); }')), selected = NULL)),
column(12),
column(2, timeInput("heureDebut",h4("Heure début"),seconds = FALSE)),
column(2, selectInput("idRFID", h4("Rfid"),
choices = list("1" = 1, "2" = 2,"3" = 3), selected = 1)),
column(2, selectInput("idTagOrG", h4("Tag Oreille Gauche"),
choices = list(" 1" = 1, "2" = 2,"3" = 3), selected = 1) ),
column(2, selectInput("idTagOrD", h4("Tag Oreille Droite"),
choices = list("Site 1" = 1, "Site 2" = 2,"Site 3" = 3), selected = 1)),
column(12,hr()),
column(2, dateInput('dateCapture',label=h4("Date"),value = Sys.Date())),
# Sex encoding used throughout the app: 0 = male, 1 = female.
column(2, radioButtons("sexe",h4("Sexe"),choiceNames = list("M","F"),choiceValues = list(0,1)))
),
hr(),
fluidRow(
column(2, numericInput("cirCou", value=" ", h4("Circonférence cou"),min=0)),
column(2, numericInput("lPattArriere", value=" ", h4("Longueur de la patte arrière"),min=0)),
column(2, numericInput("tglucose", value="", h4("Taux de Glucose sanguin"), min=0))
),
# Antler measurements: only rendered for males.
conditionalPanel(
condition = "input.sexe == 0",
fluidRow(
column(2, numericInput("lBoisGauche", value=" ", h4("Longueur bois gauche"),min=0)),
column(2, numericInput("lBoisDroit", value=" ", h4("Longueur bois droit"),min=0)),
column(2, selectInput("etatBois", h4("État bois"), choices = list("Velours", "tombés", "durs"), selected = 1))
)
)
)
################## FORMULAIRE BLESSURES
# "Injuries" tab: location/severity/treatment selectors, each with a free-text
# override field, an "add injury" button feeding the server-side `blessure`
# table, and a delete button for the selected row.
contentblessures = fluidPage(
# titlePanel("Blessures"),
fluidRow(
column(3,
selectInput("blelocalisation_sel", h4("Localisation"),
choices = blelocalisationChoices, selected = 1),
# Modal opened by the server when "Autre localisation" is selected.
bsModal("nouvelleLocalization_modal", "Entrer la localisation","", size = "large",wellPanel(
textInput("nouvelle_localisation_txt",""),
actionButton("ok_button", "OK"),
actionButton("quit_button", "Quitter")
)),
textInput("blelocalisation_txt","")
),
column(3, selectInput("bleGrav_sel", h4("Gravité"), choices = bleGravChoices, selected = "superficielle"),
textInput("bleGrav_txt","") ),
column(3, selectInput("bleTrait_sel", h4("Traitement"), choices = bleTraitChoices, selected = 1),
textInput("bleTrait_txt","")),
column(3, actionButton("ajoutBle","Ajouter une blessure"))
),
hr(),
fluidRow(
sidebarLayout(
mainPanel(
dataTableOutput("tableblessure")),
sidebarPanel(actionButton("sup_Bles", "Supprimer blessure"))
)
))
################## FORMULAIRE PRELEVEMENTS
# Placeholder tab: the sampling ("prélèvements") form has not been built yet.
# Idiom fix: top-level assignment with `<-` rather than `=`.
contentprelevement <- fluidPage()
################## FORMULAIRE COLLIER
# "Collar" tab: a checkbox flagging a new collar fitting and a confirmation
# button ("ajoutColl"). No server handler for "ajoutColl" is visible in this
# file — presumably still to be wired; verify.
contentcollier = fluidPage(
#titlePanel("Caracteristique du collier"),
fluidRow(
#titlePanel("Pose de collier"),
column(3, checkboxInput(inputId = "new_collier", value = F,label = h4("Nouveau collier"))),
column(3, actionButton("ajoutColl","Confirmer la nouvelle pose"))
))
################## FORMULAIRE COMPORTEMENT TABLE
# "Behaviour on the table" tab: timestamp, rectal/outside temperature, and
# behaviour radio buttons summarised by the checklist_tab observer.
# NOTE(review): `selected = F` vs `selected = character(0)` are mixed here to
# start with no selection; standardise on character(0) — confirm intent.
contenttable = fluidPage(
#titlePanel("Comportement sur table"),
fluidRow(
column(2,timeInput("time_table", h4("Heure:"),seconds = FALSE),
actionButton("to_current_time_table", "Afficher l'heure")),
column(3,numericInput("rectTemp", value=" ", h4("Température rectale"),step = 1)),
column(3,numericInput("ExtTemp", value=" ", h4("Température extérieure"),step = 1)),
column(12,hr()),
column(2,radioButtons("lutte",h4("Lutte"),choiceNames = list("Oui","Non"),choiceValues = list(T,F), selected = character(0))),
column(2,radioButtons("halete",h4("Halete"),choiceNames = list("Oui","Non"),choiceValues = list(T,F), selected =character(0))),
column(2,radioButtons("cribague",h4("Cri Bague"), choices = list(NA,"0", "1-2", ">2"))),
column(2,radioButtons("criautre", h4("Cri Autre"), choices = list("0", "1-2", ">2"), selected = F)),
column(12,hr()),
# Eurodeer behaviour codes pulled from the lookup table at UI build time.
column(2,selectizeInput("Notation_euro_table", h4("Notation Eurodeer"),
choices = dbGetQuery(con,"select (ect_comportement) from lu_tables.tr_eurodeer_comp_table_ect"),options=list(placeholder='Choisir une valeur :', onInitialize = I('function() { this.setValue(""); }')), selected = NULL))
))
################## FORMULAIRE HISTORIQUE :
# "Capture history" tab: pick an existing animal tag (recapture) or enter a
# new one, then display its history in output$historique.
# NOTE(review): `selected` and `offset` appear outside the selectInput() call
# parentheses (they are passed to column()/fluidRow() instead) — likely a
# misplaced bracket; confirm.
contenthistorique = fluidPage(
#titlePanel("Historique"),
fluidRow(
tabPanel("Caractéristiques de l'animal",
checkboxInput("recap","recapture ?", 1),
conditionalPanel(
condition = "input.recap == 1",
fluidRow(column(width= 2, selectInput(inputId = "ani_etiq", label = h4("N°Animal"),
choices = dbGetQuery(con,"Select ani_etiq from public.t_animal_ani order by ani_etiq")), selected = NULL, offset= 0.5))),
conditionalPanel(
condition = "input.recap == 0",
fluidRow(column(width= 2, numericInput("new_ani_etiq",value="" ,h4("N°Animal"))))),
tabPanel("Historique de capture", DT::dataTableOutput("historique"))
)))
################## FORMULAIRE CHECKLIST 1 :
# "Checklist 1" tab: recap tables for the animal-characteristics form and the
# table-behaviour form, each refreshed by its own "Checklist" button.
contentcheck1 = fluidPage(fluidRow(
titlePanel("Checklist - Caractéristiques"),
tabPanel("Cheklist 1", DT::dataTableOutput("tablechecklist1")),
column(12,useShinyalert(),
actionButton("checklist_1", "Checklist",icon('eye'),width='25%')),
# titlePanel("Checklist - Prelevements"),
column(12,hr()),
#conditionalPanel(
# condition = "input.new_collier == 1",
# fluidRow(titlePanel("Checklist - Collier"))) ,
titlePanel("Checklist - Table"),
tabPanel("Checklist Table",DT::dataTableOutput("tablechecklist_table")),
column(12,useShinyalert(),
actionButton("checklist_tab", "Checklist",icon('eye'),width='25%'))
))
################## FORMULAIRE COMPORTEMENT AU LACHER :
# "Release behaviour" tab: release times, counts, behaviour radio buttons and
# habitat/visibility selectors; summarised by the checklist_2 observer.
###submitButton(format(Sys.time(), "%X"))
#timeInput("time2", "Heure lâcher:", value = Sys.time(),seconds = FALSE))
contentlacher = fluidPage(
# titlePanel("Comportement au lâcher"),
fluidRow(
column(2,timeInput("time", h4("Heure de lâcher:"),seconds = FALSE),
actionButton("to_current_time", "Afficher l'heure")),
column(2, timeInput("time2", h4("Heure de 2nd lâcher:"),seconds = FALSE),
actionButton("to_current_time2", "Afficher l'heure")),
column(1,numericInput("nbre_stops",value=NULL, h4("Nombre de stops"),min=0)),
column(1,numericInput("nbre_personnes", value=NULL, h4("Nbre de personnes"),min=1)),
column(12,hr()),
# Behaviour flags; values are 0/1 codes stored as-is in the checklist.
column(1,radioButtons("vitesse",h4("Vitesse"),choiceNames = list("Pas","Course"),choiceValues = list(0,1), selected = F)),
column(1,radioButtons("allure",h4("Allure"),choiceNames = list("Réfléchi","Bolide"),choiceValues = list(0,1), selected = F)),
column(1,radioButtons("cabriole_saut",h4("Cabriole"), choiceNames = list("Oui","Non"), choiceValues = list(1,0), selected = F)),
column(1,radioButtons("gratte_collier", h4("Gratte collier"), choiceNames = list("Oui","Non"), choiceValues = list(1,0), selected = F)),
column(1,radioButtons("tombe", h4("Tombe"), choiceNames = list("Oui","Non"), choiceValues = list(1,0), selected = F)),
column(1,radioButtons("cri",h4("Cri"),choiceNames = list("Oui","Non"),choiceValues = list(1,0), selected = F)),
column(1,radioButtons("titube",h4("Titube"),choiceNames = list("Oui","Non"),choiceValues = list(1,0), selected = F)),
column(1,radioButtons("couche",h4("Couché"), choiceNames = list("Oui","Non"), choiceValues = list(1,0), selected = character(0))),
column(12,hr()),
column(2,selectizeInput("visibilite", h4("Visibilité fuite"),
choices = list("0-10","11-50","51-100",">100","Nuit"), options=list(placeholder='Choisir une valeur :', onInitialize = I('function() { this.setValue(""); }')), selected = NULL)),
# Habitat / Eurodeer code lists pulled from the database at UI build time.
column(2,selectizeInput("habitat", h4("Habitat lâcher"),
choices = dbGetQuery(con,"select distinct (t_capture_cpt.cpt_lache_habitat_lache) from cmpt.t_capture_cpt"), options=list(placeholder='Choisir une valeur :', onInitialize = I('function() { this.setValue(""); }')), selected = NULL)),
column(2, selectizeInput("habitat_perte", h4("Habitat perte de vue"),
choices = dbGetQuery(con,"select distinct (t_capture_cpt.cpt_lache_habitat_pertevue) from cmpt.t_capture_cpt"),options=list(placeholder='Choisir une valeur :', onInitialize = I('function() { this.setValue(""); }')), selected = NULL)),
column(2,selectizeInput("Notation_euro", h4("Notation Eurodeer"),
choices = dbGetQuery(con,"select (ecl_comportement_lache) from lu_tables.tr_eurodeer_comp_lache_ecl"),options=list(placeholder='Choisir une valeur :', onInitialize = I('function() { this.setValue(""); }')), selected = NULL))
))
################## FORMULAIRE CHECKLIST 2 :
# "Checklist 2" tab: recap table for the release-behaviour form, refreshed by
# the "checklist_2" button (also triggers the completeness shinyalert).
contentcheck2 = fluidPage(fluidRow(
tabPanel("Checklist 2", DT::dataTableOutput("tablechecklist2")),
column(12,useShinyalert(),
actionButton("checklist_2", "Checklist",icon('eye'),width='25%'))))
################## FORMULAIRE COMPORTEMENT CAPTURE :
# "Capture behaviour" tab: date, sabot number, time at net and behaviour flags
# observed during capture.
# NOTE(review): inputId "nSabot" duplicates the numericInput of the same name
# in the animal-characteristics form, and "Remarques" duplicates the textInput
# in the sabot form — Shiny input IDs must be unique; rename these.
contentcapture = fluidPage(
#titlePanel("Comportement Capture"),
fluidRow(
column(2,dateInput('date',label=h4("Date"),value = Sys.Date())),
column(2,selectizeInput("nSabot",label = h4("N° Sabot"), choices = dbGetQuery(con,"select distinct cap_num_sabot FROM public.t_capture_cap"),options=list(placeholder='Choisir une valeur :', onInitialize = I('function() { this.setValue(""); }')), selected = NULL)),
column(2,timeInput("cpt_heure_debut_filet",h4("Heure arrivée filet"),seconds = FALSE)),
#column(3,
#selectInput("N° sabot", h4("N°sabot"), choices = list("Velours", "tombés", "durs"), selected = 1),
#numericInput(inputId = "nSabot", value = " ",label = h4("N° Sabot"), min=1, max=28)
#), # faut trouver un moyen de récuperer dans la table les différents N°sabot enregistrer dans la rubrique caractéristique de l'animal
column(12,hr()),
column(2,timeInput("cpt_temps_filet", h4("Temps passé filet"),seconds = FALSE)),
column(2,textInput("nom_capteur_txt",label=h4("Nom des capteurs",""))),
column(2,selectInput("Nbre_pers_experimentes",h4("Nombre de capteurs expérimentés"),choices = list("1"=1,"2"=2,"3"=3,"4"=4,"5"=5),selected = 1)),
column(12,hr()),
column(1,radioButtons("cpt_filet_vitesse",h4("Vitesse"),choiceNames = list("Pas","Course"),choiceValues = list(0,1), selected = F)),
column(1,radioButtons("cpt_filet_allure",h4("Allure"),choiceNames = list("Réfléchi","Bolide"),choiceValues = list(0,1), selected = F)),
column(1,radioButtons("cpt_filet_lutte", h4("Lutte"), choiceNames = list("Oui","Non"), choiceValues = list(1,0), selected = character(0))),
column(1,radioButtons("cpt_filet_halete",h4("Halete"), choiceNames = list("Oui","Non"), choiceValues = list(1,0), selected = character(0))),
column(1,radioButtons("cpt_filet_cri",h4("Cri"),choiceNames = list("Oui","Non"),choiceValues = list(1,0), selected =c("None selected" = ""))),
column(12,hr()),
column(2,textInput("Remarques",label=h4("Remarques","")))
))
################## FORMULAIRE COMPORTEMENT SABOT :
# "Behaviour in the sabot (restraint box)" tab: timing, sedative dose and
# behaviour flags while the animal is boxed.
# NOTE(review): inputId "Remarques" duplicates the textInput of the same name
# in the capture form — Shiny input IDs must be unique; rename one.
contentsabot = fluidPage(
# titlePanel("Comportement sabot"),
fluidRow(
#Time of placing in the sabot
column(3, timeInput("cpt_heure_mise_sabot", h4("Heure de mise en sabot:"),seconds = FALSE)),
#End of surveillance
column(3,timeInput("cpt_heure_fin_surv", h4("Fin de surveillance"),seconds = FALSE)),
column(12,hr()),
#Acepromazine dose; `create = TRUE` lets the user type a new value.
column(2,selectizeInput("cpt_dose_acepromazine",h4("Acepromazine"), choices = dbGetQuery(con,"select distinct cpt_dose_acepromazine from cmpt.t_capture_cpt order by cpt_dose_acepromazine"),options = (list(create = TRUE,placeholder='Choisir une valeur :', onInitialize = I('function() { this.setValue(""); }'))), selected = NULL)),
#On its back
column(1,radioButtons("cpt_sabot_retournement",h4("Sur le dos"),choiceNames = list("Oui","Non"),choiceValues = list(1,0), selected =c("None selected" = ""))),
#Lying down
column(1, radioButtons("cpt_sabot_couche",h4("Couché"),choiceNames = list("Oui","Non"),choiceValues = list(1,0), selected =c("None selected" = ""))),
#Agitated
column(1, radioButtons("cpt_sabot_agitation",h4("Agité"),choiceNames = list("Oui","Non"),choiceValues = list(1,0), selected =c("None selected" = ""))),
column(12,hr()),
#Observers
column(3,textInput("Observateur",label=h4("Observateurs",""))),
#Remarks
column(3,textInput("Remarques",label=h4("Remarque","")))
)
)
######## SECTION LAYOUT
# Wrap each form page in a tabPanel so the navbarPage below can display it.
caractanimal  <- tabPanel("Caract. de l'animal", contentcaractanimal)
blessures     <- tabPanel("Blessures", contentblessures)
prelevement   <- tabPanel("Prélèvements", contentprelevement)
caractcollier <- tabPanel("Caract. du collier", contentcollier)
comportable   <- tabPanel("Comportement table", contenttable)
historique    <- tabPanel("Historique captures", contenthistorique)
checklist1    <- tabPanel("checklist 1", contentcheck1)
comporlacher  <- tabPanel("Comportement lâcher", contentlacher)
checklist2    <- tabPanel("checklist 2", contentcheck2)
comporcapture <- tabPanel("Comportement capture", contentcapture)
comporsabot   <- tabPanel("Comportement sabot", contentsabot)
################## UI :
##Lumen or cerulean or sandstone
# Top-level navigation bar holding one tab per form.
# NOTE(review): each `content*` object was already wrapped in a tabPanel in the
# section-layout block, and is wrapped in a second tabPanel here — nested
# tabPanels are redundant; confirm and drop one level.
ui <- shinyUI(navbarPage("Formulaires",
#theme=shinytheme("sandstone"),
# Application title
# titlePanel("Carnet Electronique"),
#tabsetPanel(
tabPanel ("Animal", caractanimal),
tabPanel ("Blessures", blessures),
tabPanel ("Prelevement", prelevement),
tabPanel ("Collier",caractcollier),
tabPanel ("Table",comportable),
tabPanel ("historique",historique),
tabPanel ("checklist 1",checklist1),
tabPanel ( "Lâcher",comporlacher),
tabPanel ("Checklist 2",checklist2),
tabPanel ("Capture",comporcapture),
tabPanel( "Sabot",comporsabot)
#tabPanel("Summary", verbatimTextOutput("summary")),
#tabPanel("Table", tableOutput("table"))
)
)
################## SERVER :
# Server logic. Shared mutable state (the `blessure` and `checklist*` data
# frames) is kept in the server's enclosing environment and updated with `<<-`
# from the observers below.
server <- function(input, output,session) {
# Computed animal weight = full sabot weight - empty sabot weight.
output$value = renderText({input$pSabotPlein-input$pSabotVide})
# Injuries table, appended to by the ajoutBle observer.
blessure = data.frame()
row.names(blessure) = NULL
output$tableblessure = DT::renderDT(expr = blessure,server = F)
observe({
# if(length(input$sexe)>1) {
# updateCheckboxGroupInput(session,"sexe", selected= tail(input$sexe,1))
# }
})
# Delete the rows currently selected in the injuries table.
sup_Ligne = observeEvent(input$sup_Bles, {
if (!is.null(input$tableblessure_rows_selected)) {
blessure <<- blessure[-as.numeric(input$tableblessure_rows_selected),]
output$tableblessure = DT::renderDT(blessure,server = F)
}
}
)
# Open the "new localisation" modal when the user picks "Autre localisation".
observeEvent (input$blelocalisation_sel, {
if (!is.null(input$blelocalisation_sel)) {
if( input$blelocalisation_sel == "Autre localisation"){
toggleModal(session,"nouvelleLocalization_modal","open")
# showModal(modalDialog(
# title = "Entrer la localisation",
# textInput("nouvelle_localisation_txt",""),
#
# easyClose = TRUE
#
# ))
}
}
})
# Append one injury row; a non-empty free-text field overrides the selector,
# and a new localisation is also added to the global choice list.
observeEvent(input$ajoutBle, {
loca = ""
grav = ""
trait = ""
if (input$blelocalisation_txt != "") {
loca = input$blelocalisation_txt
x = input$blelocalisation_sel
#blelocalisationChoices[[length(blelocalisationChoices)-1]] =
# NOTE(review): cbind() on a list produces a matrix-like object here;
# append with c() or [[<-]] would be the conventional form — confirm.
blelocalisationChoices <<- cbind(blelocalisationChoices,input$blelocalisation_txt)
updateSelectInput(session,"blelocalisation_sel",
choices = blelocalisationChoices,
selected = input$blelocalisation_txt
)
} else {
loca = input$blelocalisation_sel
}
if (input$bleGrav_txt != "") {
grav = input$bleGrav_txt
} else {
grav = input$bleGrav_sel
}
if (input$bleTrait_txt != "") {
trait = input$bleTrait_txt
} else {
trait = input$bleTrait_sel
}
blessure <<- rbind(blessure,data.frame("Localisation" = c(loca), "Gravité" =c(grav), "Traitement" = c(trait)))
output$tableblessure = DT::renderDT(blessure,server = F)
}
)
######## PARTIE TABLE
# "Show current time" button for the table form.
observeEvent(input$to_current_time_table, {
updateTimeInput(session, "time_table", value = Sys.time())
})
######### Partie historique :
# Capture history for the selected animal tag, aggregated per capture/equipment.
# SECURITY NOTE(review): input$ani_etiq is concatenated directly into the SQL
# string — SQL injection risk; use a parameterized query (dbBind/sqlInterpolate).
output$historique <- DT::renderDataTable({
outp <- dbGetQuery(con,paste0("select t.ani_etiq as ani, t.ani_sexe as s, t.cap_date as date, t.cap_poids as poids, t.cap_lpa as lpa, t.cap_age_classe as age, t.sit_nom_court as site,
t.teq_nom_court as teq, t.eqa_date_debut as debut, t.eqa_date_fin as fin, t.cap_annee_suivi as an, round(t.temps_suivi/30.43) as mois, count(t.cpos_id) as locs, t.eqt_id_usuel as equip, t.mar_libelle as marque, t.mod_libelle as modele, array_to_string( array_agg( distinct eqc_sen_id), ', ') as capteurs from (SELECT eqc_sen_id, cpos_id, ani_etiq, ani_sexe, cap_date, cap_poids, cap_lpa, cap_age_classe, sit_nom_court,
teq_nom_court, cap_annee_suivi, eqa_date_debut, eqa_date_fin, eqa_date_fin - eqa_date_debut as temps_suivi, eqt_id_usuel, mar_libelle, mod_libelle
FROM public.v_aniposi_gpsgsm, public.t_equipement_conf_eqc ) as t where t.ani_etiq = '",input$ani_etiq,"' group by t.ani_etiq, t.ani_sexe, t.cap_date, t.cap_poids, t.cap_lpa, t.cap_age_classe, t.sit_nom_court,
t.teq_nom_court, t.cap_annee_suivi, t.eqa_date_debut, t.eqa_date_fin, t.temps_suivi, t.eqt_id_usuel, t.mar_libelle, t.mod_libelle order by cap_date"))
ret <- DT::datatable(outp)
return(ret)
})
######### PARTIE CHECKLIST 1
# Recap table for the "animal characteristics" form: one row whose columns
# echo each input; blank/missing values are shown as the string "NULL".
# The first input re-creates the data frame, every later one is cbind-ed on.
checklist1 = data.frame()
row.names(checklist1) = NULL
output$tablechecklist1 = DT::renderDT(expr = checklist1,server = F)
observeEvent(input$checklist_1, {
#cat(file=stderr(), "test", input$nSabot, "\n")
if (!is.na(input$nSabot)) {
checklist1 <<- data.frame("nSabot" = input$nSabot)}
else {checklist1 <<- data.frame("nSabot"= c("NULL"))}
if (!is.na(input$nAnimal)) {
checklist1 <<- cbind(checklist1,data.frame("nAnimal" = input$nAnimal))}
else {checklist1 <<- cbind(checklist1,data.frame("nAnimal"= c("NULL")))}
if ((input$idSite)!="") {
checklist1 <<- cbind(checklist1,data.frame("idSite" = input$idSite))}
else {checklist1 <<- cbind(checklist1,data.frame("idSite"= c("NULL")))}
if ((input$idRFID)!="") {
checklist1 <<- cbind(checklist1,data.frame("idRFID" = input$idRFID))}
else {checklist1 <<- cbind(checklist1,data.frame("idRFID"= c("NULL")))}
if ((input$idTagOrG)!="") {
checklist1 <<- cbind(checklist1,data.frame("Tag_gauche" = input$idTagOrG))}
else {checklist1 <<- cbind(checklist1,data.frame("Tag_gauche"= c("NULL")))}
if ((input$idTagOrD)!="") {
checklist1 <<- cbind(checklist1,data.frame("Tag_droit" = input$idTagOrD))}
else {checklist1 <<- cbind(checklist1,data.frame("Tag_droit"= c("NULL")))}
if (!is.na(input$lPattArriere)) {
checklist1 <<- cbind(checklist1,data.frame("lPattArriere" = input$lPattArriere))}
else {checklist1 <<- cbind(checklist1,data.frame("lPattArriere"= c("NULL")))}
if ((input$sexe)!="") {
checklist1 <<- cbind(checklist1,data.frame("sexe" = input$sexe))}
else {checklist1 <<- cbind(checklist1,data.frame("sexe"= c("NULL")))}
# Antler columns only apply to males (sexe == 0).
if (!is.na(input$lBoisGauche)& (input$sexe==0)) {
checklist1 <<- cbind(checklist1,data.frame("lBoisGauche" = input$lBoisGauche))}
else if (is.na(input$lBoisGauche)& (input$sexe==0)) {checklist1 <<- cbind(checklist1,data.frame("lBoisGauche"= c("NULL")))}
if (!is.na(input$lBoisDroit) & (input$sexe==0)) {
checklist1 <<- cbind(checklist1,data.frame("lBoisDroit" = input$lBoisDroit))}
else if (is.na(input$lBoisDroit)& (input$sexe==0)) {checklist1 <<- cbind(checklist1,data.frame("lBoisDroit"= c("NULL")))}
if (((input$etatBois)!="") &(input$sexe==0)){
checklist1 <<- cbind(checklist1,data.frame("etatBois" = input$etatBois))}
# BUG FIX: this fallback previously re-tested `!= ""` (duplicating the branch
# above), making the "NULL" placeholder unreachable; it now tests `== ""`,
# mirroring the lBoisGauche/lBoisDroit pattern.
else if (((input$etatBois)=="")& (input$sexe==0)) {checklist1 <<- cbind(checklist1,data.frame("etatBois"= c("NULL")))}
if (!is.na(input$tglucose)) {
checklist1 <<- cbind(checklist1,data.frame("Glucose" = input$tglucose))}
else {checklist1 <<- cbind(checklist1,data.frame("Glucose"= c("NULL")))}
if (!is.na(input$cirCou)) {
checklist1 <<- cbind(checklist1,data.frame("cirCou" = input$cirCou))}
else {checklist1 <<- cbind(checklist1,data.frame("cirCou"= c("NULL")))}
output$tablechecklist1 = DT::renderDT(checklist1,server = F)
})
# CHECKLIST TABLE
# Recap table for the "table behaviour" form, built like checklist1: first
# input creates the data frame, later ones are cbind-ed on; blanks -> "NULL".
checklist_table = data.frame()
row.names(checklist_table) = NULL
output$tablechecklist_table = DT::renderDT(expr = checklist_table,server = F)
observeEvent(input$checklist_tab, {
#cat(file=stderr(), "test", input$nSabot, "\n")
if (!is.na(input$ExtTemp)) {
checklist_table <<- data.frame("ExtTemp" = input$ExtTemp)}
else {checklist_table <<- data.frame("ExtTemp"= c("NULL"))}
# BUG FIX: rectTemp previously re-created the data frame with data.frame(...)
# instead of cbind-ing, silently discarding the ExtTemp column just built.
if (!is.na(input$rectTemp)) {
checklist_table <<- cbind(checklist_table,data.frame("rectTemp" = input$rectTemp))}
else {checklist_table <<- cbind(checklist_table,data.frame("rectTemp"= c("NULL")))}
if (!is.null(input$lutte)) {
checklist_table <<- cbind(checklist_table,data.frame("lutte" = input$lutte))}
else {checklist_table <<- cbind(checklist_table,data.frame("lutte"= c("NULL")))}
if (!is.null(input$halete)) {
checklist_table <<- cbind(checklist_table,data.frame("halete" = input$halete))}
else {checklist_table <<- cbind(checklist_table,data.frame("halete"= c("NULL")))}
if (!is.null(input$cribague)) {
checklist_table <<- cbind(checklist_table,data.frame("cribague" = input$cribague))}
else {checklist_table <<- cbind(checklist_table,data.frame("cribague"= c("NULL")))}
if (!is.null(input$criautre)) {
checklist_table <<- cbind(checklist_table,data.frame("criautre" = input$criautre))}
else {checklist_table <<- cbind(checklist_table,data.frame("criautre"= c("NULL")))}
if ((input$Notation_euro_table)!="") {
checklist_table <<- cbind(checklist_table,data.frame("Notation_euro_table" = input$Notation_euro_table))}
else {checklist_table <<- cbind(checklist_table,data.frame("Notation_euro_table"= c("NULL")))}
output$tablechecklist_table = DT::renderDT(checklist_table,server = F)
})
####### Partie comportement lacher :
# "Show current time" buttons for the two release times.
observeEvent(input$to_current_time, {
updateTimeInput(session, "time", value = Sys.time())
})
observeEvent(input$to_current_time2, {
updateTimeInput(session, "time2", value = Sys.time())
})
# Completeness check for the release form: warn if any field is missing,
# otherwise confirm. A second observer on the same input (below) builds the
# recap table; both run on each click.
# NOTE(review): modalCallback2 is referenced here but its definition at the
# bottom of the file is commented out — this errors at runtime when an alert
# closes; define modalCallback2 (or drop callbackR) before release.
observeEvent(input$checklist_2, {
# cat(file=stderr(), "visi", input$titube, "\n")
if (is.null(input$vitesse) | is.null(input$titube) | is.null(input$couche) | is.null(input$cabriole_saut)
| is.null(input$cri) | is.null(input$allure) | is.null(input$gratte_collier) | is.null(input$tombe)
| (input$habitat)=="" | (input$Notation_euro)=="" | (input$habitat_perte)=="" | is.na(input$nbre_stops) | (input$visibilite)=="" | is.na(input$nbre_personnes))
{shinyalert("STOP!", "Données manquantes", type = "warning",confirmButtonText="Valider quand même", showCancelButton=T,cancelButtonText="Annuler", callbackR = modalCallback2)}
else
{shinyalert("Nice!", "Parfait", type = "success",showCancelButton=T, callbackR = modalCallback2)}
})
######### CHECKLIST 2
# Recap table for the release-behaviour form: one row, one column per input;
# blank/missing values are shown as the string "NULL". Rebuilt from scratch
# on every click of the "checklist_2" button.
checklist2 = data.frame()
row.names(checklist2) = NULL
output$tablechecklist2 = DT::renderDT(expr = checklist2,server = F)
observeEvent(input$checklist_2, {
if (!is.null(input$vitesse)) {
checklist2 <<- data.frame("Vitesse" = input$vitesse)}
else {checklist2 <<- data.frame("Vitesse"= c("NULL"))}
if (!is.null(input$titube)) {
checklist2 <<- cbind(checklist2,data.frame("titube" = input$titube))}
else {checklist2 <<- cbind(checklist2,data.frame("titube"= c("NULL")))}
if (!is.null(input$couche)) {
checklist2 <<- cbind(checklist2,data.frame("couche" = input$couche))}
else {checklist2 <<- cbind(checklist2,data.frame("couche"= c("NULL")))}
if (!is.null(input$cabriole_saut)) {
checklist2 <<- cbind(checklist2,data.frame("cabriole_saut" = input$cabriole_saut))}
else {checklist2 <<- cbind(checklist2,data.frame("cabriole_saut"= c("NULL")))}
if (!is.null(input$cri)) {
checklist2 <<- cbind(checklist2,data.frame("cri" = input$cri))}
else {checklist2 <<- cbind(checklist2,data.frame("cri"= c("NULL")))}
if (!is.null(input$allure)) {
checklist2 <<- cbind(checklist2,data.frame("allure" = input$allure))}
else {checklist2 <<- cbind(checklist2,data.frame("allure"= c("NULL")))}
if (!is.null(input$gratte_collier)) {
checklist2 <<- cbind(checklist2,data.frame("Gratte_Collier" = input$gratte_collier))}
else {checklist2 <<- cbind(checklist2,data.frame("Gratte_Collier"= c("NULL")))}
if (!is.null(input$tombe)) {
checklist2 <<- cbind(checklist2,data.frame("tombe" = input$tombe))}
else {checklist2 <<- cbind(checklist2,data.frame("tombe"= c("NULL")))}
if ((input$habitat)!="") {
checklist2 <<- cbind(checklist2,data.frame("habitat" = input$habitat))}
else {checklist2 <<- cbind(checklist2,data.frame("habitat"= c("NULL")))}
if ((input$Notation_euro)!="") {
checklist2 <<- cbind(checklist2,data.frame("Eurodeer" = input$Notation_euro))}
else {checklist2 <<- cbind(checklist2,data.frame("Eurodeer"= c("NULL")))}
# NOTE(review): this column is labelled "Habitat" while input$habitat above
# is labelled "habitat" — easily confused; consider "Habitat_perte".
if ((input$habitat_perte)!="") {
checklist2 <<- cbind(checklist2,data.frame("Habitat" = input$habitat_perte))}
else {checklist2 <<- cbind(checklist2,data.frame("Habitat"= c("NULL")))}
if (!is.na(input$nbre_stops)) {
checklist2 <<- cbind(checklist2,data.frame("Stops" = input$nbre_stops))}
else {checklist2 <<- cbind(checklist2,data.frame("Stops"= c("NULL")))}
if ((input$visibilite)!="") {
checklist2 <<- cbind(checklist2,data.frame("Visibilite" = input$visibilite))}
else {checklist2 <<- cbind(checklist2,data.frame("Visibilite"= c("NULL")))}
if (!is.na(input$nbre_personnes)) {
checklist2 <<- cbind(checklist2,data.frame("Nbre_personnes" = input$nbre_personnes))}
else {checklist2 <<- cbind(checklist2,data.frame("Nbre_personnes"= c("NULL")))}
output$tablechecklist2 = DT::renderDT(checklist2,server = F)
})
######## ADD CHECKLIST VALUES TO THE DATABASE
# To obtain the next cpt_id:
#max_value=dbGetQuery(con,paste0('SELECT cpt_id FROM cmpt.t_capture_cpt order by cpt_id desc limit 1'))
#max_value=as.integer((max_value[1,1])+1)
#modalCallback2 <- function(value) {
# if (value == TRUE) {
# dbSendQuery(con,sprintf("INSERT INTO cmpt.t_capture_cpt (cpt_id,cpt_ani_etiq, cpt_date,cpt_annee_suivi, cpt_lache_visibilite, cpt_cap_id)
#VALUES (%s,100,'1961-06-16',1111,'exemple',000)",max_value))
# }}
#}
# BUG FIX: the brace terminating the `server` function was commented out
# together with modalCallback2 above, leaving `server` unterminated and the
# file unparseable. Restore it here.
}
# modalCallback2 is still referenced by shinyalert(callbackR = ...) inside the
# server, but its definition above is commented out, which would raise
# "object 'modalCallback2' not found" when an alert closes. Provide a no-op
# until the INSERT logic is restored. TODO(review): re-enable the DB insert.
modalCallback2 <- function(value) {
  invisible(NULL)
}
################## APPLICATION LAUNCH :
#dbDisconnect(con)
shinyApp(ui = ui, server = server)
|
# setwd("/spin1/users/zhangh24/breast_cancer_data_analysis/")
# filedir <- './whole_genome_age/ICOG/Intrinsic_subtypes/result/'
# files <- dir(filedir,pattern="intrinsic_subytpe_icog")
# total <- 564*5
# missingid <- matrix(0,total,2)
# temp <- 0
# for(i1 in 1:564){
# print(i1)
# for(i2 in 1:5){
# text <- paste0("intrinsic_subytpe_icog",i1,"_",i2)
# if((text%in%files)==F){
# temp <- temp+1
# missingid[temp,] <- c(i1,i2)
# }
# }
# }
# missingid <- missingid[1:temp,]
# icog.unique.resubmit <- unique(missingid[,1])
# save(icog.unique.resubmit,file="./whole_genome_age/ICOG/Intrinsic_subtypes/result/icog.unique.resubmit.Rdata")
# submit <- rep("c",length(icog.unique.resubmit)*15)
# temp <- 1
# for(i in 1:length(icog.unique.resubmit)){
# for(j in 1:15){
# submit[temp] <- paste0("Rscript /spin1/users/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/Intrinsic_subtypes/code/intrinsic_subtype_icog.R ",icog.unique.resubmit[i]," ",j)
# temp <- temp+1
# }
#
# }
# write.table(submit,file="/spin1/users/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/Intrinsic_subtypes/code/icog_resubmit.sh",
# row.names=F,quote=F,col.names=F)
# Build the ordered list of chrX (chr23) genotype chunk files and parse each
# file name into (chromosome, start position, end position).
setwd("/spin1/users/zhangh24/breast_cancer_data_analysis/")
filedir <- './whole_genome_age/ICOG/Intrinsic_subtypes/result/'
files <- dir(filedir,pattern="intrinsic_subytpe_icog_resubmit")
result_files <- dir(filedir,pattern="intrinsic_subytpe_icog")
Filesdir <- "/gpfs/gsfs4/users/NC_BW/icogs_onco/genotype/imputed2/icogs_imputed/"
Files <- dir(Filesdir,pattern="icogs_merged_b1_12.",full.names=T)
Filesex <- dir(Filesdir,pattern="icogs_merged_b1_12.chr23",full.names=T)
# Keep only the sex-chromosome (chr23) chunks.
idx.sex <- Files%in%Filesex
Files <- Files[idx.sex]
library(gtools)
# Natural sort so chr23.2_... sorts after chr23.1_... etc.
Files <- mixedsort(Files)
# NOTE(review): because Filesdir ends in "/", full.names produces a double
# slash ("icogs_imputed//icogs_..."); this single-slash pattern therefore
# matches nothing (silent no-op). The loop below compensates with the
# double-slash pattern — confirm and remove one of the two gsub passes.
Files <- gsub("/gpfs/gsfs4/users/NC_BW/icogs_onco/genotype/imputed2/icogs_imputed/icogs_merged_b1_12.","",Files)
Files <- gsub(".txt.gz","",Files)
# One row per chunk: chromosome, start bp, end bp (parsed from "chrN.p1_p2").
Files_sub <- data.frame(chr=rep(1,length(Files)),p1=rep(0,length(Files)),p2=rep(0,length(Files)))
for(i in 1:length(Files)){
temp <- gsub("/gpfs/gsfs4/users/NC_BW/icogs_onco/genotype/imputed2/icogs_imputed//icogs_merged_b1_12.","",Files[i])
temp <- strsplit(temp,"\\.")
temp <- unlist(temp)
chr = as.integer(gsub("chr","",temp[1]))
p_temp <- temp[2]
p_temp <- strsplit(p_temp,"_")
p_temp <- unlist(p_temp)
p1 <- as.integer(p_temp[1])
p2 <- as.integer(p_temp[2])
Files_sub[i,] <- c(chr,p1,p2)
}
# Genomic order of the chunks; `idx` maps ordered position -> original index
# and is used below when loading per-chunk result files.
idx <- order(Files_sub$chr,Files_sub$p1)
File_sub_order <- Files_sub[order(Files_sub$chr,Files_sub$p1),]
# Preallocate the containers that will hold the concatenated per-chunk
# results. `num` (total SNP count) comes from a previously saved result
# object `icog_result` loaded from the .Rdata file.
result.dir <- './whole_genome_age/ICOG/Intrinsic_subtypes/result/'
load("/spin1/users/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/Icog_result_sex.Rdata")
#rs_id <- icog_result$rs_id
num <- nrow(icog_result)
# num.total <- 0
# for(i in 1:564){
# print(i)
#
# }
#rs_id <- rep("c",num)
# 4 case-case tumor markers -> 5 intrinsic-subtype parameters per SNP:
# score is num x 5, infor holds the flattened 5x5 information matrix per SNP.
number.of.tumor <- 4
score <- matrix(0,nrow=num,ncol = (number.of.tumor+1))
infor <- matrix(0,nrow = num,ncol = (number.of.tumor+1)^2)
freq.all <- rep(0,num)
# "c" is the placeholder marking rs ids not yet filled in.
rs_id <- rep("c",num)
# resubimt_resubmimt_id <- c(48,47,148,147,150,151,353,369,494,504,506,514,515,548,552,553)
#
# resubmit_id <- matrix(0,100,2)
# resubmit_temp <- 0
num.total <- 0
# BUG FIX: temp.i / temp.k were appended to without ever being initialised,
# which raises "object 'temp.i' not found" the first time a chunk contains a
# "c" placeholder. Initialise them as empty vectors up front.
temp.i <- integer(0)
temp.k <- integer(0)
# Concatenate the 30 result chunks of every genomic file (in genomic order via
# idx) into rs_id / score / infor, tracking how many rows are filled so far.
for (i in seq_along(Files)) {
  print(i)
  for (k in 1:30) {
    # Loads `result`: [[1]] rs ids, [[2]] score matrix, [[3]] information matrix.
    load(paste0("./whole_genome_age/ICOG/Intrinsic_subtypes/result/intrinsic_subytpe_icog_sex_", idx[i], "_", k))
    # Record any chunk whose rs ids still hold the "c" placeholder
    # (i.e. the cluster job did not complete) for later resubmission.
    if (length(which(result[[1]] == "c")) >= 1) {
      temp.i <- c(temp.i, i)
      temp.k <- c(temp.k, k)
    }
    temp <- nrow(result[[2]])
    rs_id[num.total + (1:temp)] <- result[[1]]
    score[num.total + (1:temp), ] <- result[[2]]
    infor[num.total + (1:temp), ] <- result[[3]]
    num.total <- temp + num.total
  }
}
# Attach SNP annotation (icog_info) to the aggregated scores and save.
load("/spin1/users/zhangh24/breast_cancer_data_analysis/whole_genome/ICOG/ERPRHER2GRADE_fixed_baseline/result/icog_info_sex.Rdata")
# icog_info <- cbind(icog_info,CHR)
# save(icog_info,file="/spin1/users/zhangh24/breast_cancer_data_analysis/whole_genome/ICOG/ERPRHER2GRADE_fixed_baseline/result/icog_info.Rdata")
# Sanity check that annotation rows line up with the aggregated rs ids.
# NOTE(review): this overwrites the `idx` ordering vector used above (safe
# only because the loop has finished), and the all.equal result is merely
# auto-printed, not asserted — consider stopifnot().
idx <- which(icog_info$rs_id!=rs_id)
all.equal(icog_info$rs_id,rs_id)
#idx.diff <- which(icog_info$rs_id!=rs_id)
# All remaining SNPs are on the X chromosome (coded 23).
CHR <- rep(23,nrow(icog_info))
icog_info <- icog_info[,1:10]
icog_result_casecase <- data.frame(icog_info,score,infor,CHR)
save(icog_result_casecase,file="/spin1/users/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/Intrinsic_subtypes/result/Icog_result_intrinsic_subtype_sex.Rdata")
# for(j in 1:temp){
# infor_j <- result[[3]][(number.of.tumor*j-(number.of.tumor-1)):((number.of.tumor)*j),]
# infor[num.total+j,] <- as.vector(infor_j)
# }
# if(num.total< 12327300&(num.total+temp)> 12327300){
# print(c(i,k))
# }
# if(sum(result[[1]]=="c")!=0){
# resubmit_temp <- resubmit_temp+1
# resubmit_id[resubmit_temp,1] <- idx[i]
# resubmit_id[resubmit_temp,2] <- k
# }
# }
# file_load = paste0("intrinsic_subytpe_icog_resubmit",idx[i],"_",1)
# if(idx[i]%in%resubimt_resubmimt_id){
# for(k in 1:70){
# load(paste0("./whole_genome_age/ICOG/Intrinsic_subtypes/result/intrinsic_subytpe_icog_resubmit_resubmit_resubmit",idx[i],"_",k))
# temp <- nrow(result[[2]])
# rs_id[num.total+(1:temp)] <- result[[1]]
# score[num.total+(1:temp),] <- result[[2]]
# infor[num.total+(1:temp),] <- result[[3]]
# num.total <- temp+num.total
# if(sum(result[[1]]=="c")!=0){
# resubmit_temp <- resubmit_temp+1
# resubmit_id[resubmit_temp,1] <- idx[i]
# resubmit_id[resubmit_temp,2] <- k
# }
# }
# }else if(idx[i]==413){
# for(k in 1:1000){
# load(paste0("./whole_genome_age/ICOG/Intrinsic_subtypes/result/intrinsic_subytpe_icog_resubmit_resubmit",idx[i],"_",k))
# temp <- nrow(result[[2]])
# rs_id[num.total+(1:temp)] <- result[[1]]
# score[num.total+(1:temp),] <- result[[2]]
# infor[num.total+(1:temp),] <- result[[3]]
# num.total <- temp+num.total
# if(sum(result[[1]]=="c")!=0){
# resubmit_temp <- resubmit_temp+1
# resubmit_id[resubmit_temp,1] <- idx[i]
# resubmit_id[resubmit_temp,2] <- k
# }
# }
# }else if(file_load%in%result_files){
# for(k in 1:15){
# load(paste0("./whole_genome_age/ICOG/Intrinsic_subtypes/result/intrinsic_subytpe_icog_resubmit",idx[i],"_",k))
# temp <- nrow(result[[2]])
# rs_id[num.total+(1:temp)] <- result[[1]]
# score[num.total+(1:temp),] <- result[[2]]
# infor[num.total+(1:temp),] <- result[[3]]
# num.total <- temp+num.total
# if(sum(result[[1]]=="c")!=0){
# resubmit_temp <- resubmit_temp+1
# resubmit_id[resubmit_temp,1] <- idx[i]
# resubmit_id[resubmit_temp,2] <- k
# }
# }
# }else{
# for(k in 1:5){
# load(paste0("./whole_genome_age/ICOG/Intrinsic_subtypes/result/intrinsic_subytpe_icog",idx[i],"_",k))
# temp <- nrow(result[[2]])
# rs_id[num.total+(1:temp)] <- result[[1]]
# score[num.total+(1:temp),] <- result[[2]]
# infor[num.total+(1:temp),] <- result[[3]]
# # for(j in 1:temp){
# # infor_j <- result[[3]][(number.of.tumor*j-(number.of.tumor-1)):((number.of.tumor)*j),]
# # infor[num.total+j,] <- as.vector(infor_j)
# # }
# # if(num.total< 12327300&(num.total+temp)> 12327300){
# # print(c(i,k))
# # }
# num.total <- temp+num.total
# if(sum(result[[1]]=="c")!=0){
# resubmit_temp <- resubmit_temp+1
# resubmit_id[resubmit_temp,1] <- idx[i]
# resubmit_id[resubmit_temp,2] <- k
# }
# }
#}
# resubmit_id <- resubmit_id[1:resubmit_temp,]
# unique(resubmit_id[,1])
# k <- 1
# load(paste0("./whole_genome_age/ICOG/Intrinsic_subtypes/result/intrinsic_subytpe_icog",idx[i],"_",k))
# idx.try <- which(result[[1]]=="c")
# print(length(idx.try))
#try <- merge(icog_info,rs_id,by.x=rs_id,by.y=rs_id,all=T)
#####to get the total number of SNPs from the information files
# icog_info <- data.frame(snp_id = rep("c",num.total),rs_id = rep("c",num.total),
# position=rep(0,num.total),exp_freq_a1=rep(0,num.total),info=rep(0,num.total),
# certainty=rep(0,num.total),type=rep(0,num.total),info_type0=rep(0,num.total),
# concord_type0=rep(0,num.total),r2_type0=rep(0,num.total),stringsAsFactors=F)
# CHR <- rep(0,num.total)
# num.total <- 0
# library(data.table)
# for(i in 1:22){
# print(i)
# filedir <- paste0("/gpfs/gsfs4/users/NC_BW/icogs_onco/genotype/imputed2/icogs_info_files/chr",i)
# files <- dir(filedir,pattern="txt_info",full.names=T)
# files_num <- gsub(paste0(filedir,"/icogs_euro12_chr",i,"_phased"),
# "",files)
# files_num <- gsub(".txt_info","",files_num)
# files_num <- strsplit(files_num,"_")
# files_num <- as.integer(unlist(files_num)[seq(1,2*length(files_num),2)])
# idx <- order(files_num)
# for(j in 1:length(idx)){
# #print(j)
# data <- as.data.frame(fread(files[idx[j]],header=T,stringsAsFactors=F))
#
# temp <- nrow(data)
# icog_info[num.total+(1:temp),] <- data
# CHR[num.total+(1:temp)] <- i
# num.total <- temp+num.total
# }
#
# }
#
# icog_result_baseline <- data.frame(icog_info,score_baseline,infor_baseline,CHR)
# save(icog_result_baseline,file="/spin1/users/zhangh24/breast_cancer_data_analysis/whole_genome/ICOG/ERPRHER2GRADE_fixed_baseline/result/Icog_result_baseline.Rdata")
# print(1)
| /whole_genome_age/ICOG/Intrinsic_subtypes/code/merge_sex.r | no_license | andrewhaoyu/breast_cancer_data_analysis | R | false | false | 9,380 | r | # setwd("/spin1/users/zhangh24/breast_cancer_data_analysis/")
# filedir <- './whole_genome_age/ICOG/Intrinsic_subtypes/result/'
# files <- dir(filedir,pattern="intrinsic_subytpe_icog")
# total <- 564*5
# missingid <- matrix(0,total,2)
# temp <- 0
# for(i1 in 1:564){
# print(i1)
# for(i2 in 1:5){
# text <- paste0("intrinsic_subytpe_icog",i1,"_",i2)
# if((text%in%files)==F){
# temp <- temp+1
# missingid[temp,] <- c(i1,i2)
# }
# }
# }
# missingid <- missingid[1:temp,]
# icog.unique.resubmit <- unique(missingid[,1])
# save(icog.unique.resubmit,file="./whole_genome_age/ICOG/Intrinsic_subtypes/result/icog.unique.resubmit.Rdata")
# submit <- rep("c",length(icog.unique.resubmit)*15)
# temp <- 1
# for(i in 1:length(icog.unique.resubmit)){
# for(j in 1:15){
# submit[temp] <- paste0("Rscript /spin1/users/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/Intrinsic_subtypes/code/intrinsic_subtype_icog.R ",icog.unique.resubmit[i]," ",j)
# temp <- temp+1
# }
#
# }
# write.table(submit,file="/spin1/users/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/Intrinsic_subtypes/code/icog_resubmit.sh",
# row.names=F,quote=F,col.names=F)
# NOTE(review): setwd() in a script is fragile -- all relative paths
# below depend on it; prefer running from the project root instead.
setwd("/spin1/users/zhangh24/breast_cancer_data_analysis/")
filedir <- './whole_genome_age/ICOG/Intrinsic_subtypes/result/'
# NOTE(review): dir() patterns are regexes, so "intrinsic_subytpe_icog"
# also matches the resubmit files listed in 'files' -- presumably
# intentional here ("subytpe" is a typo baked into the file names).
files <- dir(filedir,pattern="intrinsic_subytpe_icog_resubmit")
result_files <- dir(filedir,pattern="intrinsic_subytpe_icog")
Filesdir <- "/gpfs/gsfs4/users/NC_BW/icogs_onco/genotype/imputed2/icogs_imputed/"
# "." in the pattern matches any character; harmless given these file
# names, but "\\." would state the intent. T should be TRUE (style).
Files <- dir(Filesdir,pattern="icogs_merged_b1_12.",full.names=T)
Filesex <- dir(Filesdir,pattern="icogs_merged_b1_12.chr23",full.names=T)
# Keep only chromosome-23 (X) files for this sex-chromosome analysis.
idx.sex <- Files%in%Filesex
Files <- Files[idx.sex]
library(gtools)
# Natural-order sort so chr positions come out in numeric order.
Files <- mixedsort(Files)
# Strip the directory prefix and the .txt.gz suffix, leaving
# "chr<k>.<p1>_<p2>" names for the parsing loop below.
Files <- gsub("/gpfs/gsfs4/users/NC_BW/icogs_onco/genotype/imputed2/icogs_imputed/icogs_merged_b1_12.","",Files)
Files <- gsub(".txt.gz","",Files)
# Parse chromosome and position window out of each genotype file name.
# After the directory prefix is stripped the names look like
# "chr<k>.<p1>_<p2>", so splitting on "." and then "_" recovers
# (chr, p1, p2) for every file.
Files_sub <- data.frame(chr = rep(1, length(Files)),
                        p1 = rep(0, length(Files)),
                        p2 = rep(0, length(Files)))
# seq_along() instead of 1:length(Files): the 1:length() form iterates
# over c(1, 0) when Files is empty and would corrupt Files_sub.
for (i in seq_along(Files)) {
  # Defensive strip of the full directory prefix (a no-op here, since
  # the prefix was already removed above).
  temp <- gsub("/gpfs/gsfs4/users/NC_BW/icogs_onco/genotype/imputed2/icogs_imputed//icogs_merged_b1_12.", "", Files[i])
  temp <- unlist(strsplit(temp, "\\."))
  chr <- as.integer(gsub("chr", "", temp[1]))
  p_temp <- unlist(strsplit(temp[2], "_"))
  # All three values are integers, so the c() coercion is lossless.
  Files_sub[i, ] <- c(chr, as.integer(p_temp[1]), as.integer(p_temp[2]))
}
# Order the files by chromosome, then by window start position; 'idx'
# is reused below to load the per-chunk result files in genomic order.
idx <- order(Files_sub$chr, Files_sub$p1)
File_sub_order <- Files_sub[idx, ]
# Directory that holds the per-chunk intrinsic-subtype result files.
result.dir <- './whole_genome_age/ICOG/Intrinsic_subtypes/result/'
# Loads 'icog_result'; its row count fixes the number of SNPs whose
# statistics will be collected below.
load("/spin1/users/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/Icog_result_sex.Rdata")
num <- nrow(icog_result)
# Four tumor characteristics; each SNP contributes one score entry and
# one flattened information block per subtype (number.of.tumor + 1).
number.of.tumor <- 4
# Pre-allocate the containers that the merge loop fills row-by-row.
# NOTE(review): 'freq.all' is allocated but never filled in the visible
# code -- presumably a leftover from an earlier version.
score <- matrix(0, num, number.of.tumor + 1)
infor <- matrix(0, num, (number.of.tumor + 1)^2)
freq.all <- numeric(num)
rs_id <- rep("c", num)
# resubimt_resubmimt_id <- c(48,47,148,147,150,151,353,369,494,504,506,514,515,548,552,553)
#
# resubmit_id <- matrix(0,100,2)
# resubmit_temp <- 0
num.total <- 0
# temp.i / temp.k accumulate the (file, chunk) indices whose result
# files still contain the "c" placeholder, i.e. chunks whose analysis
# failed and must be re-run. The original code appended to them without
# ever initialising them, which raises "object 'temp.i' not found" the
# first time a failed chunk is encountered; initialise them here unless
# an enclosing script already did so.
if (!exists("temp.i")) temp.i <- integer(0)
if (!exists("temp.k")) temp.k <- integer(0)
for (i in seq_along(Files)) {
  print(i)
  for (k in 1:30) {
    # Each .Rdata file defines 'result': [[1]] rs ids, [[2]] score
    # matrix, [[3]] information matrix -- one row per SNP.
    # (The "subytpe" typo is part of the on-disk file names; keep it.)
    load(paste0("./whole_genome_age/ICOG/Intrinsic_subtypes/result/intrinsic_subytpe_icog_sex_", idx[i], "_", k))
    if (length(which(result[[1]] == "c")) >= 1) {
      temp.i <- c(temp.i, i)
      temp.k <- c(temp.k, k)
    }
    # Append this chunk's rows into the pre-allocated containers.
    temp <- nrow(result[[2]])
    rs_id[num.total + (1:temp)] <- result[[1]]
    score[num.total + (1:temp), ] <- result[[2]]
    infor[num.total + (1:temp), ] <- result[[3]]
    num.total <- num.total + temp
  }
}
# Load the SNP information table 'icog_info' for the X chromosome; its
# rows are expected to line up one-to-one with the rs_id vector filled
# by the merge loop above.
load("/spin1/users/zhangh24/breast_cancer_data_analysis/whole_genome/ICOG/ERPRHER2GRADE_fixed_baseline/result/icog_info_sex.Rdata")
# icog_info <- cbind(icog_info,CHR)
# save(icog_info,file="/spin1/users/zhangh24/breast_cancer_data_analysis/whole_genome/ICOG/ERPRHER2GRADE_fixed_baseline/result/icog_info.Rdata")
# Sanity check: positions where the two id vectors disagree.
# NOTE(review): this overwrites 'idx' (which previously held the file
# ordering), so nothing after this point may rely on the old value.
idx <- which(icog_info$rs_id!=rs_id)
# Printed when the script is sourced with echo; TRUE means the merge
# preserved SNP order.
all.equal(icog_info$rs_id,rs_id)
#idx.diff <- which(icog_info$rs_id!=rs_id)
# All of these SNPs are on the X chromosome (coded 23).
CHR <- rep(23,nrow(icog_info))
# Keep only the first 10 annotation columns before binding results.
icog_info <- icog_info[,1:10]
# Final table: annotation + score statistics + flattened information
# matrices + chromosome, saved for downstream analyses.
icog_result_casecase <- data.frame(icog_info,score,infor,CHR)
save(icog_result_casecase,file="/spin1/users/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/Intrinsic_subtypes/result/Icog_result_intrinsic_subtype_sex.Rdata")
# for(j in 1:temp){
# infor_j <- result[[3]][(number.of.tumor*j-(number.of.tumor-1)):((number.of.tumor)*j),]
# infor[num.total+j,] <- as.vector(infor_j)
# }
# if(num.total< 12327300&(num.total+temp)> 12327300){
# print(c(i,k))
# }
# if(sum(result[[1]]=="c")!=0){
# resubmit_temp <- resubmit_temp+1
# resubmit_id[resubmit_temp,1] <- idx[i]
# resubmit_id[resubmit_temp,2] <- k
# }
# }
# file_load = paste0("intrinsic_subytpe_icog_resubmit",idx[i],"_",1)
# if(idx[i]%in%resubimt_resubmimt_id){
# for(k in 1:70){
# load(paste0("./whole_genome_age/ICOG/Intrinsic_subtypes/result/intrinsic_subytpe_icog_resubmit_resubmit_resubmit",idx[i],"_",k))
# temp <- nrow(result[[2]])
# rs_id[num.total+(1:temp)] <- result[[1]]
# score[num.total+(1:temp),] <- result[[2]]
# infor[num.total+(1:temp),] <- result[[3]]
# num.total <- temp+num.total
# if(sum(result[[1]]=="c")!=0){
# resubmit_temp <- resubmit_temp+1
# resubmit_id[resubmit_temp,1] <- idx[i]
# resubmit_id[resubmit_temp,2] <- k
# }
# }
# }else if(idx[i]==413){
# for(k in 1:1000){
# load(paste0("./whole_genome_age/ICOG/Intrinsic_subtypes/result/intrinsic_subytpe_icog_resubmit_resubmit",idx[i],"_",k))
# temp <- nrow(result[[2]])
# rs_id[num.total+(1:temp)] <- result[[1]]
# score[num.total+(1:temp),] <- result[[2]]
# infor[num.total+(1:temp),] <- result[[3]]
# num.total <- temp+num.total
# if(sum(result[[1]]=="c")!=0){
# resubmit_temp <- resubmit_temp+1
# resubmit_id[resubmit_temp,1] <- idx[i]
# resubmit_id[resubmit_temp,2] <- k
# }
# }
# }else if(file_load%in%result_files){
# for(k in 1:15){
# load(paste0("./whole_genome_age/ICOG/Intrinsic_subtypes/result/intrinsic_subytpe_icog_resubmit",idx[i],"_",k))
# temp <- nrow(result[[2]])
# rs_id[num.total+(1:temp)] <- result[[1]]
# score[num.total+(1:temp),] <- result[[2]]
# infor[num.total+(1:temp),] <- result[[3]]
# num.total <- temp+num.total
# if(sum(result[[1]]=="c")!=0){
# resubmit_temp <- resubmit_temp+1
# resubmit_id[resubmit_temp,1] <- idx[i]
# resubmit_id[resubmit_temp,2] <- k
# }
# }
# }else{
# for(k in 1:5){
# load(paste0("./whole_genome_age/ICOG/Intrinsic_subtypes/result/intrinsic_subytpe_icog",idx[i],"_",k))
# temp <- nrow(result[[2]])
# rs_id[num.total+(1:temp)] <- result[[1]]
# score[num.total+(1:temp),] <- result[[2]]
# infor[num.total+(1:temp),] <- result[[3]]
# # for(j in 1:temp){
# # infor_j <- result[[3]][(number.of.tumor*j-(number.of.tumor-1)):((number.of.tumor)*j),]
# # infor[num.total+j,] <- as.vector(infor_j)
# # }
# # if(num.total< 12327300&(num.total+temp)> 12327300){
# # print(c(i,k))
# # }
# num.total <- temp+num.total
# if(sum(result[[1]]=="c")!=0){
# resubmit_temp <- resubmit_temp+1
# resubmit_id[resubmit_temp,1] <- idx[i]
# resubmit_id[resubmit_temp,2] <- k
# }
# }
#}
# resubmit_id <- resubmit_id[1:resubmit_temp,]
# unique(resubmit_id[,1])
# k <- 1
# load(paste0("./whole_genome_age/ICOG/Intrinsic_subtypes/result/intrinsic_subytpe_icog",idx[i],"_",k))
# idx.try <- which(result[[1]]=="c")
# print(length(idx.try))
#try <- merge(icog_info,rs_id,by.x=rs_id,by.y=rs_id,all=T)
#####to get the total number of SNPs from the information files
# icog_info <- data.frame(snp_id = rep("c",num.total),rs_id = rep("c",num.total),
# position=rep(0,num.total),exp_freq_a1=rep(0,num.total),info=rep(0,num.total),
# certainty=rep(0,num.total),type=rep(0,num.total),info_type0=rep(0,num.total),
# concord_type0=rep(0,num.total),r2_type0=rep(0,num.total),stringsAsFactors=F)
# CHR <- rep(0,num.total)
# num.total <- 0
# library(data.table)
# for(i in 1:22){
# print(i)
# filedir <- paste0("/gpfs/gsfs4/users/NC_BW/icogs_onco/genotype/imputed2/icogs_info_files/chr",i)
# files <- dir(filedir,pattern="txt_info",full.names=T)
# files_num <- gsub(paste0(filedir,"/icogs_euro12_chr",i,"_phased"),
# "",files)
# files_num <- gsub(".txt_info","",files_num)
# files_num <- strsplit(files_num,"_")
# files_num <- as.integer(unlist(files_num)[seq(1,2*length(files_num),2)])
# idx <- order(files_num)
# for(j in 1:length(idx)){
# #print(j)
# data <- as.data.frame(fread(files[idx[j]],header=T,stringsAsFactors=F))
#
# temp <- nrow(data)
# icog_info[num.total+(1:temp),] <- data
# CHR[num.total+(1:temp)] <- i
# num.total <- temp+num.total
# }
#
# }
#
# icog_result_baseline <- data.frame(icog_info,score_baseline,infor_baseline,CHR)
# save(icog_result_baseline,file="/spin1/users/zhangh24/breast_cancer_data_analysis/whole_genome/ICOG/ERPRHER2GRADE_fixed_baseline/result/Icog_result_baseline.Rdata")
# print(1)
|
#Data Cleaning and Preparation - Hints
#Identify the data quality issues and clean the data so that you can use it for analysis.
#Ensure that the dates and time are in the proper format. Derive new variables which will be useful for analysis.
# Read the raw Uber request log; timestamps are kept as character and
# parsed explicitly below.
uberrequestdata <- read.csv("Uber Request Data.csv",stringsAsFactors = FALSE)
# Total number of trip requests in the file.
totalCountofRecords <- nrow(uberrequestdata)
# Requests with no drop timestamp: either the trip was cancelled or no
# car was available. (The original wrote is.na(x == TRUE) -- a misplaced
# parenthesis that only worked by accident because comparing a character
# column with TRUE yields NA exactly where the column is NA; is.na(x)
# states the intent directly and is robust.)
noDroptime <- sum(is.na(uberrequestdata$Drop.timestamp))
# Requests with no driver assigned ("No Cars Available");
# the redundant "== TRUE" from the original is dropped.
noDriverAvailable <- sum(is.na(uberrequestdata$Driver.id))
# Cancelled trips have a driver assigned but no drop timestamp.
cancelledTripCount <- noDroptime - noDriverAvailable
# Parse the request timestamp once, then derive separate date and
# time-of-day columns from the parsed value (the original re-parsed the
# same column twice).
request.parsed <- as.POSIXct(strptime(uberrequestdata$Request.timestamp,"%d/%m/%y %H:%M",tz=""))
RequestHour <- format(request.parsed, format = "%H:%M:%S")
RequestDate <- format(request.parsed, format = "%d/%m/%Y")
uberrequestdata$RequestDate <- RequestDate
uberrequestdata$RequestHour <- RequestHour
# Same derivation for the drop timestamp; NA timestamps stay NA.
drop.parsed <- as.POSIXct(strptime(uberrequestdata$Drop.timestamp,"%d/%m/%y %H:%M",tz=""))
DropHour <- format(drop.parsed, format = "%H:%M:%S")
DropDate <- format(drop.parsed, format = "%d/%m/%Y")
uberrequestdata$DropDate <- DropDate
uberrequestdata$DropHour <- DropHour
# What do you think is the reason for this issue for the supply-demand gap? Write the answer in less than 100
#words. You may accompany the write-up with plot(s).
#ANS: From the various plots, graphs and further visualization of the available data we
#could see that in the early morning a large number of requests go from the city to the airport.
#Because few requests originate at the airport in the morning, drivers have to wait a long time for the next ride.
#In the evening many requests come from the airport to the city, but fewer cars are
#available because fewer cars commute from the city to the airport.
#4. Recommend some ways to resolve the supply-demand gap.
#Answer: Uber could partner with other cab services if they can fulfill each other's requirements.
| /UberSupplyDemandGap/UberSupplyDemand.R | no_license | deepak05kr/datascience | R | false | false | 2,405 | r | #Data Cleaning and Preparation - Hints
#Identify the data quality issues and clean the data so that you can use it for analysis.
#Ensure that the dates and time are in the proper format. Derive new variables which will be useful for analysis.
# Read the raw Uber request log; timestamps are kept as character and
# parsed explicitly below.
uberrequestdata <- read.csv("Uber Request Data.csv",stringsAsFactors = FALSE)
# Total number of trip requests in the file.
totalCountofRecords <- nrow(uberrequestdata)
# Requests with no drop timestamp: either the trip was cancelled or no
# car was available. (The original wrote is.na(x == TRUE) -- a misplaced
# parenthesis that only worked by accident because comparing a character
# column with TRUE yields NA exactly where the column is NA; is.na(x)
# states the intent directly and is robust.)
noDroptime <- sum(is.na(uberrequestdata$Drop.timestamp))
# Requests with no driver assigned ("No Cars Available");
# the redundant "== TRUE" from the original is dropped.
noDriverAvailable <- sum(is.na(uberrequestdata$Driver.id))
# Cancelled trips have a driver assigned but no drop timestamp.
cancelledTripCount <- noDroptime - noDriverAvailable
# Parse the request timestamp once, then derive separate date and
# time-of-day columns from the parsed value (the original re-parsed the
# same column twice).
request.parsed <- as.POSIXct(strptime(uberrequestdata$Request.timestamp,"%d/%m/%y %H:%M",tz=""))
RequestHour <- format(request.parsed, format = "%H:%M:%S")
RequestDate <- format(request.parsed, format = "%d/%m/%Y")
uberrequestdata$RequestDate <- RequestDate
uberrequestdata$RequestHour <- RequestHour
# Same derivation for the drop timestamp; NA timestamps stay NA.
drop.parsed <- as.POSIXct(strptime(uberrequestdata$Drop.timestamp,"%d/%m/%y %H:%M",tz=""))
DropHour <- format(drop.parsed, format = "%H:%M:%S")
DropDate <- format(drop.parsed, format = "%d/%m/%Y")
uberrequestdata$DropDate <- DropDate
uberrequestdata$DropHour <- DropHour
# What do you think is the reason for this issue for the supply-demand gap? Write the answer in less than 100
#words. You may accompany the write-up with plot(s).
#ANS: From the various plots, graphs and further visualization of the available data we
#could see that in the early morning a large number of requests go from the city to the airport.
#Because few requests originate at the airport in the morning, drivers have to wait a long time for the next ride.
#In the evening many requests come from the airport to the city, but fewer cars are
#available because fewer cars commute from the city to the airport.
#4. Recommend some ways to resolve the supply-demand gap.
#Answer: Uber could partner with other cab services if they can fulfill each other's requirements.
|
# -------------------------------------------------------------------------------
# This file is part of 'diversityForest'.
#
# 'diversityForest' is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# 'diversityForest' is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with 'diversityForest'. If not, see <http://www.gnu.org/licenses/>.
#
# NOTE: 'diversityForest' is a fork of the popular R package 'ranger', written by Marvin N. Wright.
# Most R and C++ code is identical with that of 'ranger'. The package 'diversityForest'
# was written by taking the original 'ranger' code and making any
# changes necessary to implement diversity forests.
#
# -------------------------------------------------------------------------------
##' First, both for \code{nsplits} and \code{proptry} a grid of possible values may be provided,
##' where default grids are used if no grids are provided. Second, for each pairwise combination of
##' values from these two grids a forest is constructed. Third,
##' that pair of \code{nsplits} and \code{proptry} values is used as the optimized set of parameter
##' values that is associated with the smallest out-of-bag prediction error. If several pairs of
##' parameter values are associated with the same smallest out-of-bag prediction error, the
##' pair with the smallest (parameter) values is used.
##'
##' @title Optimization of the values of the tuning parameters \code{nsplits} and \code{proptry}
##' @param formula Object of class \code{formula} or \code{character} describing the model to fit. Interaction terms supported only for numerical variables.
##' @param data Training data of class \code{data.frame}, \code{matrix}, \code{dgCMatrix} (Matrix) or \code{gwaa.data} (GenABEL).
##' @param nsplitsgrid Grid of values to consider for \code{nsplits}. Default grid: 2, 5, 10, 30, 50, 100, 200.
##' @param proptrygrid Grid of values to consider for \code{proptry}. Default grid: 0.05, 1.
##' @param num.trees.pre Number of trees used for each forest constructed during tuning parameter optimization. Default is 500.
##' @return List with elements
##' \item{\code{nsplitsopt}}{Optimized value of \code{nsplits}.}
##' \item{\code{proptryopt}}{Optimized value of \code{proptry}.}
##' \item{\code{tunegrid}}{Two-dimensional \code{data.frame}, where each row contains one pair of values considered for \code{nsplits} (first entry) and \code{proptry} (second entry).}
##' \item{\code{ooberrs}}{The out-of-bag prediction errors obtained for each pair of values considered for \code{nsplits} and \code{proptry}, where the ordering of pairs of values is the same as in \code{tunegrid} (see above).}
##' @examples
##'
##' ## Load package:
##'
##' library("diversityForest")
##'
##'
##' ## Set seed to obtain reproducible results:
##'
##' set.seed(1234)
##'
##'
##' ## Tuning parameter optimization for the iris data set:
##'
##' tuneres <- tunedivfor(formula = Species ~ ., data = iris, num.trees.pre = 20)
##' # NOTE: num.trees.pre = 20 is specified too small for practical
##' # purposes - the out-of-bag error estimates of the forests
##' # constructed during optimization will be much too variable!!
##' # In practice, num.trees.pre = 500 (default value) or a
##' # larger number should be used.
##'
##' tuneres
##'
##' tuneres$nsplitsopt
##' tuneres$proptryopt
##' tuneres$tunegrid
##' tuneres$ooberrs
##'
##' @author Roman Hornung
##' @references
##' \itemize{
##' \item Hornung, R. (2022). Diversity forests: Using split sampling to enable innovative complex split procedures in random forests. SN Computer Science 3(2):1, <\doi{10.1007/s42979-021-00920-1}>.
##' \item Wright, M. N., Ziegler, A. (2017). ranger: A fast Implementation of Random Forests for High Dimensional Data in C++ and R. Journal of Statistical Software 77:1-17, <\doi{10.18637/jss.v077.i01}>.
##' }
##' @seealso \code{\link{divfor}}
##' @encoding UTF-8
##' @useDynLib diversityForest, .registration = TRUE
##' @importFrom Rcpp evalCpp
##' @import stats
##' @import utils
##' @importFrom Matrix Matrix
##' @export
tunedivfor <- function(formula = NULL, data = NULL,
                       nsplitsgrid = c(2, 5, 10, 30, 50, 100, 200),
                       proptrygrid = c(0.05, 1),
                       num.trees.pre = 500) {
  # Build all (nsplits, proptry) pairs. 'proptrygrid' is given first so
  # that proptry varies fastest, then the columns are swapped back to
  # (nsplits, proptry) order; with this row ordering which.min() below
  # breaks ties in favour of the smallest parameter values, as promised
  # in the documentation.
  tunegrid <- expand.grid(proptrygrid = proptrygrid, nsplitsgrid = nsplitsgrid,
                          stringsAsFactors = FALSE)[, 2:1]
  # Pre-allocate the OOB-error vector (the original grew a length-1
  # vector element by element) and use seq_len() so a degenerate empty
  # grid yields zero iterations instead of an out-of-bounds index.
  ooberrs <- numeric(nrow(tunegrid))
  for (i in seq_len(nrow(tunegrid))) {
    # Fit one forest per parameter pair; only its out-of-bag
    # prediction error is retained.
    ooberrs[i] <- divfor(formula, data = data, num.trees = num.trees.pre,
                         nsplits = tunegrid$nsplitsgrid[i],
                         proptry = tunegrid$proptrygrid[i],
                         write.forest = FALSE, verbose = FALSE)$prediction.error
  }
  # First minimum = smallest parameter values among ties (see above).
  bestind <- which.min(ooberrs)
  nsplitsopt <- tunegrid$nsplitsgrid[bestind]
  proptryopt <- tunegrid$proptrygrid[bestind]
  result <- list(nsplitsopt = nsplitsopt, proptryopt = proptryopt,
                 tunegrid = tunegrid, ooberrs = ooberrs)
  class(result) <- "tunedivfor"
  return(result)
}
| /R/tunedivfor.R | no_license | RomanHornung/diversityForest | R | false | false | 5,342 | r | # -------------------------------------------------------------------------------
# This file is part of 'diversityForest'.
#
# 'diversityForest' is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# 'diversityForest' is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with 'diversityForest'. If not, see <http://www.gnu.org/licenses/>.
#
# NOTE: 'diversityForest' is a fork of the popular R package 'ranger', written by Marvin N. Wright.
# Most R and C++ code is identical with that of 'ranger'. The package 'diversityForest'
# was written by taking the original 'ranger' code and making any
# changes necessary to implement diversity forests.
#
# -------------------------------------------------------------------------------
##' First, both for \code{nsplits} and \code{proptry} a grid of possible values may be provided,
##' where default grids are used if no grids are provided. Second, for each pairwise combination of
##' values from these two grids a forest is constructed. Third,
##' that pair of \code{nsplits} and \code{proptry} values is used as the optimized set of parameter
##' values that is associated with the smallest out-of-bag prediction error. If several pairs of
##' parameter values are associated with the same smallest out-of-bag prediction error, the
##' pair with the smallest (parameter) values is used.
##'
##' @title Optimization of the values of the tuning parameters \code{nsplits} and \code{proptry}
##' @param formula Object of class \code{formula} or \code{character} describing the model to fit. Interaction terms supported only for numerical variables.
##' @param data Training data of class \code{data.frame}, \code{matrix}, \code{dgCMatrix} (Matrix) or \code{gwaa.data} (GenABEL).
##' @param nsplitsgrid Grid of values to consider for \code{nsplits}. Default grid: 2, 5, 10, 30, 50, 100, 200.
##' @param proptrygrid Grid of values to consider for \code{proptry}. Default grid: 0.05, 1.
##' @param num.trees.pre Number of trees used for each forest constructed during tuning parameter optimization. Default is 500.
##' @return List with elements
##' \item{\code{nsplitsopt}}{Optimized value of \code{nsplits}.}
##' \item{\code{proptryopt}}{Optimized value of \code{proptry}.}
##' \item{\code{tunegrid}}{Two-dimensional \code{data.frame}, where each row contains one pair of values considered for \code{nsplits} (first entry) and \code{proptry} (second entry).}
##' \item{\code{ooberrs}}{The out-of-bag prediction errors obtained for each pair of values considered for \code{nsplits} and \code{proptry}, where the ordering of pairs of values is the same as in \code{tunegrid} (see above).}
##' @examples
##'
##' ## Load package:
##'
##' library("diversityForest")
##'
##'
##' ## Set seed to obtain reproducible results:
##'
##' set.seed(1234)
##'
##'
##' ## Tuning parameter optimization for the iris data set:
##'
##' tuneres <- tunedivfor(formula = Species ~ ., data = iris, num.trees.pre = 20)
##' # NOTE: num.trees.pre = 20 is specified too small for practical
##' # purposes - the out-of-bag error estimates of the forests
##' # constructed during optimization will be much too variable!!
##' # In practice, num.trees.pre = 500 (default value) or a
##' # larger number should be used.
##'
##' tuneres
##'
##' tuneres$nsplitsopt
##' tuneres$proptryopt
##' tuneres$tunegrid
##' tuneres$ooberrs
##'
##' @author Roman Hornung
##' @references
##' \itemize{
##' \item Hornung, R. (2022). Diversity forests: Using split sampling to enable innovative complex split procedures in random forests. SN Computer Science 3(2):1, <\doi{10.1007/s42979-021-00920-1}>.
##' \item Wright, M. N., Ziegler, A. (2017). ranger: A fast Implementation of Random Forests for High Dimensional Data in C++ and R. Journal of Statistical Software 77:1-17, <\doi{10.18637/jss.v077.i01}>.
##' }
##' @seealso \code{\link{divfor}}
##' @encoding UTF-8
##' @useDynLib diversityForest, .registration = TRUE
##' @importFrom Rcpp evalCpp
##' @import stats
##' @import utils
##' @importFrom Matrix Matrix
##' @export
tunedivfor <- function(formula = NULL, data = NULL,
                       nsplitsgrid = c(2, 5, 10, 30, 50, 100, 200),
                       proptrygrid = c(0.05, 1),
                       num.trees.pre = 500) {
  # Build all (nsplits, proptry) pairs. 'proptrygrid' is given first so
  # that proptry varies fastest, then the columns are swapped back to
  # (nsplits, proptry) order; with this row ordering which.min() below
  # breaks ties in favour of the smallest parameter values, as promised
  # in the documentation.
  tunegrid <- expand.grid(proptrygrid = proptrygrid, nsplitsgrid = nsplitsgrid,
                          stringsAsFactors = FALSE)[, 2:1]
  # Pre-allocate the OOB-error vector (the original grew a length-1
  # vector element by element) and use seq_len() so a degenerate empty
  # grid yields zero iterations instead of an out-of-bounds index.
  ooberrs <- numeric(nrow(tunegrid))
  for (i in seq_len(nrow(tunegrid))) {
    # Fit one forest per parameter pair; only its out-of-bag
    # prediction error is retained.
    ooberrs[i] <- divfor(formula, data = data, num.trees = num.trees.pre,
                         nsplits = tunegrid$nsplitsgrid[i],
                         proptry = tunegrid$proptrygrid[i],
                         write.forest = FALSE, verbose = FALSE)$prediction.error
  }
  # First minimum = smallest parameter values among ties (see above).
  bestind <- which.min(ooberrs)
  nsplitsopt <- tunegrid$nsplitsgrid[bestind]
  proptryopt <- tunegrid$proptrygrid[bestind]
  result <- list(nsplitsopt = nsplitsopt, proptryopt = proptryopt,
                 tunegrid = tunegrid, ooberrs = ooberrs)
  class(result) <- "tunedivfor"
  return(result)
}
|
# Main figures
# NOTE(review): rm(list = ls()) wipes the user's entire global
# workspace whenever this script is sourced; consider removing it and
# running the script in a fresh R session instead.
rm(list = ls())
# Load libraries and data -------------------------------------------------------------------------------------------------------
library(ggplot2)
library(tidyverse)
library(ggpubr)
# Presumably provides the modXX.df simulation summaries and vio.df used
# below -- confirm the object set against the script that writes this
# .RData file before renaming anything here.
load('age_flow_summary.RData')
# Organize data -----------------------------------------------------------------------------------------------------------------
# Organize data for alternative maturation rate scenarios
# Stack the twelve maturation-rate model runs (three age-structure
# levels within each of four climate scenarios), tagging each with its
# climate label and an 'age_scen' code. The fractional part of
# age_scen appears to encode the climate scenario (0.7/0.9/1.1/1.3) --
# presumably used as jittered x-positions when plotting; confirm
# against the figure code.
tau.df <- rbind(mod01.df %>% mutate(climate='Contemporary', age_scen = 0.7),
mod03.df %>% mutate(climate='Contemporary', age_scen = 1.7),
mod05.df %>% mutate(climate='Contemporary', age_scen = 2.7),
mod06.df %>% mutate(climate='Longer duration', age_scen = 0.9),
mod08.df %>% mutate(climate='Longer duration', age_scen = 1.9),
mod10.df %>% mutate(climate='Longer duration', age_scen = 2.9),
mod11.df %>% mutate(climate='More frequent', age_scen = 1.1),
mod13.df %>% mutate(climate='More frequent', age_scen = 2.1),
mod15.df %>% mutate(climate='More frequent', age_scen = 3.1),
mod16.df %>% mutate(climate='More intense', age_scen = 1.3),
mod18.df %>% mutate(climate='More intense', age_scen = 2.3),
mod20.df %>% mutate(climate='More intense', age_scen = 3.3))
# Lookup table mapping each maturation-rate scenario id to its
# age_scen code (both stored as character for the join below).
age.scen.df1 <- data.frame(scenario = as.character(c(1,3,5,6,8,10,11,13,15,16,18,20)),
age_scen = as.character(c(0.7,1.7,2.7,0.9,1.9,2.9,1.1,2.1,3.1,1.3,2.3,3.3)))
# Violin-plot data restricted to the maturation-rate scenarios, with
# climate labels attached and age_scen joined in by scenario id.
tau.vio.df <- vio.df %>%
filter(scenario %in% as.character(c(1,3,5,6,8,10,11,13,15,16,18,20))) %>%
mutate(climate = ifelse(scenario %in% as.character(c(1,3,5)), 'Contemporary',
ifelse(scenario %in% as.character(c(6,8,10)), 'Longer duration',
ifelse(scenario %in% as.character(c(11,13,15)), 'More frequent',
'More intense')))) %>%
left_join(., age.scen.df1, by = "scenario")
# One row per (climate, age-structure) cell: CVs of spawners, harvest
# and total run pulled from the corresponding maturation-rate models.
tau.cv.df <- data.frame(climate_scenario = rep(c('Contemporary','Duration','Frequency','Intensity'), each=3),
age_struct = c(seq(0.7,2.7,by=1),seq(0.9,2.9,by=1),seq(1.1,3.1,by=1),seq(1.3,3.3,by=1)),
spawn_cv = c(mod01.df$spawn.cv, mod03.df$spawn.cv, mod05.df$spawn.cv, mod06.df$spawn.cv, mod08.df$spawn.cv, mod10.df$spawn.cv, mod11.df$spawn.cv, mod13.df$spawn.cv, mod15.df$spawn.cv, mod16.df$spawn.cv, mod18.df$spawn.cv, mod20.df$spawn.cv),
harvest_cv = c(mod01.df$harvest.cv, mod03.df$harvest.cv, mod05.df$harvest.cv, mod06.df$harvest.cv, mod08.df$harvest.cv, mod10.df$harvest.cv, mod11.df$harvest.cv, mod13.df$harvest.cv, mod15.df$harvest.cv, mod16.df$harvest.cv, mod18.df$harvest.cv, mod20.df$harvest.cv),
totalrun_cv = c(mod01.df$total.run.cv, mod03.df$total.run.cv, mod05.df$total.run.cv, mod06.df$total.run.cv, mod08.df$total.run.cv, mod10.df$total.run.cv, mod11.df$total.run.cv, mod13.df$total.run.cv, mod15.df$total.run.cv, mod16.df$total.run.cv, mod18.df$total.run.cv, mod20.df$total.run.cv))
# Organize data for alternative natural mortality rate scenarios
# Model outputs for the natural-mortality (eta) scenarios, tagged with the
# climate regime and the x-axis position used on the age-structure axis.
# Scenario order: (2,3,4) contemporary, (7,8,9) duration, (12,13,14)
# frequency, (17,18,19) intensity -- three age-structure levels each.
eta.df <- do.call(rbind, Map(
  function(d, clim, pos) d %>% mutate(climate = clim, age_scen = pos),
  list(mod02.df, mod03.df, mod04.df,
       mod07.df, mod08.df, mod09.df,
       mod12.df, mod13.df, mod14.df,
       mod17.df, mod18.df, mod19.df),
  rep(c('Contemporary', 'Longer duration', 'More frequent', 'More intense'), each = 3),
  c(0.7, 1.7, 2.7, 0.9, 1.9, 2.9, 1.1, 2.1, 3.1, 1.3, 2.3, 3.3)
))
# Lookup table: eta scenario id -> x-axis position for the violin plots.
age.scen.df2 <- data.frame(
  scenario = as.character(c(2, 3, 4, 7, 8, 9, 12, 13, 14, 17, 18, 19)),
  age_scen = as.character(c(0.7, 1.7, 2.7, 0.9, 1.9, 2.9, 1.1, 2.1, 3.1, 1.3, 2.3, 3.3))
)
# Violin-plot data for the natural-mortality scenarios, labelled with the
# climate regime each model run belongs to.
eta.vio.df <- vio.df %>%
  filter(scenario %in% as.character(c(2, 3, 4, 7, 8, 9, 12, 13, 14, 17, 18, 19))) %>%
  mutate(climate = case_when(
    scenario %in% as.character(c(2, 3, 4))    ~ 'Contemporary',
    scenario %in% as.character(c(7, 8, 9))    ~ 'Longer duration',
    scenario %in% as.character(c(12, 13, 14)) ~ 'More frequent',
    TRUE                                      ~ 'More intense'
  )) %>%
  left_join(age.scen.df2, by = "scenario")
# CV summary for the natural-mortality (eta) scenarios: one row per climate
# scenario x age-structure level, pulled from the per-model summaries
# (2,3,4 contemporary; 7,8,9 duration; 12,13,14 frequency; 17,18,19 intensity).
# BUG FIX: totalrun_cv previously copied the maturation (tau) model list
# (mod01, mod05, mod06, mod10, mod11, mod15, mod16, mod20); it now uses the
# same eta models as spawn_cv and harvest_cv.
eta.cv.df <- data.frame(climate_scenario = rep(c('Contemporary','Duration','Frequency','Intensity'), each=3),
age_struct = c(seq(0.7,2.7,by=1),seq(0.9,2.9,by=1),seq(1.1,3.1,by=1),seq(1.3,3.3,by=1)),
spawn_cv = c(mod02.df$spawn.cv, mod03.df$spawn.cv, mod04.df$spawn.cv, mod07.df$spawn.cv, mod08.df$spawn.cv, mod09.df$spawn.cv, mod12.df$spawn.cv, mod13.df$spawn.cv, mod14.df$spawn.cv, mod17.df$spawn.cv, mod18.df$spawn.cv, mod19.df$spawn.cv),
harvest_cv = c(mod02.df$harvest.cv, mod03.df$harvest.cv, mod04.df$harvest.cv, mod07.df$harvest.cv, mod08.df$harvest.cv, mod09.df$harvest.cv, mod12.df$harvest.cv, mod13.df$harvest.cv, mod14.df$harvest.cv, mod17.df$harvest.cv, mod18.df$harvest.cv, mod19.df$harvest.cv),
totalrun_cv = c(mod02.df$total.run.cv, mod03.df$total.run.cv, mod04.df$total.run.cv, mod07.df$total.run.cv, mod08.df$total.run.cv, mod09.df$total.run.cv, mod12.df$total.run.cv, mod13.df$total.run.cv, mod14.df$total.run.cv, mod17.df$total.run.cv, mod18.df$total.run.cv, mod19.df$total.run.cv))
### OVERFISHING TAU AND ETA
# Maturation (tau) scenario ids used for the overfished-status summaries.
tau.mods <- c(1,3,5,6,8,10,11,13,15,16,18,20)
# Stack the per-model overfished-status data frames (mod01.overfished, ...)
# into one frame tagged with scenario id, climate regime, and x position.
# Uses lapply + do.call(rbind, ...) instead of growing the result with
# rbind inside a loop (same output, no quadratic copying).
tau.overfished.df <- do.call(rbind, lapply(tau.mods, function(i) {
  # Fetch e.g. "mod01.overfished" by constructed name.
  tmp.of <- get(paste0('mod', stringr::str_pad(i, 2, pad = '0'), '.overfished'))
  tmp.of %>%
    mutate(scenario = as.character(i)) %>%
    # as.character() makes the comparison explicit instead of relying on
    # %in%'s implicit numeric -> character coercion.
    mutate(climate = ifelse(scenario %in% as.character(c(1,3,5)), 'Contemporary',
                            ifelse(scenario %in% as.character(c(6,8,10)), 'Longer duration',
                                   ifelse(scenario %in% as.character(c(11,13,15)), 'More frequent',
                                          'More intense')))) %>%
    # Attach the violin-plot x position for this scenario.
    left_join(age.scen.df1, by = 'scenario')
}))
# Natural-mortality (eta) scenario ids used for the overfished-status summaries.
eta.mods <- c(2,3,4,7,8,9,12,13,14,17,18,19)
# Stack the per-model overfished-status data frames into one frame tagged
# with scenario id, climate regime, and x position. Uses lapply +
# do.call(rbind, ...) instead of growing the result with rbind in a loop.
eta.overfished.df <- do.call(rbind, lapply(eta.mods, function(i) {
  # Fetch e.g. "mod02.overfished" by constructed name.
  tmp.of <- get(paste0('mod', stringr::str_pad(i, 2, pad = '0'), '.overfished'))
  tmp.of %>%
    mutate(scenario = as.character(i)) %>%
    # as.character() makes the comparison explicit instead of relying on
    # %in%'s implicit numeric -> character coercion.
    mutate(climate = ifelse(scenario %in% as.character(c(2,3,4)), 'Contemporary',
                            ifelse(scenario %in% as.character(c(7,8,9)), 'Longer duration',
                                   ifelse(scenario %in% as.character(c(12,13,14)), 'More frequent',
                                          'More intense')))) %>%
    # Attach the violin-plot x position for this scenario.
    left_join(age.scen.df2, by = 'scenario')
}))
# MAIN FIGURES ----------------------------
## FIGURE 1. Harvest control rule -----------------------------------------------------------------------------
library(ggplot2)
library(ggpubr)
# Grid of Sacramento Index values (0 to 500,000 fish) at which to
# evaluate the harvest control rule.
tmp.si <- seq(0, 500000, length.out = 1000)
# control.rule() is defined earlier in this script; assumed here to
# return one exploitation rate per SI value -- TODO confirm.
tmp.er <- sapply(tmp.si, control.rule)
# Figure 1: allowable exploitation rate as a function of the Sacramento
# Index (plotted in thousands of fish).
# NOTE(review): geom_line(size = ...) is deprecated in ggplot2 >= 3.4 in
# favour of `linewidth` -- confirm the ggplot2 version pinned for this script.
plot1 <- ggplot() +
geom_line(aes(x = tmp.si/1000, y = tmp.er), size = 1) +
scale_x_continuous(expand = c(0, 0)) +
scale_y_continuous(expand = c(0, 0), limits = c(0, 0.8)) +
labs(x = 'Sacramento Index (thousands)', y = 'Allowable exploitation rate') +
theme_classic() +
theme(text = element_text(size = 16), plot.margin = unit(c(0.5,1,0.5,0.5), 'cm'))
## FIGURE 3. Spawner escapement violin plots and CV --------------------------------------------------------------------------------
# Shared theme for the violin panels of the main figures: centred title,
# 16-pt text, x-axis text/ticks suppressed (group labels are annotated
# separately), and no legend (one panel carries the legend instead).
vio.plot.settings <- theme(plot.title = element_text(hjust = 0.5),
                           text = element_text(size = 16),
                           axis.text.x = element_blank(),
                           axis.ticks.x = element_blank(),
                           legend.position = 'none',
                           legend.title = element_blank(),
                           plot.margin = unit(c(0.5, 0, 0, 0.7), 'cm'))
# Shared theme for the CV scatter panels of the main figures.
cv.plot.settings <- theme(text = element_text(size = 16),
                          legend.position = 'none',
                          legend.title = element_blank(),
                          plot.margin = unit(c(0.5, 0, 0, 0.7), 'cm'))
# Figure 3b: spawner escapement distributions across maturation (tau)
# scenarios, coloured by climate regime.
spawn.tau.vio.plot <- ggplot() +
geom_violin(data = tau.vio.df, aes(x = age_scen, y = spawn/1000, fill = climate), draw_quantiles = 0.5) +
# Grey plus Okabe-Ito orange/blue/green (colour-blind-safe palette).
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = '', title = 'Maturation') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 600)) +
scale_x_discrete(expand = c(0,0)) +
annotate('text', x = 2.5, y = 570, label = '[Early maturation]', size = 5) +
annotate('text', x = 10.5, y = 570, label = '[Delayed maturation]', size = 5) +
vio.plot.settings
# Figure 3a: spawner escapement distributions across natural-mortality
# (eta) scenarios.
# NOTE(review): first fill colour is "grey" here but "grey75" in the tau
# panel above -- confirm whether that difference is intentional.
spawn.eta.vio.plot <- ggplot(data = eta.vio.df) +
geom_violin(aes(x = age_scen, y = spawn/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = 'Spawner escapement (thousands)', title = 'Natural mortality') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 600)) +
annotate('text', x = 2, y = 570, label = '[High mortality]', size = 5) +
annotate('text', x = 11, y = 570, label = '[Low mortality]', size = 5) +
vio.plot.settings
# Figure 3d: CV of spawner escapement vs age-structure level (tau).
spawnCV.tau.plot <- ggplot(data = tau.cv.df) +
geom_point(aes(x = age_struct, y = spawn_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = '') +
# scale_x_continuous(breaks = seq(1,3), labels = c(expression(tau[3]~"= 0.99"), 'Base case', expression(tau[3]~"= 0.25"))) +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
# scale_y_continuous(limits=c(0.65, 0.8)) +
cv.plot.settings
# Figure 3c: CV of spawner escapement vs age-structure level (eta); this
# panel carries the single legend for the composite figure.
spawnCV.eta.plot <- ggplot(data = eta.cv.df) +
geom_point(aes(x = age_struct, y = spawn_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = 'CV of spawner escapement') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
# scale_y_continuous(limits=c(0.60, 0.7)) +
cv.plot.settings +
theme(legend.position = c(0.8, 0.9))
# Assemble Figure 3: eta column (panels a, c) left, tau column (b, d) right.
spawn.tau <- ggarrange(spawn.tau.vio.plot, spawnCV.tau.plot, nrow=2, labels = c('b', 'd'))
spawn.eta <- ggarrange(spawn.eta.vio.plot, spawnCV.eta.plot, nrow=2, labels = c('a', 'c'))
spawn.final <- ggarrange(spawn.eta, spawn.tau, ncol=2)
## FIGURE 4. Harvest violin plots and CV -------------------------------------------------------------------------------------------
# Figure 4b: harvest distributions across maturation (tau) scenarios.
harvest.tau.vio.plot <- ggplot() +
geom_violin(data = tau.vio.df, aes(x = age_scen, y = harvest/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = '', title = 'Maturation') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 700), breaks = seq(0,700,100)) +
scale_x_discrete(expand = c(0,0)) +
annotate('text', x = 2.5, y = 650, label = '[Early maturation]', size = 4) +
annotate('text', x = 10.5, y = 650, label = '[Delayed maturation]', size = 4) +
vio.plot.settings
# Figure 4a: harvest distributions across natural-mortality (eta) scenarios.
# NOTE(review): first fill colour is "grey" here vs "grey75" above --
# confirm whether that difference is intentional.
harvest.eta.vio.plot <- ggplot(data = eta.vio.df) +
geom_violin(aes(x = age_scen, y = harvest/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = 'Harvest (thousands)', title = 'Natural mortality') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 700), breaks = seq(0,700,100)) +
annotate('text', x = 2, y = 650, label = '[High mortality]', size = 4) +
annotate('text', x = 11, y = 650, label = '[Low mortality]', size = 4) +
vio.plot.settings
# Figure 4d: CV of harvest vs age-structure level (tau).
harvestCV.tau.plot <- ggplot(data = tau.cv.df) +
geom_point(aes(x = age_struct, y = harvest_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = '') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
# scale_y_continuous(limits=c(0.7, 0.825), breaks = seq(0.7,0.825,0.025)) +
cv.plot.settings
# Figure 4c: CV of harvest vs age-structure level (eta); carries the legend.
harvestCV.eta.plot <- ggplot(data = eta.cv.df) +
geom_point(aes(x = age_struct, y = harvest_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = 'CV of harvest') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
# scale_y_continuous(limits=c(0.7, 0.825), breaks = seq(0.7,0.825,0.025)) +
cv.plot.settings +
theme(legend.position = c(0.8, 0.9))
# Assemble Figure 4: eta column (panels a, c) left, tau column (b, d) right.
harvest.tau <- ggarrange(harvest.tau.vio.plot, harvestCV.tau.plot, nrow=2, labels = c('b', 'd'))
harvest.eta <- ggarrange(harvest.eta.vio.plot, harvestCV.eta.plot, nrow=2, labels = c('a', 'c'))
harvest.final <- ggarrange(harvest.eta, harvest.tau, ncol=2)
## FIGURE 5. Percent overfished status boxplot -------------------------------------------------------------------------------------
# Theme shared by the Figure 5 overfished-status boxplot panels.
of.theme.settings <- theme(legend.position = 'none',
legend.title = element_blank(),
legend.text = element_text(size = 14),
plot.title = element_text(hjust = 0.5, size = 16),
axis.ticks.x = element_blank(),
axis.text.x = element_text(hjust = 0, size = 16),
axis.text.y = element_text(size = 16),
axis.title.y = element_text(size = 16),
axis.title.x = element_text(size = 16),
plot.margin = unit(c(0.7,0.5,0,0.5), "cm"))
# Figure 5b: % of years in overfished status, maturation (tau) scenarios.
# x breaks are placed at the 0.9/1.9/2.9 positions (one per age-structure
# level) and labelled generically Low/Base/High.
of.tau.plot <- ggplot() +
geom_boxplot(data = tau.overfished.df, aes(x = age_scen, y = prop.overfished*100, fill = climate), outlier.shape = NA) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = '', title = 'Maturation') +
scale_x_discrete(breaks = c('0.9','1.9','2.9'), labels = c("Low","Base","High")) +
# Short segments just below y = 0 act as group tick marks; they are only
# visible because coord_cartesian(clip = "off") disables panel clipping.
annotate('segment', x = 2.5, xend = 2.5, y = -0.25, yend = 0) +
annotate('segment', x = 6.5, xend = 6.5, y = -0.25, yend = 0) +
annotate('segment', x = 10.5, xend = 10.5, y = -0.25, yend = 0) +
scale_y_continuous(expand = c(0, 0)) +
coord_cartesian(ylim = c(0,40), clip = "off") +
annotate('text', x = 2.5, y = 39, label = '[Early maturation]', size = 5) +
annotate('text', x = 10.5, y = 39, label = '[Delayed maturation]', size = 5) +
of.theme.settings
# Figure 5a: % of years in overfished status, natural-mortality (eta)
# scenarios; this panel carries the legend.
of.eta.plot <- ggplot() +
geom_boxplot (data = eta.overfished.df, aes(x = age_scen, y = prop.overfished*100, fill = climate), outlier.shape = NA) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = '% overfished status', title = 'Natural mortality') +
scale_x_discrete(breaks = c('0.9','1.9','2.9'), labels = c("Low","Base","High")) +
annotate('segment', x = 2.5, xend = 2.5, y = -0.25, yend = 0) +
annotate('segment', x = 6.5, xend = 6.5, y = -0.25, yend = 0) +
annotate('segment', x = 10.5, xend = 10.5, y = -0.25, yend = 0) +
scale_y_continuous(expand = c(0, 0)) +
coord_cartesian(ylim = c(0,40), clip = "off") +
annotate('text', x = 2, y = 39, label = '[High mortality]', size = 5) +
annotate('text', x = 11, y = 39, label = '[Low mortality]', size = 5) +
of.theme.settings +
theme(legend.position = c(0.85,0.75))
# Assemble Figure 5: eta panel (a) left, tau panel (b) right.
ggarrange(of.eta.plot, of.tau.plot, labels = c('a','b'))
## FIGURE 6. Percent fishery restrictions ------------------------------------------------------------------------------------------
# Theme shared by the Figure 6 fishery-restriction boxplot panels.
c.plot.settings <- theme(legend.position = 'none',
plot.title = element_text(hjust = 0.5, size = 15),
axis.ticks.x = element_blank(),
axis.text.x = element_blank(),
axis.title = element_text(size = 15),
text = element_text(size = 15),
plot.margin = unit(c(0.5,0.2,0,0.5), "cm"))
# Figure 6b: % of years with exploitation scalar c = 0.7, tau scenarios.
# x breaks at the 0.9/1.9/2.9 positions label the three age-structure levels.
prop70.tau.plot <- ggplot() +
geom_boxplot(data = tau.overfished.df, aes(x = age_scen, y = prop.70*100, fill = climate), outlier.shape = NA) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = '', title = 'Maturation') +
scale_x_discrete(breaks = c('0.9','1.9','2.9'), labels = c("Low","Base","High")) +
# Short segments just below the panel act as group tick marks (visible
# because coord_cartesian(clip = "off") disables clipping).
annotate('segment', x = 2.5, xend = 2.5, y = 49, yend = 50) +
annotate('segment', x = 6.5, xend = 6.5, y = 49, yend = 50) +
annotate('segment', x = 10.5, xend = 10.5, y = 49, yend = 50) +
scale_y_continuous(expand = c(0,0)) +
coord_cartesian(ylim = c(50,105), clip = "off") +
annotate('text', x = 2.5, y = 100, label = '[Early maturation]', size = 4) +
annotate('text', x = 10.5, y = 100, label = '[Delayed maturation]', size = 4) +
c.plot.settings
# Figure 6a: % of years with c = 0.7, eta scenarios.
prop70.eta.plot <- ggplot() +
geom_boxplot(data = eta.overfished.df, aes(x = age_scen, y = prop.70*100, fill = climate), outlier.shape = NA) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = expression(paste("% ",italic(c)," = 0.7")), title = 'Natural mortality') +
scale_x_discrete(breaks = c('0.9','1.9','2.9'), labels = c("Low","Base","High")) +
annotate('segment', x = 2.5, xend = 2.5, y = 49, yend = 50) +
annotate('segment', x = 6.5, xend = 6.5, y = 49, yend = 50) +
annotate('segment', x = 10.5, xend = 10.5, y = 49, yend = 50) +
scale_y_continuous(expand = c(0,0)) +
coord_cartesian(ylim = c(50,105), clip = "off") +
annotate('text', x = 2.5, y = 100, label = '[High mortality]', size = 4) +
annotate('text', x = 10.5, y = 100, label = '[Low mortality]', size = 4) +
c.plot.settings
# Figure 6d: % of years with c = 0.25, tau scenarios.
prop25.tau.plot <- ggplot() +
geom_boxplot(data = tau.overfished.df, aes(x = age_scen, y = prop.25*100, fill = climate), outlier.shape = NA) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = '', title = '') +
scale_x_discrete(breaks = c('0.9','1.9','2.9'), labels = c("Low","Base","High")) +
annotate('segment', x = 2.5, xend = 2.5, y = -0.25, yend = 0) +
annotate('segment', x = 6.5, xend = 6.5, y = -0.25, yend = 0) +
annotate('segment', x = 10.5, xend = 10.5, y = -0.25, yend = 0) +
scale_y_continuous(expand = c(0,0)) +
coord_cartesian(ylim = c(0,15), clip = "off") +
c.plot.settings
# Figure 6c: % of years with c = 0.25, eta scenarios; carries the legend.
prop25.eta.plot <- ggplot() +
geom_boxplot(data = eta.overfished.df, aes(x = age_scen, y = prop.25*100, fill = climate), outlier.shape = NA) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = expression(paste("% ",italic(c)," = 0.25")), title = '') +
scale_x_discrete(breaks = c('0.9','1.9','2.9'), labels = c("Low","Base","High")) +
annotate('segment', x = 2.5, xend = 2.5, y = -0.25, yend = 0) +
annotate('segment', x = 6.5, xend = 6.5, y = -0.25, yend = 0) +
annotate('segment', x = 10.5, xend = 10.5, y = -0.25, yend = 0) +
scale_y_continuous(expand = c(0,0)) +
coord_cartesian(ylim = c(0,15), clip = "off") +
c.plot.settings +
theme(legend.position = c(0.8,0.9),
legend.title = element_blank())
# Figure 6f: % of years with c = 0.10, tau scenarios (bottom row shows
# x-axis labels, hence the axis.text.x override).
prop10.tau.plot <- ggplot() +
geom_boxplot(data = tau.overfished.df, aes(x = age_scen, y = prop.10*100, fill = climate), outlier.shape = NA) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = '', title = '') +
scale_x_discrete(breaks = c('0.9','1.9','2.9'), labels = c("Low","Base","High")) +
annotate('segment', x = 2.5, xend = 2.5, y = -0.12, yend = 0) +
annotate('segment', x = 6.5, xend = 6.5, y = -0.12, yend = 0) +
annotate('segment', x = 10.5, xend = 10.5, y = -0.12, yend = 0) +
scale_y_continuous(expand = c(0,0)) +
coord_cartesian(ylim = c(0,10), clip = "off") +
c.plot.settings +
theme(axis.text.x = element_text(size = 15, hjust = 0))
# Figure 6e: % of years with c = 0.10, eta scenarios.
prop10.eta.plot <- ggplot() +
geom_boxplot(data = eta.overfished.df, aes(x = age_scen, y = prop.10*100, fill = climate), outlier.shape = NA) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = expression(paste("% ",italic(c)," = 0.10")), title = '') +
scale_x_discrete(breaks = c('0.9','1.9','2.9'), labels = c("Low","Base","High")) +
annotate('segment', x = 2.5, xend = 2.5, y = -0.12, yend = 0) +
annotate('segment', x = 6.5, xend = 6.5, y = -0.12, yend = 0) +
annotate('segment', x = 10.5, xend = 10.5, y = -0.12, yend = 0) +
scale_y_continuous(expand = c(0,0)) +
coord_cartesian(ylim = c(0,10), clip = "off") +
c.plot.settings +
theme(axis.text.x = element_text(size = 15, hjust = 0))
# Assemble Figure 6: 3 rows (c = 0.7 / 0.25 / 0.10), eta left, tau right.
ggarrange(prop70.eta.plot,prop70.tau.plot,
prop25.eta.plot,prop25.tau.plot,
prop10.eta.plot,prop10.tau.plot,
nrow = 3, ncol = 2, labels = c('a','b','c','d','e','f'))
# SUPPLEMENTAL FIGURES -----------------------------
## FIGURE S10. Simulated hydrographs -------------------------------------------
# Figure S10 data: one simulated hydrograph per climate scenario.
# NOTE(review): flow.sim() is called with a hard-coded 100 years while the
# year axis uses n.yr -- presumably n.yr == 100; confirm.
hydro.df <- data.frame(year = seq(1,n.yr),
base = flow.sim(100, 'base', flow.full),
duration = flow.sim(100, 'longer duration', flow.full),
frequency = flow.sim(100, 'more frequent', flow.full),
intensity = flow.sim(100, 'more intense', flow.full))
# Theme shared by the four hydrograph panels.
hydro.plot.settings <- theme(axis.text = element_text(size = 14),
axis.title = element_text(size = 14),
title = element_text(size = 14),
plot.margin = unit(c(0.5,0,0,0.1),'cm'))
# Each panel: simulated flow series, dashed lines at the 10,712 and 4,295
# flow thresholds, and right-margin text giving the % of years below each
# threshold.
# BUG FIX: y-axis labels read 'Flow (csf)'; the unit is cubic feet per
# second, i.e. 'cfs'.
hydro.contemporary <- ggplot() +
geom_line(data = hydro.df, aes(x = year, y = base), lwd = 0.5) +
geom_segment(aes(x = 0, xend = 100, y = 10712, yend = 10712), lwd = 1, lty = 'dashed') +
geom_segment(aes(x = 0, xend = 100, y = 4295, yend = 4295), lwd = 1, lty = 'dashed') +
labs(x = '', y = 'Flow (cfs)', title = 'Contemporary') +
theme_classic() +
scale_x_continuous(expand = c(0,0), limits = c(0, 120), breaks = seq(0,100,20)) +
annotate('text', x = n.yr+8, y = 10712, label = paste0(as.character((sum(hydro.df$base<10712)/n.yr)*100),'%'), size = 6) +
annotate('text', x = n.yr+8, y = 4295, label = paste0(as.character((sum(hydro.df$base<4295)/n.yr)*100),'%'), size = 6) +
hydro.plot.settings
hydro.duration <- ggplot() +
geom_line(data = hydro.df, aes(x = year, y = duration), lwd = 0.5) +
geom_segment(aes(x = 0, xend = 100, y = 10712, yend = 10712), lwd = 1, lty = 'dashed') +
geom_segment(aes(x = 0, xend = 100, y = 4295, yend = 4295), lwd = 1, lty = 'dashed') +
labs(x = '', y = '', title = 'Longer duration') +
theme_classic() +
scale_x_continuous(expand = c(0,0), limits = c(0, 120), breaks = seq(0,100,20)) +
annotate('text', x = n.yr+8, y = 10712, label = paste0(as.character((sum(hydro.df$duration<10712)/n.yr)*100),'%'), size = 6) +
annotate('text', x = n.yr+8, y = 4295, label = paste0(as.character((sum(hydro.df$duration<4295)/n.yr)*100),'%'), size = 6) +
hydro.plot.settings
hydro.frequency <- ggplot() +
geom_line(data = hydro.df, aes(x = year, y = frequency), lwd = 0.5) +
geom_segment(aes(x = 0, xend = 100, y = 10712, yend = 10712), lwd = 1, lty = 'dashed') +
geom_segment(aes(x = 0, xend = 100, y = 4295, yend = 4295), lwd = 1, lty = 'dashed') +
labs(x = 'Simulation year', y = 'Flow (cfs)', title = 'More frequent') +
theme_classic() +
scale_x_continuous(expand = c(0,0), limits = c(0, 120), breaks = seq(0,100,20)) +
annotate('text', x = n.yr+8, y = 10712, label = paste0(as.character((sum(hydro.df$frequency<10712)/n.yr)*100),'%'), size = 6) +
annotate('text', x = n.yr+8, y = 4295, label = paste0(as.character((sum(hydro.df$frequency<4295)/n.yr)*100),'%'), size = 6) +
hydro.plot.settings
hydro.intensity <- ggplot() +
geom_line(data = hydro.df, aes(x = year, y = intensity), lwd = 0.5) +
geom_segment(aes(x = 0, xend = 100, y = 10712, yend = 10712), lwd = 1, lty = 'dashed') +
geom_segment(aes(x = 0, xend = 100, y = 4295, yend = 4295), lwd = 1, lty = 'dashed') +
labs(x = 'Simulation year', y = '', title = 'More intense') +
theme_classic() +
scale_x_continuous(expand = c(0,0), limits = c(0, 120), breaks = seq(0,100,20)) +
annotate('text', x = n.yr+8, y = 10712, label = paste0(as.character((sum(hydro.df$intensity<10712)/n.yr)*100),'%'), size = 6) +
annotate('text', x = n.yr+8, y = 4295, label = paste0(as.character((sum(hydro.df$intensity<4295)/n.yr)*100),'%'), size = 6) +
hydro.plot.settings
# Assemble Figure S10 in a 2x2 grid.
ggarrange(hydro.contemporary,hydro.duration, hydro.frequency, hydro.intensity)
## FIGURE S13. Sensitivity to the CV of realized harvest rate ---------------------------------
load("cv_er_sa.RData")
cver.vio.df <- NULL
for(i in 1:20){
assign(paste0("cver.sa.",str_pad(as.character(i), width = 2, pad = "0"),".df"), model_summary(get(paste0("cver.sa.",str_pad(as.character(i), width = 2, pad = "0")))))
assign(paste0("cver.sa.",str_pad(as.character(i), width = 2, pad = "0"),".df.vio"), violin_df(get(paste0("cver.sa.",str_pad(as.character(i), width = 2, pad = "0"))), as.character(i)))
cver.vio.df <- rbind(cver.vio.df, get(paste0("cver.sa.",str_pad(as.character(i), width = 2, pad = "0"),".df.vio")))
}
# Scenario summaries for the maturation (tau) runs of the harvest-rate-CV
# sensitivity analysis, tagged with climate regime and x-axis position.
cver.tau.df <- do.call(rbind, Map(
  function(d, clim, pos) d %>% mutate(climate = clim, age_scen = pos),
  list(cver.sa.01.df, cver.sa.03.df, cver.sa.05.df,
       cver.sa.06.df, cver.sa.08.df, cver.sa.10.df,
       cver.sa.11.df, cver.sa.13.df, cver.sa.15.df,
       cver.sa.16.df, cver.sa.18.df, cver.sa.20.df),
  rep(c('Contemporary', 'Longer duration', 'More frequent', 'More intense'), each = 3),
  c(0.7, 1.7, 2.7, 0.9, 1.9, 2.9, 1.1, 2.1, 3.1, 1.3, 2.3, 3.3)
))
# Re-create the tau scenario -> x-position lookup (identical to the one
# built for the main figures).
age.scen.df1 <- data.frame(
  scenario = as.character(c(1, 3, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20)),
  age_scen = as.character(c(0.7, 1.7, 2.7, 0.9, 1.9, 2.9, 1.1, 2.1, 3.1, 1.3, 2.3, 3.3))
)
# Violin data for the tau scenarios of the harvest-rate-CV sensitivity run.
cver.tau.vio.df <- cver.vio.df %>%
  filter(scenario %in% as.character(c(1, 3, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20))) %>%
  mutate(climate = case_when(
    scenario %in% as.character(c(1, 3, 5))    ~ 'Contemporary',
    scenario %in% as.character(c(6, 8, 10))   ~ 'Longer duration',
    scenario %in% as.character(c(11, 13, 15)) ~ 'More frequent',
    TRUE                                      ~ 'More intense'
  )) %>%
  left_join(age.scen.df1, by = "scenario")
# CV summary for the tau sensitivity scenarios, one value per climate
# scenario x age-structure level.
cver.tau.cv.df <- data.frame(
  climate_scenario = rep(c('Contemporary', 'Duration', 'Frequency', 'Intensity'), each = 3),
  age_struct = c(seq(0.7, 2.7, by = 1), seq(0.9, 2.9, by = 1), seq(1.1, 3.1, by = 1), seq(1.3, 3.3, by = 1)),
  spawn_cv = c(cver.sa.01.df$spawn.cv, cver.sa.03.df$spawn.cv, cver.sa.05.df$spawn.cv,
               cver.sa.06.df$spawn.cv, cver.sa.08.df$spawn.cv, cver.sa.10.df$spawn.cv,
               cver.sa.11.df$spawn.cv, cver.sa.13.df$spawn.cv, cver.sa.15.df$spawn.cv,
               cver.sa.16.df$spawn.cv, cver.sa.18.df$spawn.cv, cver.sa.20.df$spawn.cv),
  harvest_cv = c(cver.sa.01.df$harvest.cv, cver.sa.03.df$harvest.cv, cver.sa.05.df$harvest.cv,
                 cver.sa.06.df$harvest.cv, cver.sa.08.df$harvest.cv, cver.sa.10.df$harvest.cv,
                 cver.sa.11.df$harvest.cv, cver.sa.13.df$harvest.cv, cver.sa.15.df$harvest.cv,
                 cver.sa.16.df$harvest.cv, cver.sa.18.df$harvest.cv, cver.sa.20.df$harvest.cv)
)
# Scenario summaries for the natural-mortality (eta) runs of the
# harvest-rate-CV sensitivity analysis.
cver.eta.df <- do.call(rbind, Map(
  function(d, clim, pos) d %>% mutate(climate = clim, age_scen = pos),
  list(cver.sa.02.df, cver.sa.03.df, cver.sa.04.df,
       cver.sa.07.df, cver.sa.08.df, cver.sa.09.df,
       cver.sa.12.df, cver.sa.13.df, cver.sa.14.df,
       cver.sa.17.df, cver.sa.18.df, cver.sa.19.df),
  rep(c('Contemporary', 'Longer duration', 'More frequent', 'More intense'), each = 3),
  c(0.7, 1.7, 2.7, 0.9, 1.9, 2.9, 1.1, 2.1, 3.1, 1.3, 2.3, 3.3)
))
# Re-create the eta scenario -> x-position lookup (identical to the one
# built for the main figures).
age.scen.df2 <- data.frame(
  scenario = as.character(c(2, 3, 4, 7, 8, 9, 12, 13, 14, 17, 18, 19)),
  age_scen = as.character(c(0.7, 1.7, 2.7, 0.9, 1.9, 2.9, 1.1, 2.1, 3.1, 1.3, 2.3, 3.3))
)
# Violin data for the eta scenarios of the sensitivity run.
cver.eta.vio.df <- cver.vio.df %>%
  filter(scenario %in% as.character(c(2, 3, 4, 7, 8, 9, 12, 13, 14, 17, 18, 19))) %>%
  mutate(climate = case_when(
    scenario %in% as.character(c(2, 3, 4))    ~ 'Contemporary',
    scenario %in% as.character(c(7, 8, 9))    ~ 'Longer duration',
    scenario %in% as.character(c(12, 13, 14)) ~ 'More frequent',
    TRUE                                      ~ 'More intense'
  )) %>%
  left_join(age.scen.df2, by = "scenario")
# CV summary for the eta sensitivity scenarios, one value per climate
# scenario x age-structure level.
cver.eta.cv.df <- data.frame(
  climate_scenario = rep(c('Contemporary', 'Duration', 'Frequency', 'Intensity'), each = 3),
  age_struct = c(seq(0.7, 2.7, by = 1), seq(0.9, 2.9, by = 1), seq(1.1, 3.1, by = 1), seq(1.3, 3.3, by = 1)),
  spawn_cv = c(cver.sa.02.df$spawn.cv, cver.sa.03.df$spawn.cv, cver.sa.04.df$spawn.cv,
               cver.sa.07.df$spawn.cv, cver.sa.08.df$spawn.cv, cver.sa.09.df$spawn.cv,
               cver.sa.12.df$spawn.cv, cver.sa.13.df$spawn.cv, cver.sa.14.df$spawn.cv,
               cver.sa.17.df$spawn.cv, cver.sa.18.df$spawn.cv, cver.sa.19.df$spawn.cv),
  harvest_cv = c(cver.sa.02.df$harvest.cv, cver.sa.03.df$harvest.cv, cver.sa.04.df$harvest.cv,
                 cver.sa.07.df$harvest.cv, cver.sa.08.df$harvest.cv, cver.sa.09.df$harvest.cv,
                 cver.sa.12.df$harvest.cv, cver.sa.13.df$harvest.cv, cver.sa.14.df$harvest.cv,
                 cver.sa.17.df$harvest.cv, cver.sa.18.df$harvest.cv, cver.sa.19.df$harvest.cv)
)
##
## CVER PLOTS
##
# Violin-panel theme for the supplemental (cver) figures: same layout as
# the main figures but with smaller (12-pt) text.
vio.plot.settings <- theme(plot.title = element_text(hjust = 0.5),
                           text = element_text(size = 12),
                           axis.text.x = element_blank(),
                           axis.ticks.x = element_blank(),
                           legend.position = 'none',
                           legend.title = element_blank(),
                           plot.margin = unit(c(0.5, 0, 0, 0.7), 'cm'))
# CV-panel theme for the supplemental figures.
cv.plot.settings <- theme(text = element_text(size = 12),
                          legend.position = 'none',
                          legend.title = element_blank(),
                          plot.margin = unit(c(0.5, 0, 0, 0.7), 'cm'))
# Figure S13: spawner-escapement violins for the tau sensitivity scenarios.
cver.spawn.tau.vio.plot <- ggplot() +
geom_violin(data = cver.tau.vio.df, aes(x = age_scen, y = spawn/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = '', title = 'Maturation') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 600)) +
scale_x_discrete(expand = c(0,0)) +
annotate('text', x = 2.5, y = 570, label = '[Early maturation]', size = 3) +
annotate('text', x = 10.5, y = 570, label = '[Delayed maturation]', size = 3) +
vio.plot.settings
# Spawner-escapement violins for the eta sensitivity scenarios.
# NOTE(review): first fill colour is "grey" here vs "grey75" above --
# confirm whether that difference is intentional.
cver.spawn.eta.vio.plot <- ggplot(data = cver.eta.vio.df) +
geom_violin(aes(x = age_scen, y = spawn/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = 'Spawner escapement (thousands)', title = 'Natural mortality') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 600)) +
annotate('text', x = 2, y = 570, label = '[High mortality]', size = 3) +
annotate('text', x = 11, y = 570, label = '[Low mortality]', size = 3) +
vio.plot.settings
# CV of spawner escapement vs age-structure level (tau sensitivity).
cver.spawnCV.tau.plot <- ggplot(data = cver.tau.cv.df) +
geom_point(aes(x = age_struct, y = spawn_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = '') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.65, 0.8)) +
cv.plot.settings
# CV of spawner escapement vs age-structure level (eta sensitivity);
# this panel carries the legend.
cver.spawnCV.eta.plot <- ggplot(data = cver.eta.cv.df) +
geom_point(aes(x = age_struct, y = spawn_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = 'CV of spawner escapement') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.65, 0.8)) +
cv.plot.settings +
theme(legend.position = c(0.8, 0.9))
#harvest
cver.harvest.tau.vio.plot <- ggplot() +
geom_violin(data = cver.tau.vio.df, aes(x = age_scen, y = harvest/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = '', title = '') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 700), breaks = seq(0,700,100)) +
scale_x_discrete(expand = c(0,0)) +
vio.plot.settings
cver.harvest.eta.vio.plot <- ggplot(data = cver.eta.vio.df) +
geom_violin(aes(x = age_scen, y = harvest/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = 'Harvest (thousands)', title = '') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 700), breaks = seq(0,700,100)) +
vio.plot.settings
cver.harvestCV.tau.plot <- ggplot(data = cver.tau.cv.df) +
geom_point(aes(x = age_struct, y = harvest_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = '') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.7, 0.825), breaks = seq(0.7,0.825,0.025)) +
cv.plot.settings
cver.harvestCV.eta.plot <- ggplot(data = cver.eta.cv.df) +
geom_point(aes(x = age_struct, y = harvest_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = 'CV of harvest') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.7, 0.825), breaks = seq(0.7,0.825,0.025)) +
cv.plot.settings
ggarrange(cver.spawn.eta.vio.plot, cver.spawn.tau.vio.plot,
cver.spawnCV.eta.plot, cver.spawnCV.tau.plot,
cver.harvest.eta.vio.plot, cver.harvest.tau.vio.plot,
cver.harvestCV.eta.plot, cver.harvestCV.tau.plot,
nrow = 4, ncol = 2)
## FIGURE S14. Sensitivity to CV of recruitment stochasticity ------------------
# Load the cv_j sensitivity runs (objects cv.j.01 ... cv.j.20) and, for each
# of the 20 scenarios, build a model summary (cv.j.XX.df) and a violin-plot
# data frame (cv.j.XX.df.vio), stacking the latter into cvj.vio.df.
load("cv_j_sa.RData")
cvj.vio.df <- NULL
for (i in 1:20) {
  # Zero-padded scenario id ("01" ... "20"); model objects follow this naming.
  scen.id <- str_pad(as.character(i), width = 2, pad = "0")
  mod.name <- paste0("cv.j.", scen.id)
  mod.obj <- get(mod.name)
  assign(paste0(mod.name, ".df"), model_summary(mod.obj))
  assign(paste0(mod.name, ".df.vio"), violin_df(mod.obj, as.character(i)))
  cvj.vio.df <- rbind(cvj.vio.df, get(paste0(mod.name, ".df.vio")))
}
# Combine the per-scenario summaries for the maturation (tau) comparison:
# scenarios 1,3,5 / 6,8,10 / 11,13,15 / 16,18,20 are the tau variants under
# each climate scenario, and the fractional age_scen values (x.7, x.9, x.1,
# x.3) dodge the four climates within each age-structure group on the x axis.
# NOTE(review): cv.j.tau.df itself is not referenced by the Figure S14 plots
# below (they use cv.j.tau.vio.df / cv.j.tau.cv.df) -- confirm whether it is
# still needed.
cv.j.tau.df <- rbind(cv.j.01.df %>% mutate(climate='Contemporary', age_scen = 0.7),
cv.j.03.df %>% mutate(climate='Contemporary', age_scen = 1.7),
cv.j.05.df %>% mutate(climate='Contemporary', age_scen = 2.7),
cv.j.06.df %>% mutate(climate='Longer duration', age_scen = 0.9),
cv.j.08.df %>% mutate(climate='Longer duration', age_scen = 1.9),
cv.j.10.df %>% mutate(climate='Longer duration', age_scen = 2.9),
cv.j.11.df %>% mutate(climate='More frequent', age_scen = 1.1),
cv.j.13.df %>% mutate(climate='More frequent', age_scen = 2.1),
cv.j.15.df %>% mutate(climate='More frequent', age_scen = 3.1),
cv.j.16.df %>% mutate(climate='More intense', age_scen = 1.3),
cv.j.18.df %>% mutate(climate='More intense', age_scen = 2.3),
cv.j.20.df %>% mutate(climate='More intense', age_scen = 3.3))
# Lookup table: scenario id -> violin x position (both kept as character so
# the join key matches the scenario column of cvj.vio.df).
age.scen.df1 <- data.frame(scenario = as.character(c(1,3,5,6,8,10,11,13,15,16,18,20)),
age_scen = as.character(c(0.7,1.7,2.7,0.9,1.9,2.9,1.1,2.1,3.1,1.3,2.3,3.3)))
# Violin data restricted to the tau scenarios, tagged with the climate
# scenario and joined to its x position.
cv.j.tau.vio.df <- cvj.vio.df %>%
filter(scenario %in% as.character(c(1,3,5,6,8,10,11,13,15,16,18,20))) %>%
mutate(climate = ifelse(scenario %in% as.character(c(1,3,5)), 'Contemporary',
ifelse(scenario %in% as.character(c(6,8,10)), 'Longer duration',
ifelse(scenario %in% as.character(c(11,13,15)), 'More frequent',
'More intense')))) %>%
left_join(., age.scen.df1, by = "scenario")
# CVs of spawner escapement and harvest per tau scenario; age_struct encodes
# the dodged x positions used by the CV scatter plots.
cv.j.tau.cv.df <- data.frame(climate_scenario = rep(c('Contemporary','Duration','Frequency','Intensity'), each=3),
age_struct = c(seq(0.7,2.7,by=1),seq(0.9,2.9,by=1),seq(1.1,3.1,by=1),seq(1.3,3.3,by=1)),
spawn_cv = c(cv.j.01.df$spawn.cv, cv.j.03.df$spawn.cv, cv.j.05.df$spawn.cv, cv.j.06.df$spawn.cv, cv.j.08.df$spawn.cv, cv.j.10.df$spawn.cv, cv.j.11.df$spawn.cv, cv.j.13.df$spawn.cv, cv.j.15.df$spawn.cv, cv.j.16.df$spawn.cv, cv.j.18.df$spawn.cv, cv.j.20.df$spawn.cv),
harvest_cv = c(cv.j.01.df$harvest.cv, cv.j.03.df$harvest.cv, cv.j.05.df$harvest.cv, cv.j.06.df$harvest.cv, cv.j.08.df$harvest.cv, cv.j.10.df$harvest.cv, cv.j.11.df$harvest.cv, cv.j.13.df$harvest.cv, cv.j.15.df$harvest.cv, cv.j.16.df$harvest.cv, cv.j.18.df$harvest.cv, cv.j.20.df$harvest.cv))
# Same organization for the natural mortality (eta) scenarios
# (2,3,4 / 7,8,9 / 12,13,14 / 17,18,19).
cv.j.eta.df <- rbind(cv.j.02.df %>% mutate(climate='Contemporary', age_scen = 0.7),
cv.j.03.df %>% mutate(climate='Contemporary', age_scen = 1.7),
cv.j.04.df %>% mutate(climate='Contemporary', age_scen = 2.7),
cv.j.07.df %>% mutate(climate='Longer duration', age_scen=0.9),
cv.j.08.df %>% mutate(climate='Longer duration', age_scen=1.9),
cv.j.09.df %>% mutate(climate='Longer duration', age_scen=2.9),
cv.j.12.df %>% mutate(climate='More frequent', age_scen=1.1),
cv.j.13.df %>% mutate(climate='More frequent', age_scen=2.1),
cv.j.14.df %>% mutate(climate='More frequent', age_scen=3.1),
cv.j.17.df %>% mutate(climate='More intense', age_scen=1.3),
cv.j.18.df %>% mutate(climate='More intense', age_scen=2.3),
cv.j.19.df %>% mutate(climate='More intense', age_scen=3.3))
age.scen.df2 <- data.frame(scenario = as.character(c(2,3,4,7,8,9,12,13,14,17,18,19)),
age_scen = as.character(c(0.7,1.7,2.7,0.9,1.9,2.9,1.1,2.1,3.1,1.3,2.3,3.3)))
cv.j.eta.vio.df <- cvj.vio.df %>%
filter(scenario %in% as.character(c(2,3,4,7,8,9,12,13,14,17,18,19))) %>%
mutate(climate = ifelse(scenario %in% as.character(c(2,3,4)), 'Contemporary',
ifelse(scenario %in% as.character(c(7,8,9)), 'Longer duration',
ifelse(scenario %in% as.character(c(12,13,14)), 'More frequent',
'More intense')))) %>%
left_join(., age.scen.df2, by = "scenario")
cv.j.eta.cv.df <- data.frame(climate_scenario = rep(c('Contemporary','Duration','Frequency','Intensity'), each=3),
age_struct = c(seq(0.7,2.7,by=1),seq(0.9,2.9,by=1),seq(1.1,3.1,by=1),seq(1.3,3.3,by=1)),
spawn_cv = c(cv.j.02.df$spawn.cv, cv.j.03.df$spawn.cv, cv.j.04.df$spawn.cv, cv.j.07.df$spawn.cv, cv.j.08.df$spawn.cv, cv.j.09.df$spawn.cv, cv.j.12.df$spawn.cv, cv.j.13.df$spawn.cv, cv.j.14.df$spawn.cv, cv.j.17.df$spawn.cv, cv.j.18.df$spawn.cv, cv.j.19.df$spawn.cv),
harvest_cv = c(cv.j.02.df$harvest.cv, cv.j.03.df$harvest.cv, cv.j.04.df$harvest.cv, cv.j.07.df$harvest.cv, cv.j.08.df$harvest.cv, cv.j.09.df$harvest.cv, cv.j.12.df$harvest.cv, cv.j.13.df$harvest.cv, cv.j.14.df$harvest.cv, cv.j.17.df$harvest.cv, cv.j.18.df$harvest.cv, cv.j.19.df$harvest.cv))
##
## CVJ PLOTS
##
# Shared theme for the violin panels: legend and x-axis tick labels hidden
# (x positions are encoded by age_scen), title centred. theme() arguments are
# matched by name, so their order here is immaterial.
vio.plot.settings <- theme(
  legend.position = 'none',
  legend.title = element_blank(),
  axis.ticks.x = element_blank(),
  axis.text.x = element_blank(),
  text = element_text(size = 12),
  plot.title = element_text(hjust = 0.5),
  plot.margin = unit(c(0.5,0,0,0.7),'cm')
)
# Shared theme for the CV scatter panels.
cv.plot.settings <- theme(
  legend.position = 'none',
  legend.title = element_blank(),
  text = element_text(size = 12),
  plot.margin = unit(c(0.5,0,0,0.7),'cm')
)
# --- Figure S14 panels: recruitment-CV sensitivity ---
# Spawner escapement violins (thousands), maturation (tau) panel; right-hand
# column of the figure, so the y-axis label is blank.
cv.j.spawn.tau.vio.plot <- ggplot() +
geom_violin(data = cv.j.tau.vio.df, aes(x = age_scen, y = spawn/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = '', title = 'Maturation') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 600)) +
scale_x_discrete(expand = c(0,0)) +
annotate('text', x = 2.5, y = 570, label = '[Early maturation]', size = 3) +
annotate('text', x = 10.5, y = 570, label = '[Delayed maturation]', size = 3) +
vio.plot.settings
# Natural mortality (eta) panel; carries the shared y-axis label.
cv.j.spawn.eta.vio.plot <- ggplot(data = cv.j.eta.vio.df) +
geom_violin(aes(x = age_scen, y = spawn/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = 'Spawner escapement (thousands)', title = 'Natural mortality') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 600)) +
annotate('text', x = 2, y = 570, label = '[High mortality]', size = 3) +
annotate('text', x = 11, y = 570, label = '[Low mortality]', size = 3) +
vio.plot.settings
# CV of spawner escapement vs. age-structure scenario (tau panel);
# fractional age_struct dodges the climate scenarios.
cv.j.spawnCV.tau.plot <- ggplot(data = cv.j.tau.cv.df) +
geom_point(aes(x = age_struct, y = spawn_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = '') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.65, 0.8)) +
cv.plot.settings
# Eta CV panel; the trailing theme() overrides cv.plot.settings so only this
# panel shows the legend.
cv.j.spawnCV.eta.plot <- ggplot(data = cv.j.eta.cv.df) +
geom_point(aes(x = age_struct, y = spawn_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = 'CV of spawner escapement') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.65, 0.8)) +
cv.plot.settings +
theme(legend.position = c(0.8, 0.9))
#harvest
# Harvest panels mirror the spawner panels above.
cv.j.harvest.tau.vio.plot <- ggplot() +
geom_violin(data = cv.j.tau.vio.df, aes(x = age_scen, y = harvest/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = '', title = '') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 700), breaks = seq(0,700,100)) +
scale_x_discrete(expand = c(0,0)) +
vio.plot.settings
cv.j.harvest.eta.vio.plot <- ggplot(data = cv.j.eta.vio.df) +
geom_violin(aes(x = age_scen, y = harvest/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = 'Harvest (thousands)', title = '') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 700), breaks = seq(0,700,100)) +
vio.plot.settings
cv.j.harvestCV.tau.plot <- ggplot(data = cv.j.tau.cv.df) +
geom_point(aes(x = age_struct, y = harvest_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = '') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.7, 0.825), breaks = seq(0.7,0.825,0.025)) +
cv.plot.settings
cv.j.harvestCV.eta.plot <- ggplot(data = cv.j.eta.cv.df) +
geom_point(aes(x = age_struct, y = harvest_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = 'CV of harvest') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.7, 0.825), breaks = seq(0.7,0.825,0.025)) +
cv.plot.settings
# Assemble the 4x2 figure: eta column left, tau column right.
ggarrange(cv.j.spawn.eta.vio.plot, cv.j.spawn.tau.vio.plot,
cv.j.spawnCV.eta.plot, cv.j.spawnCV.tau.plot,
cv.j.harvest.eta.vio.plot, cv.j.harvest.tau.vio.plot,
cv.j.harvestCV.eta.plot, cv.j.harvestCV.tau.plot,
nrow = 4, ncol = 2)
## FIGURE S15. Sensitivity to mean NPGO effect -------------------
# Load the NPGO sensitivity runs (objects npgo.01 ... npgo.20) and, for each
# of the 20 scenarios, build a model summary (npgo.XX.df) and a violin-plot
# data frame (npgo.XX.df.vio), stacking the latter into npgo.vio.df.
load("npgo_sa.RData")
npgo.vio.df <- NULL
for (i in 1:20) {
  # Zero-padded scenario id ("01" ... "20"); model objects follow this naming.
  scen.id <- str_pad(as.character(i), width = 2, pad = "0")
  mod.name <- paste0("npgo.", scen.id)
  mod.obj <- get(mod.name)
  assign(paste0(mod.name, ".df"), model_summary(mod.obj))
  assign(paste0(mod.name, ".df.vio"), violin_df(mod.obj, as.character(i)))
  npgo.vio.df <- rbind(npgo.vio.df, get(paste0(mod.name, ".df.vio")))
}
# Combine the per-scenario summaries for the maturation (tau) comparison:
# scenarios 1,3,5 / 6,8,10 / 11,13,15 / 16,18,20 are the tau variants under
# each climate scenario; fractional age_scen values dodge the climates.
# NOTE(review): npgo.tau.df is not referenced by the Figure S15 plots below
# (they use npgo.tau.vio.df / npgo.tau.cv.df) -- confirm whether it is needed.
npgo.tau.df <- rbind(npgo.01.df %>% mutate(climate='Contemporary', age_scen = 0.7),
npgo.03.df %>% mutate(climate='Contemporary', age_scen = 1.7),
npgo.05.df %>% mutate(climate='Contemporary', age_scen = 2.7),
npgo.06.df %>% mutate(climate='Longer duration', age_scen = 0.9),
npgo.08.df %>% mutate(climate='Longer duration', age_scen = 1.9),
npgo.10.df %>% mutate(climate='Longer duration', age_scen = 2.9),
npgo.11.df %>% mutate(climate='More frequent', age_scen = 1.1),
npgo.13.df %>% mutate(climate='More frequent', age_scen = 2.1),
npgo.15.df %>% mutate(climate='More frequent', age_scen = 3.1),
npgo.16.df %>% mutate(climate='More intense', age_scen = 1.3),
npgo.18.df %>% mutate(climate='More intense', age_scen = 2.3),
npgo.20.df %>% mutate(climate='More intense', age_scen = 3.3))
# Lookup table: scenario id -> violin x position (character, to match the
# scenario column of npgo.vio.df for the join below).
age.scen.df1 <- data.frame(scenario = as.character(c(1,3,5,6,8,10,11,13,15,16,18,20)),
age_scen = as.character(c(0.7,1.7,2.7,0.9,1.9,2.9,1.1,2.1,3.1,1.3,2.3,3.3)))
# Violin data restricted to tau scenarios, tagged with climate + x position.
npgo.tau.vio.df <- npgo.vio.df %>%
filter(scenario %in% as.character(c(1,3,5,6,8,10,11,13,15,16,18,20))) %>%
mutate(climate = ifelse(scenario %in% as.character(c(1,3,5)), 'Contemporary',
ifelse(scenario %in% as.character(c(6,8,10)), 'Longer duration',
ifelse(scenario %in% as.character(c(11,13,15)), 'More frequent',
'More intense')))) %>%
left_join(., age.scen.df1, by = "scenario")
# CVs of spawner escapement and harvest per tau scenario, with dodged x.
npgo.tau.cv.df <- data.frame(climate_scenario = rep(c('Contemporary','Duration','Frequency','Intensity'), each=3),
age_struct = c(seq(0.7,2.7,by=1),seq(0.9,2.9,by=1),seq(1.1,3.1,by=1),seq(1.3,3.3,by=1)),
spawn_cv = c(npgo.01.df$spawn.cv, npgo.03.df$spawn.cv, npgo.05.df$spawn.cv, npgo.06.df$spawn.cv, npgo.08.df$spawn.cv, npgo.10.df$spawn.cv, npgo.11.df$spawn.cv, npgo.13.df$spawn.cv, npgo.15.df$spawn.cv, npgo.16.df$spawn.cv, npgo.18.df$spawn.cv, npgo.20.df$spawn.cv),
harvest_cv = c(npgo.01.df$harvest.cv, npgo.03.df$harvest.cv, npgo.05.df$harvest.cv, npgo.06.df$harvest.cv, npgo.08.df$harvest.cv, npgo.10.df$harvest.cv, npgo.11.df$harvest.cv, npgo.13.df$harvest.cv, npgo.15.df$harvest.cv, npgo.16.df$harvest.cv, npgo.18.df$harvest.cv, npgo.20.df$harvest.cv))
# Same organization for the natural mortality (eta) scenarios
# (2,3,4 / 7,8,9 / 12,13,14 / 17,18,19).
npgo.eta.df <- rbind(npgo.02.df %>% mutate(climate='Contemporary', age_scen = 0.7),
npgo.03.df %>% mutate(climate='Contemporary', age_scen = 1.7),
npgo.04.df %>% mutate(climate='Contemporary', age_scen = 2.7),
npgo.07.df %>% mutate(climate='Longer duration', age_scen=0.9),
npgo.08.df %>% mutate(climate='Longer duration', age_scen=1.9),
npgo.09.df %>% mutate(climate='Longer duration', age_scen=2.9),
npgo.12.df %>% mutate(climate='More frequent', age_scen=1.1),
npgo.13.df %>% mutate(climate='More frequent', age_scen=2.1),
npgo.14.df %>% mutate(climate='More frequent', age_scen=3.1),
npgo.17.df %>% mutate(climate='More intense', age_scen=1.3),
npgo.18.df %>% mutate(climate='More intense', age_scen=2.3),
npgo.19.df %>% mutate(climate='More intense', age_scen=3.3))
age.scen.df2 <- data.frame(scenario = as.character(c(2,3,4,7,8,9,12,13,14,17,18,19)),
age_scen = as.character(c(0.7,1.7,2.7,0.9,1.9,2.9,1.1,2.1,3.1,1.3,2.3,3.3)))
npgo.eta.vio.df <- npgo.vio.df %>%
filter(scenario %in% as.character(c(2,3,4,7,8,9,12,13,14,17,18,19))) %>%
mutate(climate = ifelse(scenario %in% as.character(c(2,3,4)), 'Contemporary',
ifelse(scenario %in% as.character(c(7,8,9)), 'Longer duration',
ifelse(scenario %in% as.character(c(12,13,14)), 'More frequent',
'More intense')))) %>%
left_join(., age.scen.df2, by = "scenario")
npgo.eta.cv.df <- data.frame(climate_scenario = rep(c('Contemporary','Duration','Frequency','Intensity'), each=3),
age_struct = c(seq(0.7,2.7,by=1),seq(0.9,2.9,by=1),seq(1.1,3.1,by=1),seq(1.3,3.3,by=1)),
spawn_cv = c(npgo.02.df$spawn.cv, npgo.03.df$spawn.cv, npgo.04.df$spawn.cv, npgo.07.df$spawn.cv, npgo.08.df$spawn.cv, npgo.09.df$spawn.cv, npgo.12.df$spawn.cv, npgo.13.df$spawn.cv, npgo.14.df$spawn.cv, npgo.17.df$spawn.cv, npgo.18.df$spawn.cv, npgo.19.df$spawn.cv),
harvest_cv = c(npgo.02.df$harvest.cv, npgo.03.df$harvest.cv, npgo.04.df$harvest.cv, npgo.07.df$harvest.cv, npgo.08.df$harvest.cv, npgo.09.df$harvest.cv, npgo.12.df$harvest.cv, npgo.13.df$harvest.cv, npgo.14.df$harvest.cv, npgo.17.df$harvest.cv, npgo.18.df$harvest.cv, npgo.19.df$harvest.cv))
##
## NPGO PLOTS
##
# Shared theme for the violin panels: legend and x-axis tick labels hidden
# (x positions are encoded by age_scen), title centred. theme() arguments are
# matched by name, so their order here is immaterial.
vio.plot.settings <- theme(
  legend.position = 'none',
  legend.title = element_blank(),
  axis.ticks.x = element_blank(),
  axis.text.x = element_blank(),
  text = element_text(size = 12),
  plot.title = element_text(hjust = 0.5),
  plot.margin = unit(c(0.5,0,0,0.7),'cm')
)
# Shared theme for the CV scatter panels.
cv.plot.settings <- theme(
  legend.position = 'none',
  legend.title = element_blank(),
  text = element_text(size = 12),
  plot.margin = unit(c(0.5,0,0,0.7),'cm')
)
# --- Figure S15 panels: NPGO-effect sensitivity ---
# Spawner escapement violins (thousands), maturation (tau) panel; right-hand
# column of the figure, so the y-axis label is blank.
npgo.spawn.tau.vio.plot <- ggplot() +
geom_violin(data = npgo.tau.vio.df, aes(x = age_scen, y = spawn/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = '', title = 'Maturation') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 600)) +
scale_x_discrete(expand = c(0,0)) +
annotate('text', x = 2.5, y = 570, label = '[Early maturation]', size = 3) +
annotate('text', x = 10.5, y = 570, label = '[Delayed maturation]', size = 3) +
vio.plot.settings
# Natural mortality (eta) panel; carries the shared y-axis label.
npgo.spawn.eta.vio.plot <- ggplot(data = npgo.eta.vio.df) +
geom_violin(aes(x = age_scen, y = spawn/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = 'Spawner escapement (thousands)', title = 'Natural mortality') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 600)) +
annotate('text', x = 2, y = 570, label = '[High mortality]', size = 3) +
annotate('text', x = 11, y = 570, label = '[Low mortality]', size = 3) +
vio.plot.settings
# CV of spawner escapement vs. age-structure scenario (tau panel).
npgo.spawnCV.tau.plot <- ggplot(data = npgo.tau.cv.df) +
geom_point(aes(x = age_struct, y = spawn_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = '') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.65, 0.8)) +
cv.plot.settings
# Eta CV panel; the trailing theme() overrides cv.plot.settings so only this
# panel shows the legend.
npgo.spawnCV.eta.plot <- ggplot(data = npgo.eta.cv.df) +
geom_point(aes(x = age_struct, y = spawn_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = 'CV of spawner escapement') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.65, 0.8)) +
cv.plot.settings +
theme(legend.position = c(0.8, 0.9))
#harvest
# Harvest panels mirror the spawner panels above.
npgo.harvest.tau.vio.plot <- ggplot() +
geom_violin(data = npgo.tau.vio.df, aes(x = age_scen, y = harvest/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = '', title = '') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 700), breaks = seq(0,700,100)) +
scale_x_discrete(expand = c(0,0)) +
vio.plot.settings
npgo.harvest.eta.vio.plot <- ggplot(data = npgo.eta.vio.df) +
geom_violin(aes(x = age_scen, y = harvest/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = 'Harvest (thousands)', title = '') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 700), breaks = seq(0,700,100)) +
vio.plot.settings
npgo.harvestCV.tau.plot <- ggplot(data = npgo.tau.cv.df) +
geom_point(aes(x = age_struct, y = harvest_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = '') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.7, 0.825), breaks = seq(0.7,0.825,0.025)) +
cv.plot.settings
npgo.harvestCV.eta.plot <- ggplot(data = npgo.eta.cv.df) +
geom_point(aes(x = age_struct, y = harvest_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = 'CV of harvest') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.7, 0.825), breaks = seq(0.7,0.825,0.025)) +
cv.plot.settings
# Assemble the 4x2 figure: eta column left, tau column right.
ggarrange(npgo.spawn.eta.vio.plot, npgo.spawn.tau.vio.plot,
npgo.spawnCV.eta.plot, npgo.spawnCV.tau.plot,
npgo.harvest.eta.vio.plot, npgo.harvest.tau.vio.plot,
npgo.harvestCV.eta.plot, npgo.harvestCV.tau.plot,
nrow = 4, ncol = 2)
## FIGURE S16. total escapement plots ----------
# Shared violin-panel theme for this figure (larger 16 pt text): legend and
# x-axis tick labels hidden, title centred. theme() arguments are matched by
# name, so their order here is immaterial.
vio.plot.settings <- theme(
  legend.position = 'none',
  legend.title = element_blank(),
  axis.ticks.x = element_blank(),
  axis.text.x = element_blank(),
  text = element_text(size = 16),
  plot.title = element_text(hjust = 0.5),
  plot.margin = unit(c(0.5,0,0,0.7),'cm')
)
# Shared CV-scatter theme (16 pt text).
cv.plot.settings <- theme(
  legend.position = 'none',
  legend.title = element_blank(),
  text = element_text(size = 16),
  plot.margin = unit(c(0.5,0,0,0.7),'cm')
)
# Total run size (spawners + harvest, thousands) violin panel for the
# maturation (tau) scenarios.
total.run.tau.vio.plot <- ggplot() +
geom_violin(data = tau.vio.df, aes(x = age_scen, y = (spawn+harvest)/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = '', title = 'Maturation') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 1100)) +
scale_x_discrete(expand = c(0,0)) +
annotate('text', x = 2.5, y = 1000, label = '[Early maturation]', size = 4) +
annotate('text', x = 10.5, y = 1000, label = '[Delayed maturation]', size = 4) +
vio.plot.settings
# Natural mortality (eta) violin panel; carries the y-axis label.
total.run.eta.vio.plot <- ggplot(data = eta.vio.df) +
geom_violin(aes(x = age_scen, y = (spawn+harvest)/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = 'Total run size (thousands)', title = 'Natural mortality') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 1100)) +
annotate('text', x = 2, y = 1000, label = '[High mortality]', size = 4) +
annotate('text', x = 11, y = 1000, label = '[Low mortality]', size = 4) +
vio.plot.settings
# CV of total run size vs. age-structure scenario, tau panel.
total.run.CV.tau.plot <- ggplot(data = tau.cv.df) +
geom_point(aes(x = age_struct, y = totalrun_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = '') +
# scale_x_continuous(breaks = seq(1,3), labels = c(expression(tau[3]~"= 0.99"), 'Base case', expression(tau[3]~"= 0.25"))) +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.5, 0.65)) +
cv.plot.settings
# Eta CV panel; trailing theme() overrides cv.plot.settings so only this
# panel shows the legend.
total.run.CV.eta.plot <- ggplot(data = eta.cv.df) +
geom_point(aes(x = age_struct, y = totalrun_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = 'CV of total run size') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.5, 0.65)) +
cv.plot.settings +
theme(legend.position = c(0.8, 0.9))
# Assemble: eta column (panels a, c) on the left, tau column (b, d) right.
totalrun.tau <- ggarrange(total.run.tau.vio.plot, total.run.CV.tau.plot, nrow=2, labels = c('b', 'd'))
totalrun.eta <- ggarrange(total.run.eta.vio.plot, total.run.CV.eta.plot, nrow=2, labels = c('a', 'c'))
totalrun.final <- ggarrange(totalrun.eta, totalrun.tau, ncol=2)
# 100-YEAR MODEL VALIDATION ---------------------------------------------------------------------------------------------
# Summary statistics of the base-case run (mod.03).
base.mod.df <- model_summary(mod.03)
# plots
# NOTE(review): sim.nums is assigned but unused because the filter below is
# commented out -- confirm whether subsetting simulations is still wanted.
sim.nums <- n.sim
base.mod1 <- base.mod #%>% filter(sim %in% sim.nums)
# Spaghetti plot of escapement trajectories (one grey line per simulation)
# with reference lines: simulated mean/median (black solid/dashed), observed
# mean/median from catch.esc (blue solid/dashed), and a fixed 91,500 line
# (red; presumably an escapement goal/reference point -- TODO confirm).
hundo.spawn <- ggplot() +
geom_line(data = base.mod1, aes(x = year, y = Spawn.est, group = sim), color = 'gray70', alpha = 0.3) +
# geom_line(data = base.mod2, aes(x = year, y = Spawn.est), color = 'black') +
# geom_line(aes(x = 1:26, y = catch.esc$total.esc), color = 'red') +
geom_hline(yintercept = base.mod.df$spawn.mean, color = 'black') +
geom_hline(yintercept = base.mod.df$spawn.median, color = 'black', lty = 'dashed') +
geom_hline(yintercept = mean(catch.esc$total.esc), color = 'blue') +
geom_hline(yintercept = median(catch.esc$total.esc), color = 'blue', lty = 'dashed') +
geom_hline(yintercept = 91500, color = 'red') +
# geom_hline(yintercept = 122000, color = 'red', lty = 'dashed') +
theme_classic() +
scale_x_continuous(expand = c(0,0), limits = c(1,100)) +
scale_y_continuous(expand = c(0,0), limits = c(0, max(base.mod1$Spawn.est))) +
labs(x = 'Year', y = 'Total escapement') +
theme(plot.margin = unit(c(0.5,0.5,0.5,0.5), 'cm'))
# Same layout for harvest; the observed reference combines ocean and
# river harvest from catch.esc.
hundo.harvest <- ggplot() +
geom_line(data = base.mod1, aes(x = year, y = harvest, group = sim), color = 'gray70', alpha = 0.3) +
# geom_line(data = base.mod2, aes(x = year, y = harvest), color = 'black') +
# geom_line(aes(x = 1:26, y = catch.esc$total.esc), color = 'red') +
geom_hline(yintercept = base.mod.df$harvest.mean, color = 'black') +
geom_hline(yintercept = base.mod.df$harvest.median, color = 'black', lty = 'dashed') +
geom_hline(yintercept = mean(catch.esc$total.ocean.harvest + catch.esc$river.harvest), color = 'blue') +
geom_hline(yintercept = median(catch.esc$total.ocean.harvest + catch.esc$river.harvest), color = 'blue', lty = 'dashed') +
theme_classic() +
scale_x_continuous(expand = c(0,0), limits = c(1,100)) +
scale_y_continuous(expand = c(0,0), limits = c(0, max(base.mod1$harvest))) +
labs(x = 'Year', y = 'Harvest') +
theme(plot.margin = unit(c(0.5,0.5,0.5,0.5), 'cm'))
ggarrange(hundo.spawn, hundo.harvest, nrow = 2, ncol = 1)
# Check age-composition of spawners
# Mean number of spawners at ages 2-5 across all simulations over the
# post-burn-in years (year >= 30), then converted to proportions.
hundo.age.comp <- data.frame(age = c('2','3','4','5'),
mean = as.numeric(base.mod %>% filter(year >= 30) %>% dplyr::select(spawn.2, spawn.3, spawn.4, spawn.5) %>% summarise(across(1:4, mean))))
hundo.age.comp$prop <- hundo.age.comp$mean/sum(hundo.age.comp$mean)
hundo.age.comp$source <- 'Simulated'
# Published spawner age composition for comparison (Satterthwaite et al. 2017).
wills.data <- data.frame(age = c('1-2', '3', '4', '5+'),
prop = c(0.13, 0.65, 0.22, 0.003))
# Simulated spawner age composition. geom_col() replaces the deprecated
# geom_histogram(stat = 'identity') idiom, and the mapping now references
# columns of the data argument instead of the df$col anti-pattern.
sim.age.com <- ggplot(data = hundo.age.comp) +
geom_col(aes(x = age, y = prop)) +
labs(x = 'Age', y = 'Proportion of spawners', title = 'Simulated') +
scale_y_continuous(expand = c(0,0), limits = c(0, 0.7)) +
theme_classic()
# Published spawner age composition, same axes for side-by-side comparison.
will.age.com <- ggplot(data = wills.data) +
geom_col(aes(x = age, y = prop)) +
labs(x = 'Age', y = 'Proportion of spawners', title = 'Satterthwaite et al. 2017') +
scale_y_continuous(expand = c(0,0), limits = c(0, 0.7)) +
theme_classic()
# Check age-composition of harvest
# Mean harvest at ages 2-5 across simulations over the post-burn-in years
# (year >= 30), converted to proportions.
harv.age.comp <- data.frame(age = c('2','3','4','5'),
mean = as.numeric(base.mod %>% filter(year >= 30) %>% dplyr::select(harvest.2, harvest.3, harvest.4, harvest.5) %>% summarise(across(1:4, mean))))
harv.age.comp$prop <- (harv.age.comp$mean/sum(harv.age.comp$mean))
# Observed ocean-impact age composition for the 1998 and 1999 brood years
# (Palmer-Zwahlen et al. 2006).
melodies.data <- data.frame(age = c('2','3','4','5','2','3','4','5'),
year = c(rep('1998 BY', times = 4), rep('1999 BY', times = 4)),
prop = c(0.06, 0.82, 0.17, 0.0005, 0.008, 0.632, 0.352, 0.004))
# Simulated harvest age composition. geom_col() replaces
# geom_bar(stat = 'identity'); fixed y-axis label typo
# ('Proportio' -> 'Proportion').
sim.harv.plot <- ggplot(data = harv.age.comp) +
geom_col(aes(x = age, y = prop)) +
labs(x = 'Age', y = 'Proportion of harvest', title = 'Simulated') +
scale_y_continuous(expand = c(0,0), limits = c(0, 0.9), breaks = seq(0, 0.8, by = 0.2)) +
theme_classic()
# Published harvest age composition, bars dodged by brood year.
melodi.harv.plot <- ggplot(data = melodies.data) +
geom_col(aes(x = age, y = prop, fill = year), position = 'dodge') +
scale_fill_manual("legend", values = c("1998 BY" = "grey35", "1999 BY" = "grey")) +
scale_y_continuous(expand = c(0,0), limits = c(0, 0.9), breaks = seq(0, 0.8, by = 0.2)) +
labs(x = 'Age', y = 'Proportion of Feather River hatchery ocean impacts', title = 'Palmer-Zwahlen et al. 2006') +
theme_classic() +
theme(legend.title = element_blank(), legend.position = c(0.8, 0.8))
# Combine simulated vs. published age compositions into one 2x2 figure.
ggarrange(sim.age.com, will.age.com, sim.harv.plot, melodi.harv.plot, nrow=2, ncol=2)
# Check autocorrelation
# Observed lag-0..6 autocorrelation of total escapement. Note acf() also
# draws the default ACF plot here, as in the original code.
tmp.acf <- acf(catch.esc$total.esc, 6)
tmp.acf <- data.frame(lag = 0:6, acf = tmp.acf$acf)
# Simulated ACFs: one per simulation, over post-burn-in years 30-99.
# Results are collected in a preallocated list and bound once at the end,
# instead of growing a data frame with rbind() inside the loop (which copies
# the accumulator every iteration, O(n^2)); seq_len() is also safe if
# n.sim is ever 0.
acf.by.sim <- vector("list", n.sim)
for (i in seq_len(n.sim)) {
  sim.dat <- base.mod %>% filter(sim == paste0('s', i)) %>% filter(year < 100 & year > 29)
  sim.acf <- acf(sim.dat$Spawn.est, 6, plot = FALSE)
  acf.by.sim[[i]] <- data.frame(lag = 0:6, acf = sim.acf$acf)
}
tmp.acf3 <- do.call(rbind, acf.by.sim)
# Mean simulated ACF at each lag, plotted against lag for comparison with
# the observed ACF in tmp.acf.
tmp4 <- tmp.acf3 %>% group_by(lag) %>% summarise(acf = mean(acf))
plot(tmp4$lag, tmp4$acf)
| /plots.R | no_license | CVFC-MSE/age_climate_model | R | false | false | 65,592 | r | # Main figures
rm(list = ls())
# Load libraries and data -------------------------------------------------------------------------------------------------------
library(ggplot2)
library(tidyverse)
library(ggpubr)
load('age_flow_summary.RData')
# Organize data -----------------------------------------------------------------------------------------------------------------
# Organize data for alternative maturation rate scenarios
# Scenarios 1,3,5 / 6,8,10 / 11,13,15 / 16,18,20 are the tau (maturation)
# variants under each climate scenario; fractional age_scen values (x.7, x.9,
# x.1, x.3) dodge the four climates within each age-structure group.
tau.df <- rbind(mod01.df %>% mutate(climate='Contemporary', age_scen = 0.7),
mod03.df %>% mutate(climate='Contemporary', age_scen = 1.7),
mod05.df %>% mutate(climate='Contemporary', age_scen = 2.7),
mod06.df %>% mutate(climate='Longer duration', age_scen = 0.9),
mod08.df %>% mutate(climate='Longer duration', age_scen = 1.9),
mod10.df %>% mutate(climate='Longer duration', age_scen = 2.9),
mod11.df %>% mutate(climate='More frequent', age_scen = 1.1),
mod13.df %>% mutate(climate='More frequent', age_scen = 2.1),
mod15.df %>% mutate(climate='More frequent', age_scen = 3.1),
mod16.df %>% mutate(climate='More intense', age_scen = 1.3),
mod18.df %>% mutate(climate='More intense', age_scen = 2.3),
mod20.df %>% mutate(climate='More intense', age_scen = 3.3))
# Lookup table: scenario id -> violin x position (character, to match the
# scenario column of vio.df for the join below).
age.scen.df1 <- data.frame(scenario = as.character(c(1,3,5,6,8,10,11,13,15,16,18,20)),
age_scen = as.character(c(0.7,1.7,2.7,0.9,1.9,2.9,1.1,2.1,3.1,1.3,2.3,3.3)))
# Violin data restricted to tau scenarios, tagged with climate + x position.
tau.vio.df <- vio.df %>%
filter(scenario %in% as.character(c(1,3,5,6,8,10,11,13,15,16,18,20))) %>%
mutate(climate = ifelse(scenario %in% as.character(c(1,3,5)), 'Contemporary',
ifelse(scenario %in% as.character(c(6,8,10)), 'Longer duration',
ifelse(scenario %in% as.character(c(11,13,15)), 'More frequent',
'More intense')))) %>%
left_join(., age.scen.df1, by = "scenario")
# CVs of spawner escapement, harvest, and total run size per tau scenario;
# age_struct encodes the dodged x positions used by the CV scatter plots.
tau.cv.df <- data.frame(climate_scenario = rep(c('Contemporary','Duration','Frequency','Intensity'), each=3),
age_struct = c(seq(0.7,2.7,by=1),seq(0.9,2.9,by=1),seq(1.1,3.1,by=1),seq(1.3,3.3,by=1)),
spawn_cv = c(mod01.df$spawn.cv, mod03.df$spawn.cv, mod05.df$spawn.cv, mod06.df$spawn.cv, mod08.df$spawn.cv, mod10.df$spawn.cv, mod11.df$spawn.cv, mod13.df$spawn.cv, mod15.df$spawn.cv, mod16.df$spawn.cv, mod18.df$spawn.cv, mod20.df$spawn.cv),
harvest_cv = c(mod01.df$harvest.cv, mod03.df$harvest.cv, mod05.df$harvest.cv, mod06.df$harvest.cv, mod08.df$harvest.cv, mod10.df$harvest.cv, mod11.df$harvest.cv, mod13.df$harvest.cv, mod15.df$harvest.cv, mod16.df$harvest.cv, mod18.df$harvest.cv, mod20.df$harvest.cv),
totalrun_cv = c(mod01.df$total.run.cv, mod03.df$total.run.cv, mod05.df$total.run.cv, mod06.df$total.run.cv, mod08.df$total.run.cv, mod10.df$total.run.cv, mod11.df$total.run.cv, mod13.df$total.run.cv, mod15.df$total.run.cv, mod16.df$total.run.cv, mod18.df$total.run.cv, mod20.df$total.run.cv))
# Organize data for alternative natural mortality rate scenarios
eta.df <- rbind(mod02.df %>% mutate(climate='Contemporary', age_scen = 0.7),
mod03.df %>% mutate(climate='Contemporary', age_scen = 1.7),
mod04.df %>% mutate(climate='Contemporary', age_scen = 2.7),
mod07.df %>% mutate(climate='Longer duration', age_scen=0.9),
mod08.df %>% mutate(climate='Longer duration', age_scen=1.9),
mod09.df %>% mutate(climate='Longer duration', age_scen=2.9),
mod12.df %>% mutate(climate='More frequent', age_scen=1.1),
mod13.df %>% mutate(climate='More frequent', age_scen=2.1),
mod14.df %>% mutate(climate='More frequent', age_scen=3.1),
mod17.df %>% mutate(climate='More intense', age_scen=1.3),
mod18.df %>% mutate(climate='More intense', age_scen=2.3),
mod19.df %>% mutate(climate='More intense', age_scen=3.3))
# Lookup table mapping eta scenario ids to age-structure axis positions.
# Both columns are stored as character so the join key matches the
# character `scenario` column of vio.df.
age.scen.df2 <- data.frame(scenario = as.character(c(2,3,4,7,8,9,12,13,14,17,18,19)),
age_scen = as.character(c(0.7,1.7,2.7,0.9,1.9,2.9,1.1,2.1,3.1,1.3,2.3,3.3)))
# Violin-plot data for the eta scenarios: subset the pooled vio.df to the
# eta model runs, label each with its climate scenario, and attach the
# age-structure position via the lookup table above.
eta.vio.df <- vio.df %>%
filter(scenario %in% as.character(c(2,3,4,7,8,9,12,13,14,17,18,19))) %>%
mutate(climate = ifelse(scenario %in% as.character(c(2,3,4)), 'Contemporary',
ifelse(scenario %in% as.character(c(7,8,9)), 'Longer duration',
ifelse(scenario %in% as.character(c(12,13,14)), 'More frequent',
'More intense')))) %>%
left_join(., age.scen.df2, by = "scenario")
# CVs of spawner escapement, harvest, and total run for the natural
# mortality (eta) scenarios, one row per climate x age-structure level.
# BUG FIX: the original totalrun_cv was copy-pasted from tau.cv.df and
# pulled the tau-scenario models (mod01, mod05, mod06, mod10, ...); it is
# corrected here to the eta-scenario models so all three CV columns
# describe the same 12 model runs as spawn_cv and harvest_cv.
eta.cv.df <- data.frame(climate_scenario = rep(c('Contemporary','Duration','Frequency','Intensity'), each=3),
age_struct = c(seq(0.7,2.7,by=1),seq(0.9,2.9,by=1),seq(1.1,3.1,by=1),seq(1.3,3.3,by=1)),
spawn_cv = c(mod02.df$spawn.cv, mod03.df$spawn.cv, mod04.df$spawn.cv, mod07.df$spawn.cv, mod08.df$spawn.cv, mod09.df$spawn.cv, mod12.df$spawn.cv, mod13.df$spawn.cv, mod14.df$spawn.cv, mod17.df$spawn.cv, mod18.df$spawn.cv, mod19.df$spawn.cv),
harvest_cv = c(mod02.df$harvest.cv, mod03.df$harvest.cv, mod04.df$harvest.cv, mod07.df$harvest.cv, mod08.df$harvest.cv, mod09.df$harvest.cv, mod12.df$harvest.cv, mod13.df$harvest.cv, mod14.df$harvest.cv, mod17.df$harvest.cv, mod18.df$harvest.cv, mod19.df$harvest.cv),
totalrun_cv = c(mod02.df$total.run.cv, mod03.df$total.run.cv, mod04.df$total.run.cv, mod07.df$total.run.cv, mod08.df$total.run.cv, mod09.df$total.run.cv, mod12.df$total.run.cv, mod13.df$total.run.cv, mod14.df$total.run.cv, mod17.df$total.run.cv, mod18.df$total.run.cv, mod19.df$total.run.cv))
### OVERFISHING TAU AND ETA
# Overfished-status summaries for the maturation (tau) scenario models.
# Each modXX.overfished object (XX zero-padded) is looked up by name,
# tagged with its scenario id, climate label, and age-structure position,
# then stacked. bind_rows(lapply(...)) replaces the original pattern of
# growing the data frame with rbind() inside the loop, and the scenario
# sets are compared as character to character (the original relied on
# %in% implicitly coercing numeric sets against the character column).
tau.mods <- c(1,3,5,6,8,10,11,13,15,16,18,20)
tau.overfished.df <- bind_rows(lapply(tau.mods, function(i) {
  get(paste0('mod', stringr::str_pad(i, 2, pad = '0'), '.overfished')) %>%
    mutate(scenario = as.character(i)) %>%
    mutate(climate = ifelse(scenario %in% as.character(c(1,3,5)), 'Contemporary',
                            ifelse(scenario %in% as.character(c(6,8,10)), 'Longer duration',
                                   ifelse(scenario %in% as.character(c(11,13,15)), 'More frequent',
                                          'More intense')))) %>%
    left_join(., age.scen.df1, by = 'scenario')
}))
# Overfished-status summaries for the natural mortality (eta) scenario
# models, built the same way as tau.overfished.df: look up each
# modXX.overfished object by name, tag it with scenario id, climate label,
# and age-structure position, and stack. bind_rows(lapply(...)) avoids
# growing the result with rbind() inside a loop, and scenario sets are
# compared as character to character rather than relying on implicit
# coercion inside %in%.
eta.mods <- c(2,3,4,7,8,9,12,13,14,17,18,19)
eta.overfished.df <- bind_rows(lapply(eta.mods, function(i) {
  get(paste0('mod', stringr::str_pad(i, 2, pad = '0'), '.overfished')) %>%
    mutate(scenario = as.character(i)) %>%
    mutate(climate = ifelse(scenario %in% as.character(c(2,3,4)), 'Contemporary',
                            ifelse(scenario %in% as.character(c(7,8,9)), 'Longer duration',
                                   ifelse(scenario %in% as.character(c(12,13,14)), 'More frequent',
                                          'More intense')))) %>%
    left_join(., age.scen.df2, by = 'scenario')
}))
# MAIN FIGURES ----------------------------
## FIGURE 1. Harvest control rule -----------------------------------------------------------------------------
library(ggplot2)
library(ggpubr)
# Evaluate the harvest control rule over a fine grid of Sacramento Index
# values (0 to 500,000 fish, 1000 points).
tmp.si <- seq(0, 500000, length.out = 1000)
# vapply instead of sapply: guarantees a plain numeric vector regardless of
# input, whereas sapply's return type depends on what it receives.
tmp.er <- vapply(tmp.si, control.rule, numeric(1))
# Allowable exploitation rate as a function of the Sacramento Index
# (x axis rescaled to thousands of fish).
plot1 <- ggplot() +
  geom_line(aes(x = tmp.si/1000, y = tmp.er), size = 1) +
  scale_x_continuous(expand = c(0, 0)) +
  scale_y_continuous(expand = c(0, 0), limits = c(0, 0.8)) +
  labs(x = 'Sacramento Index (thousands)', y = 'Allowable exploitation rate') +
  theme_classic() +
  theme(text = element_text(size = 16), plot.margin = unit(c(0.5,1,0.5,0.5), 'cm'))
## FIGURE 3. Spawner escapement violin plots and CV --------------------------------------------------------------------------------
# Shared theme for the violin panels: no legend and no x tick labels (the
# panels are annotated internally instead).
vio.plot.settings <- theme(legend.title = element_blank(),
legend.position = 'none',
axis.text.x = element_blank(),
axis.ticks.x = element_blank(),
plot.title = element_text(hjust = 0.5),
text = element_text(size = 16),
plot.margin = unit(c(0.5,0,0,0.7),'cm'))
# Shared theme for the CV dot panels.
cv.plot.settings <- theme(legend.title = element_blank(),
legend.position = 'none',
text = element_text(size = 16),
plot.margin = unit(c(0.5,0,0,0.7),'cm'))
# Spawner escapement distributions across maturation (tau) scenarios;
# draw_quantiles = 0.5 draws the median line on each violin.
spawn.tau.vio.plot <- ggplot() +
geom_violin(data = tau.vio.df, aes(x = age_scen, y = spawn/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = '', title = 'Maturation') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 600)) +
scale_x_discrete(expand = c(0,0)) +
annotate('text', x = 2.5, y = 570, label = '[Early maturation]', size = 5) +
annotate('text', x = 10.5, y = 570, label = '[Delayed maturation]', size = 5) +
vio.plot.settings
# Spawner escapement distributions across natural mortality (eta) scenarios.
spawn.eta.vio.plot <- ggplot(data = eta.vio.df) +
geom_violin(aes(x = age_scen, y = spawn/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = 'Spawner escapement (thousands)', title = 'Natural mortality') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 600)) +
annotate('text', x = 2, y = 570, label = '[High mortality]', size = 5) +
annotate('text', x = 11, y = 570, label = '[Low mortality]', size = 5) +
vio.plot.settings
# CV of spawner escapement vs age-structure scenario (tau panel).
spawnCV.tau.plot <- ggplot(data = tau.cv.df) +
geom_point(aes(x = age_struct, y = spawn_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = '') +
# scale_x_continuous(breaks = seq(1,3), labels = c(expression(tau[3]~"= 0.99"), 'Base case', expression(tau[3]~"= 0.25"))) +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
# scale_y_continuous(limits=c(0.65, 0.8)) +
cv.plot.settings
# CV of spawner escapement vs age-structure scenario (eta panel); this
# panel carries the shared legend.
spawnCV.eta.plot <- ggplot(data = eta.cv.df) +
geom_point(aes(x = age_struct, y = spawn_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = 'CV of spawner escapement') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
# scale_y_continuous(limits=c(0.60, 0.7)) +
cv.plot.settings +
theme(legend.position = c(0.8, 0.9))
# Assemble the 2x2 figure: eta column (a, c) on the left, tau (b, d) right.
spawn.tau <- ggarrange(spawn.tau.vio.plot, spawnCV.tau.plot, nrow=2, labels = c('b', 'd'))
spawn.eta <- ggarrange(spawn.eta.vio.plot, spawnCV.eta.plot, nrow=2, labels = c('a', 'c'))
spawn.final <- ggarrange(spawn.eta, spawn.tau, ncol=2)
## FIGURE 4. Harvest violin plots and CV -------------------------------------------------------------------------------------------
# Same layout as Figure 3, but for harvest instead of spawner escapement.
# Harvest distributions across maturation (tau) scenarios.
harvest.tau.vio.plot <- ggplot() +
geom_violin(data = tau.vio.df, aes(x = age_scen, y = harvest/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = '', title = 'Maturation') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 700), breaks = seq(0,700,100)) +
scale_x_discrete(expand = c(0,0)) +
annotate('text', x = 2.5, y = 650, label = '[Early maturation]', size = 4) +
annotate('text', x = 10.5, y = 650, label = '[Delayed maturation]', size = 4) +
vio.plot.settings
# Harvest distributions across natural mortality (eta) scenarios.
harvest.eta.vio.plot <- ggplot(data = eta.vio.df) +
geom_violin(aes(x = age_scen, y = harvest/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = 'Harvest (thousands)', title = 'Natural mortality') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 700), breaks = seq(0,700,100)) +
annotate('text', x = 2, y = 650, label = '[High mortality]', size = 4) +
annotate('text', x = 11, y = 650, label = '[Low mortality]', size = 4) +
vio.plot.settings
# CV of harvest vs age-structure scenario (tau panel).
harvestCV.tau.plot <- ggplot(data = tau.cv.df) +
geom_point(aes(x = age_struct, y = harvest_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = '') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
# scale_y_continuous(limits=c(0.7, 0.825), breaks = seq(0.7,0.825,0.025)) +
cv.plot.settings
# CV of harvest vs age-structure scenario (eta panel); carries the legend.
harvestCV.eta.plot <- ggplot(data = eta.cv.df) +
geom_point(aes(x = age_struct, y = harvest_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = 'CV of harvest') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
# scale_y_continuous(limits=c(0.7, 0.825), breaks = seq(0.7,0.825,0.025)) +
cv.plot.settings +
theme(legend.position = c(0.8, 0.9))
# Assemble: eta column (a, c) left, tau column (b, d) right.
harvest.tau <- ggarrange(harvest.tau.vio.plot, harvestCV.tau.plot, nrow=2, labels = c('b', 'd'))
harvest.eta <- ggarrange(harvest.eta.vio.plot, harvestCV.eta.plot, nrow=2, labels = c('a', 'c'))
harvest.final <- ggarrange(harvest.eta, harvest.tau, ncol=2)
## FIGURE 5. Percent overfished status boxplot -------------------------------------------------------------------------------------
# Theme for the overfished-status boxplot panels.
of.theme.settings <- theme(legend.position = 'none',
legend.title = element_blank(),
legend.text = element_text(size = 14),
plot.title = element_text(hjust = 0.5, size = 16),
axis.ticks.x = element_blank(),
axis.text.x = element_text(hjust = 0, size = 16),
axis.text.y = element_text(size = 16),
axis.title.y = element_text(size = 16),
axis.title.x = element_text(size = 16),
plot.margin = unit(c(0.7,0.5,0,0.5), "cm"))
# % of simulation years in overfished status, tau scenarios. Outliers are
# hidden (outlier.shape = NA); the short 'segment' annotations draw tick
# marks below the axis (clip = "off" lets them render outside the panel),
# one under the middle of each climate group.
of.tau.plot <- ggplot() +
geom_boxplot(data = tau.overfished.df, aes(x = age_scen, y = prop.overfished*100, fill = climate), outlier.shape = NA) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = '', title = 'Maturation') +
scale_x_discrete(breaks = c('0.9','1.9','2.9'), labels = c("Low","Base","High")) +
annotate('segment', x = 2.5, xend = 2.5, y = -0.25, yend = 0) +
annotate('segment', x = 6.5, xend = 6.5, y = -0.25, yend = 0) +
annotate('segment', x = 10.5, xend = 10.5, y = -0.25, yend = 0) +
scale_y_continuous(expand = c(0, 0)) +
coord_cartesian(ylim = c(0,40), clip = "off") +
annotate('text', x = 2.5, y = 39, label = '[Early maturation]', size = 5) +
annotate('text', x = 10.5, y = 39, label = '[Delayed maturation]', size = 5) +
of.theme.settings
# % overfished status, eta scenarios; carries the legend.
of.eta.plot <- ggplot() +
geom_boxplot (data = eta.overfished.df, aes(x = age_scen, y = prop.overfished*100, fill = climate), outlier.shape = NA) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = '% overfished status', title = 'Natural mortality') +
scale_x_discrete(breaks = c('0.9','1.9','2.9'), labels = c("Low","Base","High")) +
annotate('segment', x = 2.5, xend = 2.5, y = -0.25, yend = 0) +
annotate('segment', x = 6.5, xend = 6.5, y = -0.25, yend = 0) +
annotate('segment', x = 10.5, xend = 10.5, y = -0.25, yend = 0) +
scale_y_continuous(expand = c(0, 0)) +
coord_cartesian(ylim = c(0,40), clip = "off") +
annotate('text', x = 2, y = 39, label = '[High mortality]', size = 5) +
annotate('text', x = 11, y = 39, label = '[Low mortality]', size = 5) +
of.theme.settings +
theme(legend.position = c(0.85,0.75))
# Side-by-side figure: eta (a), tau (b).
ggarrange(of.eta.plot, of.tau.plot, labels = c('a','b'))
## FIGURE 6. Percent fishery restrictions ------------------------------------------------------------------------------------------
# Three rows of boxplots showing % of years under each fishery restriction
# level (c = 0.7, 0.25, 0.10), eta column left and tau column right.
c.plot.settings <- theme(legend.position = 'none',
plot.title = element_text(hjust = 0.5, size = 15),
axis.ticks.x = element_blank(),
axis.text.x = element_blank(),
axis.title = element_text(size = 15),
text = element_text(size = 15),
plot.margin = unit(c(0.5,0.2,0,0.5), "cm"))
# Row 1: % of years at c = 0.7 (mildest restriction), tau scenarios.
# The 'segment' annotations are below-axis tick marks (clip = "off").
prop70.tau.plot <- ggplot() +
geom_boxplot(data = tau.overfished.df, aes(x = age_scen, y = prop.70*100, fill = climate), outlier.shape = NA) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = '', title = 'Maturation') +
scale_x_discrete(breaks = c('0.9','1.9','2.9'), labels = c("Low","Base","High")) +
annotate('segment', x = 2.5, xend = 2.5, y = 49, yend = 50) +
annotate('segment', x = 6.5, xend = 6.5, y = 49, yend = 50) +
annotate('segment', x = 10.5, xend = 10.5, y = 49, yend = 50) +
scale_y_continuous(expand = c(0,0)) +
coord_cartesian(ylim = c(50,105), clip = "off") +
annotate('text', x = 2.5, y = 100, label = '[Early maturation]', size = 4) +
annotate('text', x = 10.5, y = 100, label = '[Delayed maturation]', size = 4) +
c.plot.settings
# Row 1: % of years at c = 0.7, eta scenarios.
prop70.eta.plot <- ggplot() +
geom_boxplot(data = eta.overfished.df, aes(x = age_scen, y = prop.70*100, fill = climate), outlier.shape = NA) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = expression(paste("% ",italic(c)," = 0.7")), title = 'Natural mortality') +
scale_x_discrete(breaks = c('0.9','1.9','2.9'), labels = c("Low","Base","High")) +
annotate('segment', x = 2.5, xend = 2.5, y = 49, yend = 50) +
annotate('segment', x = 6.5, xend = 6.5, y = 49, yend = 50) +
annotate('segment', x = 10.5, xend = 10.5, y = 49, yend = 50) +
scale_y_continuous(expand = c(0,0)) +
coord_cartesian(ylim = c(50,105), clip = "off") +
annotate('text', x = 2.5, y = 100, label = '[High mortality]', size = 4) +
annotate('text', x = 10.5, y = 100, label = '[Low mortality]', size = 4) +
c.plot.settings
# Row 2: % of years at c = 0.25, tau scenarios.
prop25.tau.plot <- ggplot() +
geom_boxplot(data = tau.overfished.df, aes(x = age_scen, y = prop.25*100, fill = climate), outlier.shape = NA) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = '', title = '') +
scale_x_discrete(breaks = c('0.9','1.9','2.9'), labels = c("Low","Base","High")) +
annotate('segment', x = 2.5, xend = 2.5, y = -0.25, yend = 0) +
annotate('segment', x = 6.5, xend = 6.5, y = -0.25, yend = 0) +
annotate('segment', x = 10.5, xend = 10.5, y = -0.25, yend = 0) +
scale_y_continuous(expand = c(0,0)) +
coord_cartesian(ylim = c(0,15), clip = "off") +
c.plot.settings
# Row 2: % of years at c = 0.25, eta scenarios; carries the legend.
prop25.eta.plot <- ggplot() +
geom_boxplot(data = eta.overfished.df, aes(x = age_scen, y = prop.25*100, fill = climate), outlier.shape = NA) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = expression(paste("% ",italic(c)," = 0.25")), title = '') +
scale_x_discrete(breaks = c('0.9','1.9','2.9'), labels = c("Low","Base","High")) +
annotate('segment', x = 2.5, xend = 2.5, y = -0.25, yend = 0) +
annotate('segment', x = 6.5, xend = 6.5, y = -0.25, yend = 0) +
annotate('segment', x = 10.5, xend = 10.5, y = -0.25, yend = 0) +
scale_y_continuous(expand = c(0,0)) +
coord_cartesian(ylim = c(0,15), clip = "off") +
c.plot.settings +
theme(legend.position = c(0.8,0.9),
legend.title = element_blank())
# Row 3: % of years at c = 0.10 (strictest restriction), tau scenarios;
# bottom row keeps its x tick labels.
prop10.tau.plot <- ggplot() +
geom_boxplot(data = tau.overfished.df, aes(x = age_scen, y = prop.10*100, fill = climate), outlier.shape = NA) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = '', title = '') +
scale_x_discrete(breaks = c('0.9','1.9','2.9'), labels = c("Low","Base","High")) +
annotate('segment', x = 2.5, xend = 2.5, y = -0.12, yend = 0) +
annotate('segment', x = 6.5, xend = 6.5, y = -0.12, yend = 0) +
annotate('segment', x = 10.5, xend = 10.5, y = -0.12, yend = 0) +
scale_y_continuous(expand = c(0,0)) +
coord_cartesian(ylim = c(0,10), clip = "off") +
c.plot.settings +
theme(axis.text.x = element_text(size = 15, hjust = 0))
# Row 3: % of years at c = 0.10, eta scenarios.
prop10.eta.plot <- ggplot() +
geom_boxplot(data = eta.overfished.df, aes(x = age_scen, y = prop.10*100, fill = climate), outlier.shape = NA) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = expression(paste("% ",italic(c)," = 0.10")), title = '') +
scale_x_discrete(breaks = c('0.9','1.9','2.9'), labels = c("Low","Base","High")) +
annotate('segment', x = 2.5, xend = 2.5, y = -0.12, yend = 0) +
annotate('segment', x = 6.5, xend = 6.5, y = -0.12, yend = 0) +
annotate('segment', x = 10.5, xend = 10.5, y = -0.12, yend = 0) +
scale_y_continuous(expand = c(0,0)) +
coord_cartesian(ylim = c(0,10), clip = "off") +
c.plot.settings +
theme(axis.text.x = element_text(size = 15, hjust = 0))
# Assemble the 3x2 grid: eta column (a, c, e), tau column (b, d, f).
ggarrange(prop70.eta.plot,prop70.tau.plot,
prop25.eta.plot,prop25.tau.plot,
prop10.eta.plot,prop10.tau.plot,
nrow = 3, ncol = 2, labels = c('a','b','c','d','e','f'))
# SUPPLEMENTAL FIGURES -----------------------------
## FIGURE S10. Simulated hydrographs -------------------------------------------
# One simulated n.yr-year flow series per climate scenario (replicate seed
# 100 of flow.sim).
hydro.df <- data.frame(year = seq(1,n.yr),
base = flow.sim(100, 'base', flow.full),
duration = flow.sim(100, 'longer duration', flow.full),
frequency = flow.sim(100, 'more frequent', flow.full),
intensity = flow.sim(100, 'more intense', flow.full))
hydro.plot.settings <- theme(axis.text = element_text(size = 14),
axis.title = element_text(size = 14),
title = element_text(size = 14),
plot.margin = unit(c(0.5,0,0,0.1),'cm'))
# Each panel shows the hydrograph with dashed reference lines at the two
# flow thresholds (10712 and 4295 cfs) and, to the right of the series,
# the % of years falling below each threshold.
# FIX: y-axis label corrected from 'Flow (csf)' to 'Flow (cfs)' (cubic
# feet per second).
# NOTE(review): the segment/axis extents hard-code 100 and the percentage
# annotations divide by n.yr — these only agree when n.yr == 100; confirm.
hydro.contemporary <- ggplot() +
geom_line(data = hydro.df, aes(x = year, y = base), lwd = 0.5) +
geom_segment(aes(x = 0, xend = 100, y = 10712, yend = 10712), lwd = 1, lty = 'dashed') +
geom_segment(aes(x = 0, xend = 100, y = 4295, yend = 4295), lwd = 1, lty = 'dashed') +
labs(x = '', y = 'Flow (cfs)', title = 'Contemporary') +
theme_classic() +
scale_x_continuous(expand = c(0,0), limits = c(0, 120), breaks = seq(0,100,20)) +
annotate('text', x = n.yr+8, y = 10712, label = paste0(as.character((sum(hydro.df$base<10712)/n.yr)*100),'%'), size = 6) +
annotate('text', x = n.yr+8, y = 4295, label = paste0(as.character((sum(hydro.df$base<4295)/n.yr)*100),'%'), size = 6) +
hydro.plot.settings
hydro.duration <- ggplot() +
geom_line(data = hydro.df, aes(x = year, y = duration), lwd = 0.5) +
geom_segment(aes(x = 0, xend = 100, y = 10712, yend = 10712), lwd = 1, lty = 'dashed') +
geom_segment(aes(x = 0, xend = 100, y = 4295, yend = 4295), lwd = 1, lty = 'dashed') +
labs(x = '', y = '', title = 'Longer duration') +
theme_classic() +
scale_x_continuous(expand = c(0,0), limits = c(0, 120), breaks = seq(0,100,20)) +
annotate('text', x = n.yr+8, y = 10712, label = paste0(as.character((sum(hydro.df$duration<10712)/n.yr)*100),'%'), size = 6) +
annotate('text', x = n.yr+8, y = 4295, label = paste0(as.character((sum(hydro.df$duration<4295)/n.yr)*100),'%'), size = 6) +
hydro.plot.settings
hydro.frequency <- ggplot() +
geom_line(data = hydro.df, aes(x = year, y = frequency), lwd = 0.5) +
geom_segment(aes(x = 0, xend = 100, y = 10712, yend = 10712), lwd = 1, lty = 'dashed') +
geom_segment(aes(x = 0, xend = 100, y = 4295, yend = 4295), lwd = 1, lty = 'dashed') +
labs(x = 'Simulation year', y = 'Flow (cfs)', title = 'More frequent') +
theme_classic() +
scale_x_continuous(expand = c(0,0), limits = c(0, 120), breaks = seq(0,100,20)) +
annotate('text', x = n.yr+8, y = 10712, label = paste0(as.character((sum(hydro.df$frequency<10712)/n.yr)*100),'%'), size = 6) +
annotate('text', x = n.yr+8, y = 4295, label = paste0(as.character((sum(hydro.df$frequency<4295)/n.yr)*100),'%'), size = 6) +
hydro.plot.settings
hydro.intensity <- ggplot() +
geom_line(data = hydro.df, aes(x = year, y = intensity), lwd = 0.5) +
geom_segment(aes(x = 0, xend = 100, y = 10712, yend = 10712), lwd = 1, lty = 'dashed') +
geom_segment(aes(x = 0, xend = 100, y = 4295, yend = 4295), lwd = 1, lty = 'dashed') +
labs(x = 'Simulation year', y = '', title = 'More intense') +
theme_classic() +
scale_x_continuous(expand = c(0,0), limits = c(0, 120), breaks = seq(0,100,20)) +
annotate('text', x = n.yr+8, y = 10712, label = paste0(as.character((sum(hydro.df$intensity<10712)/n.yr)*100),'%'), size = 6) +
annotate('text', x = n.yr+8, y = 4295, label = paste0(as.character((sum(hydro.df$intensity<4295)/n.yr)*100),'%'), size = 6) +
hydro.plot.settings
# 2x2 grid of the four climate-scenario hydrographs.
ggarrange(hydro.contemporary,hydro.duration, hydro.frequency, hydro.intensity)
## FIGURE S13. Sensitivity to the CV of realized harvest rate ---------------------------------
# Load the cver.sa.01 ... cver.sa.20 sensitivity runs and build per-model
# summaries (model_summary) and violin data (violin_df) via assign()/get()
# on the zero-padded object names. The rbind-grown cver.vio.df pools all
# 20 scenarios' violin data.
load("cv_er_sa.RData")
cver.vio.df <- NULL
for(i in 1:20){
assign(paste0("cver.sa.",str_pad(as.character(i), width = 2, pad = "0"),".df"), model_summary(get(paste0("cver.sa.",str_pad(as.character(i), width = 2, pad = "0")))))
assign(paste0("cver.sa.",str_pad(as.character(i), width = 2, pad = "0"),".df.vio"), violin_df(get(paste0("cver.sa.",str_pad(as.character(i), width = 2, pad = "0"))), as.character(i)))
cver.vio.df <- rbind(cver.vio.df, get(paste0("cver.sa.",str_pad(as.character(i), width = 2, pad = "0"),".df.vio")))
}
# Tau-scenario summaries tagged with climate and age-structure position
# (mirrors the main-analysis eta.df/tau layout).
cver.tau.df <- rbind(cver.sa.01.df %>% mutate(climate='Contemporary', age_scen = 0.7),
cver.sa.03.df %>% mutate(climate='Contemporary', age_scen = 1.7),
cver.sa.05.df %>% mutate(climate='Contemporary', age_scen = 2.7),
cver.sa.06.df %>% mutate(climate='Longer duration', age_scen = 0.9),
cver.sa.08.df %>% mutate(climate='Longer duration', age_scen = 1.9),
cver.sa.10.df %>% mutate(climate='Longer duration', age_scen = 2.9),
cver.sa.11.df %>% mutate(climate='More frequent', age_scen = 1.1),
cver.sa.13.df %>% mutate(climate='More frequent', age_scen = 2.1),
cver.sa.15.df %>% mutate(climate='More frequent', age_scen = 3.1),
cver.sa.16.df %>% mutate(climate='More intense', age_scen = 1.3),
cver.sa.18.df %>% mutate(climate='More intense', age_scen = 2.3),
cver.sa.20.df %>% mutate(climate='More intense', age_scen = 3.3))
# Re-creates the tau scenario-id lookup (same values as the main analysis).
age.scen.df1 <- data.frame(scenario = as.character(c(1,3,5,6,8,10,11,13,15,16,18,20)),
age_scen = as.character(c(0.7,1.7,2.7,0.9,1.9,2.9,1.1,2.1,3.1,1.3,2.3,3.3)))
# Violin data and CVs for the tau scenarios of the harvest-rate-CV
# sensitivity analysis.
cver.tau.vio.df <- cver.vio.df %>%
filter(scenario %in% as.character(c(1,3,5,6,8,10,11,13,15,16,18,20))) %>%
mutate(climate = ifelse(scenario %in% as.character(c(1,3,5)), 'Contemporary',
ifelse(scenario %in% as.character(c(6,8,10)), 'Longer duration',
ifelse(scenario %in% as.character(c(11,13,15)), 'More frequent',
'More intense')))) %>%
left_join(., age.scen.df1, by = "scenario")
cver.tau.cv.df <- data.frame(climate_scenario = rep(c('Contemporary','Duration','Frequency','Intensity'), each=3),
age_struct = c(seq(0.7,2.7,by=1),seq(0.9,2.9,by=1),seq(1.1,3.1,by=1),seq(1.3,3.3,by=1)),
spawn_cv = c(cver.sa.01.df$spawn.cv, cver.sa.03.df$spawn.cv, cver.sa.05.df$spawn.cv, cver.sa.06.df$spawn.cv, cver.sa.08.df$spawn.cv, cver.sa.10.df$spawn.cv, cver.sa.11.df$spawn.cv, cver.sa.13.df$spawn.cv, cver.sa.15.df$spawn.cv, cver.sa.16.df$spawn.cv, cver.sa.18.df$spawn.cv, cver.sa.20.df$spawn.cv),
harvest_cv = c(cver.sa.01.df$harvest.cv, cver.sa.03.df$harvest.cv, cver.sa.05.df$harvest.cv, cver.sa.06.df$harvest.cv, cver.sa.08.df$harvest.cv, cver.sa.10.df$harvest.cv, cver.sa.11.df$harvest.cv, cver.sa.13.df$harvest.cv, cver.sa.15.df$harvest.cv, cver.sa.16.df$harvest.cv, cver.sa.18.df$harvest.cv, cver.sa.20.df$harvest.cv))
# Same organization for the eta (natural mortality) scenarios.
cver.eta.df <- rbind(cver.sa.02.df %>% mutate(climate='Contemporary', age_scen = 0.7),
cver.sa.03.df %>% mutate(climate='Contemporary', age_scen = 1.7),
cver.sa.04.df %>% mutate(climate='Contemporary', age_scen = 2.7),
cver.sa.07.df %>% mutate(climate='Longer duration', age_scen=0.9),
cver.sa.08.df %>% mutate(climate='Longer duration', age_scen=1.9),
cver.sa.09.df %>% mutate(climate='Longer duration', age_scen=2.9),
cver.sa.12.df %>% mutate(climate='More frequent', age_scen=1.1),
cver.sa.13.df %>% mutate(climate='More frequent', age_scen=2.1),
cver.sa.14.df %>% mutate(climate='More frequent', age_scen=3.1),
cver.sa.17.df %>% mutate(climate='More intense', age_scen=1.3),
cver.sa.18.df %>% mutate(climate='More intense', age_scen=2.3),
cver.sa.19.df %>% mutate(climate='More intense', age_scen=3.3))
# Re-creates the eta scenario-id lookup (same values as the main analysis).
age.scen.df2 <- data.frame(scenario = as.character(c(2,3,4,7,8,9,12,13,14,17,18,19)),
age_scen = as.character(c(0.7,1.7,2.7,0.9,1.9,2.9,1.1,2.1,3.1,1.3,2.3,3.3)))
cver.eta.vio.df <- cver.vio.df %>%
filter(scenario %in% as.character(c(2,3,4,7,8,9,12,13,14,17,18,19))) %>%
mutate(climate = ifelse(scenario %in% as.character(c(2,3,4)), 'Contemporary',
ifelse(scenario %in% as.character(c(7,8,9)), 'Longer duration',
ifelse(scenario %in% as.character(c(12,13,14)), 'More frequent',
'More intense')))) %>%
left_join(., age.scen.df2, by = "scenario")
cver.eta.cv.df <- data.frame(climate_scenario = rep(c('Contemporary','Duration','Frequency','Intensity'), each=3),
age_struct = c(seq(0.7,2.7,by=1),seq(0.9,2.9,by=1),seq(1.1,3.1,by=1),seq(1.3,3.3,by=1)),
spawn_cv = c(cver.sa.02.df$spawn.cv, cver.sa.03.df$spawn.cv, cver.sa.04.df$spawn.cv, cver.sa.07.df$spawn.cv, cver.sa.08.df$spawn.cv, cver.sa.09.df$spawn.cv, cver.sa.12.df$spawn.cv, cver.sa.13.df$spawn.cv, cver.sa.14.df$spawn.cv, cver.sa.17.df$spawn.cv, cver.sa.18.df$spawn.cv, cver.sa.19.df$spawn.cv),
harvest_cv = c(cver.sa.02.df$harvest.cv, cver.sa.03.df$harvest.cv, cver.sa.04.df$harvest.cv, cver.sa.07.df$harvest.cv, cver.sa.08.df$harvest.cv, cver.sa.09.df$harvest.cv, cver.sa.12.df$harvest.cv, cver.sa.13.df$harvest.cv, cver.sa.14.df$harvest.cv, cver.sa.17.df$harvest.cv, cver.sa.18.df$harvest.cv, cver.sa.19.df$harvest.cv))
##
## CVER PLOTS
##
# Figure S13 panels: same layout as main Figures 3-4 but at smaller text
# size (12 vs 16). NOTE: this OVERWRITES the vio.plot.settings and
# cv.plot.settings objects defined for the main figures, so the main-figure
# plots must be rendered before this point.
vio.plot.settings <- theme(legend.title = element_blank(),
legend.position = 'none',
axis.text.x = element_blank(),
axis.ticks.x = element_blank(),
plot.title = element_text(hjust = 0.5),
text = element_text(size = 12),
plot.margin = unit(c(0.5,0,0,0.7),'cm'))
cv.plot.settings <- theme(legend.title = element_blank(),
legend.position = 'none',
text = element_text(size = 12),
plot.margin = unit(c(0.5,0,0,0.7),'cm'))
# Spawner escapement violins, tau scenarios.
cver.spawn.tau.vio.plot <- ggplot() +
geom_violin(data = cver.tau.vio.df, aes(x = age_scen, y = spawn/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = '', title = 'Maturation') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 600)) +
scale_x_discrete(expand = c(0,0)) +
annotate('text', x = 2.5, y = 570, label = '[Early maturation]', size = 3) +
annotate('text', x = 10.5, y = 570, label = '[Delayed maturation]', size = 3) +
vio.plot.settings
# Spawner escapement violins, eta scenarios.
cver.spawn.eta.vio.plot <- ggplot(data = cver.eta.vio.df) +
geom_violin(aes(x = age_scen, y = spawn/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = 'Spawner escapement (thousands)', title = 'Natural mortality') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 600)) +
annotate('text', x = 2, y = 570, label = '[High mortality]', size = 3) +
annotate('text', x = 11, y = 570, label = '[Low mortality]', size = 3) +
vio.plot.settings
# CV of spawner escapement, tau scenarios.
cver.spawnCV.tau.plot <- ggplot(data = cver.tau.cv.df) +
geom_point(aes(x = age_struct, y = spawn_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = '') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.65, 0.8)) +
cv.plot.settings
# CV of spawner escapement, eta scenarios; carries the legend.
cver.spawnCV.eta.plot <- ggplot(data = cver.eta.cv.df) +
geom_point(aes(x = age_struct, y = spawn_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = 'CV of spawner escapement') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.65, 0.8)) +
cv.plot.settings +
theme(legend.position = c(0.8, 0.9))
#harvest
# Harvest violins and CVs, same panel structure as above.
cver.harvest.tau.vio.plot <- ggplot() +
geom_violin(data = cver.tau.vio.df, aes(x = age_scen, y = harvest/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = '', title = '') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 700), breaks = seq(0,700,100)) +
scale_x_discrete(expand = c(0,0)) +
vio.plot.settings
cver.harvest.eta.vio.plot <- ggplot(data = cver.eta.vio.df) +
geom_violin(aes(x = age_scen, y = harvest/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = 'Harvest (thousands)', title = '') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 700), breaks = seq(0,700,100)) +
vio.plot.settings
cver.harvestCV.tau.plot <- ggplot(data = cver.tau.cv.df) +
geom_point(aes(x = age_struct, y = harvest_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = '') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.7, 0.825), breaks = seq(0.7,0.825,0.025)) +
cv.plot.settings
cver.harvestCV.eta.plot <- ggplot(data = cver.eta.cv.df) +
geom_point(aes(x = age_struct, y = harvest_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = 'CV of harvest') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.7, 0.825), breaks = seq(0.7,0.825,0.025)) +
cv.plot.settings
# Assemble the 4x2 grid: eta column left, tau column right; rows are
# spawner violins, spawner CV, harvest violins, harvest CV.
ggarrange(cver.spawn.eta.vio.plot, cver.spawn.tau.vio.plot,
cver.spawnCV.eta.plot, cver.spawnCV.tau.plot,
cver.harvest.eta.vio.plot, cver.harvest.tau.vio.plot,
cver.harvestCV.eta.plot, cver.harvestCV.tau.plot,
nrow = 4, ncol = 2)
## FIGURE S14. Sensitivity to CV of recruitment stochasticity ------------------
# Same pipeline as Figure S13, but for the recruitment-stochasticity-CV
# sensitivity runs (cv.j.01 ... cv.j.20 loaded from cv_j_sa.RData):
# build per-model summaries and violin data via assign()/get(), then pool.
load("cv_j_sa.RData")
cvj.vio.df <- NULL
for(i in 1:20){
assign(paste0("cv.j.",str_pad(as.character(i), width = 2, pad = "0"),".df"), model_summary(get(paste0("cv.j.",str_pad(as.character(i), width = 2, pad = "0")))))
assign(paste0("cv.j.",str_pad(as.character(i), width = 2, pad = "0"),".df.vio"), violin_df(get(paste0("cv.j.",str_pad(as.character(i), width = 2, pad = "0"))), as.character(i)))
cvj.vio.df <- rbind(cvj.vio.df, get(paste0("cv.j.",str_pad(as.character(i), width = 2, pad = "0"),".df.vio")))
}
# Tau-scenario summaries tagged with climate and age-structure position.
cv.j.tau.df <- rbind(cv.j.01.df %>% mutate(climate='Contemporary', age_scen = 0.7),
cv.j.03.df %>% mutate(climate='Contemporary', age_scen = 1.7),
cv.j.05.df %>% mutate(climate='Contemporary', age_scen = 2.7),
cv.j.06.df %>% mutate(climate='Longer duration', age_scen = 0.9),
cv.j.08.df %>% mutate(climate='Longer duration', age_scen = 1.9),
cv.j.10.df %>% mutate(climate='Longer duration', age_scen = 2.9),
cv.j.11.df %>% mutate(climate='More frequent', age_scen = 1.1),
cv.j.13.df %>% mutate(climate='More frequent', age_scen = 2.1),
cv.j.15.df %>% mutate(climate='More frequent', age_scen = 3.1),
cv.j.16.df %>% mutate(climate='More intense', age_scen = 1.3),
cv.j.18.df %>% mutate(climate='More intense', age_scen = 2.3),
cv.j.20.df %>% mutate(climate='More intense', age_scen = 3.3))
# Re-creates the tau scenario-id lookup (same values as earlier sections).
age.scen.df1 <- data.frame(scenario = as.character(c(1,3,5,6,8,10,11,13,15,16,18,20)),
age_scen = as.character(c(0.7,1.7,2.7,0.9,1.9,2.9,1.1,2.1,3.1,1.3,2.3,3.3)))
cv.j.tau.vio.df <- cvj.vio.df %>%
filter(scenario %in% as.character(c(1,3,5,6,8,10,11,13,15,16,18,20))) %>%
mutate(climate = ifelse(scenario %in% as.character(c(1,3,5)), 'Contemporary',
ifelse(scenario %in% as.character(c(6,8,10)), 'Longer duration',
ifelse(scenario %in% as.character(c(11,13,15)), 'More frequent',
'More intense')))) %>%
left_join(., age.scen.df1, by = "scenario")
cv.j.tau.cv.df <- data.frame(climate_scenario = rep(c('Contemporary','Duration','Frequency','Intensity'), each=3),
age_struct = c(seq(0.7,2.7,by=1),seq(0.9,2.9,by=1),seq(1.1,3.1,by=1),seq(1.3,3.3,by=1)),
spawn_cv = c(cv.j.01.df$spawn.cv, cv.j.03.df$spawn.cv, cv.j.05.df$spawn.cv, cv.j.06.df$spawn.cv, cv.j.08.df$spawn.cv, cv.j.10.df$spawn.cv, cv.j.11.df$spawn.cv, cv.j.13.df$spawn.cv, cv.j.15.df$spawn.cv, cv.j.16.df$spawn.cv, cv.j.18.df$spawn.cv, cv.j.20.df$spawn.cv),
harvest_cv = c(cv.j.01.df$harvest.cv, cv.j.03.df$harvest.cv, cv.j.05.df$harvest.cv, cv.j.06.df$harvest.cv, cv.j.08.df$harvest.cv, cv.j.10.df$harvest.cv, cv.j.11.df$harvest.cv, cv.j.13.df$harvest.cv, cv.j.15.df$harvest.cv, cv.j.16.df$harvest.cv, cv.j.18.df$harvest.cv, cv.j.20.df$harvest.cv))
# Same organization for the eta (natural mortality) scenarios.
cv.j.eta.df <- rbind(cv.j.02.df %>% mutate(climate='Contemporary', age_scen = 0.7),
cv.j.03.df %>% mutate(climate='Contemporary', age_scen = 1.7),
cv.j.04.df %>% mutate(climate='Contemporary', age_scen = 2.7),
cv.j.07.df %>% mutate(climate='Longer duration', age_scen=0.9),
cv.j.08.df %>% mutate(climate='Longer duration', age_scen=1.9),
cv.j.09.df %>% mutate(climate='Longer duration', age_scen=2.9),
cv.j.12.df %>% mutate(climate='More frequent', age_scen=1.1),
cv.j.13.df %>% mutate(climate='More frequent', age_scen=2.1),
cv.j.14.df %>% mutate(climate='More frequent', age_scen=3.1),
cv.j.17.df %>% mutate(climate='More intense', age_scen=1.3),
cv.j.18.df %>% mutate(climate='More intense', age_scen=2.3),
cv.j.19.df %>% mutate(climate='More intense', age_scen=3.3))
# Re-creates the eta scenario-id lookup (same values as earlier sections).
age.scen.df2 <- data.frame(scenario = as.character(c(2,3,4,7,8,9,12,13,14,17,18,19)),
age_scen = as.character(c(0.7,1.7,2.7,0.9,1.9,2.9,1.1,2.1,3.1,1.3,2.3,3.3)))
cv.j.eta.vio.df <- cvj.vio.df %>%
filter(scenario %in% as.character(c(2,3,4,7,8,9,12,13,14,17,18,19))) %>%
mutate(climate = ifelse(scenario %in% as.character(c(2,3,4)), 'Contemporary',
ifelse(scenario %in% as.character(c(7,8,9)), 'Longer duration',
ifelse(scenario %in% as.character(c(12,13,14)), 'More frequent',
'More intense')))) %>%
left_join(., age.scen.df2, by = "scenario")
cv.j.eta.cv.df <- data.frame(climate_scenario = rep(c('Contemporary','Duration','Frequency','Intensity'), each=3),
age_struct = c(seq(0.7,2.7,by=1),seq(0.9,2.9,by=1),seq(1.1,3.1,by=1),seq(1.3,3.3,by=1)),
spawn_cv = c(cv.j.02.df$spawn.cv, cv.j.03.df$spawn.cv, cv.j.04.df$spawn.cv, cv.j.07.df$spawn.cv, cv.j.08.df$spawn.cv, cv.j.09.df$spawn.cv, cv.j.12.df$spawn.cv, cv.j.13.df$spawn.cv, cv.j.14.df$spawn.cv, cv.j.17.df$spawn.cv, cv.j.18.df$spawn.cv, cv.j.19.df$spawn.cv),
harvest_cv = c(cv.j.02.df$harvest.cv, cv.j.03.df$harvest.cv, cv.j.04.df$harvest.cv, cv.j.07.df$harvest.cv, cv.j.08.df$harvest.cv, cv.j.09.df$harvest.cv, cv.j.12.df$harvest.cv, cv.j.13.df$harvest.cv, cv.j.14.df$harvest.cv, cv.j.17.df$harvest.cv, cv.j.18.df$harvest.cv, cv.j.19.df$harvest.cv))
##
## CVJ PLOTS
##
# Figure S14 panels, mirroring the Figure S13 layout. NOTE: this again
# OVERWRITES vio.plot.settings and cv.plot.settings (same size-12 values
# as the CVER section).
vio.plot.settings <- theme(legend.title = element_blank(),
legend.position = 'none',
axis.text.x = element_blank(),
axis.ticks.x = element_blank(),
plot.title = element_text(hjust = 0.5),
text = element_text(size = 12),
plot.margin = unit(c(0.5,0,0,0.7),'cm'))
cv.plot.settings <- theme(legend.title = element_blank(),
legend.position = 'none',
text = element_text(size = 12),
plot.margin = unit(c(0.5,0,0,0.7),'cm'))
# Spawner escapement violins, tau scenarios.
cv.j.spawn.tau.vio.plot <- ggplot() +
geom_violin(data = cv.j.tau.vio.df, aes(x = age_scen, y = spawn/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = '', title = 'Maturation') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 600)) +
scale_x_discrete(expand = c(0,0)) +
annotate('text', x = 2.5, y = 570, label = '[Early maturation]', size = 3) +
annotate('text', x = 10.5, y = 570, label = '[Delayed maturation]', size = 3) +
vio.plot.settings
# Spawner escapement violins, eta scenarios.
cv.j.spawn.eta.vio.plot <- ggplot(data = cv.j.eta.vio.df) +
geom_violin(aes(x = age_scen, y = spawn/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = 'Spawner escapement (thousands)', title = 'Natural mortality') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 600)) +
annotate('text', x = 2, y = 570, label = '[High mortality]', size = 3) +
annotate('text', x = 11, y = 570, label = '[Low mortality]', size = 3) +
vio.plot.settings
# CV of spawner escapement, tau scenarios.
cv.j.spawnCV.tau.plot <- ggplot(data = cv.j.tau.cv.df) +
geom_point(aes(x = age_struct, y = spawn_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = '') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.65, 0.8)) +
cv.plot.settings
# CV of spawner escapement, eta scenarios; carries the legend.
cv.j.spawnCV.eta.plot <- ggplot(data = cv.j.eta.cv.df) +
geom_point(aes(x = age_struct, y = spawn_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = 'CV of spawner escapement') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.65, 0.8)) +
cv.plot.settings +
theme(legend.position = c(0.8, 0.9))
#harvest
# Harvest violins and CVs, same panel structure as above.
cv.j.harvest.tau.vio.plot <- ggplot() +
geom_violin(data = cv.j.tau.vio.df, aes(x = age_scen, y = harvest/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = '', title = '') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 700), breaks = seq(0,700,100)) +
scale_x_discrete(expand = c(0,0)) +
vio.plot.settings
cv.j.harvest.eta.vio.plot <- ggplot(data = cv.j.eta.vio.df) +
geom_violin(aes(x = age_scen, y = harvest/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = 'Harvest (thousands)', title = '') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 700), breaks = seq(0,700,100)) +
vio.plot.settings
cv.j.harvestCV.tau.plot <- ggplot(data = cv.j.tau.cv.df) +
geom_point(aes(x = age_struct, y = harvest_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = '') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.7, 0.825), breaks = seq(0.7,0.825,0.025)) +
cv.plot.settings
cv.j.harvestCV.eta.plot <- ggplot(data = cv.j.eta.cv.df) +
geom_point(aes(x = age_struct, y = harvest_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = 'CV of harvest') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.7, 0.825), breaks = seq(0.7,0.825,0.025)) +
cv.plot.settings
# Assemble the 4x2 grid: eta column left, tau column right; rows are
# spawner violins, spawner CV, harvest violins, harvest CV.
ggarrange(cv.j.spawn.eta.vio.plot, cv.j.spawn.tau.vio.plot,
cv.j.spawnCV.eta.plot, cv.j.spawnCV.tau.plot,
cv.j.harvest.eta.vio.plot, cv.j.harvest.tau.vio.plot,
cv.j.harvestCV.eta.plot, cv.j.harvestCV.tau.plot,
nrow = 4, ncol = 2)
## FIGURE S15. Sensitivity to mean NPGO effect -------------------
load("npgo_sa.RData")
# Summarise the 20 NPGO sensitivity runs (objects npgo.01 .. npgo.20 loaded
# from the .RData file above) and stack their violin-plot data frames.
# NOTE(review): the assign()/get() pattern with zero-padded names works, but a
# named list built with lapply() would be easier to maintain; rbind() in a
# loop also grows npgo.vio.df incrementally.
npgo.vio.df <- NULL
for(i in 1:20){
assign(paste0("npgo.",str_pad(as.character(i), width = 2, pad = "0"),".df"), model_summary(get(paste0("npgo.",str_pad(as.character(i), width = 2, pad = "0")))))
assign(paste0("npgo.",str_pad(as.character(i), width = 2, pad = "0"),".df.vio"), violin_df(get(paste0("npgo.",str_pad(as.character(i), width = 2, pad = "0"))), as.character(i)))
npgo.vio.df <- rbind(npgo.vio.df, get(paste0("npgo.",str_pad(as.character(i), width = 2, pad = "0"),".df.vio")))
}
# Maturation (tau) scenario set: runs 1,3,5 / 6,8,10 / 11,13,15 / 16,18,20 map
# to the four climate scenarios; age_scen offsets (x.7/x.9/x.1/x.3) dodge the
# climate groups on a shared numeric x axis.
npgo.tau.df <- rbind(npgo.01.df %>% mutate(climate='Contemporary', age_scen = 0.7),
npgo.03.df %>% mutate(climate='Contemporary', age_scen = 1.7),
npgo.05.df %>% mutate(climate='Contemporary', age_scen = 2.7),
npgo.06.df %>% mutate(climate='Longer duration', age_scen = 0.9),
npgo.08.df %>% mutate(climate='Longer duration', age_scen = 1.9),
npgo.10.df %>% mutate(climate='Longer duration', age_scen = 2.9),
npgo.11.df %>% mutate(climate='More frequent', age_scen = 1.1),
npgo.13.df %>% mutate(climate='More frequent', age_scen = 2.1),
npgo.15.df %>% mutate(climate='More frequent', age_scen = 3.1),
npgo.16.df %>% mutate(climate='More intense', age_scen = 1.3),
npgo.18.df %>% mutate(climate='More intense', age_scen = 2.3),
npgo.20.df %>% mutate(climate='More intense', age_scen = 3.3))
# Lookup of scenario id -> dodged x position for the tau violin data
age.scen.df1 <- data.frame(scenario = as.character(c(1,3,5,6,8,10,11,13,15,16,18,20)),
age_scen = as.character(c(0.7,1.7,2.7,0.9,1.9,2.9,1.1,2.1,3.1,1.3,2.3,3.3)))
npgo.tau.vio.df <- npgo.vio.df %>%
filter(scenario %in% as.character(c(1,3,5,6,8,10,11,13,15,16,18,20))) %>%
mutate(climate = ifelse(scenario %in% as.character(c(1,3,5)), 'Contemporary',
ifelse(scenario %in% as.character(c(6,8,10)), 'Longer duration',
ifelse(scenario %in% as.character(c(11,13,15)), 'More frequent',
'More intense')))) %>%
left_join(., age.scen.df1, by = "scenario")
# CV summary for the tau point panels (3 age structures x 4 climate scenarios)
npgo.tau.cv.df <- data.frame(climate_scenario = rep(c('Contemporary','Duration','Frequency','Intensity'), each=3),
age_struct = c(seq(0.7,2.7,by=1),seq(0.9,2.9,by=1),seq(1.1,3.1,by=1),seq(1.3,3.3,by=1)),
spawn_cv = c(npgo.01.df$spawn.cv, npgo.03.df$spawn.cv, npgo.05.df$spawn.cv, npgo.06.df$spawn.cv, npgo.08.df$spawn.cv, npgo.10.df$spawn.cv, npgo.11.df$spawn.cv, npgo.13.df$spawn.cv, npgo.15.df$spawn.cv, npgo.16.df$spawn.cv, npgo.18.df$spawn.cv, npgo.20.df$spawn.cv),
harvest_cv = c(npgo.01.df$harvest.cv, npgo.03.df$harvest.cv, npgo.05.df$harvest.cv, npgo.06.df$harvest.cv, npgo.08.df$harvest.cv, npgo.10.df$harvest.cv, npgo.11.df$harvest.cv, npgo.13.df$harvest.cv, npgo.15.df$harvest.cv, npgo.16.df$harvest.cv, npgo.18.df$harvest.cv, npgo.20.df$harvest.cv))
# Natural-mortality (eta) scenario set: runs 2,3,4 / 7,8,9 / 12,13,14 /
# 17,18,19 (run 3, 8, 13, 18 are shared base cases with the tau set)
npgo.eta.df <- rbind(npgo.02.df %>% mutate(climate='Contemporary', age_scen = 0.7),
npgo.03.df %>% mutate(climate='Contemporary', age_scen = 1.7),
npgo.04.df %>% mutate(climate='Contemporary', age_scen = 2.7),
npgo.07.df %>% mutate(climate='Longer duration', age_scen=0.9),
npgo.08.df %>% mutate(climate='Longer duration', age_scen=1.9),
npgo.09.df %>% mutate(climate='Longer duration', age_scen=2.9),
npgo.12.df %>% mutate(climate='More frequent', age_scen=1.1),
npgo.13.df %>% mutate(climate='More frequent', age_scen=2.1),
npgo.14.df %>% mutate(climate='More frequent', age_scen=3.1),
npgo.17.df %>% mutate(climate='More intense', age_scen=1.3),
npgo.18.df %>% mutate(climate='More intense', age_scen=2.3),
npgo.19.df %>% mutate(climate='More intense', age_scen=3.3))
age.scen.df2 <- data.frame(scenario = as.character(c(2,3,4,7,8,9,12,13,14,17,18,19)),
age_scen = as.character(c(0.7,1.7,2.7,0.9,1.9,2.9,1.1,2.1,3.1,1.3,2.3,3.3)))
npgo.eta.vio.df <- npgo.vio.df %>%
filter(scenario %in% as.character(c(2,3,4,7,8,9,12,13,14,17,18,19))) %>%
mutate(climate = ifelse(scenario %in% as.character(c(2,3,4)), 'Contemporary',
ifelse(scenario %in% as.character(c(7,8,9)), 'Longer duration',
ifelse(scenario %in% as.character(c(12,13,14)), 'More frequent',
'More intense')))) %>%
left_join(., age.scen.df2, by = "scenario")
npgo.eta.cv.df <- data.frame(climate_scenario = rep(c('Contemporary','Duration','Frequency','Intensity'), each=3),
age_struct = c(seq(0.7,2.7,by=1),seq(0.9,2.9,by=1),seq(1.1,3.1,by=1),seq(1.3,3.3,by=1)),
spawn_cv = c(npgo.02.df$spawn.cv, npgo.03.df$spawn.cv, npgo.04.df$spawn.cv, npgo.07.df$spawn.cv, npgo.08.df$spawn.cv, npgo.09.df$spawn.cv, npgo.12.df$spawn.cv, npgo.13.df$spawn.cv, npgo.14.df$spawn.cv, npgo.17.df$spawn.cv, npgo.18.df$spawn.cv, npgo.19.df$spawn.cv),
harvest_cv = c(npgo.02.df$harvest.cv, npgo.03.df$harvest.cv, npgo.04.df$harvest.cv, npgo.07.df$harvest.cv, npgo.08.df$harvest.cv, npgo.09.df$harvest.cv, npgo.12.df$harvest.cv, npgo.13.df$harvest.cv, npgo.14.df$harvest.cv, npgo.17.df$harvest.cv, npgo.18.df$harvest.cv, npgo.19.df$harvest.cv))
##
## NPGO PLOTS
##
# Shared theme fragments for the NPGO figure. vio.plot.settings hides the x
# axis (violin panels label scenarios via annotate() instead of tick labels);
# cv.plot.settings keeps the axis for the CV point panels. Both suppress the
# legend so a single panel can re-enable it.
vio.plot.settings <- theme(legend.title = element_blank(),
legend.position = 'none',
axis.text.x = element_blank(),
axis.ticks.x = element_blank(),
plot.title = element_text(hjust = 0.5),
text = element_text(size = 12),
plot.margin = unit(c(0.5,0,0,0.7),'cm'))
cv.plot.settings <- theme(legend.title = element_blank(),
legend.position = 'none',
text = element_text(size = 12),
plot.margin = unit(c(0.5,0,0,0.7),'cm'))
# NPGO sensitivity figure: same 4x2 panel layout as the cv.j figure above
# (spawner/harvest violins plus CV point panels, tau vs eta columns).
npgo.spawn.tau.vio.plot <- ggplot() +
geom_violin(data = npgo.tau.vio.df, aes(x = age_scen, y = spawn/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = '', title = 'Maturation') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 600)) +
scale_x_discrete(expand = c(0,0)) +
annotate('text', x = 2.5, y = 570, label = '[Early maturation]', size = 3) +
annotate('text', x = 10.5, y = 570, label = '[Delayed maturation]', size = 3) +
vio.plot.settings
npgo.spawn.eta.vio.plot <- ggplot(data = npgo.eta.vio.df) +
geom_violin(aes(x = age_scen, y = spawn/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = 'Spawner escapement (thousands)', title = 'Natural mortality') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 600)) +
annotate('text', x = 2, y = 570, label = '[High mortality]', size = 3) +
annotate('text', x = 11, y = 570, label = '[Low mortality]', size = 3) +
vio.plot.settings
# CV of spawner escapement by age-structure scenario
npgo.spawnCV.tau.plot <- ggplot(data = npgo.tau.cv.df) +
geom_point(aes(x = age_struct, y = spawn_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = '') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.65, 0.8)) +
cv.plot.settings
npgo.spawnCV.eta.plot <- ggplot(data = npgo.eta.cv.df) +
geom_point(aes(x = age_struct, y = spawn_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = 'CV of spawner escapement') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.65, 0.8)) +
cv.plot.settings +
# only this panel carries the legend (placed inside the plot area)
theme(legend.position = c(0.8, 0.9))
#harvest
npgo.harvest.tau.vio.plot <- ggplot() +
geom_violin(data = npgo.tau.vio.df, aes(x = age_scen, y = harvest/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = '', title = '') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 700), breaks = seq(0,700,100)) +
scale_x_discrete(expand = c(0,0)) +
vio.plot.settings
npgo.harvest.eta.vio.plot <- ggplot(data = npgo.eta.vio.df) +
geom_violin(aes(x = age_scen, y = harvest/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = 'Harvest (thousands)', title = '') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 700), breaks = seq(0,700,100)) +
vio.plot.settings
npgo.harvestCV.tau.plot <- ggplot(data = npgo.tau.cv.df) +
geom_point(aes(x = age_struct, y = harvest_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = '') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.7, 0.825), breaks = seq(0.7,0.825,0.025)) +
cv.plot.settings
npgo.harvestCV.eta.plot <- ggplot(data = npgo.eta.cv.df) +
geom_point(aes(x = age_struct, y = harvest_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = 'CV of harvest') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.7, 0.825), breaks = seq(0.7,0.825,0.025)) +
cv.plot.settings
# Assemble the 4x2 figure: eta column left, tau column right
ggarrange(npgo.spawn.eta.vio.plot, npgo.spawn.tau.vio.plot,
npgo.spawnCV.eta.plot, npgo.spawnCV.tau.plot,
npgo.harvest.eta.vio.plot, npgo.harvest.tau.vio.plot,
npgo.harvestCV.eta.plot, npgo.harvestCV.tau.plot,
nrow = 4, ncol = 2)
## FIGURE S16. total escapement plots ----------
# Re-define the shared theme fragments with a larger base font for this
# figure (this overwrites the size-12 versions used earlier in the file).
vio.plot.settings <- theme(legend.title = element_blank(),
legend.position = 'none',
axis.text.x = element_blank(),
axis.ticks.x = element_blank(),
plot.title = element_text(hjust = 0.5),
text = element_text(size = 16),
plot.margin = unit(c(0.5,0,0,0.7),'cm'))
cv.plot.settings <- theme(legend.title = element_blank(),
legend.position = 'none',
text = element_text(size = 16),
plot.margin = unit(c(0.5,0,0,0.7),'cm'))
# Total run size = spawners + harvest (plotted in thousands)
total.run.tau.vio.plot <- ggplot() +
geom_violin(data = tau.vio.df, aes(x = age_scen, y = (spawn+harvest)/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey75", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = '', title = 'Maturation') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 1100)) +
scale_x_discrete(expand = c(0,0)) +
annotate('text', x = 2.5, y = 1000, label = '[Early maturation]', size = 4) +
annotate('text', x = 10.5, y = 1000, label = '[Delayed maturation]', size = 4) +
vio.plot.settings
total.run.eta.vio.plot <- ggplot(data = eta.vio.df) +
geom_violin(aes(x = age_scen, y = (spawn+harvest)/1000, fill = climate), draw_quantiles = 0.5) +
scale_fill_manual(values = c("grey", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = '', y = 'Total run size (thousands)', title = 'Natural mortality') +
scale_y_continuous(expand = c(0, 0), limits = c(0, 1100)) +
annotate('text', x = 2, y = 1000, label = '[High mortality]', size = 4) +
annotate('text', x = 11, y = 1000, label = '[Low mortality]', size = 4) +
vio.plot.settings
total.run.CV.tau.plot <- ggplot(data = tau.cv.df) +
geom_point(aes(x = age_struct, y = totalrun_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = '') +
# scale_x_continuous(breaks = seq(1,3), labels = c(expression(tau[3]~"= 0.99"), 'Base case', expression(tau[3]~"= 0.25"))) +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.5, 0.65)) +
cv.plot.settings
total.run.CV.eta.plot <- ggplot(data = eta.cv.df) +
geom_point(aes(x = age_struct, y = totalrun_cv, color = climate_scenario), size = 3) +
scale_color_manual(values = c("black", "#E69F00", "#56B4E9", "#009E73")) +
theme_classic() +
labs(x = 'Age structure scenario', y = 'CV of total run size') +
scale_x_continuous(breaks = seq(1,3), labels = c('Low', 'Base case', 'High')) +
scale_y_continuous(limits=c(0.5, 0.65)) +
cv.plot.settings +
theme(legend.position = c(0.8, 0.9))
# Assemble: eta column (panels a, c) left of tau column (panels b, d)
totalrun.tau <- ggarrange(total.run.tau.vio.plot, total.run.CV.tau.plot, nrow=2, labels = c('b', 'd'))
totalrun.eta <- ggarrange(total.run.eta.vio.plot, total.run.CV.eta.plot, nrow=2, labels = c('a', 'c'))
totalrun.final <- ggarrange(totalrun.eta, totalrun.tau, ncol=2)
# 100-YEAR MODEL VALIDATION ---------------------------------------------------------------------------------------------
# Compare simulated trajectories against observed data (catch.esc).
base.mod.df <- model_summary(mod.03)
# plots
sim.nums <- n.sim
base.mod1 <- base.mod #%>% filter(sim %in% sim.nums)
# Spaghetti plot of simulated escapement with reference lines:
# black = simulated mean/median, blue = observed mean/median,
# red = 91500 (fixed reference level; presumably a management target -- TODO confirm)
hundo.spawn <- ggplot() +
geom_line(data = base.mod1, aes(x = year, y = Spawn.est, group = sim), color = 'gray70', alpha = 0.3) +
# geom_line(data = base.mod2, aes(x = year, y = Spawn.est), color = 'black') +
# geom_line(aes(x = 1:26, y = catch.esc$total.esc), color = 'red') +
geom_hline(yintercept = base.mod.df$spawn.mean, color = 'black') +
geom_hline(yintercept = base.mod.df$spawn.median, color = 'black', lty = 'dashed') +
geom_hline(yintercept = mean(catch.esc$total.esc), color = 'blue') +
geom_hline(yintercept = median(catch.esc$total.esc), color = 'blue', lty = 'dashed') +
geom_hline(yintercept = 91500, color = 'red') +
# geom_hline(yintercept = 122000, color = 'red', lty = 'dashed') +
theme_classic() +
scale_x_continuous(expand = c(0,0), limits = c(1,100)) +
scale_y_continuous(expand = c(0,0), limits = c(0, max(base.mod1$Spawn.est))) +
labs(x = 'Year', y = 'Total escapement') +
theme(plot.margin = unit(c(0.5,0.5,0.5,0.5), 'cm'))
# Same layout for harvest; observed harvest = ocean + river components
hundo.harvest <- ggplot() +
geom_line(data = base.mod1, aes(x = year, y = harvest, group = sim), color = 'gray70', alpha = 0.3) +
# geom_line(data = base.mod2, aes(x = year, y = harvest), color = 'black') +
# geom_line(aes(x = 1:26, y = catch.esc$total.esc), color = 'red') +
geom_hline(yintercept = base.mod.df$harvest.mean, color = 'black') +
geom_hline(yintercept = base.mod.df$harvest.median, color = 'black', lty = 'dashed') +
geom_hline(yintercept = mean(catch.esc$total.ocean.harvest + catch.esc$river.harvest), color = 'blue') +
geom_hline(yintercept = median(catch.esc$total.ocean.harvest + catch.esc$river.harvest), color = 'blue', lty = 'dashed') +
theme_classic() +
scale_x_continuous(expand = c(0,0), limits = c(1,100)) +
scale_y_continuous(expand = c(0,0), limits = c(0, max(base.mod1$harvest))) +
labs(x = 'Year', y = 'Harvest') +
theme(plot.margin = unit(c(0.5,0.5,0.5,0.5), 'cm'))
ggarrange(hundo.spawn, hundo.harvest, nrow = 2, ncol = 1)
# Check age-composition of spawners: compare the simulated proportions at age
# (years >= 30, i.e. post burn-in) against published empirical estimates.
hundo.age.comp <- data.frame(age = c('2','3','4','5'),
mean = as.numeric(base.mod %>% filter(year >= 30) %>% dplyr::select(spawn.2, spawn.3, spawn.4, spawn.5) %>% summarise(across(1:4, mean))))
hundo.age.comp$prop <- hundo.age.comp$mean/sum(hundo.age.comp$mean)
hundo.age.comp$source <- 'Simulated'
# Empirical spawner age composition (Satterthwaite et al. 2017)
wills.data <- data.frame(age = c('1-2', '3', '4', '5+'),
prop = c(0.13, 0.65, 0.22, 0.003))
# geom_col() replaces the deprecated geom_histogram(stat = 'identity') idiom;
# data is passed via data= with bare column names in aes() instead of df$col.
sim.age.com <- ggplot(data = hundo.age.comp) +
geom_col(aes(x = age, y = prop)) +
labs(x = 'Age', y = 'Proportion of spawners', title = 'Simulated') +
scale_y_continuous(expand = c(0,0), limits = c(0, 0.7)) +
theme_classic()
will.age.com <- ggplot(data = wills.data) +
geom_col(aes(x = age, y = prop)) +
labs(x = 'Age', y = 'Proportion of spawners', title = 'Satterthwaite et al. 2017') +
scale_y_continuous(expand = c(0,0), limits = c(0, 0.7)) +
theme_classic()
# Check age-composition of harvest
harv.age.comp <- data.frame(age = c('2','3','4','5'),
mean = as.numeric(base.mod %>% filter(year >= 30) %>% dplyr::select(harvest.2, harvest.3, harvest.4, harvest.5) %>% summarise(across(1:4, mean))))
harv.age.comp$prop <- (harv.age.comp$mean/sum(harv.age.comp$mean))
# Empirical ocean-impact age composition for the 1998 and 1999 brood years
# (BY) from Palmer-Zwahlen et al. 2006
melodies.data <- data.frame(age = c('2','3','4','5','2','3','4','5'),
year = c(rep('1998 BY', times = 4), rep('1999 BY', times = 4)),
prop = c(0.06, 0.82, 0.17, 0.0005, 0.008, 0.632, 0.352, 0.004))
sim.harv.plot <- ggplot(data = harv.age.comp) +
geom_col(aes(x = age, y = prop)) +
# fixed axis-label typo: 'Proportio' -> 'Proportion'
labs(x = 'Age', y = 'Proportion of harvest', title = 'Simulated') +
scale_y_continuous(expand = c(0,0), limits = c(0, 0.9), breaks = seq(0, 0.8, by = 0.2)) +
theme_classic()
melodi.harv.plot <- ggplot(data = melodies.data) +
geom_col(aes(x = age, y = prop, fill = year), position = 'dodge') +
scale_fill_manual("legend", values = c("1998 BY" = "grey35", "1999 BY" = "grey")) +
scale_y_continuous(expand = c(0,0), limits = c(0, 0.9), breaks = seq(0, 0.8, by = 0.2)) +
labs(x = 'Age', y = 'Proportion of Feather River hatchery ocean impacts', title = 'Palmer-Zwahlen et al. 2006') +
theme_classic() +
theme(legend.title = element_blank(), legend.position = c(0.8, 0.8))
ggarrange(sim.age.com, will.age.com, sim.harv.plot, melodi.harv.plot, nrow=2, ncol=2)
# Check autocorrelation: compare the lag 0-6 ACF of observed total escapement
# with the mean simulated ACF across simulations (years 30-99, post burn-in).
tmp.acf <- acf(catch.esc$total.esc, 6)
tmp.acf <- data.frame(lag = 0:6, acf = tmp.acf$acf)
# Preallocate one result per simulation instead of growing tmp.acf3 with
# rbind() inside the loop (O(n^2) copying); seq_len() is safe when n.sim == 0.
acf.list <- vector("list", n.sim)
for(i in seq_len(n.sim)){
tmp.acf1 <- base.mod %>% filter(sim == paste0('s',i)) %>% filter(year < 100 & year > 29)
tmp.acf2 <- acf(tmp.acf1$Spawn.est, 6, plot=FALSE)
acf.list[[i]] <- data.frame(lag = 0:6, acf = tmp.acf2$acf)
}
tmp.acf3 <- do.call(rbind, acf.list)
# Mean ACF at each lag across all simulations
tmp4 <- tmp.acf3 %>% group_by(lag) %>% summarise(acf = mean(acf))
plot(tmp4$lag, tmp4$acf)
|
# Symposium plotting script: intervention-impact (ATSB/ITN) lookup tables.
library(data.table)
library(ggplot2)
library(RColorBrewer)
# NOTE(review): rm(list = ls()) wipes the caller's workspace; acceptable only
# if this file is always run as a standalone script -- confirm.
rm(list=ls())
theme_set(theme_minimal(base_size = 18))
# Input lookup tables and output plot directory (Dropbox paths under $HOME)
main_dir <- file.path(Sys.getenv("HOME"),
"Dropbox (IDM)/Malaria Team Folder/projects/map_intervention_impact/lookup_tables/interactions")
plot_dir <- file.path(Sys.getenv("HOME"),
"Dropbox (IDM)/Malaria Team Folder/projects/map_intervention_impact/",
"writing_and_presentations/ii_paper/symposium/images/pdfs_from_r")
#' Loess-smooth a response vector, clamped to be non-negative.
#'
#' Fits a loess curve through the strictly-positive (x, y) pairs and returns
#' the fitted values in place of the positive y's; zero entries are left at
#' zero. If every y is below 0.05 the input is returned unchanged (nothing
#' worth smoothing).
#'
#' Fix: the original built the result as c(y[y == 0], predict(lo)), which
#' moved all zeros to the front of the vector regardless of where they
#' occurred, misaligning the smoothed values with x whenever zeros were
#' interspersed. Positions are now preserved.
#'
#' @param x numeric predictor (same length as y)
#' @param y numeric response, assumed non-negative
#' @return numeric vector, same length/order as y, all values >= 0
get_smooth <- function(x, y) {
  if (max(y) < 0.05) {
    return(y)
  }
  pos <- y > 0
  lo <- loess(y[pos] ~ x[pos])
  predictions <- y
  predictions[pos] <- predict(lo)
  # loess can dip slightly below zero; clamp (pmax recycles the scalar 0)
  pmax(predictions, 0)
}
# Per-site vector behaviour: anthropophily (% bites on humans), endophagy
# (% bites indoors), and a fixed plotting colour for each site.
anthro_endo_map <- data.table(Site_Name=c("aba", "kananga", "kasama", "djibo", "gode", "moine", "bajonapo", "karen"),
anthro=c(74.45, 65.02, 79.04, 76.6, 75, 75.78, 50, 50),
endo=c(80, 85, 80.38, 55.6, 50, 52.73, 60, 24.6),
map_color=c("#00A08A", "#D71B5A", "#F2AD00", "#F98400", "#902E57", "#5392C2", "#7DB548", "#8971B3"))
# % of all bites that are on humans AND indoors
anthro_endo_map[, human_indoor:= round((anthro*endo)/100, 1)]
# Simulation outputs for the four ATSB scenario files plus the burn-in run
atsb_runs <- c("MAP_For_Symposium_ATSB_Higher_Existing_Intervention.csv",
"MAP_For_Symposium_ATSB_Lower_Intervention.csv",
"MAP_For_Symposium_ATSB_Lower_Existing_Intervention.csv",
"MAP_For_Symposium_ATSB_No_Existing_Intervention.csv")
initial <- fread(file.path(main_dir, "../initial/MAP_II_New_Sites_Burnin.csv"))
prelim_data <- rbindlist(lapply(atsb_runs, function(fname){fread(file.path(main_dir, fname))}))
# Keep only low ATSB effects (<= 5%) and attach initial prevalence per
# site/seed/habitat combination
all_data <- merge(prelim_data[ATSB_Initial_Effect<=0.05], initial, by=c("Site_Name", "Run_Number", "x_Temporary_Larval_Habitat"), all=T)
all_data[, Run_Number:=factor(Run_Number)]
all_data[, Intervention:= paste0("Baseline:", ITN_Coverage*100, "%, ", "ATSB Initial Kill:", ATSB_Initial_Effect*100, "%")]
# Mean prevalence across seeds for each site/habitat/intervention cell
all_data[, mean_initial:= mean(initial_prev), by=list(Site_Name, x_Temporary_Larval_Habitat, Intervention)]
all_data[, mean_final:=mean(final_prev), by=list(Site_Name, x_Temporary_Larval_Habitat, Intervention)]
all_data = merge(all_data, anthro_endo_map, by="Site_Name", all.x=T)
# Seed-level min/max envelope of final prevalence per cell
minmaxes <- all_data[, list(mean_initial=unique(mean_initial),
mean_final=unique(mean_final),
min_final=min(final_prev),
max_final=max(final_prev)),
by=list(Site_Name, x_Temporary_Larval_Habitat, Intervention)]
# Loess-smooth the envelope and mean per site x intervention.
# NOTE(review): 'subset' shadows base::subset here; data.table := modifies
# the subsetted table by reference and returns it invisibly, which is what
# rbindlist() collects.
minmaxes_smooth <- lapply(unique(minmaxes$Site_Name), function(site_name){
sub_list <- lapply(unique(minmaxes$Intervention), function(int_name){
subset <- minmaxes[Site_Name==site_name & Intervention==int_name]
subset[, smooth_min:= get_smooth(mean_initial, min_final)]
subset[, smooth_max:= get_smooth(mean_initial, max_final)]
subset[, smooth_mean:= get_smooth(mean_initial, mean_final)]
})
sub_list <- rbindlist(sub_list)
})
minmaxes_smooth <- rbindlist(minmaxes_smooth)
all_data <- merge(all_data, minmaxes_smooth, by=c("Site_Name", "x_Temporary_Larval_Habitat", "Intervention",
"mean_initial", "mean_final"), all=T)
all_data[, human_indoor:=as.factor(human_indoor)]
# Colour palette ordered by indoor-biting fraction (karen/bajonapo excluded)
these_colors <- unique(all_data[!Site_Name %in% c("karen", "bajonapo"), list(human_indoor, map_color)])
these_colors <- these_colors[order(human_indoor)]$map_color
x_temps <- unique(all_data$x_Temporary_Larval_Habitat)
# Overview: smoothed mean final vs initial prevalence per site, with the
# seed min/max ribbon; the identity line marks "no intervention effect".
pdf(file.path(plot_dir, "overview.pdf"), width=7, height=5)
ggplot(all_data[ITN_Coverage==0.4 & ATSB_Initial_Effect==0 & !Site_Name %in% c("karen", "bajonapo")], aes(x=mean_initial, y=mean_final)) +
geom_abline(size=1.5, alpha=0.5)+
geom_ribbon(aes(ymin=smooth_min, ymax=smooth_max, fill=human_indoor, group=Site_Name), alpha=0.25) +
geom_line(aes(color=human_indoor, group=Site_Name), size=1.25) +
scale_color_manual(values=these_colors, name="Indoor Biting %") +
scale_fill_manual(values=these_colors, name="Indoor Biting %") +
xlim(0,0.85) +
ylim(0,0.85) +
theme(legend.position="none") +
coord_fixed() +
labs(x="Initial Prevalence",
y="Final Prevalence",
title="")
graphics.off()
# Same comparison but raw seed-level points instead of smoothed lines
pdf(file.path(plot_dir,"overview_points.pdf"), width=7, height=5)
ggplot(all_data[ITN_Coverage==0.4 & ATSB_Initial_Effect==0 & !Site_Name %in% c("karen", "bajonapo")], aes(x=initial_prev, y=final_prev)) +
geom_abline(size=1.5, alpha=0.5)+
geom_point(aes(color=human_indoor, group=Site_Name), size=1.5, alpha=0.75) +
scale_color_manual(values=these_colors, name="Indoor Biting %") +
xlim(0,0.85) +
ylim(0,0.85) +
theme(legend.position="none") +
coord_fixed() +
labs(x="Initial Prevalence",
y="Final Prevalence",
title="")
graphics.off()
# Two contrasting sites only (aba vs gode), hard-coded colours
pdf(file.path(plot_dir,"twosite_points.pdf"), width=7, height=5)
ggplot(all_data[ITN_Coverage==0.4 & ATSB_Initial_Effect==0 & Site_Name %in% c("aba", "gode")], aes(x=initial_prev, y=final_prev)) +
geom_abline(size=1.5, alpha=0.5)+
geom_point(aes(color=human_indoor, group=Site_Name), size=1.5, alpha=0.75) +
scale_color_manual(values=c("#902E57","#00A08A"), name="Indoor Biting %") +
xlim(0,0.85) +
ylim(0,0.85) +
theme(legend.position="none") +
coord_fixed() +
labs(x="Initial Prevalence",
y="Final Prevalence",
title="")
graphics.off()
# Single site (aba) at one fixed larval-habitat multiplier (x_temps[20])
pdf(file.path(plot_dir, "aba_point_lower_init.pdf"), width=7, height=5)
ggplot(all_data[ITN_Coverage==0.4 & ATSB_Initial_Effect==0 &
Site_Name=="aba" & x_Temporary_Larval_Habitat==x_temps[20]], aes(x=initial_prev, y=final_prev)) +
geom_abline(size=1.5, alpha=0.5)+
geom_point(aes(color=human_indoor, group=Run_Number), size=1.5, alpha=0.75) +
scale_color_manual(values=unique(all_data[Site_Name=="aba"]$map_color), name="Indoor Biting %") +
xlim(0,0.85)+
ylim(0,0.85) +
theme(legend.position="none") +
coord_fixed() +
labs(x="Initial Prevalence",
y="Final Prevalence",
title="")
graphics.off()
# Single site (aba), higher larval-habitat multiplier (x_temps[30])
pdf(file.path(plot_dir, "aba_point_higher_init.pdf"), width=7, height=5)
ggplot(all_data[ITN_Coverage==0.4 & ATSB_Initial_Effect==0 &
Site_Name=="aba" & x_Temporary_Larval_Habitat==x_temps[30]], aes(x=initial_prev, y=final_prev)) +
geom_abline(size=1.5, alpha=0.5)+
geom_point(aes(color=human_indoor, group=Run_Number), size=1.5, alpha=0.75) +
scale_color_manual(values=unique(all_data[Site_Name=="aba"]$map_color), name="Indoor Biting %") +
xlim(0,0.85)+
ylim(0,0.85) +
theme(legend.position="none") +
coord_fixed() +
labs(x="Initial Prevalence",
y="Final Prevalence",
title="")
graphics.off()
# Single site (aba), all larval-habitat multipliers
pdf(file.path(plot_dir, "aba_point_all.pdf"), width=7, height=5)
ggplot(all_data[ITN_Coverage==0.4 & ATSB_Initial_Effect==0 &
Site_Name=="aba"], aes(x=initial_prev, y=final_prev)) +
geom_abline(size=1.5, alpha=0.5)+
geom_point(aes(color=human_indoor, group=Run_Number), size=1.5, alpha=0.75) +
scale_color_manual(values=unique(all_data[Site_Name=="aba"]$map_color), name="Indoor Biting %") +
xlim(0,0.85)+
ylim(0,0.85) +
theme(legend.position="none") +
coord_fixed() +
labs(x="Initial Prevalence",
y="Final Prevalence",
title="")
graphics.off()
# Single site (aba), smoothed mean line with min/max ribbon
pdf(file.path(plot_dir, "aba_line.pdf"), width=7, height=5)
ggplot(all_data[ITN_Coverage==0.4 & ATSB_Initial_Effect==0 &
Site_Name=="aba"], aes(x=mean_initial, y=mean_final)) +
geom_abline(size=1.5, alpha=0.5)+
geom_line(aes(color=human_indoor), size=1.25) +
geom_ribbon(aes(ymin=smooth_min, ymax=smooth_max, fill=human_indoor), alpha=0.25) +
scale_color_manual(values=unique(all_data[Site_Name=="aba"]$map_color), name="Indoor Biting %") +
scale_fill_manual(values=unique(all_data[Site_Name=="aba"]$map_color), name="Indoor Biting %") +
xlim(0,0.85)+
ylim(0,0.85) +
theme(legend.position="none") +
coord_fixed() +
labs(x="Initial Prevalence",
y="Final Prevalence",
title="")
graphics.off()
# All sites: 40% ITN coverage, no ATSB (Spectral palette, legend shown)
pdf(file.path(plot_dir, "baseline_40.pdf"), width=7, height=5)
ggplot(all_data[ITN_Coverage==0.4 & ATSB_Initial_Effect==0], aes(x=mean_initial, y=mean_final)) +
geom_abline(size=1.5, alpha=0.5)+
geom_ribbon(aes(ymin=smooth_min, ymax=smooth_max, fill=human_indoor, group=Site_Name), alpha=0.25) +
geom_line(aes(color=human_indoor, group=Site_Name), size=1.25) +
scale_color_manual(values=brewer.pal(8, "Spectral"), name="Indoor Biting %") +
scale_fill_manual(values=brewer.pal(8, "Spectral"), name="Indoor Biting %") +
xlim(0,0.85)+
ylim(0,0.85) +
theme(legend.position="right") +
coord_fixed() +
labs(x="Initial Prevalence",
y="Final Prevalence")
graphics.off()
# All sites: ATSB at 5% initial kill, no ITNs
pdf(file.path(plot_dir, "atsb_5.pdf"), width=7, height=5)
ggplot(all_data[ITN_Coverage==0 & ATSB_Initial_Effect==0.05], aes(x=mean_initial, y=mean_final)) +
geom_abline(size=1.5, alpha=0.5)+
geom_ribbon(aes(ymin=smooth_min, ymax=smooth_max, fill=human_indoor, group=Site_Name), alpha=0.25) +
geom_line(aes(color=human_indoor, group=Site_Name), size=1.25) +
scale_color_manual(values=brewer.pal(8, "Spectral"), name="Indoor Biting %") +
scale_fill_manual(values=brewer.pal(8, "Spectral"), name="Indoor Biting %") +
xlim(0,0.85)+
ylim(0,0.85) +
theme(legend.position="right") +
coord_fixed() +
labs(x="Initial Prevalence",
y="Final Prevalence")
graphics.off()
# All sites: combined 40% ITN + 5% ATSB
pdf(file.path(plot_dir, "baseline_40_atsb_5.pdf"), width=7, height=5)
ggplot(all_data[ITN_Coverage==0.4 & ATSB_Initial_Effect==0.05], aes(x=mean_initial, y=mean_final)) +
geom_abline(size=1.5, alpha=0.5)+
geom_ribbon(aes(ymin=smooth_min, ymax=smooth_max, fill=human_indoor, group=Site_Name), alpha=0.25) +
geom_line(aes(color=human_indoor, group=Site_Name), size=1.25) +
scale_color_manual(values=brewer.pal(8, "Spectral"), name="Indoor Biting %") +
scale_fill_manual(values=brewer.pal(8, "Spectral"), name="Indoor Biting %") +
xlim(0,0.85)+
ylim(0,0.85) +
theme(legend.position="right") +
coord_fixed() +
labs(x="Initial Prevalence",
y="Final Prevalence")
graphics.off()
| /intervention_impact/visualize_results/older_plotting/plots_for_symposium.r | no_license | InstituteforDiseaseModeling/archetypes-intervention-impact | R | false | false | 10,111 | r | library(data.table)
# NOTE(review): from here the file appears to repeat the symposium script
# above verbatim (this looks like a concatenated dataset dump) -- confirm
# whether the duplicate should exist at all.
library(ggplot2)
library(RColorBrewer)
# NOTE(review): rm(list = ls()) wipes the caller's workspace; standalone use only
rm(list=ls())
theme_set(theme_minimal(base_size = 18))
# Input lookup tables and output plot directory (Dropbox paths under $HOME)
main_dir <- file.path(Sys.getenv("HOME"),
"Dropbox (IDM)/Malaria Team Folder/projects/map_intervention_impact/lookup_tables/interactions")
plot_dir <- file.path(Sys.getenv("HOME"),
"Dropbox (IDM)/Malaria Team Folder/projects/map_intervention_impact/",
"writing_and_presentations/ii_paper/symposium/images/pdfs_from_r")
#' Loess-smooth a response vector, clamped to be non-negative.
#'
#' Fits a loess curve through the strictly-positive (x, y) pairs and returns
#' the fitted values in place of the positive y's; zero entries are left at
#' zero. If every y is below 0.05 the input is returned unchanged.
#'
#' Fix: the original built the result as c(y[y == 0], predict(lo)), which
#' moved all zeros to the front of the vector regardless of where they
#' occurred, misaligning the smoothed values with x whenever zeros were
#' interspersed. Positions are now preserved.
#'
#' @param x numeric predictor (same length as y)
#' @param y numeric response, assumed non-negative
#' @return numeric vector, same length/order as y, all values >= 0
get_smooth <- function(x, y) {
  if (max(y) < 0.05) {
    return(y)
  }
  pos <- y > 0
  lo <- loess(y[pos] ~ x[pos])
  predictions <- y
  predictions[pos] <- predict(lo)
  # loess can dip slightly below zero; clamp (pmax recycles the scalar 0)
  pmax(predictions, 0)
}
# Per-site vector behaviour: anthropophily, endophagy, and plot colour
anthro_endo_map <- data.table(Site_Name=c("aba", "kananga", "kasama", "djibo", "gode", "moine", "bajonapo", "karen"),
anthro=c(74.45, 65.02, 79.04, 76.6, 75, 75.78, 50, 50),
endo=c(80, 85, 80.38, 55.6, 50, 52.73, 60, 24.6),
map_color=c("#00A08A", "#D71B5A", "#F2AD00", "#F98400", "#902E57", "#5392C2", "#7DB548", "#8971B3"))
# % of all bites that are on humans AND indoors
anthro_endo_map[, human_indoor:= round((anthro*endo)/100, 1)]
atsb_runs <- c("MAP_For_Symposium_ATSB_Higher_Existing_Intervention.csv",
"MAP_For_Symposium_ATSB_Lower_Intervention.csv",
"MAP_For_Symposium_ATSB_Lower_Existing_Intervention.csv",
"MAP_For_Symposium_ATSB_No_Existing_Intervention.csv")
initial <- fread(file.path(main_dir, "../initial/MAP_II_New_Sites_Burnin.csv"))
prelim_data <- rbindlist(lapply(atsb_runs, function(fname){fread(file.path(main_dir, fname))}))
# Keep only low ATSB effects (<= 5%) and attach initial prevalence
all_data <- merge(prelim_data[ATSB_Initial_Effect<=0.05], initial, by=c("Site_Name", "Run_Number", "x_Temporary_Larval_Habitat"), all=T)
all_data[, Run_Number:=factor(Run_Number)]
all_data[, Intervention:= paste0("Baseline:", ITN_Coverage*100, "%, ", "ATSB Initial Kill:", ATSB_Initial_Effect*100, "%")]
# Mean prevalence across seeds per site/habitat/intervention cell
all_data[, mean_initial:= mean(initial_prev), by=list(Site_Name, x_Temporary_Larval_Habitat, Intervention)]
all_data[, mean_final:=mean(final_prev), by=list(Site_Name, x_Temporary_Larval_Habitat, Intervention)]
all_data = merge(all_data, anthro_endo_map, by="Site_Name", all.x=T)
# Seed-level min/max envelope of final prevalence per cell
minmaxes <- all_data[, list(mean_initial=unique(mean_initial),
mean_final=unique(mean_final),
min_final=min(final_prev),
max_final=max(final_prev)),
by=list(Site_Name, x_Temporary_Larval_Habitat, Intervention)]
# Loess-smooth the envelope and mean per site x intervention.
# NOTE(review): 'subset' shadows base::subset; := modifies by reference.
minmaxes_smooth <- lapply(unique(minmaxes$Site_Name), function(site_name){
sub_list <- lapply(unique(minmaxes$Intervention), function(int_name){
subset <- minmaxes[Site_Name==site_name & Intervention==int_name]
subset[, smooth_min:= get_smooth(mean_initial, min_final)]
subset[, smooth_max:= get_smooth(mean_initial, max_final)]
subset[, smooth_mean:= get_smooth(mean_initial, mean_final)]
})
sub_list <- rbindlist(sub_list)
})
minmaxes_smooth <- rbindlist(minmaxes_smooth)
all_data <- merge(all_data, minmaxes_smooth, by=c("Site_Name", "x_Temporary_Larval_Habitat", "Intervention",
"mean_initial", "mean_final"), all=T)
all_data[, human_indoor:=as.factor(human_indoor)]
# Colour palette ordered by indoor-biting fraction (karen/bajonapo excluded)
these_colors <- unique(all_data[!Site_Name %in% c("karen", "bajonapo"), list(human_indoor, map_color)])
these_colors <- these_colors[order(human_indoor)]$map_color
x_temps <- unique(all_data$x_Temporary_Larval_Habitat)
# overview.pdf: smoothed mean final vs. initial prevalence per site at 40% ITN
# coverage and no ATSB, with smoothed min/max ribbons; karen/bajonapo excluded.
pdf(file.path(plot_dir, "overview.pdf"), width=7, height=5)
ggplot(all_data[ITN_Coverage==0.4 & ATSB_Initial_Effect==0 & !Site_Name %in% c("karen", "bajonapo")], aes(x=mean_initial, y=mean_final)) +
# y = x reference line (geom_abline defaults: slope 1, intercept 0)
geom_abline(size=1.5, alpha=0.5)+
geom_ribbon(aes(ymin=smooth_min, ymax=smooth_max, fill=human_indoor, group=Site_Name), alpha=0.25) +
geom_line(aes(color=human_indoor, group=Site_Name), size=1.25) +
scale_color_manual(values=these_colors, name="Indoor Biting %") +
scale_fill_manual(values=these_colors, name="Indoor Biting %") +
xlim(0,0.85) +
ylim(0,0.85) +
theme(legend.position="none") +
coord_fixed() +
labs(x="Initial Prevalence",
y="Final Prevalence",
title="")
graphics.off()
# overview_points.pdf: same subset, but raw per-run points instead of the
# smoothed site-level curves.
pdf(file.path(plot_dir,"overview_points.pdf"), width=7, height=5)
ggplot(all_data[ITN_Coverage==0.4 & ATSB_Initial_Effect==0 & !Site_Name %in% c("karen", "bajonapo")], aes(x=initial_prev, y=final_prev)) +
geom_abline(size=1.5, alpha=0.5)+
geom_point(aes(color=human_indoor, group=Site_Name), size=1.5, alpha=0.75) +
scale_color_manual(values=these_colors, name="Indoor Biting %") +
xlim(0,0.85) +
ylim(0,0.85) +
theme(legend.position="none") +
coord_fixed() +
labs(x="Initial Prevalence",
y="Final Prevalence",
title="")
graphics.off()
# twosite_points.pdf: raw per-run points restricted to the "aba" and "gode"
# sites, with a hard-coded two-color palette.
pdf(file.path(plot_dir,"twosite_points.pdf"), width=7, height=5)
ggplot(all_data[ITN_Coverage==0.4 & ATSB_Initial_Effect==0 & Site_Name %in% c("aba", "gode")], aes(x=initial_prev, y=final_prev)) +
geom_abline(size=1.5, alpha=0.5)+
geom_point(aes(color=human_indoor, group=Site_Name), size=1.5, alpha=0.75) +
scale_color_manual(values=c("#902E57","#00A08A"), name="Indoor Biting %") +
xlim(0,0.85) +
ylim(0,0.85) +
theme(legend.position="none") +
coord_fixed() +
labs(x="Initial Prevalence",
y="Final Prevalence",
title="")
graphics.off()
# aba_point_lower_init.pdf: per-run points for the "aba" site at a single
# (lower) larval-habitat multiplier, the 20th element of x_temps.
pdf(file.path(plot_dir, "aba_point_lower_init.pdf"), width=7, height=5)
ggplot(all_data[ITN_Coverage==0.4 & ATSB_Initial_Effect==0 &
Site_Name=="aba" & x_Temporary_Larval_Habitat==x_temps[20]], aes(x=initial_prev, y=final_prev)) +
geom_abline(size=1.5, alpha=0.5)+
geom_point(aes(color=human_indoor, group=Run_Number), size=1.5, alpha=0.75) +
scale_color_manual(values=unique(all_data[Site_Name=="aba"]$map_color), name="Indoor Biting %") +
xlim(0,0.85)+
ylim(0,0.85) +
theme(legend.position="none") +
coord_fixed() +
labs(x="Initial Prevalence",
y="Final Prevalence",
title="")
graphics.off()
# aba_point_higher_init.pdf: same as the lower-init plot, but at a higher
# larval-habitat multiplier (the 30th element of x_temps).
pdf(file.path(plot_dir, "aba_point_higher_init.pdf"), width=7, height=5)
ggplot(all_data[ITN_Coverage==0.4 & ATSB_Initial_Effect==0 &
Site_Name=="aba" & x_Temporary_Larval_Habitat==x_temps[30]], aes(x=initial_prev, y=final_prev)) +
geom_abline(size=1.5, alpha=0.5)+
geom_point(aes(color=human_indoor, group=Run_Number), size=1.5, alpha=0.75) +
scale_color_manual(values=unique(all_data[Site_Name=="aba"]$map_color), name="Indoor Biting %") +
xlim(0,0.85)+
ylim(0,0.85) +
theme(legend.position="none") +
coord_fixed() +
labs(x="Initial Prevalence",
y="Final Prevalence",
title="")
graphics.off()
# aba_point_all.pdf: per-run points for "aba" across all habitat multipliers.
pdf(file.path(plot_dir, "aba_point_all.pdf"), width=7, height=5)
ggplot(all_data[ITN_Coverage==0.4 & ATSB_Initial_Effect==0 &
Site_Name=="aba"], aes(x=initial_prev, y=final_prev)) +
geom_abline(size=1.5, alpha=0.5)+
geom_point(aes(color=human_indoor, group=Run_Number), size=1.5, alpha=0.75) +
scale_color_manual(values=unique(all_data[Site_Name=="aba"]$map_color), name="Indoor Biting %") +
xlim(0,0.85)+
ylim(0,0.85) +
theme(legend.position="none") +
coord_fixed() +
labs(x="Initial Prevalence",
y="Final Prevalence",
title="")
graphics.off()
# aba_line.pdf: smoothed mean final vs. initial prevalence for the "aba"
# site only, with the smoothed min/max envelope across runs as a ribbon.
# Fix: draw the ribbon BEFORE the line, matching the other line plots in
# this file, so the shaded band does not obscure the mean curve.
pdf(file.path(plot_dir, "aba_line.pdf"), width=7, height=5)
ggplot(all_data[ITN_Coverage==0.4 & ATSB_Initial_Effect==0 &
Site_Name=="aba"], aes(x=mean_initial, y=mean_final)) +
# y = x reference line (geom_abline defaults: slope 1, intercept 0)
geom_abline(size=1.5, alpha=0.5)+
geom_ribbon(aes(ymin=smooth_min, ymax=smooth_max, fill=human_indoor), alpha=0.25) +
geom_line(aes(color=human_indoor), size=1.25) +
scale_color_manual(values=unique(all_data[Site_Name=="aba"]$map_color), name="Indoor Biting %") +
scale_fill_manual(values=unique(all_data[Site_Name=="aba"]$map_color), name="Indoor Biting %") +
xlim(0,0.85)+
ylim(0,0.85) +
theme(legend.position="none") +
coord_fixed() +
labs(x="Initial Prevalence",
y="Final Prevalence",
title="")
graphics.off()
# baseline_40.pdf: all sites at 40% ITN coverage, no ATSB; smoothed curves
# plus min/max ribbons, Spectral palette, legend shown.
pdf(file.path(plot_dir, "baseline_40.pdf"), width=7, height=5)
ggplot(all_data[ITN_Coverage==0.4 & ATSB_Initial_Effect==0], aes(x=mean_initial, y=mean_final)) +
geom_abline(size=1.5, alpha=0.5)+
geom_ribbon(aes(ymin=smooth_min, ymax=smooth_max, fill=human_indoor, group=Site_Name), alpha=0.25) +
geom_line(aes(color=human_indoor, group=Site_Name), size=1.25) +
scale_color_manual(values=brewer.pal(8, "Spectral"), name="Indoor Biting %") +
scale_fill_manual(values=brewer.pal(8, "Spectral"), name="Indoor Biting %") +
xlim(0,0.85)+
ylim(0,0.85) +
theme(legend.position="right") +
coord_fixed() +
labs(x="Initial Prevalence",
y="Final Prevalence")
graphics.off()
# atsb_5.pdf: no ITN coverage, 5% initial ATSB kill; same plot structure.
pdf(file.path(plot_dir, "atsb_5.pdf"), width=7, height=5)
ggplot(all_data[ITN_Coverage==0 & ATSB_Initial_Effect==0.05], aes(x=mean_initial, y=mean_final)) +
geom_abline(size=1.5, alpha=0.5)+
geom_ribbon(aes(ymin=smooth_min, ymax=smooth_max, fill=human_indoor, group=Site_Name), alpha=0.25) +
geom_line(aes(color=human_indoor, group=Site_Name), size=1.25) +
scale_color_manual(values=brewer.pal(8, "Spectral"), name="Indoor Biting %") +
scale_fill_manual(values=brewer.pal(8, "Spectral"), name="Indoor Biting %") +
xlim(0,0.85)+
ylim(0,0.85) +
theme(legend.position="right") +
coord_fixed() +
labs(x="Initial Prevalence",
y="Final Prevalence")
graphics.off()
# baseline_40_atsb_5.pdf: combined intervention, 40% ITN + 5% ATSB.
pdf(file.path(plot_dir, "baseline_40_atsb_5.pdf"), width=7, height=5)
ggplot(all_data[ITN_Coverage==0.4 & ATSB_Initial_Effect==0.05], aes(x=mean_initial, y=mean_final)) +
geom_abline(size=1.5, alpha=0.5)+
geom_ribbon(aes(ymin=smooth_min, ymax=smooth_max, fill=human_indoor, group=Site_Name), alpha=0.25) +
geom_line(aes(color=human_indoor, group=Site_Name), size=1.25) +
scale_color_manual(values=brewer.pal(8, "Spectral"), name="Indoor Biting %") +
scale_fill_manual(values=brewer.pal(8, "Spectral"), name="Indoor Biting %") +
xlim(0,0.85)+
ylim(0,0.85) +
theme(legend.position="right") +
coord_fixed() +
labs(x="Initial Prevalence",
y="Final Prevalence")
graphics.off()
|
# Submission Prog2
## makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
## Create a matrix wrapper that can cache its inverse.
##
## Returns a list of four accessor functions closing over the matrix `x`
## and its cached inverse:
##   set(y)        -- replace the stored matrix and drop the stale cache
##   get()         -- return the stored matrix
##   setinverse(i) -- store an externally computed inverse in the cache
##   getinverse()  -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      # the stored matrix changed, so any cached inverse is now stale
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(Inverse) cached_inverse <<- Inverse,
    getinverse = function() cached_inverse
  )
}
##cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
#If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
#
## Return the inverse of the special "matrix" object created by
## makeCacheMatrix, computing it at most once.
##
## If an inverse is already cached it is returned directly (after emitting
## a message); otherwise the inverse is computed with solve(), stored via
## the object's setinverse(), and returned. Extra arguments in `...` are
## forwarded to solve(). The stored matrix is assumed to be invertible.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # cache miss: compute, then memoize for future calls
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
#Test with diag(3)
# ma <- diag(3)
# ma
# maa <- makeCacheMatrix(ma)
# maa$get()
# maa$getinverse()
# maa$setinverse()
# cacheSolve(maa)
# maa$getinverse()
| /cachematrix.R | no_license | tofguerrier/ProgrammingAssignment2 | R | false | false | 1,166 | r | # Submission Prog2
## makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
## makeCacheMatrix: build a special "matrix" object (a list of closures)
## that stores a matrix together with a cached copy of its inverse.
## Accessors returned:
##   set(y)        -- replace the matrix and clear the cached inverse
##   get()         -- return the matrix
##   setinverse(i) -- cache an inverse computed externally
##   getinverse()  -- return the cached inverse (NULL until set)
makeCacheMatrix <- function(x = matrix()) {
# invx holds the cached inverse; NULL means "not computed yet"
invx <- NULL
set <- function(y) {
x <<- y
# a new matrix invalidates any previously cached inverse
invx <<- NULL
}
get <- function() x
setinverse <- function(Inverse) invx <<- Inverse
getinverse<- function() invx
# expose the four accessors as a named list
list(set = set, get = get,
setinverse= setinverse,
getinverse= getinverse)
}
##cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
#If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
#
## cacheSolve: return the inverse of the special "matrix" created by
## makeCacheMatrix, computing it at most once.
## On a cache hit the stored inverse is returned immediately (with a
## message); otherwise solve() is called, the result cached, and returned.
## Extra arguments in ... are passed through to solve().
cacheSolve <- function(x, ...) {
invx <- x$getinverse()
if(!is.null(invx)) {
message("getting cached data")
# cache hit: skip recomputation
return(invx)
}
data <- x$get()
#For this assignment, assume that the matrix supplied is always invertible.
#Otherwise we should test.
invx <- solve(data, ...)
# memoize the freshly computed inverse for future calls
x$setinverse(invx)
invx
}
#Test with diag(3)
# ma <- diag(3)
# ma
# maa <- makeCacheMatrix(ma)
# maa$get()
# maa$getinverse()
# maa$setinverse()
# cacheSolve(maa)
# maa$getinverse()
|
library(shiny)
# Define UI for the miles-per-gallon (mtcars) application.
# Sidebar: choose the x variable for the mpg scatter plot and enter a weight
# for the linear-model prediction. Main panel: the rendered scatter plot.
# Fixes: corrected typos in user-facing text ("fonction" -> "function",
# "againts" -> "against", "it permits to predicted" -> "it predicts").
shinyUI(fluidPage(
  # Application title
  headerPanel("Hi friends, this is my first shiny application ! "),
  h4("Using the Motor Trend Car Road Tests dataset, this application builds scatter plots of Miles Per Gallon values as a function of a user-selected variable
      , displaying cylinder categories with colors;"),
  h4("Then, it predicts miles per gallon as a function of weight, using a simple linear model,
      with equation : miles per gallon = 37.28 - 5.34 * weight"),
  # Sidebar with controls to select the variable to plot against mpg
  # and to enter the weight used for the prediction
  sidebarPanel(
    h3('Choose the x variable to plot against mpg:'),
    # Radio labels map to mtcars column names; selected value is input$variable
    radioButtons("variable", "",
                 list(
                   "Weight"="wt",
                   "Displacement"="disp",
                   "HorsePower" = "hp",
                   "Rear"="drat",
                   "Qsec"="qsec",
                   "V/S"="vs",
                   "Transmission"="am",
                   "Gears" = "gear",
                   "Carburators"="carb"
                 )),
    h3('Prediction of Miles per Gallon as a function of the desired weight'),
    numericInput('WT', 'Enter the desired weight (lb/1000) between 1 and 6', 3, min = 1, max = 6, step = 0.1),
    h5('Predicted miles per gallon (mpg) '),
    # Server-side prediction rendered as text (output$prediction)
    verbatimTextOutput("prediction")
  ),
  # Show the plot of the requested variable against mpg
  mainPanel(
    # NOTE(review): textOutput("") references an empty output id -- looks like
    # a leftover placeholder for a caption; confirm intent with the server code.
    h3(textOutput("")),
    plotOutput("mpgPlot")
  )
))
| /ui.R | no_license | cdv04/FirstCdvShinyApp | R | false | false | 1,689 | r | library(shiny)
# Define UI for miles per gallon application
shinyUI(fluidPage(
# Application title
headerPanel("Hi friends, this is my first shiny application ! "),
#h3(""),
h4("Using the Motor Trend Car Road Tests dataset, this application builds scatter plots of Mile Per Gallon values in fonction of a user selected variable
, displaying cylinder categories with colors;"),
h4("Then, it permits to predicted miles per gallon in fonction of weight, using a simple linear model,
with equation : mile per gallon = 37.28 - 5.34 * weight"),
# Sidebar with controls to select the variable to plot against mpg
# and to specify whether outliers should be included
sidebarPanel(
h3('Choose the x variable to plot againts mpg:'),
radioButtons("variable", "",
list(
"Weight"="wt",
"Displacement"="disp",
"HorsePower" = "hp",
"Rear"="drat",
"Qsec"="qsec",
"V/S"="vs",
"Transmission"="am",
"Gears" = "gear",
"Carburators"="carb"
)),
h3('Prediction of Miles per Gallon in fonction of desired weight'),
numericInput('WT', 'Enter the desired weight (lb/1000) between 1 and 6', 3, min = 1, max = 6, step = 0.1),
#numericInput('id1', 'Numeric input, labeled id1', 0, min = 0, max = 10, step = 1),
h5('Predicted miles per gallon (mpg) '),
verbatimTextOutput("prediction")
),
# Show the caption and plot of the requested variable against mpg
mainPanel(
h3(textOutput("")),
plotOutput("mpgPlot")
)
))
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/combining.R
\name{names_all_same}
\alias{names_all_same}
\title{Check that names are all identical}
\usage{
names_all_same(datalist)
}
\arguments{
\item{datalist}{list of dataframes whose names must all be identical}
}
\value{
\code{TRUE} if the names of all dataframes in the list are identical, \code{FALSE} otherwise
}
\description{
Check that names are all identical
}
| /man/names_all_same.Rd | permissive | yosaralu/bwgtools | R | false | false | 406 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/combining.R
\name{names_all_same}
\alias{names_all_same}
\title{Check that names are all identical}
\usage{
names_all_same(datalist)
}
\arguments{
\item{datalist}{list of dataframes whose names must all be identical}
}
\value{
are the names identical? TRUE or FALSE
}
\description{
Check that names are all identical
}
|
########################################################################################
## GLLVM fourth corner model, with estimation done via Laplace and Variational approximation using TMB-package
## Original author: Jenni Niku
##########################################################################################
trait.TMB <- function(
y, X = NULL,TR=NULL,formula=NULL, num.lv = 2, family = "poisson",
Lambda.struc = "unstructured", Ab.struct = "unstructured", row.eff = FALSE, reltol = 1e-6, seed = NULL,
maxit = 1000, start.lvs = NULL, offset=NULL, sd.errors = FALSE,trace=FALSE,
link="logit",n.init=1,start.params=NULL,start0=FALSE,optimizer="optim",
starting.val="res",method="VA",randomX=NULL,Power=1.5,diag.iter=1, Ab.diag.iter = 0, dependent.row = FALSE,
Lambda.start=c(0.2, 0.5), jitter.var=0, yXT = NULL, scale.X = FALSE, randomX.start = "zero", beta0com = FALSE
,zeta.struc = "species", quad.start=0.01, start.struc="LV",quadratic=FALSE) {
if(is.null(X) && !is.null(TR)) stop("Unable to fit a model that includes only trait covariates")
if(!is.null(start.params)) starting.val <- "zero"
objrFinal <- optrFinal <- NULL
term <- NULL
n <- dim(y)[1]; p <- dim(y)[2];
y <- as.data.frame(y)
formula1 <- formula
beta0com0 = beta0com
if(method=="VA"){ link <- "probit"}
jitter.var.r <- 0
if(length(jitter.var)>1){
jitter.var.r <- jitter.var[2]
jitter.var <- jitter.var[1]
}
if(NCOL(X) < 1) stop("No covariates in the model, fit the model using gllvm(y,family=",family,"...)")
# change categorical variables to dummy variables
num.X <- 0
X.new <- NULL
if(!is.null(X)) {
num.X <- dim(X)[2]
for (i in 1:num.X) {
if(!is.factor(X[,i])) {
if(length(unique(X[,i]))>2){ Xi <- scale(X[,i], scale = scale.X, center = scale.X) } else { Xi <- X[,i] }
X[,i] <- Xi
X.new <- cbind(X.new,Xi); if(!is.null(colnames(X)[i])) colnames(X.new)[dim(X.new)[2]] <- colnames(X)[i]
} else {
dum <- model.matrix( ~ X[,i])
dum <- as.matrix(dum[, !(colnames(dum) %in% c("(Intercept)"))])
colnames(dum) <- paste(colnames(X)[i], levels(X[,i])[ - 1], sep = "")
X.new <- cbind(X.new, dum)
}
}
X.new <- data.frame(X.new);
}
num.T <- 0
T.new <- NULL
if(!is.null(TR)) {
num.T <- dim(TR)[2]
T.new <- matrix(0, p, 0)
if(num.T > 0){
for (i in 1 : num.T) {
#if(!is.factor(TR[,i]) && length(unique(TR[,i])) > 2) { #!!!
if(is.numeric(TR[,i]) && length(unique(TR[,i])) > 2) {
TR[,i] <- scale(TR[,i])
T.new <- cbind(T.new,scale(TR[,i], scale = scale.X, center = scale.X)); colnames(T.new)[dim(T.new)[2]] <- colnames(TR)[i]
} else {
if(!is.factor(TR[,i])) TR[,i] <- factor(TR[,i]) #!!!
dum <- model.matrix(~TR[,i]-1)
colnames(dum) <- paste(colnames(TR)[i],levels(TR[,i]),sep="")
T.new <- cbind(T.new,dum)
}
}
T.new <- data.matrix(T.new);
}
}
if(is.null(formula)){
n1 <- colnames(X)
n2 <- colnames(TR)
form1 <- paste("",n1[1],sep = "")
if(length(n1)>1){
for(i1 in 2:length(n1)){
form1 <- paste(form1,n1[i1],sep = "+")
}}
formula <- paste("y~",form1,sep = "")
formula <- paste(formula, form1,sep = " + (")
formula <- paste(formula, ") : (", sep = "")
formula <- paste(formula, n2[1], sep = "")
if(length(n2) > 1){
for(i2 in 2:length(n2)){
formula <- paste(formula, n2[i2], sep = "+")
}}
formula1 <- paste(formula, ")", sep = "")
formula <- formula(formula1)
}
if(!is.null(X) || !is.null(TR)){
yX <- cbind(cbind(X,id = 1:nrow(y))[rep(1:nrow(X), times=ncol(y)),], time = rep(1:ncol(y), each= nrow(y)), y = c(as.matrix(y))) #reshape(data.frame(cbind(y, X)), direction = "long", varying = colnames(y), v.names = "y")
TR2 <- data.frame(time = 1:p, TR)
if(is.null(yXT)){
yXT <- merge(yX, TR2, by = "time")
}
data <- yXT
m1 <- model.frame(formula, data = data)
term <- terms(m1)
Xd <- as.matrix(model.matrix(formula, data = data))
nXd <- colnames(Xd)
Xd <- as.matrix(Xd[, !(nXd %in% c("(Intercept)"))])
colnames(Xd) <- nXd[!(nXd %in% c("(Intercept)"))]
if(!is.null(X.new)) fx <- apply(matrix(sapply(colnames(X.new), function(x){grepl(x, colnames(Xd))}), ncol(Xd), ncol(X.new)), 2, any)
ft <- NULL;
if(NCOL(T.new) > 0) {
ft <- apply(matrix(sapply(colnames(T.new), function(x){ grepl(x, colnames(Xd)) }), ncol(Xd), ncol(T.new)), 2, any)
}
X1 <- as.matrix(X.new[,fx]);
TR1 <- as.matrix(T.new[,ft]);
colnames(X1) <- colnames(X.new)[fx]; colnames(TR1)<-colnames(T.new)[ft];
nxd <- colnames(Xd)
formulab <- paste("~",nxd[1],sep = "");
if(length(nxd)>1) for(i in 2:length(nxd)) formulab <- paste(formulab,nxd[i],sep = "+")
formula1 <- formulab
}
if(!(family %in% c("poisson","negative.binomial","binomial","tweedie","ZIP", "gaussian", "ordinal", "gamma", "exponential")))
stop("Selected family not permitted...sorry!")
if(!(Lambda.struc %in% c("unstructured","diagonal")))
stop("Lambda matrix (covariance of variational distribution for latent variable) not permitted...sorry!")
if(num.lv == 1) Lambda.struc <- "diagonal" ## Prevents it going to "unstructured" loops and causing chaos
trial.size <- 1
y <- as.matrix(y)
if(!is.numeric(y)) stop("y must a numeric. If ordinal data, please convert to numeric with lowest level equal to 1. Thanks")
if(family == "ordinal") {
y00<-y
if(min(y)==0){ y=y+1}
max.levels <- apply(y,2,function(x) length(min(x):max(x)))
if(any(max.levels == 1) || all(max.levels == 2))
stop("Ordinal data requires all columns to have at least has two levels. If all columns only have two levels, please use family == binomial instead. Thanks")
if(any(!apply(y,2,function(x)all(diff(sort(unique(x)))==1)))&zeta.struc=="species")
stop("Can't fit ordinal model if there are species with missing classes. Please reclassify per species or use zeta.struc = `common` ")
if(any(diff(sort(unique(c(y))))!=1)&zeta.struc=="common")
stop("Can't fit ordinal model if there are missing classes. Please reclassify.")
}
if(is.null(rownames(y))) rownames(y) <- paste("Row",1:n,sep="")
if(is.null(colnames(y))) colnames(y) <- paste("Col",1:p,sep="")
if(!is.null(X)) { if(is.null(colnames(X))) colnames(X) <- paste("x",1:ncol(X),sep="") }
out <- list(y = y, X = X1, TR = TR1, num.lv = num.lv, row.eff = row.eff, logL = Inf, family = family, offset=offset,randomX=randomX,X.design=Xd,terms=term, method = method)
if(is.null(formula) && is.null(X) && is.null(TR)){formula ="~ 1"}
n.i <- 1;
if(n.init > 1) seed <- sample(1:10000, n.init)
while(n.i <= n.init){
randomXb <- NULL
if(!is.null(randomX)){
#
if(num.lv>0 && randomX.start == "res" && starting.val == "res") {randomXb <- randomX}
#
xb <- as.matrix(model.matrix(randomX, data = data.frame(X)))
rnam <- colnames(xb)[!(colnames(xb) %in% c("(Intercept)"))]
xb <- as.matrix(xb[, rnam]); #as.matrix(X.new[, rnam])
if(NCOL(xb) == 1) colnames(xb) <- rnam
bstart <- start.values.randomX(y, xb, family, starting.val = randomX.start, power = Power)
Br <- bstart$Br
sigmaB <- bstart$sigmaB
sigmaij <- rep(0,(ncol(xb)-1)*ncol(xb)/2)
# method <- "LA"
# xb <- as.matrix(model.matrix(randomX,data = X.new))
# xb <- as.matrix(xb[,!(colnames(xb) %in% c("(Intercept)"))])
# Br <- matrix(0, ncol(xb), p)
# sigmaB <- diag(ncol(xb))
} else {
xb <- Br <- matrix(0); sigmaB <- diag(1); sigmaij <- 0; Abb <- 0
}
num.X <- dim(X)[2]
num.T <- dim(TR)[2]
phi<-phis <- NULL
sigma <- 1
phi <- phis <- NULL;
if(n.init > 1 && trace) cat("initial run ",n.i,"\n");
res <- start.values.gllvm.TMB(y = y, X = X1, TR = TR1, family = family, offset=offset, trial.size = trial.size, num.lv = num.lv, start.lvs = start.lvs, seed = seed[n.i],starting.val=starting.val,power=Power,formula = formula, jitter.var=jitter.var, #!!!
yXT=yXT, row.eff = row.eff, TMB=TRUE, link=link, randomX=randomXb, beta0com = beta0com0, zeta.struc = zeta.struc)
if(is.null(start.params)){
beta0 <- res$params[,1]
# common env params or different env response for each spp
B <- NULL
if(!is.null(TR) && !is.null(X)) {
B <- c(res$B)[1:ncol(Xd)]
if(any(is.na(B))) B[is.na(B)] <- 0
}
row.params <- NULL;
if(row.eff!=FALSE){
row.params <- res$row.params
if (row.eff == "random") {
sigma <- sd(row.params);
}
}
vameans <- theta <- lambda <- NULL
if(num.lv > 0) {
if(!is.null(randomXb) && family != "ordinal"){
Br <- res$Br
sigmaB <- (res$sigmaB)
if(length(sigmaB)>1) sigmaij <- rep(0,length(res$sigmaij))
if(randomX.start == "res" && !is.null(res$fitstart)) { ##!!!
res$sigmaij <- sigmaij <- res$fitstart$TMBfnpar[names(res$fitstart$TMBfnpar) == "sigmaij"]
}
}
if(start.struc=="LV"&quadratic!=FALSE){
lambda2 <- matrix(quad.start, ncol = num.lv, nrow = 1)
}else if(start.struc=="all"&quadratic!=FALSE){
lambda2 <- matrix(quad.start, ncol = num.lv, nrow = p)
}else if(quadratic==FALSE){
lambda2 <- 0
}
if(quadratic != FALSE){
res$params <- cbind(res$params, matrix(lambda2,nrow=p,ncol=num.lv))
}else{
res$params <- res$params
}
vameans <- res$index
theta <- as.matrix(res$params[,(ncol(res$params) - num.lv + 1):ncol(res$params)])#fts$coef$theta#
theta[upper.tri(theta)] <- 0
if(Lambda.struc == "unstructured") {
lambda <- array(NA,dim=c(n,num.lv,num.lv))
for(i in 1:n) { lambda[i,,] <- diag(rep(1,num.lv)) }
}
if(Lambda.struc == "diagonal") {
lambda <- matrix(1,n,num.lv)
}
zero.cons <- which(theta == 0)
if(n.init > 1 && !is.null(res$mu) && starting.val == "res" && family != "tweedie") {
if(family=="ZIP") {
lastart <- FAstart(res$mu, family="poisson", y=y, num.lv = num.lv, jitter.var = jitter.var[1])
} else {
lastart <- FAstart(res$mu, family=family, y=y, num.lv = num.lv, phis = res$phi, jitter.var = jitter.var[1])
}
theta <- lastart$gamma#/lastart$gamma
vameans<-lastart$index#/max(lastart$index)
}
}
} else{
if(all(dim(start.params$y)==dim(y)) && is.null(X)==is.null(start.params$X) && is.null(T)==is.null(start.params$TR) && row.eff == start.params$row.eff){
beta0 <- start.params$params$beta0
# common env params or different env response for each spp
B <- NULL
if(!is.null(TR) && !is.null(X)) {
B <- start.params$params$B;
}
fourth <- inter <- NULL; if(!is.null(TR) ) inter <- start.params$params$fourth # let's treat this as a vector (vec(B'))'
vameans <- theta <- lambda <- NULL
row.params <- NULL
if(row.eff %in% c("fixed","random",TRUE)) {
if(row.eff == start.params$row.eff){
res$row.params <- row.params <- start.params$params$row.params
if(row.eff %in% c("random")) res$sigma <- sigma <- start.params$params$sigma
} else {
row.params <- res$row.params
}
}
if(num.lv > 0) {
theta <- (start.params$params$theta) ## LV coefficients
vameans <- matrix(start.params$lvs, ncol = num.lv);
lambda <- start.params$A
if(class(start.params)[2]=="gllvm.quadratic" && quadratic != FALSE){
lambda2 <- start.params$params$theta[,-c(1:start.params$num.lv),drop=F]
}else if(class(start.params)[1]=="gllvm" && quadratic != FALSE){
if(start.struc=="LV"|quadratic=="LV"){
lambda2 <- matrix(quad.start, ncol = num.lv, nrow = 1)
}else if(start.struc=="all"&quadratic=="all"){
lambda2 <- matrix(quad.start, ncol = num.lv, nrow = p)
}
}
}
if(family == "negative.binomial" && start.params$family == "negative.binomial" && !is.null(start.params$params$phi)) {res$phi<-start.params$params$phi}
} else { stop("Model which is set as starting parameters isn't the suitable you are trying to fit. Check that attributes y, X, TR and row.eff match to each other.");}
}
if (is.null(offset)) offset <- matrix(0, nrow = n, ncol = p)
if(family == "negative.binomial") {
phis <- res$phi
if (any(phis > 10))
phis[phis > 50] <- 50
if (any(phis < 0.02))
phis[phis < 0.02] <- 0.02
res$phi <- phis
phis <- 1/phis
}
if(family == "tweedie") {
phis <- res$phi;
if(any(phis>10)) phis[phis>10]=10;
if(any(phis<0.10))phis[phis<0.10]=0.10;
phis= (phis)
}
if (family == "ZIP") {
phis <- (colMeans(y == 0) * 0.98) + 0.01;
phis <- phis / (1 - phis)
} # ZIP probability
# if (family %in% c("gaussian", "gamma")) {
# phis <- res$phi
# }
if(family=="ordinal"){
K = max(y00)-min(y00)
if(zeta.struc=="species"){
zeta <- c(t(res$zeta[,-1]))
zeta <- zeta[!is.na(zeta)]
}else{
zeta <- res$zeta[-1]
}
}else{
zeta = 0
}
if(jitter.var.r>0){
if(row.eff == "random") row.params <- row.params + rnorm(n, 0, sd = sqrt(jitter.var.r));
if(!is.null(randomX)) Br <- Br + t(mvtnorm::rmvnorm(p, rep(0, nrow(Br)),diag(nrow(Br))*jitter.var.r));
}
q <- num.lv
a <- c(beta0)
if(num.lv > 0) {
# diag(theta) <- log(diag(theta)) # !!!
theta <- theta[lower.tri(theta, diag = TRUE)]
u <- vameans
}
if(!is.null(phis)) {phi=(phis)} else {phi <- rep(1,p)}
q <- num.lv
if(!is.null(row.params)){ r0 <- row.params} else {r0 <- rep(0, n)}
if(row.eff == "random"){ nlvr<-num.lv+1 } else {nlvr=num.lv}
if(row.eff=="fixed"){xr <- matrix(1,1,p)} else {xr <- matrix(0,1,p)}
# set starting values for variational distribution covariances
if(nlvr > 0){
if(Lambda.struc=="diagonal" || diag.iter>0){
Au <- log(rep(Lambda.start[1],nlvr*n)) #
} else{
Au <- c(log(rep(Lambda.start[1],nlvr*n)),rep(0,nlvr*(nlvr-1)/2*n))
}
} else { Au <- 0}
if(length(Lambda.start)<2){ Ar <- rep(1,n)} else {Ar <- rep(Lambda.start[2],n)}
if(!is.null(randomX)){
if(length(Lambda.start)>2) {
a.var <- Lambda.start[3];
} else {a.var <- 0.5;}
if(randomX.start == "res"){ # !!!! && !is.null(res$fitstart$Ab)
if(Ab.struct == "diagonal" || Ab.diag.iter>0){
Abb <- c(log(c(apply(res$fitstart$Ab,1, diag))))
} else {
Abb <- c(log(c(apply(res$fitstart$Ab,1, diag))), rep(0, ncol(xb) * (ncol(xb) - 1) / 2 * p))
}
res$Br <- Br
res$Ab <- c(apply(res$fitstart$Ab,1, diag))
} else{ #!!!
if(Ab.struct == "diagonal" || Ab.diag.iter>0){
Abb <- c(log(rep(a.var, ncol(xb) * p)))
} else {
Abb <- c(log(rep(a.var, ncol(xb) * p)), rep(0, ncol(xb) * (ncol(xb) - 1) / 2 * p))
}
} #!!!
} else { Abb <- 0 }
optr<-NULL
timeo<-NULL
se <- NULL
map.list <- list()
# if(row.eff==FALSE) map.list$r0 <- factor(rep(NA,n))
if(family %in% c("poisson","binomial","ordinal","exponential")) map.list$lg_phi <- factor(rep(NA,p))
if(family != "ordinal") map.list$zeta <- factor(NA)
randoml=c(0,0)
# For Laplace method, specify random paramteters to randomp
randomp= NULL #c("u","Br")
if(num.lv>0 || row.eff == "random") {randomp <- c(randomp,"u")}
# family settings
extra <- c(0,1)
if(family == "poisson") { familyn=0}
if(family == "negative.binomial") { familyn=1}
if(family == "binomial") {
familyn <- 2;
if(link=="probit") extra[1] <- 1
}
if(family == "gaussian") {familyn=3}
if(family == "gamma") {familyn=4}
if(family == "tweedie"){ familyn <- 5; extra[1] <- Power}
if(family == "ZIP"){ familyn <- 6;}
if(family == "ordinal") {familyn=7}
if(family == "exponential") {familyn=8}
if(beta0com){
extra[2] <- 0
Xd<-cbind(1,Xd)
a <- a*0
B<-c(mean(a),B)
}
# Specify parameter list, data.list and map.list
if(!is.null(randomX)){
randoml[2]=1
randomp <- c(randomp,"Br")
res$Br <- Br
res$sigmaB <- sigmaB
} else {
map.list$Br = factor(NA)
map.list$sigmaB = factor(NA)
map.list$sigmaij = factor(NA)
map.list$Abb = factor(NA)
}
if(quadratic==FALSE){
map.list$lambda2 <- factor(NA)
}
if(row.eff=="random"){
randoml[1] <- 1
if(dependent.row) sigma<-c(sigma[1], rep(0, num.lv))
if(num.lv>0){
u<-cbind(r0,u)
} else {
u<-cbind(r0)
}
} else {
sigma=0
map.list$log_sigma <- factor(NA)
}
if(num.lv==0) {
theta = 0;
lambda2 <- 0
map.list$lambda = factor(NA)
map.list$lambda2 = factor(NA)
if(row.eff != "random") {
u = matrix(0)
map.list$u = factor(NA)
map.list$Au = factor(NA)
}
}
if(starting.val!="zero" && start.struc != "LV" && quadratic == TRUE && num.lv>0 && method == "VA"){
map.list2 <- map.list
map.list2$r0 = factor(rep(NA, length(r0)))
map.list2$b = factor(rep(NA, length(rbind(a))))
map.list2$B = factor(rep(NA, length(B)))
map.list2$Br = factor(rep(NA,length(Br)))
map.list2$lambda = factor(rep(NA, length(theta)))
map.list2$u = factor(rep(NA, length(u)))
map.list2$lg_phi = factor(rep(NA, length(phi)))
map.list2$sigmaB = factor(rep(NA,length(sigmaB)))
map.list2$sigmaij = factor(rep(NA,length(sigmaij)))
map.list2$log_sigma = factor(rep(NA, length(sigma)))
map.list2$Au = factor(rep(NA, length(Au)))
map.list2$Abb = factor(rep(NA, length(Abb)))
map.list2$zeta = factor(rep(NA, length(zeta)))
parameter.list = list(r0=matrix(r0), b = rbind(a), B=matrix(B), Br=Br, lambda = theta, lambda2 = t(lambda2), u = u, lg_phi=log(phi), sigmaB=log(sqrt(diag(sigmaB))), sigmaij=sigmaij, log_sigma=c(sigma), Au=Au, Abb=Abb, zeta=zeta)
objr <- TMB::MakeADFun(
data = list(y = y, x = Xd, xr=xr, xb=xb, offset=offset, num_lv = num.lv, family=familyn, extra=extra, quadratic = 1, method=0, model=1, random=randoml, zetastruc = ifelse(zeta.struc=="species",1,0)), silent=!trace,
parameters = parameter.list, map = map.list2,
inner.control=list(mgcmax = 1e+200,maxit = maxit),
DLL = "gllvm")
if(optimizer=="nlminb") {
timeo <- system.time(optr <- try(nlminb(objr$par, objr$fn, objr$gr,control = list(rel.tol=reltol,iter.max=maxit,eval.max=maxit)),silent = TRUE))
}
if(optimizer=="optim") {
timeo <- system.time(optr <- try(optim(objr$par, objr$fn, objr$gr,method = "BFGS",control = list(reltol=reltol,maxit=maxit),hessian = FALSE),silent = TRUE))
}
lambda2 <- matrix(optr$par, byrow = T, ncol = num.lv, nrow = p)
if(inherits(optr,"try-error")) warning(optr[1]);
}
# Call makeADFun
if(method == "VA" && (num.lv>0 || row.eff=="random" || !is.null(randomX))){
parameter.list = list(r0=matrix(r0), b = rbind(a), B=matrix(B), Br=Br, lambda = theta, lambda2 = t(lambda2), u = u, lg_phi=log(phi), sigmaB=log(sqrt(diag(sigmaB))), sigmaij=sigmaij, log_sigma=c(sigma), Au=Au, Abb=Abb, zeta=zeta)
objr <- TMB::MakeADFun(
data = list(y = y, x = Xd, xr=xr, xb=xb, offset=offset, num_lv = num.lv, quadratic = ifelse(quadratic!=FALSE,1,0), family=familyn, extra=extra, method=0, model=1, random=randoml, zetastruc = ifelse(zeta.struc=="species",1,0)), silent=!trace,
parameters = parameter.list, map = map.list,
inner.control=list(mgcmax = 1e+200,maxit = maxit),
DLL = "gllvm")
} else {
Au=0; Abb=0
map.list$Au <- map.list$Abb <- factor(NA)
parameter.list = list(r0=matrix(r0), b = rbind(a), B=matrix(B), Br=Br, lambda = theta, lambda2 = t(lambda2), u = u, lg_phi=log(phi), sigmaB=log(sqrt(diag(sigmaB))), sigmaij=sigmaij, log_sigma=c(sigma), Au=Au, Abb=Abb, zeta=zeta)
objr <- TMB::MakeADFun(
data = list(y = y, x = Xd,xr=xr, xb=xb, offset=offset, num_lv = num.lv, quadratic = 0, family=familyn,extra=extra,method=1,model=1,random=randoml, zetastruc = ifelse(zeta.struc=="species",1,0)), silent=!trace,
parameters = parameter.list, map = map.list,
inner.control=list(mgcmax = 1e+200,maxit = maxit,tol10=0.01),
random = randomp, DLL = "gllvm")
}
if(optimizer=="nlminb") {
timeo <- system.time(optr <- try(nlminb(objr$par, objr$fn, objr$gr,control = list(rel.tol=reltol,iter.max=maxit,eval.max=maxit)),silent = TRUE))
}
if(optimizer=="optim") {
timeo <- system.time(optr <- try(optim(objr$par, objr$fn, objr$gr,method = "BFGS",control = list(reltol=reltol,maxit=maxit),hessian = FALSE),silent = TRUE))
}
if(inherits(optr,"try-error")) warning(optr[1]);
if(diag.iter>0 && Lambda.struc=="unstructured" && method =="VA" && (nlvr>0 || !is.null(randomX)) && !inherits(optr,"try-error")){
objr1 <- objr
optr1 <- optr
param1 <- optr$par
nam <- names(param1)
r1 <- matrix(param1[nam=="r0"])
b1 <- rbind(param1[nam=="b"])
B1 <- matrix(param1[nam=="B"])
if(!is.null(randomX)) {
Br1 <- matrix(param1[nam=="Br"], ncol(xb), p) #!!!
sigmaB1 <- param1[nam=="sigmaB"]
sigmaij1 <- param1[nam=="sigmaij"]*0
Abb <- param1[nam=="Abb"]
if(Ab.diag.iter>0 && Ab.struct == "unstructured")
Abb <- c(Abb, rep(0,ncol(xb)*(ncol(xb)-1)/2*p))
} else {
Br1 <- Br
sigmaB1 <- sigmaB
sigmaij1 <- sigmaij
}
if(nlvr>0) {
lambda1 <- param1[nam=="lambda"];
u1 <- matrix(param1[nam=="u"],n,nlvr)
Au<- c(pmax(param1[nam=="Au"],rep(log(1e-6), nlvr*n)), rep(0,nlvr*(nlvr-1)/2*n))
if (quadratic=="LV" | quadratic == T && start.struc == "LV"){
lambda2 <- matrix(param1[nam == "lambda2"], byrow = T, ncol = num.lv, nrow = 1)#In this scenario we have estimated two quadratic coefficients before
}else if(quadratic == T){
lambda2 <- matrix(param1[nam == "lambda2"], byrow = T, ncol = num.lv, nrow = p)
}
} else {u1 <- u}
if(num.lv==0) {lambda1 <- 0; }
if(family %in% c("poisson","binomial","ordinal","exponential")){ lg_phi1 <- log(phi)} else {lg_phi1 <- param1[nam=="lg_phi"]}
if(row.eff == "random"){lg_sigma1 <- param1[nam=="log_sigma"]} else {lg_sigma1 = 0}
if(family == "ordinal"){ zeta <- param1[nam=="zeta"] } else { zeta <- 0 }
# data = list(y = y, x = Xd, xr=xr, xb=xb, offset=offset, num_lv = num.lv, family=familyn, extra=extra, method=0, model=1, random=randoml, zetastruc = ifelse(zeta.struc=="species",1,0))
# data = list(y = y, x = Xd, xr=xr, xb=xb, offset=offset, num_lv = num.lv, family=familyn, extra=extra, method=0, model=1, random=randoml, zetastruc = ifelse(zeta.struc=="species",1,0))
# data = list(y = y, x = Xd, xr=xr, xb=xb, offset=offset, num_lv = num.lv, family=familyn, extra=extra, method=0, model=1, random=randoml, zetastruc = ifelse(zeta.struc=="species",1,0))
# parameters = list(r0=r1, b = b1, B=B1, Br=Br1, lambda = lambda1, u = u1, lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=lg_sigma1, Au=Au1, Abb=Abb1, zeta=zeta)
# parameters = list(r0=r1, b = b1, B=B1, Br=Br1, lambda = 0,u = matrix(0), lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=lg_sigma1, Au=0, Abb=Abb1, zeta=zeta)
# parameters = list(r0=r1, b = b1, B=B1, Br=Br1, lambda = lambda1, u = u1, lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=0, Au=Au1, Abb=Abb1, zeta=zeta)
parameter.list = list(r0=r1, b = b1, B=B1, Br=Br1, lambda = lambda1, lambda2 = t(lambda2), u = u1, lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=lg_sigma1, Au=Au, Abb=Abb, zeta=zeta)
# if(nlvr>0 || !is.null(randomX)){
# if(nlvr>0){
# parameter.list = list(r0=r1, b = b1, B=B1, Br=Br1, lambda = lambda1, u = u1, lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=lg_sigma1, Au=Au1, Abb=Abb1, zeta=zeta)
# } else {
# parameter.list = list(r0=r1, b = b1, B=B1, Br=Br, lambda = 0,u = matrix(0), lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=lg_sigma1, Au=0, Abb=Abb1, zeta=zeta)
# }
# } else {
# parameter.list = list(r0=r1, b = b1, B=B1, Br=Br1, lambda = lambda1, u = u1, lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=0, Au=Au1, Abb=Abb1, zeta=zeta)
# }
data.list = list(y = y, x = Xd, xr=xr, xb=xb, offset=offset, num_lv = num.lv, quadratic = ifelse(quadratic!=FALSE&num.lv>0,1,0), family=familyn, extra=extra, method=0, model=1, random=randoml, zetastruc = ifelse(zeta.struc=="species",1,0))
objr <- TMB::MakeADFun(
data = data.list, silent=!trace,
parameters = parameter.list, map = map.list,
inner.control=list(mgcmax = 1e+200,maxit = 1000),
DLL = "gllvm")
if(optimizer=="nlminb") {
timeo <- system.time(optr <- try(nlminb(objr$par, objr$fn, objr$gr,control = list(rel.tol=reltol,iter.max=maxit,eval.max=maxit)),silent = TRUE))
}
if(optimizer=="optim") {
timeo <- system.time(optr <- try(optim(objr$par, objr$fn, objr$gr,method = "BFGS",control = list(reltol=reltol,maxit=maxit),hessian = FALSE),silent = TRUE))
}
if(inherits(optr, "try-error")){optr <- optr1; objr <- objr1; Lambda.struc <- "diagonal"}
}
if(!inherits(optr,"try-error") && start.struc=="LV" && quadratic == TRUE && method == "VA"){
objr1 <- objr
optr1 <- optr
param1 <- optr$par
nam <- names(param1)
r1 <- matrix(param1[nam=="r0"])
b1 <- rbind(param1[nam=="b"])
B1 <- matrix(param1[nam=="B"])
if(!is.null(randomX)) {
Br1 <- matrix(param1[nam=="Br"], ncol(xb), p) #!!!
sigmaB1 <- param1[nam=="sigmaB"]
sigmaij1 <- param1[nam=="sigmaij"]*0
Abb <- param1[nam=="Abb"]
if(Ab.diag.iter>0 && Ab.struct == "unstructured")
Abb <- c(Abb, rep(0,ncol(xb)*(ncol(xb)-1)/2*p))
} else {
Br1 <- Br
sigmaB1 <- sigmaB
sigmaij1 <- sigmaij
}
lambda1 <- param1[nam=="lambda"];
u1 <- matrix(param1[nam=="u"],n,nlvr)
Au<- param1[nam=="Au"]
lambda2 <- abs(matrix(param1[nam == "lambda2"], byrow = T, ncol = num.lv, nrow = p))
if(family %in% c("poisson","binomial","ordinal","exponential")){ lg_phi1 <- log(phi)} else {lg_phi1 <- param1[nam=="lg_phi"]}
if(row.eff == "random"){lg_sigma1 <- param1[nam=="log_sigma"]} else {lg_sigma1 = 0}
if(family == "ordinal"){ zeta <- param1[nam=="zeta"] } else { zeta <- 0 }
# data = list(y = y, x = Xd, xr=xr, xb=xb, offset=offset, num_lv = num.lv, family=familyn, extra=extra, method=0, model=1, random=randoml, zetastruc = ifelse(zeta.struc=="species",1,0))
# data = list(y = y, x = Xd, xr=xr, xb=xb, offset=offset, num_lv = num.lv, family=familyn, extra=extra, method=0, model=1, random=randoml, zetastruc = ifelse(zeta.struc=="species",1,0))
# data = list(y = y, x = Xd, xr=xr, xb=xb, offset=offset, num_lv = num.lv, family=familyn, extra=extra, method=0, model=1, random=randoml, zetastruc = ifelse(zeta.struc=="species",1,0))
# parameters = list(r0=r1, b = b1, B=B1, Br=Br1, lambda = lambda1, u = u1, lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=lg_sigma1, Au=Au1, Abb=Abb1, zeta=zeta)
# parameters = list(r0=r1, b = b1, B=B1, Br=Br1, lambda = 0,u = matrix(0), lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=lg_sigma1, Au=0, Abb=Abb1, zeta=zeta)
# parameters = list(r0=r1, b = b1, B=B1, Br=Br1, lambda = lambda1, u = u1, lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=0, Au=Au1, Abb=Abb1, zeta=zeta)
parameter.list = list(r0=r1, b = b1, B=B1, Br=Br1, lambda = lambda1, lambda2 = t(lambda2), u = u1, lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=lg_sigma1, Au=Au, Abb=Abb, zeta=zeta)
# if(nlvr>0 || !is.null(randomX)){
# if(nlvr>0){
# parameter.list = list(r0=r1, b = b1, B=B1, Br=Br1, lambda = lambda1, u = u1, lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=lg_sigma1, Au=Au1, Abb=Abb1, zeta=zeta)
# } else {
# parameter.list = list(r0=r1, b = b1, B=B1, Br=Br, lambda = 0,u = matrix(0), lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=lg_sigma1, Au=0, Abb=Abb1, zeta=zeta)
# }
# } else {
# parameter.list = list(r0=r1, b = b1, B=B1, Br=Br1, lambda = lambda1, u = u1, lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=0, Au=Au1, Abb=Abb1, zeta=zeta)
# }
data.list = list(y = y, x = Xd, xr=xr, xb=xb, offset=offset, num_lv = num.lv, quadratic = 1, family=familyn, extra=extra, method=0, model=1, random=randoml, zetastruc = ifelse(zeta.struc=="species",1,0))
objr <- TMB::MakeADFun(
data = data.list, silent=!trace,
parameters = parameter.list, map = map.list,
inner.control=list(mgcmax = 1e+200,maxit = 1000),
DLL = "gllvm")
if(optimizer=="nlminb") {
timeo <- system.time(optr <- try(nlminb(objr$par, objr$fn, objr$gr,control = list(rel.tol=reltol,iter.max=maxit,eval.max=maxit)),silent = TRUE))
}
if(optimizer=="optim") {
timeo <- system.time(optr <- try(optim(objr$par, objr$fn, objr$gr,method = "BFGS",control = list(reltol=reltol,maxit=maxit),hessian = FALSE),silent = TRUE))
}
#quick check to see if something actually happened
flag <- 1
if(all(round(lambda2,0)==round(matrix(abs(optr$par[names(optr$par)=="lambda2"]),byrow=T,ncol=num.lv,nrow=p),0))){
flag <- 0
warning("Full quadratic model did not properly converge or all quadratic coefficients are close to zero. Try changing 'start.struc' in 'control.start'. /n")
}
if(inherits(optr, "try-error") || flag == 0){optr <- optr1; objr <- objr1; quadratic <- "LV";}
}
param <- objr$env$last.par.best
if(family %in% c("negative.binomial", "tweedie", "gaussian", "gamma")) {
phis=exp(param[names(param)=="lg_phi"])
}
if(family=="ZIP") {
lp0 <- param[names(param)=="lg_phi"]; out$lp0=lp0
phis <- exp(lp0)/(1+exp(lp0));#log(phis); #
}
if(family == "ordinal"){
zetas <- param[names(param)=="zeta"]
if(zeta.struc=="species"){
zetanew <- matrix(NA,nrow=p,ncol=K)
idx<-0
for(j in 1:ncol(y)){
k<-max(y[,j])-2
if(k>0){
for(l in 1:k){
zetanew[j,l+1]<-zetas[idx+l]
}
}
idx<-idx+k
}
zetanew[,1] <- 0
row.names(zetanew) <- colnames(y00); colnames(zetanew) <- paste(min(y):(max(y00)-1),"|",(min(y00)+1):max(y00),sep="")
}else{
zetanew <- c(0,zetas)
names(zetanew) <- paste(min(y00):(max(y00)-1),"|",(min(y00)+1):max(y00),sep="")
}
zetas<-zetanew
out$y<-y00
}
bi<-names(param)=="b"
Bi<-names(param)=="B"
li<-names(param)=="lambda"
li2 <- names(param)=="lambda2"
ui<-names(param)=="u"
if(nlvr > 0){
lvs <- (matrix(param[ui],n,nlvr))
theta <- matrix(0,p,num.lv)
if(p>1) {
theta[lower.tri(theta,diag=TRUE)] <- param[li];
if(quadratic!=FALSE){
theta<-cbind(theta,matrix(-abs(param[li2]),ncol=num.lv,nrow=p,byrow=T))
}
} else {theta <- c(param[li],-abs(param[li2]))}
# diag(theta) <- exp(diag(theta))#!!!
}
if(row.eff!=FALSE) {
ri <- names(param)=="r0"
if(method=="LA" || row.eff=="random"){ row.params=param[ri] } else {row.params <- param[ri]}
if(row.eff=="random") {
row.params <- lvs[,1]; lvs<- as.matrix(lvs[,-1])
sigma<-exp(param["log_sigma"])[1]
if(nlvr>1 && dependent.row) sigma <- c(exp(param[names(param)=="log_sigma"])[1],(param[names(param)=="log_sigma"])[-1])
}
}
if(!is.null(randomX)){
Bri <- names(param)=="Br"
Br <- matrix(param[Bri],ncol(xb),p)
Sri <- names(param)=="sigmaB"
L <- diag(ncol(xb))
if(ncol(xb)>1){
sigmaB <- diag(exp(param[Sri]))
Srij <- names(param)=="sigmaij"
Sr <- param[Srij]
L[upper.tri(L)] <- Sr
D <- diag(diag(t(L)%*%L))
} else{
D <- 1
sigmaB <- (exp(param[Sri]))
}
sigmaB_ <- solve(sqrt(D))%*%(t(L)%*%L)%*%solve(sqrt(D))
sigmaB <- sigmaB%*%sigmaB_%*%t(sigmaB)
}
beta0 <- param[bi]
B <- param[Bi]
if(beta0com){
beta0=B[1]
B = B[-1]
cn<-colnames(Xd)
Xd<-as.matrix(Xd[,-1])
colnames(Xd)<-cn[-1]
}
new.loglik<-objr$env$value.best[1]
if((n.i==1 || out$logL > abs(new.loglik)) && is.finite(new.loglik) && !inherits(optr, "try-error") && new.loglik>0){ #
objrFinal<-objr1 <- objr; optrFinal<-optr1 <- optr;
out$logL <- new.loglik
if(num.lv > 0) {
out$lvs <- lvs
out$params$theta <- theta
rownames(out$lvs) <- rownames(out$y);
rownames(out$params$theta) <- colnames(out$y)
if(quadratic==FALSE)colnames(out$params$theta) <- colnames(out$lvs) <- paste("LV", 1:num.lv, sep="");
if(quadratic!=FALSE){
colnames(out$lvs) <- paste("LV", 1:num.lv, sep="");
colnames(out$params$theta)<- c(paste("LV", 1:num.lv, sep=""),paste("LV", 1:num.lv, "^2",sep=""));
}
}
if(!beta0com) names(beta0) <- colnames(out$y);
if(beta0com) names(beta0) <- "Community intercept";
out$params$beta0 <- beta0;
out$params$B <- B; names(out$params$B)=colnames(Xd)
if(row.eff!=FALSE) {
if(row.eff=="random"){
out$params$sigma <- sigma;
names(out$params$sigma) <- "sigma"
if(num.lv>0 && dependent.row) names(out$params$sigma) <- paste("sigma",c("",1:num.lv), sep = "")
}
out$params$row.params <- row.params;
names(out$params$row.params) <- rownames(out$y)
}
if(family %in% c("negative.binomial")) {
out$params$phi <- 1/phis; names(out$params$phi) <- colnames(out$y);
out$params$inv.phi <- phis; names(out$params$inv.phi) <- colnames(out$y);
}
if(family %in% c("gaussian","tweedie","gamma")) {
out$params$phi <- phis; names(out$params$phi) <- colnames(out$y);
}
if(family =="ZIP") {
out$params$phi <- phis; names(out$params$phi) <- colnames(out$y);
}
if (family == "ordinal") {
out$params$zeta <- zetas
}
if(!is.null(randomX)){
out$params$Br <- Br
out$params$sigmaB <- sigmaB
out$corr <- sigmaB_ #!!!!
rownames(out$params$Br) <- rownames(out$params$sigmaB) <- colnames(out$params$sigmaB) <- colnames(xb)
}
if(family == "binomial") out$link <- link;
out$row.eff <- row.eff
out$time <- timeo
out$start <- res
out$Power <- Power
pars <- optr$par
if(method=="VA" && num.lv>0){
param <- objr$env$last.par.best
Au <- param[names(param)=="Au"]
A <- array(0, dim=c(n, nlvr, nlvr))
for (d in 1:nlvr){
for(i in 1:n){
A[i,d,d] <- exp(Au[(d-1)*n+i]);
}
}
if(length(Au) > nlvr*n){
k <- 0;
for (c1 in 1:nlvr){
r <- c1 + 1;
while (r <= nlvr){
for(i in 1:n){
A[i,r,c1] <- Au[nlvr*n+k*n+i];
A[i,c1,r] <- A[i,r,c1];
}
k <- k+1; r <- r+1;
}
}
}
for(i in 1:n){
A[i,,] <- A[i,,]%*%t(A[i,,])
}
out$A <- A
}
if(method == "VA" && !is.null(randomX)){
Abb <- param[names(param) == "Abb"]
dr <- ncol(xb)
Ab <- array(0,dim=c(p,dr,dr))
for (d in 1:dr){
for(j in 1:p){
Ab[j,d,d] <- exp(Abb[(d-1)*p + j]);
}
}
if(length(Abb)>dr*p){
k <- 0;
for (c1 in 1:dr){
r <- c1+1;
while (r <= dr){
for(j in 1:p){
Ab[j,r,c1] <- Abb[dr*p+k*p+j];
Ab[j,c1,r] <- Ab[j,r,c1];
}
k <- k+1; r <- r+1;
}
}
}
for(j in 1:p){
Ab[j,,] <- Ab[j,,]%*%t(Ab[j,,])
}
out$Ab <- Ab
}
}
n.i <- n.i+1;
}
if(is.null(formula1)){ out$formula <- formula} else {out$formula <- formula1}
out$Xrandom <- xb
out$D <- Xd
out$TMBfn <- objrFinal
out$TMBfn$par <- optrFinal$par #ensure params in this fn take final values
out$convergence <- optrFinal$convergence == 0
out$quadratic <- quadratic
out$logL <- -out$logL
out$zeta.struc <- zeta.struc
out$beta0com <- beta0com
if(method == "VA"){
if(num.lv > 0) out$logL = out$logL + n*0.5*num.lv
if(row.eff == "random") out$logL = out$logL + n*0.5
if(!is.null(randomX)) out$logL = out$logL + p*0.5*ncol(xb)
if(family=="gaussian") {
out$logL <- out$logL - n*p*log(pi)/2
}
}
tr<-try({
if(sd.errors && is.finite(out$logL)) {
if(trace) cat("Calculating standard errors for parameters...\n")
out$TMB <- TRUE
# out <- c(out, se.gllvm(out))
if(method == "VA"){
sdr <- objrFinal$he(optrFinal$par)
}
if(method == "LA"){
sdr <- optimHess(optrFinal$par, objrFinal$fn, objrFinal$gr,control = list(reltol=reltol,maxit=maxit))
}
m <- dim(sdr)[1]; incl <- rep(TRUE,m); incld <- rep(FALSE,m)
incl[names(objrFinal$par)=="Abb"] <- FALSE;
incl[names(objrFinal$par)=="Au"] <- FALSE;
if(quadratic == FALSE){incl[names(objrFinal$par)=="lambda2"]<-FALSE}
if(nlvr > 0) incld[names(objrFinal$par)=="Au"] <- TRUE
if(beta0com){
incl[names(objrFinal$par)=="b"] <- FALSE
}
if(row.eff=="random") {
incl[names(objrFinal$par)=="r0"] <- FALSE; incld[names(objrFinal$par)=="r0"] <- FALSE
} else {
incl[names(objrFinal$par)=="log_sigma"] <- FALSE
}
if(row.eff==FALSE) incl[names(objrFinal$par)=="r0"] <- FALSE
if(row.eff=="fixed") incl[1] <- FALSE
if(is.null(randomX)) {
incl[names(objrFinal$par)%in%c("Br","sigmaB","sigmaij")] <- FALSE
} else {
incl[names(objrFinal$par)=="Abb"] <- FALSE; incld[names(objrFinal$par)=="Abb"] <- TRUE
incl[names(objrFinal$par)=="Br"] <- FALSE; incld[names(objrFinal$par)=="Br"] <- TRUE
if(NCOL(xb)==1) incl[names(objrFinal$par) == "sigmaij"] <- FALSE
}
incl[names(objrFinal$par)=="Au"] <- FALSE; if(num.lv>0) incld[names(objrFinal$par)=="Au"] <- TRUE
incl[names(objrFinal$par)=="u"] <- FALSE; incld[names(objrFinal$par)=="u"] <- TRUE
if(familyn==0 || familyn==2 || familyn==7 || familyn==8) incl[names(objrFinal$par)=="lg_phi"] <- FALSE
if(familyn!=7) incl[names(objrFinal$par)=="zeta"] <- FALSE
if(familyn==7) incl[names(objrFinal$par)=="zeta"] <- TRUE
if(nlvr==0){
incl[names(objrFinal$par)=="u"] <- FALSE;
incld[names(objrFinal$par)=="u"] <- FALSE;
incl[names(objrFinal$par)=="lambda"] <- FALSE;
incl[names(objrFinal$par)=="lambda2"] <- FALSE;
incl[names(objrFinal$par)=="Au"] <- FALSE;
}
if(method=="LA" || (num.lv==0 && (row.eff!="random" && is.null(randomX)))){
incl[names(objrFinal$par)=="Au"] <- FALSE;
covM <- try(MASS::ginv(sdr[incl,incl]))
se <- try(sqrt(diag(abs(covM))))
if(num.lv > 0 || row.eff == "random" || !is.null(randomX)) {
sd.random <- sdrandom(objrFinal, covM, incl)
prediction.errors <- list()
if(!is.null(randomX)){
prediction.errors$Br <- matrix(diag(as.matrix(sd.random))[1:(ncol(xb)*ncol(y))], ncol(xb), ncol(y));
sd.random <- sd.random[-(1:(ncol(xb)*ncol(y))),-(1:(ncol(xb)*ncol(y)))]
}
if(row.eff=="random"){
prediction.errors$row.params <- diag(as.matrix(sd.random))[1:n];
sd.random <- sd.random[-(1:n),-(1:n)]
}
if(num.lv > 0){
cov.lvs <- array(0, dim = c(n, num.lv, num.lv))
for (i in 1:n) {
cov.lvs[i,,] <- as.matrix(sd.random[(0:(num.lv-1)*n+i),(0:(num.lv-1)*n+i)])
}
prediction.errors$lvs <- cov.lvs
}
out$prediction.errors <- prediction.errors
}
} else {
A.mat <- sdr[incl,incl] # a x a
D.mat <- sdr[incld,incld] # d x d
B.mat <- sdr[incl,incld] # a x d
cov.mat.mod <- try(MASS::ginv(A.mat-B.mat%*%solve(D.mat)%*%t(B.mat)),silent=T)
se <- sqrt(diag(abs(cov.mat.mod)))
incla<-rep(FALSE, length(incl))
incla[names(objrFinal$par)=="u"] <- TRUE
out$Hess <- list(Hess.full=sdr, incla = incla, incl=incl, incld=incld, cov.mat.mod=cov.mat.mod)
}
if(row.eff=="fixed") {
se.row.params <- c(0,se[1:(n-1)]);
names(se.row.params) = rownames(out$y); se <- se[-(1:(n-1))]
}
if(beta0com){
se.beta0 <- se[1]; se <- se[-1];
} else {
se.beta0 <- se[1:p]; se <- se[-(1:p)];
}
se.B <- se[1:length(B)]; se <- se[-(1:length(B))];
if(num.lv>0) {
se.theta <- matrix(0,p,num.lv); se.theta[lower.tri(se.theta, diag = TRUE)]<-se[1:(p * num.lv - sum(0:(num.lv-1)))];
colnames(se.theta) <- paste("LV", 1:num.lv, sep="");
rownames(se.theta) <- colnames(out$y)
out$sd$theta <- se.theta; se <- se[-(1:(p * num.lv - sum(0:(num.lv-1))))];
# diag(out$sd$theta) <- diag(out$sd$theta)*diag(out$params$theta) !!!
if(quadratic==TRUE){
se.lambdas2 <- matrix(se[1:(p * num.lv)], p, num.lv, byrow = T)
colnames(se.lambdas2) <- paste("LV", 1:num.lv, "^2", sep = "")
se <- se[-(1:(num.lv*p))]
out$sd$theta <- cbind(out$sd$theta,se.lambdas2)
}else if(quadratic=="LV"){
se.lambdas2 <- matrix(se[1:num.lv], p, num.lv, byrow = T)
colnames(se.lambdas2) <- paste("LV", 1:num.lv, "^2", sep = "")
se <- se[-(1:num.lv)]
out$sd$theta <- cbind(out$sd$theta,se.lambdas2)
}
}
out$sd$beta0 <- se.beta0;
if(!beta0com){ names(out$sd$beta0) <- colnames(out$y);}
out$sd$B <- se.B; names(out$sd$B) <- colnames(Xd)
if(row.eff=="fixed") {out$sd$row.params <- se.row.params}
if(family %in% c("negative.binomial")) {
se.lphis <- se[1:p]; out$sd$inv.phi <- se.lphis*out$params$inv.phi;
out$sd$phi <- se.lphis*out$params$phi;
names(out$sd$inv.phi) <- names(out$sd$phi) <- colnames(y); se <- se[-(1:p)]
}
if(family %in% c("gaussian","tweedie","gamma")) {
se.lphis <- se[1:p];
out$sd$phi <- se.lphis*out$params$phi;
names(out$sd$phi) <- colnames(y); se <- se[-(1:p)]
}
if(family %in% c("ZIP")) {
se.phis <- se[1:p];
out$sd$phi <- se.phis*exp(lp0)/(1+exp(lp0))^2;#
names(out$sd$phi) <- colnames(y); se <- se[-(1:p)]
}
if(!is.null(randomX)){
nr <- ncol(xb)
out$sd$sigmaB <- se[1:ncol(xb)]*c(sqrt(diag(out$params$sigmaB)));
names(out$sd$sigmaB) <- c(paste("sd",colnames(xb),sep = "."))
se <- se[-(1:ncol(xb))]
if(nr>1){
out$sd$corrpar <- se[1:(nr*(nr-1)/2)]
se <- se[-(1:(nr*(nr-1)/2))]
}
}
if(row.eff=="random") {
out$sd$sigma <- se[1:length(out$params$sigma)]*c(out$params$sigma[1],rep(1,length(out$params$sigma)-1));
names(out$sd$sigma) <- "sigma";
se=se[-(1:(length(out$params$sigma)))]
}
if(family %in% c("ordinal")){
se.zetanew <- se.zetas <- se;
if(zeta.struc == "species"){
se.zetanew <- matrix(NA,nrow=p,ncol=K)
idx<-0
for(j in 1:ncol(y)){
k<-max(y[,j])-2
if(k>0){
for(l in 1:k){
se.zetanew[j,l+1]<-se.zetas[idx+l]
}
}
idx<-idx+k
}
se.zetanew[,1] <- 0
out$sd$zeta <- se.zetanew
row.names(out$sd$zeta) <- colnames(y00); colnames(out$sd$zeta) <- paste(min(y00):(max(y00)-1),"|",(min(y00)+1):max(y00),sep="")
}else{
se.zetanew <- c(0, se.zetanew)
out$sd$zeta <- se.zetanew
names(out$sd$zeta) <- paste(min(y00):(max(y00)-1),"|",(min(y00)+1):max(y00),sep="")
}
}
}
})
if(inherits(tr, "try-error")) { cat("Standard errors for parameters could not be calculated.\n") }
return(out)
}
########################################################################################
## GLLVM fourth corner model, with estimation done via Laplace and Variational approximation using TMB-package
## Original author: Jenni Niku
##########################################################################################
trait.TMB <- function(
y, X = NULL,TR=NULL,formula=NULL, num.lv = 2, family = "poisson",
Lambda.struc = "unstructured", Ab.struct = "unstructured", row.eff = FALSE, reltol = 1e-6, seed = NULL,
maxit = 1000, start.lvs = NULL, offset=NULL, sd.errors = FALSE,trace=FALSE,
link="logit",n.init=1,start.params=NULL,start0=FALSE,optimizer="optim",
starting.val="res",method="VA",randomX=NULL,Power=1.5,diag.iter=1, Ab.diag.iter = 0, dependent.row = FALSE,
Lambda.start=c(0.2, 0.5), jitter.var=0, yXT = NULL, scale.X = FALSE, randomX.start = "zero", beta0com = FALSE
,zeta.struc = "species", quad.start=0.01, start.struc="LV",quadratic=FALSE) {
if(is.null(X) && !is.null(TR)) stop("Unable to fit a model that includes only trait covariates")
if(!is.null(start.params)) starting.val <- "zero"
objrFinal <- optrFinal <- NULL
term <- NULL
n <- dim(y)[1]; p <- dim(y)[2];
y <- as.data.frame(y)
formula1 <- formula
beta0com0 = beta0com
if(method=="VA"){ link <- "probit"}
jitter.var.r <- 0
if(length(jitter.var)>1){
jitter.var.r <- jitter.var[2]
jitter.var <- jitter.var[1]
}
if(NCOL(X) < 1) stop("No covariates in the model, fit the model using gllvm(y,family=",family,"...)")
# change categorical variables to dummy variables
num.X <- 0
X.new <- NULL
if(!is.null(X)) {
num.X <- dim(X)[2]
for (i in 1:num.X) {
if(!is.factor(X[,i])) {
if(length(unique(X[,i]))>2){ Xi <- scale(X[,i], scale = scale.X, center = scale.X) } else { Xi <- X[,i] }
X[,i] <- Xi
X.new <- cbind(X.new,Xi); if(!is.null(colnames(X)[i])) colnames(X.new)[dim(X.new)[2]] <- colnames(X)[i]
} else {
dum <- model.matrix( ~ X[,i])
dum <- as.matrix(dum[, !(colnames(dum) %in% c("(Intercept)"))])
colnames(dum) <- paste(colnames(X)[i], levels(X[,i])[ - 1], sep = "")
X.new <- cbind(X.new, dum)
}
}
X.new <- data.frame(X.new);
}
num.T <- 0
T.new <- NULL
if(!is.null(TR)) {
num.T <- dim(TR)[2]
T.new <- matrix(0, p, 0)
if(num.T > 0){
for (i in 1 : num.T) {
#if(!is.factor(TR[,i]) && length(unique(TR[,i])) > 2) { #!!!
if(is.numeric(TR[,i]) && length(unique(TR[,i])) > 2) {
TR[,i] <- scale(TR[,i])
T.new <- cbind(T.new,scale(TR[,i], scale = scale.X, center = scale.X)); colnames(T.new)[dim(T.new)[2]] <- colnames(TR)[i]
} else {
if(!is.factor(TR[,i])) TR[,i] <- factor(TR[,i]) #!!!
dum <- model.matrix(~TR[,i]-1)
colnames(dum) <- paste(colnames(TR)[i],levels(TR[,i]),sep="")
T.new <- cbind(T.new,dum)
}
}
T.new <- data.matrix(T.new);
}
}
if(is.null(formula)){
n1 <- colnames(X)
n2 <- colnames(TR)
form1 <- paste("",n1[1],sep = "")
if(length(n1)>1){
for(i1 in 2:length(n1)){
form1 <- paste(form1,n1[i1],sep = "+")
}}
formula <- paste("y~",form1,sep = "")
formula <- paste(formula, form1,sep = " + (")
formula <- paste(formula, ") : (", sep = "")
formula <- paste(formula, n2[1], sep = "")
if(length(n2) > 1){
for(i2 in 2:length(n2)){
formula <- paste(formula, n2[i2], sep = "+")
}}
formula1 <- paste(formula, ")", sep = "")
formula <- formula(formula1)
}
if(!is.null(X) || !is.null(TR)){
yX <- cbind(cbind(X,id = 1:nrow(y))[rep(1:nrow(X), times=ncol(y)),], time = rep(1:ncol(y), each= nrow(y)), y = c(as.matrix(y))) #reshape(data.frame(cbind(y, X)), direction = "long", varying = colnames(y), v.names = "y")
TR2 <- data.frame(time = 1:p, TR)
if(is.null(yXT)){
yXT <- merge(yX, TR2, by = "time")
}
data <- yXT
m1 <- model.frame(formula, data = data)
term <- terms(m1)
Xd <- as.matrix(model.matrix(formula, data = data))
nXd <- colnames(Xd)
Xd <- as.matrix(Xd[, !(nXd %in% c("(Intercept)"))])
colnames(Xd) <- nXd[!(nXd %in% c("(Intercept)"))]
if(!is.null(X.new)) fx <- apply(matrix(sapply(colnames(X.new), function(x){grepl(x, colnames(Xd))}), ncol(Xd), ncol(X.new)), 2, any)
ft <- NULL;
if(NCOL(T.new) > 0) {
ft <- apply(matrix(sapply(colnames(T.new), function(x){ grepl(x, colnames(Xd)) }), ncol(Xd), ncol(T.new)), 2, any)
}
X1 <- as.matrix(X.new[,fx]);
TR1 <- as.matrix(T.new[,ft]);
colnames(X1) <- colnames(X.new)[fx]; colnames(TR1)<-colnames(T.new)[ft];
nxd <- colnames(Xd)
formulab <- paste("~",nxd[1],sep = "");
if(length(nxd)>1) for(i in 2:length(nxd)) formulab <- paste(formulab,nxd[i],sep = "+")
formula1 <- formulab
}
if(!(family %in% c("poisson","negative.binomial","binomial","tweedie","ZIP", "gaussian", "ordinal", "gamma", "exponential")))
stop("Selected family not permitted...sorry!")
if(!(Lambda.struc %in% c("unstructured","diagonal")))
stop("Lambda matrix (covariance of variational distribution for latent variable) not permitted...sorry!")
if(num.lv == 1) Lambda.struc <- "diagonal" ## Prevents it going to "unstructured" loops and causing chaos
trial.size <- 1
y <- as.matrix(y)
if(!is.numeric(y)) stop("y must a numeric. If ordinal data, please convert to numeric with lowest level equal to 1. Thanks")
if(family == "ordinal") {
y00<-y
if(min(y)==0){ y=y+1}
max.levels <- apply(y,2,function(x) length(min(x):max(x)))
if(any(max.levels == 1) || all(max.levels == 2))
stop("Ordinal data requires all columns to have at least has two levels. If all columns only have two levels, please use family == binomial instead. Thanks")
if(any(!apply(y,2,function(x)all(diff(sort(unique(x)))==1)))&zeta.struc=="species")
stop("Can't fit ordinal model if there are species with missing classes. Please reclassify per species or use zeta.struc = `common` ")
if(any(diff(sort(unique(c(y))))!=1)&zeta.struc=="common")
stop("Can't fit ordinal model if there are missing classes. Please reclassify.")
}
if(is.null(rownames(y))) rownames(y) <- paste("Row",1:n,sep="")
if(is.null(colnames(y))) colnames(y) <- paste("Col",1:p,sep="")
if(!is.null(X)) { if(is.null(colnames(X))) colnames(X) <- paste("x",1:ncol(X),sep="") }
out <- list(y = y, X = X1, TR = TR1, num.lv = num.lv, row.eff = row.eff, logL = Inf, family = family, offset=offset,randomX=randomX,X.design=Xd,terms=term, method = method)
if(is.null(formula) && is.null(X) && is.null(TR)){formula ="~ 1"}
n.i <- 1;
if(n.init > 1) seed <- sample(1:10000, n.init)
while(n.i <= n.init){
randomXb <- NULL
if(!is.null(randomX)){
#
if(num.lv>0 && randomX.start == "res" && starting.val == "res") {randomXb <- randomX}
#
xb <- as.matrix(model.matrix(randomX, data = data.frame(X)))
rnam <- colnames(xb)[!(colnames(xb) %in% c("(Intercept)"))]
xb <- as.matrix(xb[, rnam]); #as.matrix(X.new[, rnam])
if(NCOL(xb) == 1) colnames(xb) <- rnam
bstart <- start.values.randomX(y, xb, family, starting.val = randomX.start, power = Power)
Br <- bstart$Br
sigmaB <- bstart$sigmaB
sigmaij <- rep(0,(ncol(xb)-1)*ncol(xb)/2)
# method <- "LA"
# xb <- as.matrix(model.matrix(randomX,data = X.new))
# xb <- as.matrix(xb[,!(colnames(xb) %in% c("(Intercept)"))])
# Br <- matrix(0, ncol(xb), p)
# sigmaB <- diag(ncol(xb))
} else {
xb <- Br <- matrix(0); sigmaB <- diag(1); sigmaij <- 0; Abb <- 0
}
num.X <- dim(X)[2]
num.T <- dim(TR)[2]
phi<-phis <- NULL
sigma <- 1
phi <- phis <- NULL;
if(n.init > 1 && trace) cat("initial run ",n.i,"\n");
res <- start.values.gllvm.TMB(y = y, X = X1, TR = TR1, family = family, offset=offset, trial.size = trial.size, num.lv = num.lv, start.lvs = start.lvs, seed = seed[n.i],starting.val=starting.val,power=Power,formula = formula, jitter.var=jitter.var, #!!!
yXT=yXT, row.eff = row.eff, TMB=TRUE, link=link, randomX=randomXb, beta0com = beta0com0, zeta.struc = zeta.struc)
if(is.null(start.params)){
beta0 <- res$params[,1]
# common env params or different env response for each spp
B <- NULL
if(!is.null(TR) && !is.null(X)) {
B <- c(res$B)[1:ncol(Xd)]
if(any(is.na(B))) B[is.na(B)] <- 0
}
row.params <- NULL;
if(row.eff!=FALSE){
row.params <- res$row.params
if (row.eff == "random") {
sigma <- sd(row.params);
}
}
vameans <- theta <- lambda <- NULL
if(num.lv > 0) {
if(!is.null(randomXb) && family != "ordinal"){
Br <- res$Br
sigmaB <- (res$sigmaB)
if(length(sigmaB)>1) sigmaij <- rep(0,length(res$sigmaij))
if(randomX.start == "res" && !is.null(res$fitstart)) { ##!!!
res$sigmaij <- sigmaij <- res$fitstart$TMBfnpar[names(res$fitstart$TMBfnpar) == "sigmaij"]
}
}
if(start.struc=="LV"&quadratic!=FALSE){
lambda2 <- matrix(quad.start, ncol = num.lv, nrow = 1)
}else if(start.struc=="all"&quadratic!=FALSE){
lambda2 <- matrix(quad.start, ncol = num.lv, nrow = p)
}else if(quadratic==FALSE){
lambda2 <- 0
}
if(quadratic != FALSE){
res$params <- cbind(res$params, matrix(lambda2,nrow=p,ncol=num.lv))
}else{
res$params <- res$params
}
vameans <- res$index
theta <- as.matrix(res$params[,(ncol(res$params) - num.lv + 1):ncol(res$params)])#fts$coef$theta#
theta[upper.tri(theta)] <- 0
if(Lambda.struc == "unstructured") {
lambda <- array(NA,dim=c(n,num.lv,num.lv))
for(i in 1:n) { lambda[i,,] <- diag(rep(1,num.lv)) }
}
if(Lambda.struc == "diagonal") {
lambda <- matrix(1,n,num.lv)
}
zero.cons <- which(theta == 0)
if(n.init > 1 && !is.null(res$mu) && starting.val == "res" && family != "tweedie") {
if(family=="ZIP") {
lastart <- FAstart(res$mu, family="poisson", y=y, num.lv = num.lv, jitter.var = jitter.var[1])
} else {
lastart <- FAstart(res$mu, family=family, y=y, num.lv = num.lv, phis = res$phi, jitter.var = jitter.var[1])
}
theta <- lastart$gamma#/lastart$gamma
vameans<-lastart$index#/max(lastart$index)
}
}
} else{
if(all(dim(start.params$y)==dim(y)) && is.null(X)==is.null(start.params$X) && is.null(T)==is.null(start.params$TR) && row.eff == start.params$row.eff){
beta0 <- start.params$params$beta0
# common env params or different env response for each spp
B <- NULL
if(!is.null(TR) && !is.null(X)) {
B <- start.params$params$B;
}
fourth <- inter <- NULL; if(!is.null(TR) ) inter <- start.params$params$fourth # let's treat this as a vector (vec(B'))'
vameans <- theta <- lambda <- NULL
row.params <- NULL
if(row.eff %in% c("fixed","random",TRUE)) {
if(row.eff == start.params$row.eff){
res$row.params <- row.params <- start.params$params$row.params
if(row.eff %in% c("random")) res$sigma <- sigma <- start.params$params$sigma
} else {
row.params <- res$row.params
}
}
if(num.lv > 0) {
theta <- (start.params$params$theta) ## LV coefficients
vameans <- matrix(start.params$lvs, ncol = num.lv);
lambda <- start.params$A
if(class(start.params)[2]=="gllvm.quadratic" && quadratic != FALSE){
lambda2 <- start.params$params$theta[,-c(1:start.params$num.lv),drop=F]
}else if(class(start.params)[1]=="gllvm" && quadratic != FALSE){
if(start.struc=="LV"|quadratic=="LV"){
lambda2 <- matrix(quad.start, ncol = num.lv, nrow = 1)
}else if(start.struc=="all"&quadratic=="all"){
lambda2 <- matrix(quad.start, ncol = num.lv, nrow = p)
}
}
}
if(family == "negative.binomial" && start.params$family == "negative.binomial" && !is.null(start.params$params$phi)) {res$phi<-start.params$params$phi}
} else { stop("Model which is set as starting parameters isn't the suitable you are trying to fit. Check that attributes y, X, TR and row.eff match to each other.");}
}
if (is.null(offset)) offset <- matrix(0, nrow = n, ncol = p)
if(family == "negative.binomial") {
phis <- res$phi
if (any(phis > 10))
phis[phis > 50] <- 50
if (any(phis < 0.02))
phis[phis < 0.02] <- 0.02
res$phi <- phis
phis <- 1/phis
}
if(family == "tweedie") {
phis <- res$phi;
if(any(phis>10)) phis[phis>10]=10;
if(any(phis<0.10))phis[phis<0.10]=0.10;
phis= (phis)
}
if (family == "ZIP") {
phis <- (colMeans(y == 0) * 0.98) + 0.01;
phis <- phis / (1 - phis)
} # ZIP probability
# if (family %in% c("gaussian", "gamma")) {
# phis <- res$phi
# }
if(family=="ordinal"){
K = max(y00)-min(y00)
if(zeta.struc=="species"){
zeta <- c(t(res$zeta[,-1]))
zeta <- zeta[!is.na(zeta)]
}else{
zeta <- res$zeta[-1]
}
}else{
zeta = 0
}
if(jitter.var.r>0){
if(row.eff == "random") row.params <- row.params + rnorm(n, 0, sd = sqrt(jitter.var.r));
if(!is.null(randomX)) Br <- Br + t(mvtnorm::rmvnorm(p, rep(0, nrow(Br)),diag(nrow(Br))*jitter.var.r));
}
q <- num.lv
a <- c(beta0)
if(num.lv > 0) {
# diag(theta) <- log(diag(theta)) # !!!
theta <- theta[lower.tri(theta, diag = TRUE)]
u <- vameans
}
if(!is.null(phis)) {phi=(phis)} else {phi <- rep(1,p)}
q <- num.lv
if(!is.null(row.params)){ r0 <- row.params} else {r0 <- rep(0, n)}
if(row.eff == "random"){ nlvr<-num.lv+1 } else {nlvr=num.lv}
if(row.eff=="fixed"){xr <- matrix(1,1,p)} else {xr <- matrix(0,1,p)}
# set starting values for variational distribution covariances
if(nlvr > 0){
if(Lambda.struc=="diagonal" || diag.iter>0){
Au <- log(rep(Lambda.start[1],nlvr*n)) #
} else{
Au <- c(log(rep(Lambda.start[1],nlvr*n)),rep(0,nlvr*(nlvr-1)/2*n))
}
} else { Au <- 0}
if(length(Lambda.start)<2){ Ar <- rep(1,n)} else {Ar <- rep(Lambda.start[2],n)}
if(!is.null(randomX)){
if(length(Lambda.start)>2) {
a.var <- Lambda.start[3];
} else {a.var <- 0.5;}
if(randomX.start == "res"){ # !!!! && !is.null(res$fitstart$Ab)
if(Ab.struct == "diagonal" || Ab.diag.iter>0){
Abb <- c(log(c(apply(res$fitstart$Ab,1, diag))))
} else {
Abb <- c(log(c(apply(res$fitstart$Ab,1, diag))), rep(0, ncol(xb) * (ncol(xb) - 1) / 2 * p))
}
res$Br <- Br
res$Ab <- c(apply(res$fitstart$Ab,1, diag))
} else{ #!!!
if(Ab.struct == "diagonal" || Ab.diag.iter>0){
Abb <- c(log(rep(a.var, ncol(xb) * p)))
} else {
Abb <- c(log(rep(a.var, ncol(xb) * p)), rep(0, ncol(xb) * (ncol(xb) - 1) / 2 * p))
}
} #!!!
} else { Abb <- 0 }
optr<-NULL
timeo<-NULL
se <- NULL
map.list <- list()
# if(row.eff==FALSE) map.list$r0 <- factor(rep(NA,n))
if(family %in% c("poisson","binomial","ordinal","exponential")) map.list$lg_phi <- factor(rep(NA,p))
if(family != "ordinal") map.list$zeta <- factor(NA)
randoml=c(0,0)
# For Laplace method, specify random paramteters to randomp
randomp= NULL #c("u","Br")
if(num.lv>0 || row.eff == "random") {randomp <- c(randomp,"u")}
# family settings
extra <- c(0,1)
if(family == "poisson") { familyn=0}
if(family == "negative.binomial") { familyn=1}
if(family == "binomial") {
familyn <- 2;
if(link=="probit") extra[1] <- 1
}
if(family == "gaussian") {familyn=3}
if(family == "gamma") {familyn=4}
if(family == "tweedie"){ familyn <- 5; extra[1] <- Power}
if(family == "ZIP"){ familyn <- 6;}
if(family == "ordinal") {familyn=7}
if(family == "exponential") {familyn=8}
if(beta0com){
extra[2] <- 0
Xd<-cbind(1,Xd)
a <- a*0
B<-c(mean(a),B)
}
# Specify parameter list, data.list and map.list
if(!is.null(randomX)){
randoml[2]=1
randomp <- c(randomp,"Br")
res$Br <- Br
res$sigmaB <- sigmaB
} else {
map.list$Br = factor(NA)
map.list$sigmaB = factor(NA)
map.list$sigmaij = factor(NA)
map.list$Abb = factor(NA)
}
if(quadratic==FALSE){
map.list$lambda2 <- factor(NA)
}
if(row.eff=="random"){
randoml[1] <- 1
if(dependent.row) sigma<-c(sigma[1], rep(0, num.lv))
if(num.lv>0){
u<-cbind(r0,u)
} else {
u<-cbind(r0)
}
} else {
sigma=0
map.list$log_sigma <- factor(NA)
}
if(num.lv==0) {
theta = 0;
lambda2 <- 0
map.list$lambda = factor(NA)
map.list$lambda2 = factor(NA)
if(row.eff != "random") {
u = matrix(0)
map.list$u = factor(NA)
map.list$Au = factor(NA)
}
}
if(starting.val!="zero" && start.struc != "LV" && quadratic == TRUE && num.lv>0 && method == "VA"){
map.list2 <- map.list
map.list2$r0 = factor(rep(NA, length(r0)))
map.list2$b = factor(rep(NA, length(rbind(a))))
map.list2$B = factor(rep(NA, length(B)))
map.list2$Br = factor(rep(NA,length(Br)))
map.list2$lambda = factor(rep(NA, length(theta)))
map.list2$u = factor(rep(NA, length(u)))
map.list2$lg_phi = factor(rep(NA, length(phi)))
map.list2$sigmaB = factor(rep(NA,length(sigmaB)))
map.list2$sigmaij = factor(rep(NA,length(sigmaij)))
map.list2$log_sigma = factor(rep(NA, length(sigma)))
map.list2$Au = factor(rep(NA, length(Au)))
map.list2$Abb = factor(rep(NA, length(Abb)))
map.list2$zeta = factor(rep(NA, length(zeta)))
parameter.list = list(r0=matrix(r0), b = rbind(a), B=matrix(B), Br=Br, lambda = theta, lambda2 = t(lambda2), u = u, lg_phi=log(phi), sigmaB=log(sqrt(diag(sigmaB))), sigmaij=sigmaij, log_sigma=c(sigma), Au=Au, Abb=Abb, zeta=zeta)
objr <- TMB::MakeADFun(
data = list(y = y, x = Xd, xr=xr, xb=xb, offset=offset, num_lv = num.lv, family=familyn, extra=extra, quadratic = 1, method=0, model=1, random=randoml, zetastruc = ifelse(zeta.struc=="species",1,0)), silent=!trace,
parameters = parameter.list, map = map.list2,
inner.control=list(mgcmax = 1e+200,maxit = maxit),
DLL = "gllvm")
if(optimizer=="nlminb") {
timeo <- system.time(optr <- try(nlminb(objr$par, objr$fn, objr$gr,control = list(rel.tol=reltol,iter.max=maxit,eval.max=maxit)),silent = TRUE))
}
if(optimizer=="optim") {
timeo <- system.time(optr <- try(optim(objr$par, objr$fn, objr$gr,method = "BFGS",control = list(reltol=reltol,maxit=maxit),hessian = FALSE),silent = TRUE))
}
lambda2 <- matrix(optr$par, byrow = T, ncol = num.lv, nrow = p)
if(inherits(optr,"try-error")) warning(optr[1]);
}
# Call makeADFun
if(method == "VA" && (num.lv>0 || row.eff=="random" || !is.null(randomX))){
parameter.list = list(r0=matrix(r0), b = rbind(a), B=matrix(B), Br=Br, lambda = theta, lambda2 = t(lambda2), u = u, lg_phi=log(phi), sigmaB=log(sqrt(diag(sigmaB))), sigmaij=sigmaij, log_sigma=c(sigma), Au=Au, Abb=Abb, zeta=zeta)
objr <- TMB::MakeADFun(
data = list(y = y, x = Xd, xr=xr, xb=xb, offset=offset, num_lv = num.lv, quadratic = ifelse(quadratic!=FALSE,1,0), family=familyn, extra=extra, method=0, model=1, random=randoml, zetastruc = ifelse(zeta.struc=="species",1,0)), silent=!trace,
parameters = parameter.list, map = map.list,
inner.control=list(mgcmax = 1e+200,maxit = maxit),
DLL = "gllvm")
} else {
Au=0; Abb=0
map.list$Au <- map.list$Abb <- factor(NA)
parameter.list = list(r0=matrix(r0), b = rbind(a), B=matrix(B), Br=Br, lambda = theta, lambda2 = t(lambda2), u = u, lg_phi=log(phi), sigmaB=log(sqrt(diag(sigmaB))), sigmaij=sigmaij, log_sigma=c(sigma), Au=Au, Abb=Abb, zeta=zeta)
objr <- TMB::MakeADFun(
data = list(y = y, x = Xd,xr=xr, xb=xb, offset=offset, num_lv = num.lv, quadratic = 0, family=familyn,extra=extra,method=1,model=1,random=randoml, zetastruc = ifelse(zeta.struc=="species",1,0)), silent=!trace,
parameters = parameter.list, map = map.list,
inner.control=list(mgcmax = 1e+200,maxit = maxit,tol10=0.01),
random = randomp, DLL = "gllvm")
}
if(optimizer=="nlminb") {
timeo <- system.time(optr <- try(nlminb(objr$par, objr$fn, objr$gr,control = list(rel.tol=reltol,iter.max=maxit,eval.max=maxit)),silent = TRUE))
}
if(optimizer=="optim") {
timeo <- system.time(optr <- try(optim(objr$par, objr$fn, objr$gr,method = "BFGS",control = list(reltol=reltol,maxit=maxit),hessian = FALSE),silent = TRUE))
}
if(inherits(optr,"try-error")) warning(optr[1]);
if(diag.iter>0 && Lambda.struc=="unstructured" && method =="VA" && (nlvr>0 || !is.null(randomX)) && !inherits(optr,"try-error")){
objr1 <- objr
optr1 <- optr
param1 <- optr$par
nam <- names(param1)
r1 <- matrix(param1[nam=="r0"])
b1 <- rbind(param1[nam=="b"])
B1 <- matrix(param1[nam=="B"])
if(!is.null(randomX)) {
Br1 <- matrix(param1[nam=="Br"], ncol(xb), p) #!!!
sigmaB1 <- param1[nam=="sigmaB"]
sigmaij1 <- param1[nam=="sigmaij"]*0
Abb <- param1[nam=="Abb"]
if(Ab.diag.iter>0 && Ab.struct == "unstructured")
Abb <- c(Abb, rep(0,ncol(xb)*(ncol(xb)-1)/2*p))
} else {
Br1 <- Br
sigmaB1 <- sigmaB
sigmaij1 <- sigmaij
}
if(nlvr>0) {
lambda1 <- param1[nam=="lambda"];
u1 <- matrix(param1[nam=="u"],n,nlvr)
Au<- c(pmax(param1[nam=="Au"],rep(log(1e-6), nlvr*n)), rep(0,nlvr*(nlvr-1)/2*n))
if (quadratic=="LV" | quadratic == T && start.struc == "LV"){
lambda2 <- matrix(param1[nam == "lambda2"], byrow = T, ncol = num.lv, nrow = 1)#In this scenario we have estimated two quadratic coefficients before
}else if(quadratic == T){
lambda2 <- matrix(param1[nam == "lambda2"], byrow = T, ncol = num.lv, nrow = p)
}
} else {u1 <- u}
if(num.lv==0) {lambda1 <- 0; }
if(family %in% c("poisson","binomial","ordinal","exponential")){ lg_phi1 <- log(phi)} else {lg_phi1 <- param1[nam=="lg_phi"]}
if(row.eff == "random"){lg_sigma1 <- param1[nam=="log_sigma"]} else {lg_sigma1 = 0}
if(family == "ordinal"){ zeta <- param1[nam=="zeta"] } else { zeta <- 0 }
# data = list(y = y, x = Xd, xr=xr, xb=xb, offset=offset, num_lv = num.lv, family=familyn, extra=extra, method=0, model=1, random=randoml, zetastruc = ifelse(zeta.struc=="species",1,0))
# data = list(y = y, x = Xd, xr=xr, xb=xb, offset=offset, num_lv = num.lv, family=familyn, extra=extra, method=0, model=1, random=randoml, zetastruc = ifelse(zeta.struc=="species",1,0))
# data = list(y = y, x = Xd, xr=xr, xb=xb, offset=offset, num_lv = num.lv, family=familyn, extra=extra, method=0, model=1, random=randoml, zetastruc = ifelse(zeta.struc=="species",1,0))
# parameters = list(r0=r1, b = b1, B=B1, Br=Br1, lambda = lambda1, u = u1, lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=lg_sigma1, Au=Au1, Abb=Abb1, zeta=zeta)
# parameters = list(r0=r1, b = b1, B=B1, Br=Br1, lambda = 0,u = matrix(0), lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=lg_sigma1, Au=0, Abb=Abb1, zeta=zeta)
# parameters = list(r0=r1, b = b1, B=B1, Br=Br1, lambda = lambda1, u = u1, lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=0, Au=Au1, Abb=Abb1, zeta=zeta)
parameter.list = list(r0=r1, b = b1, B=B1, Br=Br1, lambda = lambda1, lambda2 = t(lambda2), u = u1, lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=lg_sigma1, Au=Au, Abb=Abb, zeta=zeta)
# if(nlvr>0 || !is.null(randomX)){
# if(nlvr>0){
# parameter.list = list(r0=r1, b = b1, B=B1, Br=Br1, lambda = lambda1, u = u1, lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=lg_sigma1, Au=Au1, Abb=Abb1, zeta=zeta)
# } else {
# parameter.list = list(r0=r1, b = b1, B=B1, Br=Br, lambda = 0,u = matrix(0), lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=lg_sigma1, Au=0, Abb=Abb1, zeta=zeta)
# }
# } else {
# parameter.list = list(r0=r1, b = b1, B=B1, Br=Br1, lambda = lambda1, u = u1, lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=0, Au=Au1, Abb=Abb1, zeta=zeta)
# }
data.list = list(y = y, x = Xd, xr=xr, xb=xb, offset=offset, num_lv = num.lv, quadratic = ifelse(quadratic!=FALSE&num.lv>0,1,0), family=familyn, extra=extra, method=0, model=1, random=randoml, zetastruc = ifelse(zeta.struc=="species",1,0))
objr <- TMB::MakeADFun(
data = data.list, silent=!trace,
parameters = parameter.list, map = map.list,
inner.control=list(mgcmax = 1e+200,maxit = 1000),
DLL = "gllvm")
if(optimizer=="nlminb") {
timeo <- system.time(optr <- try(nlminb(objr$par, objr$fn, objr$gr,control = list(rel.tol=reltol,iter.max=maxit,eval.max=maxit)),silent = TRUE))
}
if(optimizer=="optim") {
timeo <- system.time(optr <- try(optim(objr$par, objr$fn, objr$gr,method = "BFGS",control = list(reltol=reltol,maxit=maxit),hessian = FALSE),silent = TRUE))
}
if(inherits(optr, "try-error")){optr <- optr1; objr <- objr1; Lambda.struc <- "diagonal"}
}
if(!inherits(optr,"try-error") && start.struc=="LV" && quadratic == TRUE && method == "VA"){
objr1 <- objr
optr1 <- optr
param1 <- optr$par
nam <- names(param1)
r1 <- matrix(param1[nam=="r0"])
b1 <- rbind(param1[nam=="b"])
B1 <- matrix(param1[nam=="B"])
if(!is.null(randomX)) {
Br1 <- matrix(param1[nam=="Br"], ncol(xb), p) #!!!
sigmaB1 <- param1[nam=="sigmaB"]
sigmaij1 <- param1[nam=="sigmaij"]*0
Abb <- param1[nam=="Abb"]
if(Ab.diag.iter>0 && Ab.struct == "unstructured")
Abb <- c(Abb, rep(0,ncol(xb)*(ncol(xb)-1)/2*p))
} else {
Br1 <- Br
sigmaB1 <- sigmaB
sigmaij1 <- sigmaij
}
lambda1 <- param1[nam=="lambda"];
u1 <- matrix(param1[nam=="u"],n,nlvr)
Au<- param1[nam=="Au"]
lambda2 <- abs(matrix(param1[nam == "lambda2"], byrow = T, ncol = num.lv, nrow = p))
if(family %in% c("poisson","binomial","ordinal","exponential")){ lg_phi1 <- log(phi)} else {lg_phi1 <- param1[nam=="lg_phi"]}
if(row.eff == "random"){lg_sigma1 <- param1[nam=="log_sigma"]} else {lg_sigma1 = 0}
if(family == "ordinal"){ zeta <- param1[nam=="zeta"] } else { zeta <- 0 }
# data = list(y = y, x = Xd, xr=xr, xb=xb, offset=offset, num_lv = num.lv, family=familyn, extra=extra, method=0, model=1, random=randoml, zetastruc = ifelse(zeta.struc=="species",1,0))
# data = list(y = y, x = Xd, xr=xr, xb=xb, offset=offset, num_lv = num.lv, family=familyn, extra=extra, method=0, model=1, random=randoml, zetastruc = ifelse(zeta.struc=="species",1,0))
# data = list(y = y, x = Xd, xr=xr, xb=xb, offset=offset, num_lv = num.lv, family=familyn, extra=extra, method=0, model=1, random=randoml, zetastruc = ifelse(zeta.struc=="species",1,0))
# parameters = list(r0=r1, b = b1, B=B1, Br=Br1, lambda = lambda1, u = u1, lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=lg_sigma1, Au=Au1, Abb=Abb1, zeta=zeta)
# parameters = list(r0=r1, b = b1, B=B1, Br=Br1, lambda = 0,u = matrix(0), lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=lg_sigma1, Au=0, Abb=Abb1, zeta=zeta)
# parameters = list(r0=r1, b = b1, B=B1, Br=Br1, lambda = lambda1, u = u1, lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=0, Au=Au1, Abb=Abb1, zeta=zeta)
parameter.list = list(r0=r1, b = b1, B=B1, Br=Br1, lambda = lambda1, lambda2 = t(lambda2), u = u1, lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=lg_sigma1, Au=Au, Abb=Abb, zeta=zeta)
# if(nlvr>0 || !is.null(randomX)){
# if(nlvr>0){
# parameter.list = list(r0=r1, b = b1, B=B1, Br=Br1, lambda = lambda1, u = u1, lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=lg_sigma1, Au=Au1, Abb=Abb1, zeta=zeta)
# } else {
# parameter.list = list(r0=r1, b = b1, B=B1, Br=Br, lambda = 0,u = matrix(0), lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=lg_sigma1, Au=0, Abb=Abb1, zeta=zeta)
# }
# } else {
# parameter.list = list(r0=r1, b = b1, B=B1, Br=Br1, lambda = lambda1, u = u1, lg_phi=lg_phi1, sigmaB=sigmaB1, sigmaij=sigmaij1, log_sigma=0, Au=Au1, Abb=Abb1, zeta=zeta)
# }
data.list = list(y = y, x = Xd, xr=xr, xb=xb, offset=offset, num_lv = num.lv, quadratic = 1, family=familyn, extra=extra, method=0, model=1, random=randoml, zetastruc = ifelse(zeta.struc=="species",1,0))
objr <- TMB::MakeADFun(
data = data.list, silent=!trace,
parameters = parameter.list, map = map.list,
inner.control=list(mgcmax = 1e+200,maxit = 1000),
DLL = "gllvm")
if(optimizer=="nlminb") {
timeo <- system.time(optr <- try(nlminb(objr$par, objr$fn, objr$gr,control = list(rel.tol=reltol,iter.max=maxit,eval.max=maxit)),silent = TRUE))
}
if(optimizer=="optim") {
timeo <- system.time(optr <- try(optim(objr$par, objr$fn, objr$gr,method = "BFGS",control = list(reltol=reltol,maxit=maxit),hessian = FALSE),silent = TRUE))
}
#quick check to see if something actually happened
flag <- 1
if(all(round(lambda2,0)==round(matrix(abs(optr$par[names(optr$par)=="lambda2"]),byrow=T,ncol=num.lv,nrow=p),0))){
flag <- 0
warning("Full quadratic model did not properly converge or all quadratic coefficients are close to zero. Try changing 'start.struc' in 'control.start'. /n")
}
if(inherits(optr, "try-error") || flag == 0){optr <- optr1; objr <- objr1; quadratic <- "LV";}
}
param <- objr$env$last.par.best
if(family %in% c("negative.binomial", "tweedie", "gaussian", "gamma")) {
phis=exp(param[names(param)=="lg_phi"])
}
if(family=="ZIP") {
lp0 <- param[names(param)=="lg_phi"]; out$lp0=lp0
phis <- exp(lp0)/(1+exp(lp0));#log(phis); #
}
if(family == "ordinal"){
zetas <- param[names(param)=="zeta"]
if(zeta.struc=="species"){
zetanew <- matrix(NA,nrow=p,ncol=K)
idx<-0
for(j in 1:ncol(y)){
k<-max(y[,j])-2
if(k>0){
for(l in 1:k){
zetanew[j,l+1]<-zetas[idx+l]
}
}
idx<-idx+k
}
zetanew[,1] <- 0
row.names(zetanew) <- colnames(y00); colnames(zetanew) <- paste(min(y):(max(y00)-1),"|",(min(y00)+1):max(y00),sep="")
}else{
zetanew <- c(0,zetas)
names(zetanew) <- paste(min(y00):(max(y00)-1),"|",(min(y00)+1):max(y00),sep="")
}
zetas<-zetanew
out$y<-y00
}
bi<-names(param)=="b"
Bi<-names(param)=="B"
li<-names(param)=="lambda"
li2 <- names(param)=="lambda2"
ui<-names(param)=="u"
if(nlvr > 0){
lvs <- (matrix(param[ui],n,nlvr))
theta <- matrix(0,p,num.lv)
if(p>1) {
theta[lower.tri(theta,diag=TRUE)] <- param[li];
if(quadratic!=FALSE){
theta<-cbind(theta,matrix(-abs(param[li2]),ncol=num.lv,nrow=p,byrow=T))
}
} else {theta <- c(param[li],-abs(param[li2]))}
# diag(theta) <- exp(diag(theta))#!!!
}
if(row.eff!=FALSE) {
ri <- names(param)=="r0"
if(method=="LA" || row.eff=="random"){ row.params=param[ri] } else {row.params <- param[ri]}
if(row.eff=="random") {
row.params <- lvs[,1]; lvs<- as.matrix(lvs[,-1])
sigma<-exp(param["log_sigma"])[1]
if(nlvr>1 && dependent.row) sigma <- c(exp(param[names(param)=="log_sigma"])[1],(param[names(param)=="log_sigma"])[-1])
}
}
if(!is.null(randomX)){
Bri <- names(param)=="Br"
Br <- matrix(param[Bri],ncol(xb),p)
Sri <- names(param)=="sigmaB"
L <- diag(ncol(xb))
if(ncol(xb)>1){
sigmaB <- diag(exp(param[Sri]))
Srij <- names(param)=="sigmaij"
Sr <- param[Srij]
L[upper.tri(L)] <- Sr
D <- diag(diag(t(L)%*%L))
} else{
D <- 1
sigmaB <- (exp(param[Sri]))
}
sigmaB_ <- solve(sqrt(D))%*%(t(L)%*%L)%*%solve(sqrt(D))
sigmaB <- sigmaB%*%sigmaB_%*%t(sigmaB)
}
beta0 <- param[bi]
B <- param[Bi]
if(beta0com){
beta0=B[1]
B = B[-1]
cn<-colnames(Xd)
Xd<-as.matrix(Xd[,-1])
colnames(Xd)<-cn[-1]
}
new.loglik<-objr$env$value.best[1]
if((n.i==1 || out$logL > abs(new.loglik)) && is.finite(new.loglik) && !inherits(optr, "try-error") && new.loglik>0){ #
objrFinal<-objr1 <- objr; optrFinal<-optr1 <- optr;
out$logL <- new.loglik
if(num.lv > 0) {
out$lvs <- lvs
out$params$theta <- theta
rownames(out$lvs) <- rownames(out$y);
rownames(out$params$theta) <- colnames(out$y)
if(quadratic==FALSE)colnames(out$params$theta) <- colnames(out$lvs) <- paste("LV", 1:num.lv, sep="");
if(quadratic!=FALSE){
colnames(out$lvs) <- paste("LV", 1:num.lv, sep="");
colnames(out$params$theta)<- c(paste("LV", 1:num.lv, sep=""),paste("LV", 1:num.lv, "^2",sep=""));
}
}
if(!beta0com) names(beta0) <- colnames(out$y);
if(beta0com) names(beta0) <- "Community intercept";
out$params$beta0 <- beta0;
out$params$B <- B; names(out$params$B)=colnames(Xd)
if(row.eff!=FALSE) {
if(row.eff=="random"){
out$params$sigma <- sigma;
names(out$params$sigma) <- "sigma"
if(num.lv>0 && dependent.row) names(out$params$sigma) <- paste("sigma",c("",1:num.lv), sep = "")
}
out$params$row.params <- row.params;
names(out$params$row.params) <- rownames(out$y)
}
if(family %in% c("negative.binomial")) {
out$params$phi <- 1/phis; names(out$params$phi) <- colnames(out$y);
out$params$inv.phi <- phis; names(out$params$inv.phi) <- colnames(out$y);
}
if(family %in% c("gaussian","tweedie","gamma")) {
out$params$phi <- phis; names(out$params$phi) <- colnames(out$y);
}
if(family =="ZIP") {
out$params$phi <- phis; names(out$params$phi) <- colnames(out$y);
}
if (family == "ordinal") {
out$params$zeta <- zetas
}
if(!is.null(randomX)){
out$params$Br <- Br
out$params$sigmaB <- sigmaB
out$corr <- sigmaB_ #!!!!
rownames(out$params$Br) <- rownames(out$params$sigmaB) <- colnames(out$params$sigmaB) <- colnames(xb)
}
if(family == "binomial") out$link <- link;
out$row.eff <- row.eff
out$time <- timeo
out$start <- res
out$Power <- Power
pars <- optr$par
if(method=="VA" && num.lv>0){
param <- objr$env$last.par.best
Au <- param[names(param)=="Au"]
A <- array(0, dim=c(n, nlvr, nlvr))
for (d in 1:nlvr){
for(i in 1:n){
A[i,d,d] <- exp(Au[(d-1)*n+i]);
}
}
if(length(Au) > nlvr*n){
k <- 0;
for (c1 in 1:nlvr){
r <- c1 + 1;
while (r <= nlvr){
for(i in 1:n){
A[i,r,c1] <- Au[nlvr*n+k*n+i];
A[i,c1,r] <- A[i,r,c1];
}
k <- k+1; r <- r+1;
}
}
}
for(i in 1:n){
A[i,,] <- A[i,,]%*%t(A[i,,])
}
out$A <- A
}
if(method == "VA" && !is.null(randomX)){
Abb <- param[names(param) == "Abb"]
dr <- ncol(xb)
Ab <- array(0,dim=c(p,dr,dr))
for (d in 1:dr){
for(j in 1:p){
Ab[j,d,d] <- exp(Abb[(d-1)*p + j]);
}
}
if(length(Abb)>dr*p){
k <- 0;
for (c1 in 1:dr){
r <- c1+1;
while (r <= dr){
for(j in 1:p){
Ab[j,r,c1] <- Abb[dr*p+k*p+j];
Ab[j,c1,r] <- Ab[j,r,c1];
}
k <- k+1; r <- r+1;
}
}
}
for(j in 1:p){
Ab[j,,] <- Ab[j,,]%*%t(Ab[j,,])
}
out$Ab <- Ab
}
}
n.i <- n.i+1;
}
if(is.null(formula1)){ out$formula <- formula} else {out$formula <- formula1}
out$Xrandom <- xb
out$D <- Xd
out$TMBfn <- objrFinal
out$TMBfn$par <- optrFinal$par #ensure params in this fn take final values
out$convergence <- optrFinal$convergence == 0
out$quadratic <- quadratic
out$logL <- -out$logL
out$zeta.struc <- zeta.struc
out$beta0com <- beta0com
if(method == "VA"){
if(num.lv > 0) out$logL = out$logL + n*0.5*num.lv
if(row.eff == "random") out$logL = out$logL + n*0.5
if(!is.null(randomX)) out$logL = out$logL + p*0.5*ncol(xb)
if(family=="gaussian") {
out$logL <- out$logL - n*p*log(pi)/2
}
}
tr<-try({
if(sd.errors && is.finite(out$logL)) {
if(trace) cat("Calculating standard errors for parameters...\n")
out$TMB <- TRUE
# out <- c(out, se.gllvm(out))
if(method == "VA"){
sdr <- objrFinal$he(optrFinal$par)
}
if(method == "LA"){
sdr <- optimHess(optrFinal$par, objrFinal$fn, objrFinal$gr,control = list(reltol=reltol,maxit=maxit))
}
m <- dim(sdr)[1]; incl <- rep(TRUE,m); incld <- rep(FALSE,m)
incl[names(objrFinal$par)=="Abb"] <- FALSE;
incl[names(objrFinal$par)=="Au"] <- FALSE;
if(quadratic == FALSE){incl[names(objrFinal$par)=="lambda2"]<-FALSE}
if(nlvr > 0) incld[names(objrFinal$par)=="Au"] <- TRUE
if(beta0com){
incl[names(objrFinal$par)=="b"] <- FALSE
}
if(row.eff=="random") {
incl[names(objrFinal$par)=="r0"] <- FALSE; incld[names(objrFinal$par)=="r0"] <- FALSE
} else {
incl[names(objrFinal$par)=="log_sigma"] <- FALSE
}
if(row.eff==FALSE) incl[names(objrFinal$par)=="r0"] <- FALSE
if(row.eff=="fixed") incl[1] <- FALSE
if(is.null(randomX)) {
incl[names(objrFinal$par)%in%c("Br","sigmaB","sigmaij")] <- FALSE
} else {
incl[names(objrFinal$par)=="Abb"] <- FALSE; incld[names(objrFinal$par)=="Abb"] <- TRUE
incl[names(objrFinal$par)=="Br"] <- FALSE; incld[names(objrFinal$par)=="Br"] <- TRUE
if(NCOL(xb)==1) incl[names(objrFinal$par) == "sigmaij"] <- FALSE
}
incl[names(objrFinal$par)=="Au"] <- FALSE; if(num.lv>0) incld[names(objrFinal$par)=="Au"] <- TRUE
incl[names(objrFinal$par)=="u"] <- FALSE; incld[names(objrFinal$par)=="u"] <- TRUE
if(familyn==0 || familyn==2 || familyn==7 || familyn==8) incl[names(objrFinal$par)=="lg_phi"] <- FALSE
if(familyn!=7) incl[names(objrFinal$par)=="zeta"] <- FALSE
if(familyn==7) incl[names(objrFinal$par)=="zeta"] <- TRUE
if(nlvr==0){
incl[names(objrFinal$par)=="u"] <- FALSE;
incld[names(objrFinal$par)=="u"] <- FALSE;
incl[names(objrFinal$par)=="lambda"] <- FALSE;
incl[names(objrFinal$par)=="lambda2"] <- FALSE;
incl[names(objrFinal$par)=="Au"] <- FALSE;
}
if(method=="LA" || (num.lv==0 && (row.eff!="random" && is.null(randomX)))){
incl[names(objrFinal$par)=="Au"] <- FALSE;
covM <- try(MASS::ginv(sdr[incl,incl]))
se <- try(sqrt(diag(abs(covM))))
if(num.lv > 0 || row.eff == "random" || !is.null(randomX)) {
sd.random <- sdrandom(objrFinal, covM, incl)
prediction.errors <- list()
if(!is.null(randomX)){
prediction.errors$Br <- matrix(diag(as.matrix(sd.random))[1:(ncol(xb)*ncol(y))], ncol(xb), ncol(y));
sd.random <- sd.random[-(1:(ncol(xb)*ncol(y))),-(1:(ncol(xb)*ncol(y)))]
}
if(row.eff=="random"){
prediction.errors$row.params <- diag(as.matrix(sd.random))[1:n];
sd.random <- sd.random[-(1:n),-(1:n)]
}
if(num.lv > 0){
cov.lvs <- array(0, dim = c(n, num.lv, num.lv))
for (i in 1:n) {
cov.lvs[i,,] <- as.matrix(sd.random[(0:(num.lv-1)*n+i),(0:(num.lv-1)*n+i)])
}
prediction.errors$lvs <- cov.lvs
}
out$prediction.errors <- prediction.errors
}
} else {
A.mat <- sdr[incl,incl] # a x a
D.mat <- sdr[incld,incld] # d x d
B.mat <- sdr[incl,incld] # a x d
cov.mat.mod <- try(MASS::ginv(A.mat-B.mat%*%solve(D.mat)%*%t(B.mat)),silent=T)
se <- sqrt(diag(abs(cov.mat.mod)))
incla<-rep(FALSE, length(incl))
incla[names(objrFinal$par)=="u"] <- TRUE
out$Hess <- list(Hess.full=sdr, incla = incla, incl=incl, incld=incld, cov.mat.mod=cov.mat.mod)
}
if(row.eff=="fixed") {
se.row.params <- c(0,se[1:(n-1)]);
names(se.row.params) = rownames(out$y); se <- se[-(1:(n-1))]
}
if(beta0com){
se.beta0 <- se[1]; se <- se[-1];
} else {
se.beta0 <- se[1:p]; se <- se[-(1:p)];
}
se.B <- se[1:length(B)]; se <- se[-(1:length(B))];
if(num.lv>0) {
se.theta <- matrix(0,p,num.lv); se.theta[lower.tri(se.theta, diag = TRUE)]<-se[1:(p * num.lv - sum(0:(num.lv-1)))];
colnames(se.theta) <- paste("LV", 1:num.lv, sep="");
rownames(se.theta) <- colnames(out$y)
out$sd$theta <- se.theta; se <- se[-(1:(p * num.lv - sum(0:(num.lv-1))))];
# diag(out$sd$theta) <- diag(out$sd$theta)*diag(out$params$theta) !!!
if(quadratic==TRUE){
se.lambdas2 <- matrix(se[1:(p * num.lv)], p, num.lv, byrow = T)
colnames(se.lambdas2) <- paste("LV", 1:num.lv, "^2", sep = "")
se <- se[-(1:(num.lv*p))]
out$sd$theta <- cbind(out$sd$theta,se.lambdas2)
}else if(quadratic=="LV"){
se.lambdas2 <- matrix(se[1:num.lv], p, num.lv, byrow = T)
colnames(se.lambdas2) <- paste("LV", 1:num.lv, "^2", sep = "")
se <- se[-(1:num.lv)]
out$sd$theta <- cbind(out$sd$theta,se.lambdas2)
}
}
out$sd$beta0 <- se.beta0;
if(!beta0com){ names(out$sd$beta0) <- colnames(out$y);}
out$sd$B <- se.B; names(out$sd$B) <- colnames(Xd)
if(row.eff=="fixed") {out$sd$row.params <- se.row.params}
if(family %in% c("negative.binomial")) {
se.lphis <- se[1:p]; out$sd$inv.phi <- se.lphis*out$params$inv.phi;
out$sd$phi <- se.lphis*out$params$phi;
names(out$sd$inv.phi) <- names(out$sd$phi) <- colnames(y); se <- se[-(1:p)]
}
if(family %in% c("gaussian","tweedie","gamma")) {
se.lphis <- se[1:p];
out$sd$phi <- se.lphis*out$params$phi;
names(out$sd$phi) <- colnames(y); se <- se[-(1:p)]
}
if(family %in% c("ZIP")) {
se.phis <- se[1:p];
out$sd$phi <- se.phis*exp(lp0)/(1+exp(lp0))^2;#
names(out$sd$phi) <- colnames(y); se <- se[-(1:p)]
}
if(!is.null(randomX)){
nr <- ncol(xb)
out$sd$sigmaB <- se[1:ncol(xb)]*c(sqrt(diag(out$params$sigmaB)));
names(out$sd$sigmaB) <- c(paste("sd",colnames(xb),sep = "."))
se <- se[-(1:ncol(xb))]
if(nr>1){
out$sd$corrpar <- se[1:(nr*(nr-1)/2)]
se <- se[-(1:(nr*(nr-1)/2))]
}
}
if(row.eff=="random") {
out$sd$sigma <- se[1:length(out$params$sigma)]*c(out$params$sigma[1],rep(1,length(out$params$sigma)-1));
names(out$sd$sigma) <- "sigma";
se=se[-(1:(length(out$params$sigma)))]
}
if(family %in% c("ordinal")){
se.zetanew <- se.zetas <- se;
if(zeta.struc == "species"){
se.zetanew <- matrix(NA,nrow=p,ncol=K)
idx<-0
for(j in 1:ncol(y)){
k<-max(y[,j])-2
if(k>0){
for(l in 1:k){
se.zetanew[j,l+1]<-se.zetas[idx+l]
}
}
idx<-idx+k
}
se.zetanew[,1] <- 0
out$sd$zeta <- se.zetanew
row.names(out$sd$zeta) <- colnames(y00); colnames(out$sd$zeta) <- paste(min(y00):(max(y00)-1),"|",(min(y00)+1):max(y00),sep="")
}else{
se.zetanew <- c(0, se.zetanew)
out$sd$zeta <- se.zetanew
names(out$sd$zeta) <- paste(min(y00):(max(y00)-1),"|",(min(y00)+1):max(y00),sep="")
}
}
}
})
if(inherits(tr, "try-error")) { cat("Standard errors for parameters could not be calculated.\n") }
return(out)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download_french_data.R
\name{print.french_dataset}
\alias{print.french_dataset}
\title{Generic print method for objects of class \code{french_dataset}}
\usage{
\method{print}{french_dataset}(x, ...)
}
\arguments{
\item{x}{an object of class \code{french_dataset}}
\item{...}{other arguments passed to \code{print()}}
}
\description{
Prints an object of class \code{french_dataset}
}
\examples{
\dontrun{
ff_3f <- download_french_data('Fama/French 3 Factors')
ff_3f
}
}
| /man/print.french_dataset.Rd | permissive | minghao2016/frenchdata | R | false | true | 555 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download_french_data.R
\name{print.french_dataset}
\alias{print.french_dataset}
\title{Generic print method for objects of class \code{french_dataset}}
\usage{
\method{print}{french_dataset}(x, ...)
}
\arguments{
\item{x}{an object of class \code{french_dataset}}
\item{...}{other arguments passed to \code{print()}}
}
\description{
Prints an object of class \code{french_dataset}
}
\examples{
\dontrun{
ff_3f <- download_french_data('Fama/French 3 Factors')
ff_3f
}
}
|
# Crops!
# Load libraries -----
library(tidyverse)
library(ggimage)
library(treemapify)
library(ggtext)
library(ochRe)
library(extrafont)
library(patchwork)
# Get data -----
# Download the TidyTuesday data set for 2020, week 36 (global crop yields).
# NOTE(review): requires network access at run time.
tt_data <- tidytuesdayR::tt_load(2020, 36)
tidytuesdayR::readme(tt_data)
# Set theme across plots ----
# Dark "earth" brown used as the background colour everywhere.
earthCol <- "#3a2b1f"
# To get colours used in plot with 8 levels: scales::show_col(ochre_pal("mccrea")(8))
accentCol <- "#D1C0AE" # Potato colour
# Shared ggplot2 theme for the crop graphics: everything sits on a dark
# "earth" background with light "potato"-coloured text set in the
# EngraversGothic BT face. Built on theme_minimal(), with the overrides
# below swapped in via %+replace%.
theme_crops <- function() {
  overrides <- theme(
    plot.background = element_rect(fill = earthCol, colour = earthCol),
    panel.grid = element_line(color = earthCol),
    panel.background = element_rect(fill = earthCol, colour = earthCol),
    legend.background = element_rect(fill = earthCol, colour = earthCol),
    text = element_text(colour = accentCol, family = "EngraversGothic BT"),
    plot.title = element_text(hjust = 0.5, size = 24),
    plot.subtitle = element_text(hjust = 0.5, size = 18),
    axis.text = element_text(color = accentCol, size = 14),
    axis.title = element_text(color = accentCol, size = 16),
    # Facet strip labels get extra top/bottom breathing room.
    strip.text = element_text(colour = accentCol, size = 16,
                              margin = margin(1, 0, 0.5, 0, "cm")),
    plot.caption = element_text(hjust = 0.5, size = 14),
    # Extra right padding stops patchwork shifting everything rightwards.
    plot.margin = margin(t = 1, r = 3, b = 1, l = 2, "cm")
  )
  theme_minimal() %+replace% overrides
}
# Tree map -----
# Reshape the yields table into one long tibble of French crop yields:
# one row per (year, crop), with the "(tonnes per hectare)" suffix
# stripped from the crop names and column names lower-cased.
frenchCrops <- tt_data$key_crop_yields %>%
filter(Entity == "France") %>%
# turn into long format
pivot_longer(cols = 4:last_col(),
names_to = "crop",
values_to = "crop_production") %>%
mutate(crop = str_remove_all(crop, " \\(tonnes per hectare\\)")) %>%
set_names(nm = names(.) %>% tolower()) %>%
filter(!is.na(crop_production)) %>%
# doing factor transformation here to exclude unused levels
mutate(crop = factor(crop, levels = unique(crop)))
# Treemap comparing crop proportions in the first (1961) and last (2018)
# recorded years, one facet per year; facet_grid(drop = F) keeps crops
# absent in one year so both panels share the same fill mapping.
treeMap <-
ggplot(filter(frenchCrops, year %in% c(max(year), min(year))),
aes(area = crop_production, fill = crop)) +
geom_treemap(show.legend = F) +
geom_treemap_text(aes(label=crop, family = "EngraversGothic BT"),
color=earthCol, place = "centre") +
facet_grid(~year, drop = F) +
scale_fill_ochre(palette="mccrea") +
labs(title = "Proportion of Different Crops Produced in France\n1961 vs. 2018",
subtitle = "\nRice and Peas lost out to Soybeans and Maize") +
theme_crops()
# Tractor plot ----
# Data frames for the decorative layers of the line chart: one tractor
# icon per crop just past the right edge of the x axis (8 crops assumed),
# and two free-floating text labels.
tIcon <- "../making-of/temp/tractor.png"
tractors <- data.frame(x = rep(2020.5, 8),
y = filter(frenchCrops, year == 2018)$crop_production + 2, # for trajectory alignment
crop = filter(frenchCrops, year == 2018)$crop,
image = rep(tIcon, 8))
# Positions of the "Potatoes" / "Everything else" annotations.
texts <-
tibble(
year = c(2015, 2008),
crop_production = c(30, 15),
crop = c("Potatoes", "Potatoes"),
text = c(
'Potatoes',
'Everything else'))
# Line chart of yield per crop over time, decorated with tractor icons at
# the line ends, two text boxes, and curved arrows pointing from the text
# to the relevant lines.
tractorPlot <- ggplot(frenchCrops,
aes(x = year, y = crop_production, colour = crop)) +
# Colours were otherwise reversed between plots - not sure why!!
scale_colour_ochre(palette="mccrea", reverse = T) +
geom_line(size = 2, linetype = "twodash",
show.legend = T, na.rm = T) +
geom_image(data = tractors,
aes(image = image, x = x, y = y),
size = 0.08,
angle = 0) +
geom_textbox(data = texts,
aes(year, crop_production,
label = text),
vjust = 0.5,
colour = accentCol,
box.colour = earthCol,
size = 5,
fill = earthCol,
family = "EngraversGothic BT",
maxwidth = unit(8, "lines"),
hjust = .5,
show.legend = F) +
# Arrows from the annotation boxes to the potato line / the other lines.
annotate("curve", x = 2005, xend = 2001, y = 30, yend = 34, curvature = -.3,
size = .75, arrow = arrow(length = unit(2, "mm")), colour = accentCol) +
annotate("curve", x = 1998, xend = 1992, y = 15, yend = 10, curvature = 0.3,
size = .75, arrow = arrow(length = unit(2, "mm")), colour = accentCol) +
xlab("Year") +
ylab("Crop production (tonnes per hectare)") +
labs(title = "\nPotatoes are the steady top of the crops",
subtitle = "\nFrance has consistently produced 4 times more tonnes of potatoes\nthan the next leading crop since 1961.",
caption = "
@cararthompson | #TidyTuesday | Source: Our World In Data") +
theme_crops() +
theme(legend.position = "none")
# Export image ----
# using {patchwork}
# Stack the treemap over the line chart and write the combined figure.
p <- treeMap / tractorPlot
ggsave(p, filename = "../plots/202009_crops.png", height = 14, width = 8.5, dpi = 400)
| /scripts/202009_crops.R | no_license | umardhiah/tidytuesdays | R | false | false | 4,812 | r | # Crops!
# Load libraries -----
library(tidyverse)
library(ggimage)
library(treemapify)
library(ggtext)
library(ochRe)
library(extrafont)
library(patchwork)
# Get data -----
# Download the TidyTuesday data set for 2020, week 36 (global crop yields).
# NOTE(review): requires network access at run time.
tt_data <- tidytuesdayR::tt_load(2020, 36)
tidytuesdayR::readme(tt_data)
# Set theme across plots ----
# Dark "earth" brown used as the background colour everywhere.
earthCol <- "#3a2b1f"
# To get colours used in plot with 8 levels: scales::show_col(ochre_pal("mccrea")(8))
accentCol <- "#D1C0AE" # Potato colour
# Shared ggplot2 theme for the crop graphics: everything sits on a dark
# "earth" background with light "potato"-coloured text set in the
# EngraversGothic BT face. Built on theme_minimal(), with the overrides
# below swapped in via %+replace%.
theme_crops <- function() {
  overrides <- theme(
    plot.background = element_rect(fill = earthCol, colour = earthCol),
    panel.grid = element_line(color = earthCol),
    panel.background = element_rect(fill = earthCol, colour = earthCol),
    legend.background = element_rect(fill = earthCol, colour = earthCol),
    text = element_text(colour = accentCol, family = "EngraversGothic BT"),
    plot.title = element_text(hjust = 0.5, size = 24),
    plot.subtitle = element_text(hjust = 0.5, size = 18),
    axis.text = element_text(color = accentCol, size = 14),
    axis.title = element_text(color = accentCol, size = 16),
    # Facet strip labels get extra top/bottom breathing room.
    strip.text = element_text(colour = accentCol, size = 16,
                              margin = margin(1, 0, 0.5, 0, "cm")),
    plot.caption = element_text(hjust = 0.5, size = 14),
    # Extra right padding stops patchwork shifting everything rightwards.
    plot.margin = margin(t = 1, r = 3, b = 1, l = 2, "cm")
  )
  theme_minimal() %+replace% overrides
}
# Tree map -----
# Reshape the yields table into one long tibble of French crop yields:
# one row per (year, crop), with the "(tonnes per hectare)" suffix
# stripped from the crop names and column names lower-cased.
frenchCrops <- tt_data$key_crop_yields %>%
filter(Entity == "France") %>%
# turn into long format
pivot_longer(cols = 4:last_col(),
names_to = "crop",
values_to = "crop_production") %>%
mutate(crop = str_remove_all(crop, " \\(tonnes per hectare\\)")) %>%
set_names(nm = names(.) %>% tolower()) %>%
filter(!is.na(crop_production)) %>%
# doing factor transformation here to exclude unused levels
mutate(crop = factor(crop, levels = unique(crop)))
# Treemap comparing crop proportions in the first (1961) and last (2018)
# recorded years, one facet per year; facet_grid(drop = F) keeps crops
# absent in one year so both panels share the same fill mapping.
treeMap <-
ggplot(filter(frenchCrops, year %in% c(max(year), min(year))),
aes(area = crop_production, fill = crop)) +
geom_treemap(show.legend = F) +
geom_treemap_text(aes(label=crop, family = "EngraversGothic BT"),
color=earthCol, place = "centre") +
facet_grid(~year, drop = F) +
scale_fill_ochre(palette="mccrea") +
labs(title = "Proportion of Different Crops Produced in France\n1961 vs. 2018",
subtitle = "\nRice and Peas lost out to Soybeans and Maize") +
theme_crops()
# Tractor plot ----
# Data frames for the decorative layers of the line chart: one tractor
# icon per crop just past the right edge of the x axis (8 crops assumed),
# and two free-floating text labels.
tIcon <- "../making-of/temp/tractor.png"
tractors <- data.frame(x = rep(2020.5, 8),
y = filter(frenchCrops, year == 2018)$crop_production + 2, # for trajectory alignment
crop = filter(frenchCrops, year == 2018)$crop,
image = rep(tIcon, 8))
# Positions of the "Potatoes" / "Everything else" annotations.
texts <-
tibble(
year = c(2015, 2008),
crop_production = c(30, 15),
crop = c("Potatoes", "Potatoes"),
text = c(
'Potatoes',
'Everything else'))
# Line chart of yield per crop over time, decorated with tractor icons at
# the line ends, two text boxes, and curved arrows pointing from the text
# to the relevant lines.
tractorPlot <- ggplot(frenchCrops,
aes(x = year, y = crop_production, colour = crop)) +
# Colours were otherwise reversed between plots - not sure why!!
scale_colour_ochre(palette="mccrea", reverse = T) +
geom_line(size = 2, linetype = "twodash",
show.legend = T, na.rm = T) +
geom_image(data = tractors,
aes(image = image, x = x, y = y),
size = 0.08,
angle = 0) +
geom_textbox(data = texts,
aes(year, crop_production,
label = text),
vjust = 0.5,
colour = accentCol,
box.colour = earthCol,
size = 5,
fill = earthCol,
family = "EngraversGothic BT",
maxwidth = unit(8, "lines"),
hjust = .5,
show.legend = F) +
# Arrows from the annotation boxes to the potato line / the other lines.
annotate("curve", x = 2005, xend = 2001, y = 30, yend = 34, curvature = -.3,
size = .75, arrow = arrow(length = unit(2, "mm")), colour = accentCol) +
annotate("curve", x = 1998, xend = 1992, y = 15, yend = 10, curvature = 0.3,
size = .75, arrow = arrow(length = unit(2, "mm")), colour = accentCol) +
xlab("Year") +
ylab("Crop production (tonnes per hectare)") +
labs(title = "\nPotatoes are the steady top of the crops",
subtitle = "\nFrance has consistently produced 4 times more tonnes of potatoes\nthan the next leading crop since 1961.",
caption = "
@cararthompson | #TidyTuesday | Source: Our World In Data") +
theme_crops() +
theme(legend.position = "none")
# Export image ----
# using {patchwork}
p <- treeMap / tractorPlot
ggsave(p, filename = "../plots/202009_crops.png", height = 14, width = 8.5, dpi = 400)
|
#' Marginal (conditional) screening coefficients for SIS.
#'
#' @param x          n x p design matrix.
#' @param y          Response: a vector for gaussian/binomial/poisson, or a
#'                   survival object/matrix for family = "cox" (only its first
#'                   column is used in the correlation screening step).
#' @param condind    Indices of conditioning variables (NULL for none).
#' @param family     One of "gaussian", "binomial", "poisson", "cox".
#' @param null.model If TRUE, permute rows of x (or of the candidate columns
#'                   when conditioning) to build a null reference distribution.
#' @param iterind    Iteration index; 0 triggers plain correlation screening.
#' @return Vector of absolute marginal coefficients, one per candidate column.
margcoef <- function(x, y, condind = NULL, family, null.model = FALSE, iterind) {
  n <- nrow(x)
  p <- ncol(x)
  ones <- rep(1, n)
  # seq_len() is safe for p == 0, unlike 1:p which would yield c(1, 0).
  candind <- setdiff(seq_len(p), condind)
  if (iterind == 0) {
    # First pass: rank predictors by absolute marginal correlation.
    if (family == "cox") {
      # Cox: correlate against the observed time (first column of y).
      margcoef <- abs(cor(x, y[, 1]))
    } else {
      margcoef <- abs(cor(x, y))
    }
  } else {
    if (null.model) {
      # Break the x-y association by permuting rows, so the resulting
      # coefficients form a null distribution for thresholding.
      if (is.null(condind)) {
        x <- x[sample(n), ]
      } else {
        # Keep the conditioning columns intact; permute only candidates.
        x[, candind] <- x[sample(n), candind]
      }
    }
    # One marginal fit per candidate column (see mg()).
    margcoef <- abs(sapply(candind, mg, x, y, ones, family, condind))
  }
  margcoef
}
#' Marginal regression coefficient of one predictor.
#'
#' Fits y on (intercept, x[, index], x[, condind]) and returns the coefficient
#' of x[, index] — i.e. its effect adjusted for the conditioning set.
#'
#' NOTE: the original signature declared self-referential defaults
#' (x = x, y = y, ...), which raise a "promise already under evaluation"
#' error if any argument is omitted. They are removed here; every caller
#' (sapply in margcoef) passes all arguments positionally, so the
#' interface is unchanged in practice.
#'
#' @param index   Column index of the candidate predictor.
#' @param x       n x p design matrix.
#' @param y       Response (survival object for family = "cox").
#' @param ones    Precomputed intercept column, rep(1, n).
#' @param family  One of "gaussian", "binomial", "poisson", "cox".
#' @param condind Indices of conditioning columns (NULL for none; x[, NULL]
#'                is a zero-column matrix, which cbind handles fine).
#' @return The (first) fitted coefficient of the candidate predictor.
mg <- function(index, x, y, ones, family, condind) {
  margfit <- switch(family,
    gaussian = coef(glm.fit(cbind(ones, x[, index], x[, condind]), y, family = gaussian()))[2],
    binomial = coef(glm.fit(cbind(ones, x[, index], x[, condind]), y, family = binomial()))[2],
    poisson  = coef(glm.fit(cbind(ones, x[, index], x[, condind]), y, family = poisson()))[2],
    # The Cox branch needs coxph() from the {survival} package; it is only
    # evaluated when family == "cox" (switch() is lazy).
    cox      = coef(coxph(y ~ cbind(x[, index], x[, condind])))[1]
  )
  margfit
}
| /SIS/R/margcoef.R | no_license | ingted/R-Examples | R | false | false | 1,118 | r | margcoef <- function(x, y, condind = NULL, family, null.model = FALSE, iterind){
n = dim(x)[1]; p = dim(x)[2]; ones = rep(1, n)
candind = setdiff(1:p, condind)
if(iterind == 0){
if(family == "cox")
margcoef = abs(cor(x,y[,1]))
else
margcoef = abs(cor(x,y))
}else{
if(null.model == TRUE){
if(is.null(condind) == TRUE) {x = x[sample(1:n),]}
if(is.null(condind) == FALSE) {x[,candind] = x[sample(1:n),candind]}
}
margcoef = abs(sapply(candind, mg, x, y, ones, family, condind))
}
return(margcoef)
}
# Marginal regression coefficient of predictor `index`, adjusted for the
# optional conditioning columns `condind`: fits y on
# (intercept, x[,index], x[,condind]) and returns the candidate's coefficient.
# NOTE(review): the defaults `x=x`, `y=y`, `ones=ones`, ... are
# self-referential and error if any argument is omitted — callers must
# always pass every argument (margcoef's sapply does so positionally).
# NOTE(review): the "cox" branch requires coxph() from the {survival}
# package; switch() evaluates branches lazily, so other families work
# without it.
mg <- function(index, x=x, y=y, ones=ones, family=family, condind=condind){
margfit = switch(family, gaussian = coef(glm.fit(cbind(ones, x[,index], x[,condind]), y, family=gaussian()))[2],
binomial = coef(glm.fit(cbind(ones, x[,index], x[,condind]), y, family=binomial()))[2],
poisson = coef(glm.fit(cbind(ones, x[,index], x[,condind]), y, family=poisson()))[2],
cox = coef(coxph(y ~ cbind(x[,index], x[,condind])))[1]
)
# The assignment above is the function's last expression, so its value
# (the fitted coefficient) is returned.
}
|
# Argument vector consumed by the permutation pipeline in place of real
# command-line arguments. Presumed meanings (TODO confirm against the
# script that parses these):
#   -m            brain mask image
#   --set1        file listing the fear>calm contrast images
#   --setlabels1  covariate table (depression/anxiety, midpoint-5 coding)
#   --model       R file defining the free permutation model
#   --testvoxel   voxel index used for a single-voxel sanity check
#   --output      prefix for result images
#   --slurmN      number of SLURM array jobs; --permute: permutation count
cmdargs <- c("-m","mask.nii.gz", "--set1", "/mnt/stressdevlab/stress_pipeline/Group/FaceReactivity/NewNeuropoint/datafiles/setfilenames_fearGTcalm.txt",
"--setlabels1", "/mnt/stressdevlab/stress_pipeline/Group/FaceReactivity/NewNeuropoint/datafiles/depanxcov-midpoint5.csv",
"--model", "permute_free_model.R",
"--testvoxel", "10000",
"--output", "perm.free.dep_lead.fear/perm.free.dep_lead.fear.",
"--debugfile", "debug.Rdata",
"--slurmN", "60",
"--permute", "1000")
| /dep_lead.fear/perm.free.dep_lead.fear/readargs.R | no_license | jflournoy/sea_np_models | R | false | false | 561 | r |
# Pseudo command-line arguments for the permutation analysis (mask, input
# set + covariates, model file, output prefix, SLURM job count, number of
# permutations). Semantics assumed from the flag names — TODO confirm
# against the argument parser.
cmdargs <- c("-m","mask.nii.gz", "--set1", "/mnt/stressdevlab/stress_pipeline/Group/FaceReactivity/NewNeuropoint/datafiles/setfilenames_fearGTcalm.txt",
"--setlabels1", "/mnt/stressdevlab/stress_pipeline/Group/FaceReactivity/NewNeuropoint/datafiles/depanxcov-midpoint5.csv",
"--model", "permute_free_model.R",
"--testvoxel", "10000",
"--output", "perm.free.dep_lead.fear/perm.free.dep_lead.fear.",
"--debugfile", "debug.Rdata",
"--slurmN", "60",
"--permute", "1000")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bootcorrect.R
\name{bootcor}
\alias{bootcor}
\title{Bootstrap correction to obtain desired failure probability}
\usage{
bootcor(
ppdata,
cutoff,
numit = 1000,
tol = 0.02,
nxprob = 0.1,
intens = NULL,
covmatrix = NULL,
simulate = "intens",
radiusClust = NULL,
clustering = 5,
verbose = TRUE
)
}
\arguments{
\item{ppdata}{Observed spatial point process of class ppp.}
\item{cutoff}{Desired failure probability alpha, which is the probability of having
unobserved events outside the high-risk zone.}
\item{numit}{Number of iterations to perform (per tested value for cutoff). Default value is 1000.}
\item{tol}{Tolerance: acceptable difference between the desired failure probability and the fraction of
high-risk zones not covering all events. Default value is 0.02.}
\item{nxprob}{Probability of having unobserved events.
Default value is 0.1.}
\item{intens}{(optional) estimated intensity of the observed process (object of class "im",
see \code{\link[spatstat.explore]{density.ppp}}). If not given,
it will be estimated.}
\item{covmatrix}{(optional) Covariance matrix of the kernel of a normal distribution, only meaningful
if no intensity is given. If not given, it will be estimated.}
\item{simulate}{The type of simulation, can be one of \code{"thinning", "intens"} or \code{"clintens"}}
\item{radiusClust}{(optional) radius of the circles around the parent points in which the cluster
points are located. Only used for \code{simulate = "clintens"}.}
\item{clustering}{a value >= 1 which describes the amount of clustering; the
adjusted estimated intensity of the observed pattern is divided by
this value; it also is the parameter of the Poisson distribution
for the number of points per cluster. Only used for \code{simulate = "clintens"}.}
\item{verbose}{logical. Should information on tested values/progress be printed?}
}
\value{
An object of class bootcorr, which consists of a list of the final value for alpha (\code{alphastar})
and a data.frame \code{course} containing information on the simulation course, e.g. the tested values.
}
\description{
Simulation-based iterative procedure to correct for possible bias with respect to the
failure probability alpha
}
\details{
For a desired failure probability alpha, the corresponding parameter which is to use
when determining a high-risk zone is found in an iterative procedure. The simulation procedure
is the same as in \code{\link[highriskzone]{eval_method}}. In every iteration,
the number of high-risk zones with at least one unobserved event located outside is
compared with the desired failure probability. If necessary, the value of \code{cutoff} is
increased or decreased. The final value \code{alphastar} can then be used in
\code{\link[highriskzone]{det_hrz}}.
If there are restriction areas in the observation window, use \code{\link[highriskzone]{bootcor_restr}}
instead.
}
\examples{
\dontrun{
data(craterB)
set.seed(4321)
bc <- bootcor(ppdata=craterB, cutoff=0.2, numit=100, tol=0.02, nxprob=0.1)
bc
summary(bc)
plot(bc)
hrzbc <- det_hrz(craterB, type = "intens", criterion = "indirect",
cutoff = bc$alphastar, nxprob = 0.1)
}
}
\references{
Monia Mahling, Michael \enc{Höhle}{Hoehle} & Helmut \enc{Küchenhoff}{Kuechenhoff} (2013),
\emph{Determining high-risk zones for unexploded World War II bombs by using point process methodology.}
Journal of the Royal Statistical Society, Series C 62(2), 181-199.
Monia Mahling (2013),
\emph{Determining high-risk zones by using spatial point process methodology.}
Ph.D. thesis, Cuvillier Verlag \enc{Göttingen}{Goettingen},
available online: http://edoc.ub.uni-muenchen.de/15886/
Chapter 6
}
\seealso{
\code{\link[highriskzone]{det_hrz}}, \code{\link[highriskzone]{eval_method}}, \code{\link[highriskzone]{bootcor_restr}}
}
| /man/bootcor.Rd | no_license | cran/highriskzone | R | false | true | 3,967 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bootcorrect.R
\name{bootcor}
\alias{bootcor}
\title{Bootstrap correction to obtain desired failure probability}
\usage{
bootcor(
ppdata,
cutoff,
numit = 1000,
tol = 0.02,
nxprob = 0.1,
intens = NULL,
covmatrix = NULL,
simulate = "intens",
radiusClust = NULL,
clustering = 5,
verbose = TRUE
)
}
\arguments{
\item{ppdata}{Observed spatial point process of class ppp.}
\item{cutoff}{Desired failure probability alpha, which is the probability of having
unobserved events outside the high-risk zone.}
\item{numit}{Number of iterations to perform (per tested value for cutoff). Default value is 1000.}
\item{tol}{Tolerance: acceptable difference between the desired failure probability and the fraction of
high-risk zones not covering all events. Default value is 0.02.}
\item{nxprob}{Probability of having unobserved events.
Default value is 0.1.}
\item{intens}{(optional) estimated intensity of the observed process (object of class "im",
see \code{\link[spatstat.explore]{density.ppp}}). If not given,
it will be estimated.}
\item{covmatrix}{(optional) Covariance matrix of the kernel of a normal distribution, only meaningful
if no intensity is given. If not given, it will be estimated.}
\item{simulate}{The type of simulation, can be one of \code{"thinning", "intens"} or \code{"clintens"}}
\item{radiusClust}{(optional) radius of the circles around the parent points in which the cluster
points are located. Only used for \code{simulate = "clintens"}.}
\item{clustering}{a value >= 1 which describes the amount of clustering; the
adjusted estimated intensity of the observed pattern is divided by
this value; it also is the parameter of the Poisson distribution
for the number of points per cluster. Only used for \code{simulate = "clintens"}.}
\item{verbose}{logical. Should information on tested values/progress be printed?}
}
\value{
An object of class bootcorr, which consists of a list of the final value for alpha (\code{alphastar})
and a data.frame \code{course} containing information on the simulation course, e.g. the tested values.
}
\description{
Simulation-based iterative procedure to correct for possible bias with respect to the
failure probability alpha
}
\details{
For a desired failure probability alpha, the corresponding parameter which is to use
when determining a high-risk zone is found in an iterative procedure. The simulation procedure
is the same as in \code{\link[highriskzone]{eval_method}}. In every iteration,
the number of high-risk zones with at least one unobserved event located outside is
compared with the desired failure probability. If necessary, the value of \code{cutoff} is
increased or decreased. The final value \code{alphastar} can then be used in
\code{\link[highriskzone]{det_hrz}}.
If there are restriction areas in the observation window, use \code{\link[highriskzone]{bootcor_restr}}
instead.
}
\examples{
\dontrun{
data(craterB)
set.seed(4321)
bc <- bootcor(ppdata=craterB, cutoff=0.2, numit=100, tol=0.02, nxprob=0.1)
bc
summary(bc)
plot(bc)
hrzbc <- det_hrz(craterB, type = "intens", criterion = "indirect",
cutoff = bc$alphastar, nxprob = 0.1)
}
}
\references{
Monia Mahling, Michael \enc{Höhle}{Hoehle} & Helmut \enc{Küchenhoff}{Kuechenhoff} (2013),
\emph{Determining high-risk zones for unexploded World War II bombs by using point process methodology.}
Journal of the Royal Statistical Society, Series C 62(2), 181-199.
Monia Mahling (2013),
\emph{Determining high-risk zones by using spatial point process methodology.}
Ph.D. thesis, Cuvillier Verlag \enc{Göttingen}{Goettingen},
available online: http://edoc.ub.uni-muenchen.de/15886/
Chapter 6
}
\seealso{
\code{\link[highriskzone]{det_hrz}}, \code{\link[highriskzone]{eval_method}}, \code{\link[highriskzone]{bootcor_restr}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aspects.R
\name{is_asp}
\alias{is_asp}
\title{Aspect Check}
\usage{
is_asp(x)
}
\arguments{
\item{x}{Object to check.}
}
\value{
A boolean.
}
\description{
Checks whether the object is of class \code{aspects},
as returned by \code{\link[=asp]{asp()}}.
}
\examples{
\dontrun{
is_asp(1)
is_asp(asp(dist))
}
}
\keyword{internal}
| /man/is_asp.Rd | permissive | han-tun/g2r | R | false | true | 405 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aspects.R
\name{is_asp}
\alias{is_asp}
\title{Aspect Check}
\usage{
is_asp(x)
}
\arguments{
\item{x}{Object to check.}
}
\value{
A boolean.
}
\description{
Checks whether the object is of class \code{aspects},
as returned by \code{\link[=asp]{asp()}}.
}
\examples{
\dontrun{
is_asp(1)
is_asp(asp(dist))
}
}
\keyword{internal}
|
\name{R0-package}
\alias{R0-package}
\alias{R0}
\docType{package}
\title{
\packageTitle{R0}
}
\description{
\packageDescription{R0}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{R0}
\packageIndices{R0}
~~ An overview of how to use the package, including the ~~
~~ most important functions ~~
}
\author{
\packageAuthor{R0}
Maintainer: \packageMaintainer{R0}
}
\references{
~~ Literature or other references for background information ~~
}
~~ Optionally other standard keywords, one per line, from ~~
~~ file KEYWORDS in the R documentation directory ~~
\keyword{ package }
\seealso{
~~ Optional links to other man pages, e.g. ~~
~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
}
\examples{
~~ simple examples of the most important functions ~~
}
| /man/R0-package.Rd | no_license | OutbreakResources/R0 | R | false | false | 754 | rd | \name{R0-package}
\alias{R0-package}
\alias{R0}
\docType{package}
\title{
\packageTitle{R0}
}
\description{
\packageDescription{R0}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{R0}
\packageIndices{R0}
~~ An overview of how to use the package, including the ~~
~~ most important functions ~~
}
\author{
\packageAuthor{R0}
Maintainer: \packageMaintainer{R0}
}
\references{
~~ Literature or other references for background information ~~
}
~~ Optionally other standard keywords, one per line, from ~~
~~ file KEYWORDS in the R documentation directory ~~
\keyword{ package }
\seealso{
~~ Optional links to other man pages, e.g. ~~
~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
}
\examples{
~~ simple examples of the most important functions ~~
}
|
## Script for summarising and plotting data/mutations/annotations etc
## Get first the directories (samples may be in different directories)
library(RColorBrewer)
es = c("LP6005690-DNA_E02_vs_LP6005689-DNA_E02",
"LP6008280-DNA_F02_vs_LP6008264-DNA_F02",
"LP6008202-DNA_F01_vs_LP6008201-DNA_F01",
"LP6005935-DNA_C01_vs_LP6005934-DNA_C01",
"LP6008031-DNA_E03_vs_LP6008032-DNA_A04")
## For the mutations I just need to load them - because runOncodriveClust script concatenated the data from all samples
load("~/athena/data/OAC/Combined_ICGC_cohort_129_66_71/Rdata/muts_129_66_71_OACs_annovar_dbnsfp_oncodriveClust.Rdata")
## Save the data for 19,014
#muts = muts %>% subset(!is.na(entrez_19014))
#save(muts, file="~/athena/data/OAC/Combined_ICGC_cohort_129_66_71/Rdata/muts_129_66_71_OACs_annovar_dbnsfp_oncodriveClust_19014.Rdata")
## Plot the number of all mutations
samples2muts = muts %>% group_by(sample) %>% summarise(all_muts=n())
samples2muts$sample = factor(as.character(samples2muts$sample), levels = samples2muts$sample[order(samples2muts$all_muts, decreasing = F)])
sm = data.frame(type= names(summary(samples2muts$all_muts)),
value=unname(c(summary(samples2muts$all_muts))))
p = ggplot(samples2muts, aes(x=sample, y=all_muts)) +
geom_bar(stat = "identity") + xlab("samples") + ylab("Mutations (#)") +
theme_boss() +
theme(
axis.text.x=element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
axis.ticks.x = element_blank()
) +
scale_y_continuous(breaks = seq(0, max(samples2muts$all_muts), max(samples2muts$all_muts)/10)) +
annotation_custom(tableGrob(sm, cols = NULL, rows = NULL), xmin=100, xmax=150, ymin=150000, ymax=200000) +
ggtitle("All mutations (this sample order forced to all other plots)")
ns = c("nonsynonymous","stopgain","frameshift deletion","splicing","frameshift insertion","nonframeshift deletion","nonframeshift insertion","nonframeshift substitution","stoploss","frameshift substitution")
dam = c("nonsynonymous","frameshift deletion","frameshift insertion","frameshift substitution","nonframeshift deletion","nonframeshift insertion","nonframeshift substitution","splicing","stopgain","stoploss")
trunc = c("frameshift deletion","frameshift insertion","frameshift substitution","stopgain","stoploss") ## Always damaging==TRUE
non_trunc = c("nonsynonymous","splicing")
ns_vep=c("missense_variant", "splice_region_variant", "splice_donor_variant", "stop_gained", "splice_acceptor_variant", "stop_lost")
d = muts %>% count(sample, Func.refGene) %>% data.frame()
d = d %>% left_join(samples2muts) %>% mutate(perc=n/all_muts)
d$sample = factor(as.character(d$sample), levels = samples2muts$sample[order(samples2muts$all_muts, decreasing = F)])
## First plot check the fraction of exonic overall
n <- 15
qual_col_pals = brewer.pal.info[brewer.pal.info$category == 'qual',]
col_vector = unlist(mapply(brewer.pal, qual_col_pals$maxcolors, rownames(qual_col_pals)))
pie(rep(1,n), col=col_vector[1:n])
cols = c(col_vector[1:2], "red", col_vector[4:n])
p1 = ggplot(d,
aes(x=sample, y=perc, fill=Func.refGene)) +
geom_bar(stat = "identity") + xlab("samples") + ylab("Mutations (fraction)") +
scale_fill_manual(values = cols) +
theme_boss() +
theme(
axis.text.x=element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
axis.ticks.x = element_blank()
) + labs(fill='Effect') +
ggtitle("All mutations")
## Get distribution of the percentage of exonic
tb1 = rbind(muts %>% group_by(sample) %>% count(Func.refGene) %>% mutate(Func.refGene=ifelse(Func.refGene=="exonic", "exonic", "other")) %>%
group_by(sample, Func.refGene) %>% summarise(n=sum(n)) %>%left_join(samples2muts) %>% mutate(perc=n/all_muts) %>% subset(Func.refGene=="exonic") %>% .$perc %>% summary())
## Now check the categories of the exonic
samples2exonic = muts %>% subset(Func.refGene=="exonic") %>% group_by(sample) %>% summarise(exonic_muts=n())
samples2exonic$sample = factor(as.character(samples2exonic$sample), levels = samples2muts$sample[order(samples2muts$all_muts, decreasing = F)])
sm = data.frame(type= names(summary(samples2exonic$exonic_muts)),
value=unname(c(summary(samples2exonic$exonic_muts))))
p2 = ggplot(samples2exonic, aes(x=sample, y=exonic_muts)) +
geom_bar(stat = "identity") + xlab("samples") + ylab("Mutations (#)") +
theme_boss() +
theme(
axis.text.x=element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
axis.ticks.x = element_blank()
) +
scale_y_continuous(breaks = seq(0, max(samples2exonic$exonic_muts), max(samples2exonic$exonic_muts)/10)) +
annotation_custom(tableGrob(sm, cols = NULL, rows = NULL), xmin=100, xmax=150, ymin=700, ymax=900) +
ggtitle("Exonic mutations")
d = muts %>% subset(Func.refGene=="exonic") %>%count(sample, ExonicFunc.refGene) %>% data.frame()
d = d %>% left_join(samples2exonic) %>% mutate(perc=n/exonic_muts)
d$sample = factor(as.character(d$sample), levels = samples2muts$sample[order(samples2muts$all_muts, decreasing = F)])
p3 = ggplot(d ,
aes(x=sample, y=perc, fill=ExonicFunc.refGene)) +
geom_bar(stat = "identity") + xlab("samples") + ylab("Mutations (fraction)") +
scale_fill_manual(values = cols) +
theme_boss() +
theme(
axis.text.x=element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
axis.ticks.x = element_blank()
) + labs(fill='Effect') +
ggtitle("Exonic mutations")
## Now check the damaging
samples2damaging = muts %>% subset(damaging) %>% group_by(sample) %>% summarise(damaging_muts=n())
samples2damaging = samples2damaging %>% full_join(samples2muts%>%select(sample)) ## Not all samples have damaging mutations
samples2damaging$damaging_muts[is.na(samples2damaging$damaging_muts)] = 0
samples2damaging$sample = factor(as.character(samples2damaging$sample), levels = samples2muts$sample[order(samples2muts$all_muts, decreasing = F)])
sm = data.frame(type= names(summary(samples2damaging$damaging_muts)),
value=unname(c(summary(samples2damaging$damaging_muts))))
p4 = ggplot(samples2damaging, aes(x=sample, y=damaging_muts)) +
geom_bar(stat = "identity") + xlab("samples") + ylab("Mutations (#)") +
theme_boss() +
theme(
axis.text.x=element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
axis.ticks.x = element_blank()
) +
scale_y_continuous(breaks = seq(0, max(samples2damaging$damaging_muts), max(samples2damaging$damaging_muts)/10)) +
annotation_custom(tableGrob(sm, cols = NULL, rows = NULL), xmin=100, xmax=150, ymin=250, ymax=300) +
ggtitle("Damaging mutations")
## Now the gain of function mutations
samples2gof = muts %>% subset(oncodriveClust) %>% group_by(sample) %>% summarise(gof_muts=n())
samples2gof = samples2gof %>% full_join(samples2muts%>%select(sample)) ## Not all samples have damaging mutations
samples2gof$gof_muts[is.na(samples2gof$gof_muts)] = 0
samples2gof$sample = factor(as.character(samples2gof$sample), levels = samples2muts$sample[order(samples2muts$all_muts, decreasing = F)])
sm = data.frame(type= names(summary(samples2gof$gof_muts)),
value=unname(c(summary(samples2gof$gof_muts))))
p5 = ggplot(samples2gof, aes(x=sample, y=gof_muts)) +
geom_bar(stat = "identity") + xlab("samples") + ylab("Mutations (#)") +
theme_boss() +
theme(
axis.text.x=element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
axis.ticks.x = element_blank()
) +
scale_y_continuous(breaks = seq(0, max(samples2gof$gof_muts), 5)) +
annotation_custom(tableGrob(sm, cols = NULL, rows = NULL), xmin=10, xmax=60, ymin=10, ymax=15) +
ggtitle("Gain-of-function mutations")
grid.arrange(p, p1, p2, p3, p4, ncol=2)
grid.arrange(p, p1, p2, p3, p4, p5, ncol=2)
## CNVs
## Gather all CNVs
# mainDirs = c("~/data/OAC/71_OAC/ascat/",
# "~/data/OAC/87_OAC/66_ICGC/ascat/",
# "~/data/OAC/129_OAC/ascat/")
# Gather per-sample CNV calls: each sample directory holds a cnvs.Rdata file
# whose `cnvs` list contains the annotated CNV table under "df_cnvs_19014".
mainDirs = c("~/rosalind_lustre/mourikisa/data/OAC/87_OAC/21_literature/ascat/")
message("Getting CNVs...")
all_cnvs = data.frame()
count = 0
for(b in mainDirs){
samples = list.dirs(b, recursive = F)
for(s in samples){
cat(s, "\n")
fn = paste0(s, "/parsing_and_annotation/cnvs.Rdata")
# load() injects `cnvs` into this environment; it is overwritten on every
# iteration and again below when the combined table is saved.
load(fn)
# Sample name = last path component of the sample directory.
sname = unlist(strsplit(s, "/"))
sname = sname[length(sname)]
d = cnvs[["df_cnvs_19014"]]
# NOTE(review): rbind inside a loop grows the data frame quadratically;
# collecting into a list and bind_rows()-ing once would scale better.
all_cnvs = rbind(all_cnvs, d %>% mutate(sample=sname))
count = count +1
}
}
cat(paste0("Samples: ", count))
## Save raw data (re-using the name `cnvs` for the combined table)
cnvs = all_cnvs
save(cnvs, file="~/rosalind_lustre/mourikisa/data/OAC/87_OAC/21_literature/Rdata/cnvs_21_literature_OACs.Rdata")
samples2cnvs = all_cnvs %>% subset(!is.na(entrez_19014)) %>% select(sample, entrez_19014, CNV_type_corrected) %>% unique %>% count(sample, CNV_type_corrected)
all_samples = rbind(samples2cnvs%>%select(sample)%>%unique%>%mutate(CNV_type_corrected="Gain"), samples2cnvs%>%select(sample)%>%unique%>%mutate(CNV_type_corrected="Loss"))
samples2cnvs = samples2cnvs %>% full_join(all_samples)
samples2cnvs$n[is.na(samples2cnvs$n)] = 0
samples2cnvs = samples2cnvs %>% subset(!is.na(CNV_type_corrected))
sm1 = data.frame(type= names(summary(samples2cnvs$n[samples2cnvs$CNV_type_corrected=="Loss"])),
value=unname(c(summary(samples2cnvs$n[samples2cnvs$CNV_type_corrected=="Loss"]))))
sm2 = data.frame(type= names(summary(samples2cnvs$n[samples2cnvs$CNV_type_corrected=="Gain"])),
value=unname(c(summary(samples2cnvs$n[samples2cnvs$CNV_type_corrected=="Gain"]))))
p = ggplot(samples2cnvs %>% subset(n>0), aes(x=sample, y=n, fill=CNV_type_corrected)) +
geom_bar(stat = "identity", position = "dodge") +
ylab("Genes (#)") +
xlab("Samples") + theme_boss() +
theme(
axis.text.x=element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
axis.ticks.x = element_blank()
) +
ggtitle("Gains>=2*ploidy; Losses=CN<2")
sm1 = tableGrob(sm1, cols = NULL, rows = NULL)
sm2 = tableGrob(sm2, cols = NULL, rows = NULL)
grid.arrange(arrangeGrob(sm1, sm2, ncol=2),
arrangeGrob(p, nrow=1, ncol=1), heights=c(0.2, 0.8))
## SVs
mainDirs = c("~/athena/data/OAC/71_OAC/manta/",
"~/athena/data/OAC/87_OAC/66_ICGC/manta/",
"~/athena/data/OAC/129_OAC/manta/")
message("Getting SVs...")
all_svs = data.frame()
count = 0
ss = NULL
for(b in mainDirs){
samples = list.dirs(b, recursive = F)
for(s in samples){
cat(s, "\n")
fn = paste0(s, "/parsing_and_annotation/svs.Rdata")
if(file.exists(fn)){
load(fn)
sname = unlist(strsplit(s, "/"))
sname = sname[length(sname)]
ss = c(ss, sname)
all_svs = rbind.fill(all_svs, svs %>% mutate(sample=sname))
count = count +1
}else{
next
}
}
}
cat(paste0("Samples: ", count))
## Save raw data
svs = all_svs
svs[,2:6][is.na(svs[,2:6])] = 0
save(svs, file="~/athena/data/OAC/Combined_ICGC_cohort_129_66_71/Rdata/svs.Rdata")
## Save the data on the 19,014
#svs = svs %>% subset(!is.na(entrez_19014)) %>% data.frame()
#save(svs, file="~/athena/data/OAC/Combined_ICGC_cohort_129_66_71/Rdata/svs_19014.Rdata")
load("~/athena/data/OAC/Combined_ICGC_cohort_129_66_71/Rdata/svs.Rdata")
samples2svs2type = svs %>% select(sample, gene, DEL, DUP, INV, BND, INS) %>% gather(type, value, -sample, -gene) %>% subset(value!=0) %>% group_by(sample, type) %>% summarise(n=sum(value))
samples2svs = samples2svs2type %>% group_by(sample) %>% summarise(svs=sum(n))
samples2svs$sample = factor(as.character(samples2svs$sample), levels = unique(samples2svs$sample[order(samples2svs$svs, decreasing = F)]))
samples2svs2type = samples2svs2type %>% left_join(samples2svs) %>% mutate(perc=(n/svs)*100)
samples2svs2type$sample = factor(as.character(samples2svs2type$sample), levels = unique(samples2svs$sample[order(samples2svs$svs, decreasing = F)]))
sm = data.frame(type= names(summary(samples2svs$svs)),
value=unname(c(summary(samples2svs$svs))))
p1 = ggplot(samples2svs, aes(x=sample, y=svs)) +
geom_bar(stat = "identity") + xlab("samples") + ylab("Genes (#)") +
theme_boss() +
theme(
axis.text.x=element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
axis.ticks.x = element_blank()
) +
scale_y_continuous(breaks = seq(0, max(samples2svs$svs), 100)) +
annotation_custom(tableGrob(sm, cols = NULL, rows = NULL), xmin=50, xmax=100, ymin=1000, ymax=1200) +
ggtitle("All SVs (this sample order forced to all other plots)")
p2 = ggplot(samples2svs2type, aes(x=sample, y=perc, fill=type)) +
geom_bar(stat = "identity") + xlab("samples") + ylab("Genes (%)") +
theme_boss() +
theme(
axis.text.x=element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
axis.ticks.x = element_blank()
) +
ggtitle("SV types")
grid.arrange(p1, p2, nrow=1)
## ---------------------------------------
## Plots for the drivers
## ---------------------------------------
load("~/athena/data/OAC/Combined_ICGC_cohort_129_66_71/sysSVM/OAC/training_set_noScale.Rdata")
load("~/athena/data/OAC/Combined_ICGC_cohort_129_66_71/sysSVM/OAC/validation_set_noScale.Rdata")
training_ns = training_ns %>% tibble::rownames_to_column() %>% separate(rowname, into=c("cancer_type", "sample", "entrez"), sep="\\.")
validation_ns = validation_ns %>% tibble::rownames_to_column() %>% separate(rowname, into=c("cancer_type", "sample", "entrez"), sep="\\.")
cohort = rbind.fill(training_ns, validation_ns)
## Get count for basic alterations
## Per-alteration-type driver frequencies in the training set.
## Fix: the appended Homo_dels / Multiple_hits rows previously stored a raw
## fraction (n/total) while the piped rows stored percentages (*100), so the
## two extra bars were plotted on the wrong scale; both now use percentages.
n_train <- nrow(training_ns)  # 4,091 observations in the current cohort
train_homdel <- training_ns %>% subset(Copy_number == 0) %>% nrow
train_multihit <- training_ns %>% subset(Copy_number == 1 & (no_TRUNC_muts > 0 | no_NTDam_muts > 0)) %>% nrow
train_toPlot = training_ns %>% select(sample, no_TRUNC_muts, no_NTDam_muts, no_GOF_muts, BND, INS, INV, CNVGain) %>%
  mutate(CNVGain = as.numeric(as.character(CNVGain))) %>% gather(type, value, -sample) %>%
  # NOTE(review): value==1 keeps only entries with exactly one event per
  # type — confirm this is intended rather than value >= 1.
  subset(value == 1) %>% group_by(type) %>% summarise(n = n()) %>%
  mutate(all = n_train, perc = (as.numeric(n) / all) * 100) %>%
  # rbind() with c() coerces every column to character; downstream code
  # already re-casts with as.numeric(perc), so the types are preserved.
  rbind(c("Homo_dels", train_homdel, n_train, (train_homdel / n_train) * 100)) %>%
  rbind(c("Multiple_hits", train_multihit, n_train, (train_multihit / n_train) * 100)) %>%
  mutate(label = "training set (4091 obs)")
## Same per-alteration-type summary for the prediction (validation) set.
## Fixes two copy-paste defects: the appended Homo_dels / Multiple_hits rows
## were computed from training_ns instead of validation_ns, and their `perc`
## lacked the *100 factor used by the piped rows.
n_val <- nrow(validation_ns)  # 112,898 observations
val_homdel <- validation_ns %>% subset(Copy_number == 0) %>% nrow
val_multihit <- validation_ns %>% subset(Copy_number == 1 & (no_TRUNC_muts > 0 | no_NTDam_muts > 0)) %>% nrow
validation_toPlot = validation_ns %>% select(sample, no_TRUNC_muts, no_NTDam_muts, no_GOF_muts, BND, INS, INV, CNVGain) %>%
  mutate(CNVGain = as.numeric(as.character(CNVGain))) %>% gather(type, value, -sample) %>%
  subset(value == 1) %>% group_by(type) %>% summarise(n = n()) %>%
  mutate(all = n_val, perc = (as.numeric(n) / all) * 100) %>%
  rbind(c("Homo_dels", val_homdel, n_val, (val_homdel / n_val) * 100)) %>%
  rbind(c("Multiple_hits", val_multihit, n_val, (val_multihit / n_val) * 100)) %>%
  mutate(label = "prediction set (112,898 obs)")
toPlot = rbind(train_toPlot, validation_toPlot)
ggplot(toPlot, aes(x=type, y=as.numeric(perc))) +
geom_bar(stat = "identity", position="dodge", color="black", fill="grey50") +
facet_wrap(~label) +
xlab("") + ylab("Drivers (%)") +
theme(
axis.text.x=element_text(angle=90),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black")
) +
scale_y_continuous(limits = c(0,100), breaks = seq(0,100,10))
write.table(toPlot, file="~/Desktop/drivers_261.tsv", quote = F, sep = "\t", row.names = F)
## And the same for the sys-candidates
load("~/athena/data/OAC/Combined_ICGC_cohort_129_66_71/sysSVM/OAC/gsea.noAmp.top10.plusCGC.Rdata")
## Per-alteration-type summary of the sys-candidates (amplifications excluded).
## Fixes two copy-paste defects: the appended Homo_dels / Multiple_hits rows
## were computed from training_ns instead of the sys-candidate table itself,
## and their `perc` lacked the *100 factor used by the piped rows.
syscan_noAmp <- gsea.noAmp.top10.plusCGC[["genes"]] %>% subset(gene_type != "cgc")
n_noAmp <- nrow(syscan_noAmp)  # 2,598 observations
noAmp_homdel <- syscan_noAmp %>% subset(Copy_number == 0) %>% nrow
noAmp_multihit <- syscan_noAmp %>% subset(Copy_number == 1 & (no_TRUNC_muts > 0 | no_NTDam_muts > 0)) %>% nrow
toPlot_noAmp = syscan_noAmp %>% select(sample, no_TRUNC_muts, no_NTDam_muts, no_GOF_muts, BND, INS, INV, CNVGain) %>%
  mutate(CNVGain = as.numeric(as.character(CNVGain))) %>% gather(type, value, -sample) %>%
  subset(value == 1) %>% group_by(type) %>% summarise(n = n()) %>%
  mutate(all = n_noAmp, perc = (as.numeric(n) / all) * 100) %>%
  rbind(c("Homo_dels", noAmp_homdel, n_noAmp, (noAmp_homdel / n_noAmp) * 100)) %>%
  rbind(c("Multiple_hits", noAmp_multihit, n_noAmp, (noAmp_multihit / n_noAmp) * 100)) %>%
  mutate(label = "Sys-candidates without Amplification (2,598 obs)")
load("~/athena/data/OAC/Combined_ICGC_cohort_129_66_71/sysSVM/OAC/gsea.withAmp.top10.plusCGC.Rdata")
## Per-alteration-type summary of the sys-candidates (amplifications included).
## Fixes two copy-paste defects: the appended Homo_dels / Multiple_hits rows
## were computed from training_ns instead of the sys-candidate table itself,
## and their `perc` lacked the *100 factor used by the piped rows.
syscan_withAmp <- gsea.withAmp.top10.plusCGC[["genes"]] %>% subset(gene_type != "cgc")
n_withAmp <- nrow(syscan_withAmp)  # 2,608 observations
withAmp_homdel <- syscan_withAmp %>% subset(Copy_number == 0) %>% nrow
withAmp_multihit <- syscan_withAmp %>% subset(Copy_number == 1 & (no_TRUNC_muts > 0 | no_NTDam_muts > 0)) %>% nrow
toPlot_withAmp = syscan_withAmp %>% select(sample, no_TRUNC_muts, no_NTDam_muts, no_GOF_muts, BND, INS, INV, CNVGain) %>%
  mutate(CNVGain = as.numeric(as.character(CNVGain))) %>% gather(type, value, -sample) %>%
  subset(value == 1) %>% group_by(type) %>% summarise(n = n()) %>%
  mutate(all = n_withAmp, perc = (as.numeric(n) / all) * 100) %>%
  rbind(c("Homo_dels", withAmp_homdel, n_withAmp, (withAmp_homdel / n_withAmp) * 100)) %>%
  rbind(c("Multiple_hits", withAmp_multihit, n_withAmp, (withAmp_multihit / n_withAmp) * 100)) %>%
  mutate(label = "Sys-candidates with Amplifications (2,608 obs)")
toPlot = rbind(toPlot_noAmp, toPlot_withAmp)
ggplot(toPlot, aes(x=type, y=as.numeric(perc))) +
geom_bar(stat = "identity", position="dodge", color="black", fill="grey50") +
facet_wrap(~label) +
xlab("") + ylab("Drivers (%)") +
theme(
axis.text.x=element_text(angle=90),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black")
) +
scale_y_continuous(limits = c(0,100), breaks = seq(0,100,10))
write.table(toPlot, file="~/Desktop/drivers_syscans.tsv", quote = F, sep = "\t", row.names = F)
| /raw_scripts/summary_plotting.R | no_license | ciccalab/sysSVM | R | false | false | 18,790 | r | ## Script for summarising and plotting data/mutations/annotations etc
## Get first the directories (samples may be in different directories)
library(RColorBrewer)
## Tumour-vs-normal sample pairs of interest -- kept for reference; not
## referenced again in this section.
es = c("LP6005690-DNA_E02_vs_LP6005689-DNA_E02",
"LP6008280-DNA_F02_vs_LP6008264-DNA_F02",
"LP6008202-DNA_F01_vs_LP6008201-DNA_F01",
"LP6005935-DNA_C01_vs_LP6005934-DNA_C01",
"LP6008031-DNA_E03_vs_LP6008032-DNA_A04")
## For the mutations I just need to load them - because runOncodriveClust script concatenated the data from all samples
## load() brings the object `muts` into scope.
load("~/athena/data/OAC/Combined_ICGC_cohort_129_66_71/Rdata/muts_129_66_71_OACs_annovar_dbnsfp_oncodriveClust.Rdata")
## Save the data for 19,014
#muts = muts %>% subset(!is.na(entrez_19014))
#save(muts, file="~/athena/data/OAC/Combined_ICGC_cohort_129_66_71/Rdata/muts_129_66_71_OACs_annovar_dbnsfp_oncodriveClust_19014.Rdata")
## Plot the number of all mutations
## Per-sample total mutation counts; the factor levels order samples by
## mutation burden (ascending) so every later plot shares this x-axis order.
samples2muts = muts %>% group_by(sample) %>% summarise(all_muts=n())
samples2muts$sample = factor(as.character(samples2muts$sample), levels = samples2muts$sample[order(samples2muts$all_muts, decreasing = F)])
sm = data.frame(type= names(summary(samples2muts$all_muts)),
value=unname(c(summary(samples2muts$all_muts))))
p = ggplot(samples2muts, aes(x=sample, y=all_muts)) +
geom_bar(stat = "identity") + xlab("samples") + ylab("Mutations (#)") +
theme_boss() +
theme(
axis.text.x=element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
axis.ticks.x = element_blank()
) +
scale_y_continuous(breaks = seq(0, max(samples2muts$all_muts), max(samples2muts$all_muts)/10)) +
annotation_custom(tableGrob(sm, cols = NULL, rows = NULL), xmin=100, xmax=150, ymin=150000, ymax=200000) +
ggtitle("All mutations (this sample order forced to all other plots)")
## Variant-consequence vocabularies (ANNOVAR ExonicFunc.refGene terms) used to
## classify mutations downstream.
ns = c("nonsynonymous","stopgain","frameshift deletion","splicing","frameshift insertion","nonframeshift deletion","nonframeshift insertion","nonframeshift substitution","stoploss","frameshift substitution")
dam = c("nonsynonymous","frameshift deletion","frameshift insertion","frameshift substitution","nonframeshift deletion","nonframeshift insertion","nonframeshift substitution","splicing","stopgain","stoploss")
trunc = c("frameshift deletion","frameshift insertion","frameshift substitution","stopgain","stoploss") ## Always damaging==TRUE
non_trunc = c("nonsynonymous","splicing")
## VEP equivalents of the non-synonymous categories above.
ns_vep=c("missense_variant", "splice_region_variant", "splice_donor_variant", "stop_gained", "splice_acceptor_variant", "stop_lost")
## Per-sample counts of each Func.refGene category, expressed as a fraction of
## the sample's total mutation burden; sample order matches samples2muts.
d = muts %>% count(sample, Func.refGene) %>% data.frame()
d = d %>% left_join(samples2muts) %>% mutate(perc=n/all_muts)
d$sample = factor(as.character(d$sample), levels = samples2muts$sample[order(samples2muts$all_muts, decreasing = F)])
## First plot check the fraction of exonic overall
## Build a large qualitative colour palette by concatenating every qualitative
## RColorBrewer palette; pie() is just a quick visual check of the first n.
n <- 15
qual_col_pals = brewer.pal.info[brewer.pal.info$category == 'qual',]
col_vector = unlist(mapply(brewer.pal, qual_col_pals$maxcolors, rownames(qual_col_pals)))
pie(rep(1,n), col=col_vector[1:n])
## The third colour is replaced with red -- presumably to make one category
## stand out in the stacked barplots; confirm against the rendered figure.
cols = c(col_vector[1:2], "red", col_vector[4:n])
p1 = ggplot(d,
aes(x=sample, y=perc, fill=Func.refGene)) +
geom_bar(stat = "identity") + xlab("samples") + ylab("Mutations (fraction)") +
scale_fill_manual(values = cols) +
theme_boss() +
theme(
axis.text.x=element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
axis.ticks.x = element_blank()
) + labs(fill='Effect') +
ggtitle("All mutations")
## Get distribution of the percentage of exonic
tb1 = rbind(muts %>% group_by(sample) %>% count(Func.refGene) %>% mutate(Func.refGene=ifelse(Func.refGene=="exonic", "exonic", "other")) %>%
group_by(sample, Func.refGene) %>% summarise(n=sum(n)) %>%left_join(samples2muts) %>% mutate(perc=n/all_muts) %>% subset(Func.refGene=="exonic") %>% .$perc %>% summary())
## Now check the categories of the exonic
samples2exonic = muts %>% subset(Func.refGene=="exonic") %>% group_by(sample) %>% summarise(exonic_muts=n())
samples2exonic$sample = factor(as.character(samples2exonic$sample), levels = samples2muts$sample[order(samples2muts$all_muts, decreasing = F)])
sm = data.frame(type= names(summary(samples2exonic$exonic_muts)),
value=unname(c(summary(samples2exonic$exonic_muts))))
p2 = ggplot(samples2exonic, aes(x=sample, y=exonic_muts)) +
geom_bar(stat = "identity") + xlab("samples") + ylab("Mutations (#)") +
theme_boss() +
theme(
axis.text.x=element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
axis.ticks.x = element_blank()
) +
scale_y_continuous(breaks = seq(0, max(samples2exonic$exonic_muts), max(samples2exonic$exonic_muts)/10)) +
annotation_custom(tableGrob(sm, cols = NULL, rows = NULL), xmin=100, xmax=150, ymin=700, ymax=900) +
ggtitle("Exonic mutations")
d = muts %>% subset(Func.refGene=="exonic") %>%count(sample, ExonicFunc.refGene) %>% data.frame()
d = d %>% left_join(samples2exonic) %>% mutate(perc=n/exonic_muts)
d$sample = factor(as.character(d$sample), levels = samples2muts$sample[order(samples2muts$all_muts, decreasing = F)])
p3 = ggplot(d ,
aes(x=sample, y=perc, fill=ExonicFunc.refGene)) +
geom_bar(stat = "identity") + xlab("samples") + ylab("Mutations (fraction)") +
scale_fill_manual(values = cols) +
theme_boss() +
theme(
axis.text.x=element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
axis.ticks.x = element_blank()
) + labs(fill='Effect') +
ggtitle("Exonic mutations")
## Now check the damaging
samples2damaging = muts %>% subset(damaging) %>% group_by(sample) %>% summarise(damaging_muts=n())
samples2damaging = samples2damaging %>% full_join(samples2muts%>%select(sample)) ## Not all samples have damaging mutations
samples2damaging$damaging_muts[is.na(samples2damaging$damaging_muts)] = 0
samples2damaging$sample = factor(as.character(samples2damaging$sample), levels = samples2muts$sample[order(samples2muts$all_muts, decreasing = F)])
sm = data.frame(type= names(summary(samples2damaging$damaging_muts)),
value=unname(c(summary(samples2damaging$damaging_muts))))
p4 = ggplot(samples2damaging, aes(x=sample, y=damaging_muts)) +
geom_bar(stat = "identity") + xlab("samples") + ylab("Mutations (#)") +
theme_boss() +
theme(
axis.text.x=element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
axis.ticks.x = element_blank()
) +
scale_y_continuous(breaks = seq(0, max(samples2damaging$damaging_muts), max(samples2damaging$damaging_muts)/10)) +
annotation_custom(tableGrob(sm, cols = NULL, rows = NULL), xmin=100, xmax=150, ymin=250, ymax=300) +
ggtitle("Damaging mutations")
## Now the gain of function mutations
samples2gof = muts %>% subset(oncodriveClust) %>% group_by(sample) %>% summarise(gof_muts=n())
samples2gof = samples2gof %>% full_join(samples2muts%>%select(sample)) ## Not all samples have damaging mutations
samples2gof$gof_muts[is.na(samples2gof$gof_muts)] = 0
samples2gof$sample = factor(as.character(samples2gof$sample), levels = samples2muts$sample[order(samples2muts$all_muts, decreasing = F)])
sm = data.frame(type= names(summary(samples2gof$gof_muts)),
value=unname(c(summary(samples2gof$gof_muts))))
p5 = ggplot(samples2gof, aes(x=sample, y=gof_muts)) +
geom_bar(stat = "identity") + xlab("samples") + ylab("Mutations (#)") +
theme_boss() +
theme(
axis.text.x=element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
axis.ticks.x = element_blank()
) +
scale_y_continuous(breaks = seq(0, max(samples2gof$gof_muts), 5)) +
annotation_custom(tableGrob(sm, cols = NULL, rows = NULL), xmin=10, xmax=60, ymin=10, ymax=15) +
ggtitle("Gain-of-function mutations")
grid.arrange(p, p1, p2, p3, p4, ncol=2)
grid.arrange(p, p1, p2, p3, p4, p5, ncol=2)
## CNVs
## Gather all CNVs
# mainDirs = c("~/data/OAC/71_OAC/ascat/",
# "~/data/OAC/87_OAC/66_ICGC/ascat/",
# "~/data/OAC/129_OAC/ascat/")
mainDirs = c("~/rosalind_lustre/mourikisa/data/OAC/87_OAC/21_literature/ascat/")
message("Getting CNVs...")
## Collect the per-sample CNV tables into a single data frame.
## Each sample directory contains a cnvs.Rdata file whose "df_cnvs_19014"
## element is the CNV table restricted to the 19,014 reference genes.
## Tables are accumulated in a list and combined once at the end: growing a
## data.frame with rbind() inside the loop is quadratic in the row count.
cnv_list = list()
count = 0
for(b in mainDirs){
  samples = list.dirs(b, recursive = F)
  for(s in samples){
    cat(s, "\n")
    fn = paste0(s, "/parsing_and_annotation/cnvs.Rdata")
    load(fn)             # brings the object `cnvs` into scope
    sname = basename(s)  # sample name = last component of the directory path
    d = cnvs[["df_cnvs_19014"]]
    cnv_list[[length(cnv_list) + 1]] = d %>% mutate(sample=sname)
    count = count + 1
  }
}
## Keep the empty-data.frame fallback so downstream code still receives a data
## frame when no sample directories are found.
all_cnvs = if (length(cnv_list) > 0) do.call(rbind, cnv_list) else data.frame()
cat(paste0("Samples: ", count))
## Save raw data
cnvs = all_cnvs
save(cnvs, file="~/rosalind_lustre/mourikisa/data/OAC/87_OAC/21_literature/Rdata/cnvs_21_literature_OACs.Rdata")
samples2cnvs = all_cnvs %>% subset(!is.na(entrez_19014)) %>% select(sample, entrez_19014, CNV_type_corrected) %>% unique %>% count(sample, CNV_type_corrected)
all_samples = rbind(samples2cnvs%>%select(sample)%>%unique%>%mutate(CNV_type_corrected="Gain"), samples2cnvs%>%select(sample)%>%unique%>%mutate(CNV_type_corrected="Loss"))
samples2cnvs = samples2cnvs %>% full_join(all_samples)
samples2cnvs$n[is.na(samples2cnvs$n)] = 0
samples2cnvs = samples2cnvs %>% subset(!is.na(CNV_type_corrected))
sm1 = data.frame(type= names(summary(samples2cnvs$n[samples2cnvs$CNV_type_corrected=="Loss"])),
value=unname(c(summary(samples2cnvs$n[samples2cnvs$CNV_type_corrected=="Loss"]))))
sm2 = data.frame(type= names(summary(samples2cnvs$n[samples2cnvs$CNV_type_corrected=="Gain"])),
value=unname(c(summary(samples2cnvs$n[samples2cnvs$CNV_type_corrected=="Gain"]))))
p = ggplot(samples2cnvs %>% subset(n>0), aes(x=sample, y=n, fill=CNV_type_corrected)) +
geom_bar(stat = "identity", position = "dodge") +
ylab("Genes (#)") +
xlab("Samples") + theme_boss() +
theme(
axis.text.x=element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
axis.ticks.x = element_blank()
) +
ggtitle("Gains>=2*ploidy; Losses=CN<2")
sm1 = tableGrob(sm1, cols = NULL, rows = NULL)
sm2 = tableGrob(sm2, cols = NULL, rows = NULL)
grid.arrange(arrangeGrob(sm1, sm2, ncol=2),
arrangeGrob(p, nrow=1, ncol=1), heights=c(0.2, 0.8))
## SVs
mainDirs = c("~/athena/data/OAC/71_OAC/manta/",
"~/athena/data/OAC/87_OAC/66_ICGC/manta/",
"~/athena/data/OAC/129_OAC/manta/")
message("Getting SVs...")
## Collect the per-sample structural-variant tables into a single data frame.
## Tables are accumulated in a list and combined once at the end with
## rbind.fill (plyr), which pads columns that only some samples have --
## growing the data.frame with rbind.fill() inside the loop is quadratic.
sv_list = list()
count = 0
ss = NULL
for(b in mainDirs){
  samples = list.dirs(b, recursive = F)
  for(s in samples){
    cat(s, "\n")
    fn = paste0(s, "/parsing_and_annotation/svs.Rdata")
    if(file.exists(fn)){   # some samples have no manta output; skip them
      load(fn)             # brings the object `svs` into scope
      sname = basename(s)  # sample name = last component of the directory path
      ss = c(ss, sname)
      sv_list[[length(sv_list) + 1]] = svs %>% mutate(sample=sname)
      count = count + 1
    }
  }
}
## Keep the empty-data.frame fallback so `svs = all_svs` below always works.
all_svs = if (length(sv_list) > 0) do.call(rbind.fill, sv_list) else data.frame()
cat(paste0("Samples: ", count))
## Save raw data
svs = all_svs
svs[,2:6][is.na(svs[,2:6])] = 0
save(svs, file="~/athena/data/OAC/Combined_ICGC_cohort_129_66_71/Rdata/svs.Rdata")
## Save the data on the 19,014
#svs = svs %>% subset(!is.na(entrez_19014)) %>% data.frame()
#save(svs, file="~/athena/data/OAC/Combined_ICGC_cohort_129_66_71/Rdata/svs_19014.Rdata")
load("~/athena/data/OAC/Combined_ICGC_cohort_129_66_71/Rdata/svs.Rdata")
samples2svs2type = svs %>% select(sample, gene, DEL, DUP, INV, BND, INS) %>% gather(type, value, -sample, -gene) %>% subset(value!=0) %>% group_by(sample, type) %>% summarise(n=sum(value))
samples2svs = samples2svs2type %>% group_by(sample) %>% summarise(svs=sum(n))
samples2svs$sample = factor(as.character(samples2svs$sample), levels = unique(samples2svs$sample[order(samples2svs$svs, decreasing = F)]))
samples2svs2type = samples2svs2type %>% left_join(samples2svs) %>% mutate(perc=(n/svs)*100)
samples2svs2type$sample = factor(as.character(samples2svs2type$sample), levels = unique(samples2svs$sample[order(samples2svs$svs, decreasing = F)]))
sm = data.frame(type= names(summary(samples2svs$svs)),
value=unname(c(summary(samples2svs$svs))))
p1 = ggplot(samples2svs, aes(x=sample, y=svs)) +
geom_bar(stat = "identity") + xlab("samples") + ylab("Genes (#)") +
theme_boss() +
theme(
axis.text.x=element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
axis.ticks.x = element_blank()
) +
scale_y_continuous(breaks = seq(0, max(samples2svs$svs), 100)) +
annotation_custom(tableGrob(sm, cols = NULL, rows = NULL), xmin=50, xmax=100, ymin=1000, ymax=1200) +
ggtitle("All SVs (this sample order forced to all other plots)")
p2 = ggplot(samples2svs2type, aes(x=sample, y=perc, fill=type)) +
geom_bar(stat = "identity") + xlab("samples") + ylab("Genes (%)") +
theme_boss() +
theme(
axis.text.x=element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
axis.ticks.x = element_blank()
) +
ggtitle("SV types")
grid.arrange(p1, p2, nrow=1)
## ---------------------------------------
## Plots for the drivers
## ---------------------------------------
load("~/athena/data/OAC/Combined_ICGC_cohort_129_66_71/sysSVM/OAC/training_set_noScale.Rdata")
load("~/athena/data/OAC/Combined_ICGC_cohort_129_66_71/sysSVM/OAC/validation_set_noScale.Rdata")
training_ns = training_ns %>% tibble::rownames_to_column() %>% separate(rowname, into=c("cancer_type", "sample", "entrez"), sep="\\.")
validation_ns = validation_ns %>% tibble::rownames_to_column() %>% separate(rowname, into=c("cancer_type", "sample", "entrez"), sep="\\.")
cohort = rbind.fill(training_ns, validation_ns)
## Get count for basic alterations
## Alteration-type frequencies in the training set.
## BUG FIX: the appended Homo_dels/Multiple_hits rows previously stored perc
## as a raw fraction (n/4091) while the gathered rows store percentages
## ((n/all)*100); both appended rows now use *100 so all rows share the 0-100
## scale used by the plot below.
train_toPlot = training_ns %>% select(sample, no_TRUNC_muts, no_NTDam_muts, no_GOF_muts, BND, INS, INV, CNVGain) %>%
  mutate(CNVGain=as.numeric(as.character(CNVGain))) %>% gather(type, value, -sample) %>%
  subset(value==1) %>% group_by(type) %>% summarise(n=n()) %>% mutate(all=4091, perc=(as.numeric(n)/all)*100) %>%
  rbind(c("Homo_dels", training_ns %>% subset(Copy_number==0) %>% nrow, 4091, ((training_ns %>% subset(Copy_number==0) %>% nrow)/4091)*100)) %>%
  rbind(c("Multiple_hits", training_ns %>% subset(Copy_number==1 & (no_TRUNC_muts>0 | no_NTDam_muts>0)) %>% nrow, 4091, ((training_ns %>% subset(Copy_number==1 & (no_TRUNC_muts>0 | no_NTDam_muts>0)) %>% nrow)/4091)*100)) %>%
  mutate(label="training set (4091 obs)")
## Alteration-type frequencies in the prediction (validation) set.
## BUG FIXES:
## 1. perc in the appended rows is now a percentage (*100), matching the
##    gathered rows and the 0-100 y-axis of the plot below.
## 2. The appended Homo_dels/Multiple_hits rows counted from training_ns even
##    though this block summarises the validation set and normalises by the
##    validation-set size (112,898) -- a copy-paste slip; they now count from
##    validation_ns.
validation_toPlot = validation_ns %>% select(sample, no_TRUNC_muts, no_NTDam_muts, no_GOF_muts, BND, INS, INV, CNVGain) %>%
  mutate(CNVGain=as.numeric(as.character(CNVGain))) %>% gather(type, value, -sample) %>%
  subset(value==1) %>% group_by(type) %>% summarise(n=n()) %>% mutate(all=112898, perc=(as.numeric(n)/all)*100) %>%
  rbind(c("Homo_dels", validation_ns %>% subset(Copy_number==0) %>% nrow, 112898, ((validation_ns %>% subset(Copy_number==0) %>% nrow)/112898)*100)) %>%
  rbind(c("Multiple_hits", validation_ns %>% subset(Copy_number==1 & (no_TRUNC_muts>0 | no_NTDam_muts>0)) %>% nrow, 112898, ((validation_ns %>% subset(Copy_number==1 & (no_TRUNC_muts>0 | no_NTDam_muts>0)) %>% nrow)/112898)*100)) %>%
  mutate(label="prediction set (112,898 obs)")
toPlot = rbind(train_toPlot, validation_toPlot)
ggplot(toPlot, aes(x=type, y=as.numeric(perc))) +
geom_bar(stat = "identity", position="dodge", color="black", fill="grey50") +
facet_wrap(~label) +
xlab("") + ylab("Drivers (%)") +
theme(
axis.text.x=element_text(angle=90),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black")
) +
scale_y_continuous(limits = c(0,100), breaks = seq(0,100,10))
write.table(toPlot, file="~/Desktop/drivers_261.tsv", quote = F, sep = "\t", row.names = F)
## And the same for the sys-candidates
load("~/athena/data/OAC/Combined_ICGC_cohort_129_66_71/sysSVM/OAC/gsea.noAmp.top10.plusCGC.Rdata")
## Alteration-type frequencies for sys-candidates without amplifications.
## BUG FIX: the appended rows previously stored perc as a raw fraction
## (n/2598) while the gathered rows store percentages ((n/all)*100); both
## appended rows now use *100 so every row shares the 0-100 plot scale.
## NOTE(review): the appended rows count from training_ns, not from the
## gsea.noAmp genes table summarised above -- looks like a copy-paste from the
## training-set block; confirm the intended source data.
toPlot_noAmp = gsea.noAmp.top10.plusCGC[["genes"]] %>% subset(gene_type!="cgc") %>% select(sample, no_TRUNC_muts, no_NTDam_muts, no_GOF_muts, BND, INS, INV, CNVGain) %>%
  mutate(CNVGain=as.numeric(as.character(CNVGain))) %>% gather(type, value, -sample) %>%
  subset(value==1) %>% group_by(type) %>% summarise(n=n()) %>% mutate(all=2598, perc=(as.numeric(n)/all)*100) %>%
  rbind(c("Homo_dels", training_ns %>% subset(Copy_number==0) %>% nrow, 2598, ((training_ns %>% subset(Copy_number==0) %>% nrow)/2598)*100)) %>%
  rbind(c("Multiple_hits", training_ns %>% subset(Copy_number==1 & (no_TRUNC_muts>0 | no_NTDam_muts>0)) %>% nrow, 2598, ((training_ns %>% subset(Copy_number==1 & (no_TRUNC_muts>0 | no_NTDam_muts>0)) %>% nrow)/2598)*100)) %>%
  mutate(label="Sys-candidates without Amplification (2,598 obs)")
load("~/athena/data/OAC/Combined_ICGC_cohort_129_66_71/sysSVM/OAC/gsea.withAmp.top10.plusCGC.Rdata")
## Alteration-type frequencies for sys-candidates with amplifications.
## BUG FIX: the appended Homo_dels/Multiple_hits rows previously stored perc
## as a raw fraction (n/2608) while the gathered rows store percentages
## ((n/all)*100); both appended rows now use *100 so every row shares the
## 0-100 scale used by the plot below.
## NOTE(review): the appended rows count from training_ns, not from the
## gsea.withAmp genes table summarised above -- looks like a copy-paste from
## the training-set block; confirm the intended source data.
toPlot_withAmp = gsea.withAmp.top10.plusCGC[["genes"]] %>% subset(gene_type!="cgc") %>% select(sample, no_TRUNC_muts, no_NTDam_muts, no_GOF_muts, BND, INS, INV, CNVGain) %>%
  mutate(CNVGain=as.numeric(as.character(CNVGain))) %>% gather(type, value, -sample) %>%
  subset(value==1) %>% group_by(type) %>% summarise(n=n()) %>% mutate(all=2608, perc=(as.numeric(n)/all)*100) %>%
  rbind(c("Homo_dels", training_ns %>% subset(Copy_number==0) %>% nrow, 2608, ((training_ns %>% subset(Copy_number==0) %>% nrow)/2608)*100)) %>%
  rbind(c("Multiple_hits", training_ns %>% subset(Copy_number==1 & (no_TRUNC_muts>0 | no_NTDam_muts>0)) %>% nrow, 2608, ((training_ns %>% subset(Copy_number==1 & (no_TRUNC_muts>0 | no_NTDam_muts>0)) %>% nrow)/2608)*100)) %>%
  mutate(label="Sys-candidates with Amplifications (2,608 obs)")
toPlot = rbind(toPlot_noAmp, toPlot_withAmp)
ggplot(toPlot, aes(x=type, y=as.numeric(perc))) +
geom_bar(stat = "identity", position="dodge", color="black", fill="grey50") +
facet_wrap(~label) +
xlab("") + ylab("Drivers (%)") +
theme(
axis.text.x=element_text(angle=90),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black")
) +
scale_y_continuous(limits = c(0,100), breaks = seq(0,100,10))
write.table(toPlot, file="~/Desktop/drivers_syscans.tsv", quote = F, sep = "\t", row.names = F)
|
\name{print.relaxnet}
\alias{print.relaxnet}
\title{
Print Method for relaxnet Objects
}
\description{
This function just calls \code{print(summary(x))}. See \code{\link{summary.relaxnet}}.
}
\usage{
\method{print}{relaxnet}(x, digits, ...)
}
\arguments{
\item{x}{
The \code{"relaxnet"} object to be printed.
}
\item{digits}{
Passed to \code{print.summary.relaxnet}.
}
\item{\dots}{
Passed to \code{print.summary.relaxnet}.
}
}
\value{
Returns \code{x} invisibly.
}
\author{
Stephan Ritter, with design contributions from Alan Hubbard.
Much of the code (and some help file content) is adapted from the \pkg{glmnet} package, whose authors are Jerome Friedman, Trevor Hastie and Rob Tibshirani.
}
\seealso{
\code{\link{relaxnet}}, \code{\link{summary.relaxnet}}
}
| /man/print.relaxnet.Rd | no_license | cran/relaxnet | R | false | false | 775 | rd | \name{print.relaxnet}
\alias{print.relaxnet}
\title{
Print Method for relaxnet Objects
}
\description{
This function just calls \code{print(summary(x))}. See \code{\link{summary.relaxnet}}.
}
\usage{
\method{print}{relaxnet}(x, digits, ...)
}
\arguments{
\item{x}{
The \code{"relaxnet"} object to be printed.
}
\item{digits}{
Passed to \code{print.summary.relaxnet}.
}
\item{\dots}{
Passed to \code{print.summary.relaxnet}.
}
}
\value{
Returns \code{x} invisibly.
}
\author{
Stephan Ritter, with design contributions from Alan Hubbard.
Much of the code (and some help file content) is adapted from the \pkg{glmnet} package, whose authors are Jerome Friedman, Trevor Hastie and Rob Tibshirani.
}
\seealso{
\code{\link{relaxnet}}, \code{\link{summary.relaxnet}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pipe.R
\name{\%>\%}
\alias{\%>\%}
\title{Pipe operator}
\usage{
lhs \%>\% rhs
}
\arguments{
\item{lhs}{a \code{\link{morrisjs}} object}
\item{rhs}{a charting function}
}
\description{
Imports the pipe operator from magrittr.
}
\examples{
morrisjs(mdeaths) \%>\% mjsLine
}
| /man/pipe.Rd | permissive | MarkEdmondson1234/morrisjs | R | false | true | 352 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pipe.R
\name{\%>\%}
\alias{\%>\%}
\title{Pipe operator}
\usage{
lhs \%>\% rhs
}
\arguments{
\item{lhs}{a \code{\link{morrisjs}} object}
\item{rhs}{a charting function}
}
\description{
Imports the pipe operator from magrittr.
}
\examples{
morrisjs(mdeaths) \%>\% mjsLine
}
|
# Merging Data
## Peer review data
### Peer review study (model for scientific peer review)
### SAT questions answered (solutions) in one set
### People who reviewed those answers to decide whether they were right
### or wrong in the other set.
### join()/arrange()/join_all() below come from plyr, so load it up front.
library(plyr)
### BUG FIX: dir.creat -> dir.create (the typo made directory creation fail).
if(!file.exists("./data")){dir.create("./data")}
fileUrl1 = "https://dl.dropboxusercontent.com/u/7710864/data/reviews-apr29.csv"
fileUrl2 = "https://dl.dropboxusercontent.com/u/7710864/data/solution-apr29.csv"
### BUG FIX: destfile used "reviews,csv"/"solutions,csv" (comma instead of a
### dot), so the files read back below with read.csv() would never exist.
download.file(fileUrl1,destfile="./data/reviews.csv")
download.file(fileUrl2,destfile="./data/solutions.csv")
reviews <- read.csv("./data/reviews.csv")
solutions <- read.csv("./data/solutions.csv")
### similar to a SQL database these two sets have corresponding variables
### For reviews, one is "solution id" which corresponds with "id" in the
### solutions data set.
head(reviews)
head(solutions)
## Merging data
### Using solution id and id to merge datasets
### all=TRUE means include all variables not common to both data sets.
### Also insert NAs for missing values/rows. the x variable name will be
### used in place of the y variable name in the merged data set.
names(reviews)
names(solutions)
mergedData = merge(reviews,solutions,by.x="solution_id",by.y="id",all=TRUE)
head(mergedData)
## Default - merge all common column names
### Showing using merge() default values. Shows other variables common
### to both data sets but have different values. SO same name but not
### same variable. The merge creates different rows with same id numbers.
### BUG FIX: the closing parenthesis was misplaced --
### intersect(names(solutions)),names(reviews) did not parse.
intersect(names(solutions), names(reviews))
mergedData2 = merge(reviews,solutions,all=TRUE)
head(mergedData2)
## Using join in the plyr package
### faster but has fewer features than merge. Can only merge datasets with
### common variable names. So not applicable with peer review data set.
df1 = data.frame(id=sample(1:10),x=rnorm(10))
df2 = data.frame(id=sample(1:10),y=rnorm(10))
### joins 2 data sets by id. Arrange orders data set by id in ascending order.
arrange(join(df1,df2),id)
## If you have multiple data frames
### Using join_all()
df3 = data.frame(id=sample(1:10),z=rnorm(10))
dfList = list(df1,df2,df3); dfList
arrange(join_all(dfList),id)
## Peer review data
### Peer review study (model for scientific peer review)
### SAT questions answered (solutions) in one set
### People who reviewed those answers to decide whether they were right
### or wrong in the other set.
if(!file.exists("./data")){dir.creat("./data")}
fileUrl1 = "https://dl.dropboxusercontent.com/u/7710864/data/reviews-apr29.csv"
fileUrl2 = "https://dl.dropboxusercontent.com/u/7710864/data/solution-apr29.csv"
download.file(fileUrl1,destfile="./data/reviews,csv")
download.file(fileUrl2,destfile="./data/solutions,csv")
reviews <- read.csv("./data/reviews.csv")
solutions <- read.csv("./data/solutions.csv")
### similar to a SQL database these two sets have corresponding variables
### For reviews, one is "solution id" which corresponds with "id" in the
### solutions data set.
head(reviews)
head(solutions)
## Merging data
### Using solution id and id to merge datasets
### all=TRUE means include all variables not common to both data sets.
### Also insert NAs for missing values/rows. the x variable name will be
### used in place of the y variable name in the merged data set.
names(reviews)
names(solutions)
mergedData = merge(reviews,solutions,by.x="solution_id",by.y="id",all=TRUE)
head(mergedData)
## Default - merge all common column names
### Showing using merge() default values. Shows other variables common
### to both data sets but have different values. SO same name but not
### same variable. The merge creates different rows with same id numbers.
intersect(names(solutions)),names(reviews)
mergedData2 = merge(reviews,solutions,all=TRUE)
head(mergedData2)
## Using join in the plyr package
### faster but has fewer features than merge. Can only merge datasets
### common variable names. So not applicable with peer review data set.
df1 = data.frame(id=sample(1:10),x=rnorm(10))
df2 = data.frame(id=sample(1:10),y=rnorm(10))
### joins 2 data sets by id. Arrange orders data set by id in ascending order.
arrange(join(df1,df2),id)
## If you have multiple data frames
### Using join_all()
df3 = data.frame(id=sample(1:10),z=rnorm(10))
dfList = list(df1,df2,df3); dfList
arrange(join_all(dfList),id) |
# Downloading and extracting the data.
# The zip is only fetched once; re-running the script reuses the local copy.
if (!file.exists ("Project1_Data")) {
  dir.create ("Project1_Data")
  download.file ("http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
                 destfile="Project1_Data/exdata-data-household_power_consumption.zip",
                 method="auto")
  unzip ("Project1_Data/exdata-data-household_power_consumption.zip")
  dateDownloaded <- date() # Saves the date the download was done.
}
# Read only 1st and 2nd Feb, 2007 data points into R by staging the text file
# in a throwaway SQLite database and filtering with SQL.
# NOTE(review): dbConnect("SQLite", ...) is the legacy RSQLite calling
# convention; current RSQLite expects dbConnect(RSQLite::SQLite(), dbname=...).
# Confirm the installed version before modernising.
library (RSQLite)
con <- dbConnect ("SQLite", dbname="household_data")
dbWriteTable (con, name="data_table", value="household_power_consumption.txt",
              row.names=F, header=T, sep=";")
finalData <- dbGetQuery (con,
             "SELECT * FROM data_table WHERE Date='1/2/2007' OR Date='2/2/2007'")
dbDisconnect(con)
# Convert character to date and time
finalData$Date <- strptime(paste(finalData$Date,finalData$Time), format="%d/%m/%Y %H:%M:%S")
# Delete the Time column (combined with Date now).
finalData <- finalData[,-2]
colnames(finalData)[1] <- "datetime"
## Plot 1
##############################################################################
#
png (filename="plot1.png")
hist(finalData$Global_active_power, col="red",
     main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off()
#
##############################################################################
## Deletes the temporary folder used to store the data.
unlink("Project1_Data", recursive=TRUE)
# BUG FIX: the SQLite database file was created as "household_data" (no ".sql"
# extension), so the old unlink("household_data.sql") left it behind.
unlink(c("household_data", "household_power_consumption.txt"))
unlink(c("household_data.sql", "household_power_consumption.txt")) | /plot1.R | no_license | Vaskoman/ExData_Plotting1 | R | false | false | 1,967 | r | # Downloading and extracting the data.
if (!file.exists ("Project1_Data")) {
dir.create ("Project1_Data")
download.file ("http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
destfile="Project1_Data/exdata-data-household_power_consumption.zip",
method="auto")
unzip ("Project1_Data/exdata-data-household_power_consumption.zip")
dateDownloaded <- date() # Saves the date the download was done.
}
# Read only 1st and 2nd Feb, 2007 data points into R.
library (RSQLite)
con <- dbConnect ("SQLite", dbname="household_data")
dbWriteTable (con, name="data_table", value="household_power_consumption.txt",
row.names=F, header=T, sep=";")
finalData <- dbGetQuery (con,
"SELECT * FROM data_table WHERE Date='1/2/2007' OR Date='2/2/2007'")
dbDisconnect(con)
# Convert character to date and time
finalData$Date <- strptime(paste(finalData$Date,finalData$Time), format="%d/%m/%Y %H:%M:%S")
# Delete the Time column (combined with Date now).
finalData <- finalData[,-2]
colnames(finalData)[1] <- "datetime"
## Plot 1
##############################################################################
#
png (filename="plot1.png") #
hist(finalData$Global_active_power, col="red", #
main="Global Active Power", xlab="Global Active Power (kilowatts)") #
dev.off() #
#
##############################################################################
## Deletes the temporary folder used to store the data.
unlink("Project1_Data", recursive=TRUE)
unlink(c("household_data.sql", "household_power_consumption.txt")) |
Family.sizes <-
function(Ped){
  ## Return the number of individuals in each family of a pedigree.
  ##
  ## Ped: a pedigree data frame (or matrix) whose first column is the family
  ##      identifier; rows belonging to the same family are assumed to be
  ##      contiguous (the original nested-loop implementation made the same
  ##      assumption -- TODO confirm input pedigrees are sorted by family).
  ## Returns a numeric vector with one entry per family, in order of first
  ## appearance.
  ##
  ## rle() gives the lengths of consecutive runs of identical identifiers,
  ## which under the contiguity assumption are exactly the family sizes.
  ## This replaces the original pair of hand-rolled loops and the unsafe
  ## 1:length(...) iteration (which misbehaves on an empty pedigree).
  fam <- as.character(Ped[, 1])
  ## as.numeric() preserves the original return mode (the old ntype vector
  ## held doubles, not integers).
  as.numeric(rle(fam)$lengths)
}
| /R/Family.sizes.R | no_license | KarimOualkacha/PCH4Pedigees | R | false | false | 532 | r | Family.sizes <-
function(Ped){
fam.id <- NULL
fam.names <- as.vector(unique(Ped[ ,1]))
for(i in 1:length(fam.names))
{
fam.id[Ped[ ,1]==fam.names[i]] <- i
}
Ped[,1] <- fam.id
nbre.family = length(unique(Ped[ ,1])) #### ceci donne le nbre total de familles dans un jeu de donnees
ntype=vector(length = nbre.family)
rep.family = c(Ped[ ,1],0)
j=1
cont1=1
for (i in 1:nbre.family){
cont=0
while(rep.family[j] == Ped[cont1,1] ){
j=j+1
cont=cont+1
}
ntype[i]=cont
cont1 = cont1 + cont
}
return(ntype)
}
|
\name{read_jplace}
\alias{read_jplace}
\title{
Read a jplace file
}
\description{
Read a jplace file
}
\usage{
read_jplace(jplace_file, full = TRUE)
}
\arguments{
\item{jplace_file}{
A jplace file name
}
\item{full}{
If set to FALSE, only the tree is read from the jplace file
}
}
\details{
When the jplace or sqlite files are imported into R, the node numbering available in the original
file is converted to the class "phylo" numbering. The class phylo is defined in the "ape" package.
}
\value{
A list with
\item{arbre}{The tree in class "phylo" over which placements are performed}
\item{placement}{The placement table}
\item{multiclass}{The multiclass table}
\item{run}{The command line used to obtained the jplace file}
}
\author{
pierre lefeuvre
}
\seealso{
read_sqlite
}
| /man/read_jplace.Rd | no_license | cran/BoSSA | R | false | false | 782 | rd | \name{read_jplace}
\alias{read_jplace}
\title{
Read a jplace file
}
\description{
Read a jplace file
}
\usage{
read_jplace(jplace_file, full = TRUE)
}
\arguments{
\item{jplace_file}{
A jplace file name
}
\item{full}{
If set to FALSE, only the tree is read from the jplace file
}
}
\details{
When the jplace or sqlite files are imported into R, the node numbering available in the original
file is converted to the class "phylo" numbering. The class phylo is defined in the "ape" package.
}
\value{
A list with
\item{arbre}{The tree in class "phylo" over which placements are performed}
\item{placement}{The placement table}
\item{multiclass}{The multiclass table}
\item{run}{The command line used to obtained the jplace file}
}
\author{
pierre lefeuvre
}
\seealso{
read_sqlite
}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 15079
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 15078
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 15078
c
c Input Parameter (command line, file):
c input filename QBFLIB/Basler/terminator/stmt21_215_403.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 4480
c no.of clauses 15079
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 15078
c
c QBFLIB/Basler/terminator/stmt21_215_403.qdimacs 4480 15079 E1 [1] 0 280 4199 15078 RED
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Basler/terminator/stmt21_215_403/stmt21_215_403.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 720 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 15079
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 15078
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 15078
c
c Input Parameter (command line, file):
c input filename QBFLIB/Basler/terminator/stmt21_215_403.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 4480
c no.of clauses 15079
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 15078
c
c QBFLIB/Basler/terminator/stmt21_215_403.qdimacs 4480 15079 E1 [1] 0 280 4199 15078 RED
|
library("testthat")
| /tmc-langs-r/src/test/resources/recognition_test_cases/testthat_folder/tests/testthat.R | no_license | testmycode/tmc-langs | R | false | false | 20 | r | library("testthat")
|
testlist <- list(Rs = numeric(0), atmp = 0, relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86808667591126e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161 ))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) | /meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615853231-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 659 | r | testlist <- list(Rs = numeric(0), atmp = 0, relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86808667591126e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161 ))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) |
# Compare three fixed-width ephemeris files for Ariel (Lainey, Emelyanov,
# Laskar theories) and write the pairwise RA/DEC differences as PNG time
# series in the working directory.
#
# Arguments:
#   dat1, dat2, dat3 -- paths to fixed-width files with columns of widths
#                       c(5, 3, 13, 18, 12) (year, month, day, RA, DEC,
#                       presumably -- confirm against the data source) and an
#                       8-line header that is skipped.
# Side effects: writes six PNG files; no meaningful return value.
comparison <- function(dat1, dat2, dat3) {
  widths <- c(5, 3, 13, 18, 12)
  inp1 <- read.fwf(dat1, widths, skip = 8)
  inp2 <- read.fwf(dat2, widths, skip = 8)
  inp3 <- read.fwf(dat3, widths, skip = 8)
  # Decimal year from year/month/day columns (approximate: 12 months, 365 days).
  t <- inp1$V1 + inp1$V2 / 12 + inp1$V3 / 365
  # Helper: render one difference series to a PNG file.
  # NOTE(review): the scale factors suggest RA is in hours (x15 -> degrees)
  # and both series are converted to milliarcseconds via x3.6e6 -- confirm units.
  plot_diff <- function(dvals, fname, title) {
    png(fname, width = 1500, height = 500)
    plot(cbind(t, dvals), main = title, type = "l",
         xlab = "year", ylab = "mas")
    dev.off()
  }
  plot_diff((inp1$V4 - inp2$V4) * 15 * 36e+5,
            "./Ariel_Lainey-Emelyanov_RA.png", "Ariel, Lainey-Emelyanov, RA")
  # BUG FIX: the three DEC output filenames previously lacked the ".png"
  # extension, unlike their RA counterparts.
  plot_diff((inp1$V5 - inp2$V5) * 36e+5,
            "./Ariel_Lainey-Emelyanov_DEC.png", "Ariel, Lainey-Emelyanov, DEC")
  plot_diff((inp2$V4 - inp3$V4) * 15 * 36e+5,
            "./Ariel_Emelyanov-Laskar_RA.png", "Ariel, Emelyanov-Laskar, RA")
  plot_diff((inp2$V5 - inp3$V5) * 36e+5,
            "./Ariel_Emelyanov-Laskar_DEC.png", "Ariel, Emelyanov-Laskar, DEC")
  plot_diff((inp3$V4 - inp1$V4) * 15 * 36e+5,
            "./Ariel_Laskar-Lainey_RA.png", "Ariel, Laskar-Lainey, RA")
  plot_diff((inp3$V5 - inp1$V5) * 36e+5,
            "./Ariel_Laskar-Lainey_DEC.png", "Ariel, Laskar-Lainey, DEC")
} | /work2/compareplot.R | no_license | Veyza/Uranus | R | false | false | 1,557 | r | comparison <- function(dat1, dat2, dat3) {
inp1 <- read.fwf(dat1, c(5, 3, 13, 18, 12), skip = 8)
inp2 <- read.fwf(dat2, c(5, 3, 13, 18, 12), skip = 8)
inp3 <- read.fwf(dat3, c(5, 3, 13, 18, 12), skip = 8)
t <- inp1$V1 + inp1$V2 / 12 + inp1$V3 / 365
dra <- (inp1$V4 - inp2$V4) * 15 * 36e+5
ddec <- (inp1$V5 - inp2$V5) * 36e+5
png("./Ariel_Lainey-Emelyanov_RA.png", width = 1500, height = 500)
plot(cbind(t,dra), main = "Ariel, Lainey-Emelyanov, RA", type = "l", xlab = "year", ylab = "mas")
dev.off()
png("./Ariel_Lainey-Emelyanov_DEC", width = 1500, height = 500)
plot(cbind(t, ddec), main = "Ariel, Lainey-Emelyanov, DEC", type = "l", xlab = "year", ylab = "mas")
dev.off()
dra <- (inp2$V4 - inp3$V4) * 15 * 36e+5
ddec <- (inp2$V5 - inp3$V5) * 36e+5
png("./Ariel_Emelyanov-Laskar_RA.png", width = 1500, height = 500)
plot(cbind(t,dra), main = "Ariel, Emelyanov-Laskar, RA", type = "l", xlab = "year", ylab = "mas")
dev.off()
png("./Ariel_Emelyanov-Laskar_DEC", width = 1500, height = 500)
plot(cbind(t, ddec), main = "Ariel, Emelyanov-Laskar, DEC", type = "l", xlab = "year", ylab = "mas")
dev.off()
dra <- (inp3$V4 - inp1$V4) * 15 * 36e+5
ddec <- (inp3$V5 - inp1$V5) * 36e+5
png("./Ariel_Laskar-Lainey_RA.png", width = 1500, height = 500)
plot(cbind(t,dra), main = "Ariel, Laskar-Lainey, RA", type = "l", xlab = "year", ylab = "mas")
dev.off()
png("./Ariel_Laskar-Lainey_DEC", width = 1500, height = 500)
plot(cbind(t, ddec), main = "Ariel, Laskar-Lainey, DEC", type = "l", xlab = "year", ylab = "mas")
dev.off()
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nested_one_of.R
\docType{class}
\name{NestedOneOf}
\alias{NestedOneOf}
\title{NestedOneOf}
\format{
An \code{R6Class} generator object
}
\description{
NestedOneOf Class
}
\details{
OpenAPI Petstore
This is a sample server Petstore server. For this sample, you can use the api key `special-key` to test the authorization filters.
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{size}}{integer [optional]}
\item{\code{nested_pig}}{\link{Pig} [optional]}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-NestedOneOf-new}{\code{NestedOneOf$new()}}
\item \href{#method-NestedOneOf-toJSON}{\code{NestedOneOf$toJSON()}}
\item \href{#method-NestedOneOf-fromJSON}{\code{NestedOneOf$fromJSON()}}
\item \href{#method-NestedOneOf-toJSONString}{\code{NestedOneOf$toJSONString()}}
\item \href{#method-NestedOneOf-fromJSONString}{\code{NestedOneOf$fromJSONString()}}
\item \href{#method-NestedOneOf-validateJSON}{\code{NestedOneOf$validateJSON()}}
\item \href{#method-NestedOneOf-toString}{\code{NestedOneOf$toString()}}
\item \href{#method-NestedOneOf-clone}{\code{NestedOneOf$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-NestedOneOf-new"></a>}}
\if{latex}{\out{\hypertarget{method-NestedOneOf-new}{}}}
\subsection{Method \code{new()}}{
Initialize a new NestedOneOf class.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{NestedOneOf$new(size = NULL, nested_pig = NULL, ...)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{size}}{size}
\item{\code{nested_pig}}{nested_pig}
\item{\code{...}}{Other optional arguments.}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-NestedOneOf-toJSON"></a>}}
\if{latex}{\out{\hypertarget{method-NestedOneOf-toJSON}{}}}
\subsection{Method \code{toJSON()}}{
To JSON String
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{NestedOneOf$toJSON()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
NestedOneOf in JSON format
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-NestedOneOf-fromJSON"></a>}}
\if{latex}{\out{\hypertarget{method-NestedOneOf-fromJSON}{}}}
\subsection{Method \code{fromJSON()}}{
Deserialize JSON string into an instance of NestedOneOf
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{NestedOneOf$fromJSON(input_json)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{input_json}}{the JSON input}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
the instance of NestedOneOf
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-NestedOneOf-toJSONString"></a>}}
\if{latex}{\out{\hypertarget{method-NestedOneOf-toJSONString}{}}}
\subsection{Method \code{toJSONString()}}{
To JSON String
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{NestedOneOf$toJSONString()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
NestedOneOf in JSON format
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-NestedOneOf-fromJSONString"></a>}}
\if{latex}{\out{\hypertarget{method-NestedOneOf-fromJSONString}{}}}
\subsection{Method \code{fromJSONString()}}{
Deserialize JSON string into an instance of NestedOneOf
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{NestedOneOf$fromJSONString(input_json)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{input_json}}{the JSON input}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
the instance of NestedOneOf
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-NestedOneOf-validateJSON"></a>}}
\if{latex}{\out{\hypertarget{method-NestedOneOf-validateJSON}{}}}
\subsection{Method \code{validateJSON()}}{
Validate JSON input with respect to NestedOneOf and throw an exception if invalid
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{NestedOneOf$validateJSON(input)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{input}}{the JSON input}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-NestedOneOf-toString"></a>}}
\if{latex}{\out{\hypertarget{method-NestedOneOf-toString}{}}}
\subsection{Method \code{toString()}}{
To string (JSON format)
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{NestedOneOf$toString()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
String representation of NestedOneOf
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-NestedOneOf-clone"></a>}}
\if{latex}{\out{\hypertarget{method-NestedOneOf-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{NestedOneOf$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
| /samples/client/petstore/R-httr2/man/NestedOneOf.Rd | permissive | OpenAPITools/openapi-generator | R | false | true | 5,234 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nested_one_of.R
\docType{class}
\name{NestedOneOf}
\alias{NestedOneOf}
\title{NestedOneOf}
\format{
An \code{R6Class} generator object
}
\description{
NestedOneOf Class
}
\details{
OpenAPI Petstore
This is a sample server Petstore server. For this sample, you can use the api key `special-key` to test the authorization filters.
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{size}}{integer [optional]}
\item{\code{nested_pig}}{\link{Pig} [optional]}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-NestedOneOf-new}{\code{NestedOneOf$new()}}
\item \href{#method-NestedOneOf-toJSON}{\code{NestedOneOf$toJSON()}}
\item \href{#method-NestedOneOf-fromJSON}{\code{NestedOneOf$fromJSON()}}
\item \href{#method-NestedOneOf-toJSONString}{\code{NestedOneOf$toJSONString()}}
\item \href{#method-NestedOneOf-fromJSONString}{\code{NestedOneOf$fromJSONString()}}
\item \href{#method-NestedOneOf-validateJSON}{\code{NestedOneOf$validateJSON()}}
\item \href{#method-NestedOneOf-toString}{\code{NestedOneOf$toString()}}
\item \href{#method-NestedOneOf-clone}{\code{NestedOneOf$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-NestedOneOf-new"></a>}}
\if{latex}{\out{\hypertarget{method-NestedOneOf-new}{}}}
\subsection{Method \code{new()}}{
Initialize a new NestedOneOf class.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{NestedOneOf$new(size = NULL, nested_pig = NULL, ...)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{size}}{size}
\item{\code{nested_pig}}{nested_pig}
\item{\code{...}}{Other optional arguments.}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-NestedOneOf-toJSON"></a>}}
\if{latex}{\out{\hypertarget{method-NestedOneOf-toJSON}{}}}
\subsection{Method \code{toJSON()}}{
To JSON String
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{NestedOneOf$toJSON()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
NestedOneOf in JSON format
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-NestedOneOf-fromJSON"></a>}}
\if{latex}{\out{\hypertarget{method-NestedOneOf-fromJSON}{}}}
\subsection{Method \code{fromJSON()}}{
Deserialize JSON string into an instance of NestedOneOf
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{NestedOneOf$fromJSON(input_json)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{input_json}}{the JSON input}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
the instance of NestedOneOf
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-NestedOneOf-toJSONString"></a>}}
\if{latex}{\out{\hypertarget{method-NestedOneOf-toJSONString}{}}}
\subsection{Method \code{toJSONString()}}{
To JSON String
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{NestedOneOf$toJSONString()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
NestedOneOf in JSON format
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-NestedOneOf-fromJSONString"></a>}}
\if{latex}{\out{\hypertarget{method-NestedOneOf-fromJSONString}{}}}
\subsection{Method \code{fromJSONString()}}{
Deserialize JSON string into an instance of NestedOneOf
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{NestedOneOf$fromJSONString(input_json)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{input_json}}{the JSON input}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
the instance of NestedOneOf
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-NestedOneOf-validateJSON"></a>}}
\if{latex}{\out{\hypertarget{method-NestedOneOf-validateJSON}{}}}
\subsection{Method \code{validateJSON()}}{
Validate JSON input with respect to NestedOneOf and throw an exception if invalid
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{NestedOneOf$validateJSON(input)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{input}}{the JSON input}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-NestedOneOf-toString"></a>}}
\if{latex}{\out{\hypertarget{method-NestedOneOf-toString}{}}}
\subsection{Method \code{toString()}}{
To string (JSON format)
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{NestedOneOf$toString()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
String representation of NestedOneOf
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-NestedOneOf-clone"></a>}}
\if{latex}{\out{\hypertarget{method-NestedOneOf-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{NestedOneOf$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
#!/usr/bin/Rscript
# Plot train/test classification error vs. number of dimensions for the
# "parallel" and "diagonal" datasets, before pruning (EBP) and after
# pruning (EAP).
# Colours: red = diagonal, green = parallel; dotted = training error,
# solid = test error.
args <- commandArgs(trailingOnly = TRUE)
if (length(args) != 2) {
  message("USAGE: ./ej07_plot.R parallel_errors diagonal_errors")
  quit()
}
message("parallel")
message(args[1])
message("diagonal")
message(args[2])
parallel <- read.csv(args[1], header = TRUE)
diagonal <- read.csv(args[2], header = TRUE)
# Shared axis limits over all eight error columns so the two figures are
# directly comparable.
minX <- min(parallel$d, diagonal$d)
maxX <- max(parallel$d, diagonal$d)
minY <- min(parallel$TestEBP, parallel$TrainEBP, diagonal$TrainEBP, diagonal$TestEBP,
            parallel$TestEAP, parallel$TrainEAP, diagonal$TrainEAP, diagonal$TestEAP)
maxY <- max(parallel$TestEBP, parallel$TrainEBP, diagonal$TrainEBP, diagonal$TestEBP,
            parallel$TestEAP, parallel$TrainEAP, diagonal$TrainEAP, diagonal$TestEAP)
# Draws one figure: diagonal (red) and parallel (green), training error
# dotted (lty = 3) and test error solid, for the given column pair.
make_plot <- function(file, train_col, test_col) {
  png(file)
  par(mar = c(4, 4, 1, 1)) # plot margins: c(bottom, left, top, right)
  plot(diagonal$d
       , diagonal[[train_col]]
       , col = "red"
       , type = "o"
       , xlim = c(minX, maxX)
       , ylim = c(minY, maxY)
       , xlab = "Dimensions"
       , ylab = "Error percentage"
       , lwd = 2
       , lty = 3)
  lines(diagonal$d
        , diagonal[[test_col]]
        , col = "red"
        , type = "o"
        , lwd = 2)
  lines(parallel$d
        , parallel[[train_col]]
        , col = "green"
        , type = "o"
        , lwd = 2
        , lty = 3)
  lines(parallel$d
        , parallel[[test_col]]
        , col = "green"
        , type = "o"
        , lwd = 2)
  # BUG FIX: the original script never called dev.off(), so the first PNG
  # device was still open when the second png() was issued and the files
  # were only flushed (if at all) when the R session exited.
  dev.off()
}
# Before pruning
make_plot("ej07_ebp.png", "TrainEBP", "TestEBP")
# After pruning
make_plot("ej07_eap.png", "TrainEAP", "TestEAP")
| /ml01/ej07_plot_errors.R | no_license | hgurmendi/machine-learning | R | false | false | 2,077 | r | #!/usr/bin/Rscript
# Plot train/test classification error vs. number of dimensions for the
# "parallel" and "diagonal" datasets, before pruning (EBP) and after
# pruning (EAP).
# Colours: red = diagonal, green = parallel; dotted = training error,
# solid = test error.
args <- commandArgs(trailingOnly = TRUE)
if (length(args) != 2) {
  message("USAGE: ./ej07_plot.R parallel_errors diagonal_errors")
  quit()
}
message("parallel")
message(args[1])
message("diagonal")
message(args[2])
parallel <- read.csv(args[1], header = TRUE)
diagonal <- read.csv(args[2], header = TRUE)
# Shared axis limits over all eight error columns so the two figures are
# directly comparable.
minX <- min(parallel$d, diagonal$d)
maxX <- max(parallel$d, diagonal$d)
minY <- min(parallel$TestEBP, parallel$TrainEBP, diagonal$TrainEBP, diagonal$TestEBP,
            parallel$TestEAP, parallel$TrainEAP, diagonal$TrainEAP, diagonal$TestEAP)
maxY <- max(parallel$TestEBP, parallel$TrainEBP, diagonal$TrainEBP, diagonal$TestEBP,
            parallel$TestEAP, parallel$TrainEAP, diagonal$TrainEAP, diagonal$TestEAP)
# Draws one figure: diagonal (red) and parallel (green), training error
# dotted (lty = 3) and test error solid, for the given column pair.
make_plot <- function(file, train_col, test_col) {
  png(file)
  par(mar = c(4, 4, 1, 1)) # plot margins: c(bottom, left, top, right)
  plot(diagonal$d
       , diagonal[[train_col]]
       , col = "red"
       , type = "o"
       , xlim = c(minX, maxX)
       , ylim = c(minY, maxY)
       , xlab = "Dimensions"
       , ylab = "Error percentage"
       , lwd = 2
       , lty = 3)
  lines(diagonal$d
        , diagonal[[test_col]]
        , col = "red"
        , type = "o"
        , lwd = 2)
  lines(parallel$d
        , parallel[[train_col]]
        , col = "green"
        , type = "o"
        , lwd = 2
        , lty = 3)
  lines(parallel$d
        , parallel[[test_col]]
        , col = "green"
        , type = "o"
        , lwd = 2)
  # BUG FIX: the original script never called dev.off(), so the first PNG
  # device was still open when the second png() was issued and the files
  # were only flushed (if at all) when the R session exited.
  dev.off()
}
# Before pruning
make_plot("ej07_ebp.png", "TrainEBP", "TestEBP")
# After pruning
make_plot("ej07_eap.png", "TrainEAP", "TestEAP")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/allele_genotype_functions.R
\docType{methods}
\name{stringCoverage,extractedReadsListCombined-method}
\alias{stringCoverage,extractedReadsListCombined-method}
\title{Get string coverage STR identified objects.}
\usage{
\S4method{stringCoverage}{extractedReadsListCombined}(extractedReadsListObject,
control = stringCoverage.control())
}
\arguments{
\item{extractedReadsListObject}{an extractedReadsList-object, created using the \link{identifySTRRegions}-function.}
\item{control}{an \link{stringCoverage.control}-object.}
}
\value{
Returns a list, with an element for every marker in extractedReadsList-object, each element contains the string coverage of all unique strings of a given marker.
}
\description{
\code{stringCoverage} takes an extractedReadsList-object and finds the coverage of every unique string for every marker in the provided list.
}
\examples{
# Regions identified using 'identifySTRs()'
data("identifiedSTRs")
# Limiting and restructuring
sortedIncludedMarkers <- sapply(names(identifiedSTRs$identifiedMarkersSequencesUniquelyAssigned),
function(m) which(m == flankingRegions$Marker))
# Aggregate the strings
stringCoverage(extractedReadsListObject = identifiedSTRs,
control = stringCoverage.control(
motifLength = flankingRegions$MotifLength[sortedIncludedMarkers],
Type = flankingRegions$Type[sortedIncludedMarkers],
numberOfThreads = 1,
trace = FALSE,
simpleReturn = TRUE))
}
| /man/stringCoverage-extractedReadsListCombined-method.Rd | no_license | cran/STRMPS | R | false | true | 1,627 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/allele_genotype_functions.R
\docType{methods}
\name{stringCoverage,extractedReadsListCombined-method}
\alias{stringCoverage,extractedReadsListCombined-method}
\title{Get string coverage STR identified objects.}
\usage{
\S4method{stringCoverage}{extractedReadsListCombined}(extractedReadsListObject,
control = stringCoverage.control())
}
\arguments{
\item{extractedReadsListObject}{an extractedReadsList-object, created using the \link{identifySTRRegions}-function.}
\item{control}{an \link{stringCoverage.control}-object.}
}
\value{
Returns a list, with an element for every marker in extractedReadsList-object, each element contains the string coverage of all unique strings of a given marker.
}
\description{
\code{stringCoverage} takes an extractedReadsList-object and finds the coverage of every unique string for every marker in the provided list.
}
\examples{
# Regions identified using 'identifySTRs()'
data("identifiedSTRs")
# Limiting and restructuring
sortedIncludedMarkers <- sapply(names(identifiedSTRs$identifiedMarkersSequencesUniquelyAssigned),
function(m) which(m == flankingRegions$Marker))
# Aggregate the strings
stringCoverage(extractedReadsListObject = identifiedSTRs,
control = stringCoverage.control(
motifLength = flankingRegions$MotifLength[sortedIncludedMarkers],
Type = flankingRegions$Type[sortedIncludedMarkers],
numberOfThreads = 1,
trace = FALSE,
simpleReturn = TRUE))
}
|
library(jsonlite)
library(curl)
# Download the 2012 Formula 1 race results from the Ergast API as raw JSON
# into the working directory.
# NOTE(review): jsonlite/curl are loaded but not used in this snippet --
# presumably the downloaded file is parsed in a later step.
download.file(url = "http://ergast.com/api/f1/2012/results.json" , destfile = "manjeets_DAdata.json")
| /manjeets_Prob1/manjeets_DA1.R | no_license | manjeetsingh87/Data-Visualization-using-R | R | false | false | 135 | r | library(jsonlite)
library(curl)
download.file(url = "http://ergast.com/api/f1/2012/results.json" , destfile = "manjeets_DAdata.json")
|
# Using a given value of m and a target value of h, finds the actual h
# and the resulting arl's
#
# Arguments:
#   p0               -- in-control probability of the monitored (Bernoulli) event
#   m                -- resolution of the search: candidate control limits are
#                       integer multiples of 1/m (h = ns/m)
#   h.start          -- starting guess for the control limit h
#   arl.b0 / arl.g0  -- target in-control ARL on the Bernoulli-trial scale
#                       (arl.b0) or scaled by p0 (arl.g0 = arl.b0 * p0);
#                       supply exactly one of the two
#   head.start.state -- optional starting state passed through to the ARL
#                       calculation (head-start feature); defaults to state 1
#   verbose          -- if TRUE, trace every candidate limit that is evaluated
#
# Returns a list with the achieved arl.b0 and arl.g0, plus m, h = ns/m and ns.
# NOTE(review): relies on Bernoulli.linear.ARL(), defined elsewhere in this
# package, to evaluate the ARL of each candidate limit. The search steps ns
# down (or up) one unit at a time until the achieved ARL first meets or
# exceeds the target.
findBernoulliCL.simple <- function(p0, m, h.start, arl.b0=NULL, arl.g0=NULL,
       head.start.state=NULL, verbose=FALSE) {
  # Validate that exactly one ARL target was supplied and that it is feasible.
  if (is.null(arl.b0) & is.null(arl.g0)) stop("\nNeed a value for arl.b0 or arl.g0.\n")
  if (!is.null(arl.b0) & !is.null(arl.g0)) stop("\nNeed either arl.b0 or arl.g0, not both.\n")
  if (is.null(arl.g0))
    if (arl.b0 < 1/p0)
      stop("\narl.b0 = ",arl.b0," < 1 * (1/p0), need larger arl.b0.\n")
  # Work internally on the Bernoulli-trial scale.
  if (is.null(arl.b0)) arl.b0 <- arl.g0 / p0
  # Starting state of the chain (head start); never below state 1.
  if (!is.null(head.start.state))
    firstState <- max(1, head.start.state)
  else
    firstState <- 1
  # ARL achieved by the initial candidate limit (h.start rounded to a
  # multiple of 1/m).
  find.arl.b0 <- Bernoulli.linear.ARL(m, round(m * h.start , 0) / m, p0)[firstState]
  if (verbose) cat("Initial: find.arl.b0 = ",find.arl.b0," ns =",
                   h.start*m," h =",h.start,"\n")
  count <- 0  # number of ARL evaluations performed during the search
  # Case 1: initial ARL too large -- step the limit ns downward.
  if (find.arl.b0 > arl.b0) {
    ns <- round(m * h.start , 0)
    # stops on first step that is strictly below arl.b0
    while ((find.arl.b0 >= arl.b0) & (ns > 1)) {
      ns <- ns - 1
      find.arl.b0 <- Bernoulli.linear.ARL(m, ns / m, p0)[firstState]
      if (verbose) cat("Overshoot: find.arl.b0 = ",find.arl.b0,
                       " ns =",ns," h =",ns/m,"\n")
      count <- count + 1
    }
    # Go up 1 step and recalculate
    ns <- ns + 1
    find.arl.b0 <- Bernoulli.linear.ARL(m, ns / m, p0)[firstState]
    if (verbose & (count > 20)) cat("Note: ", count,
       "calls to Bernoulli.linear.ARL() were required to find h.\n")
    return(list(arl.b0 = find.arl.b0,
                arl.g0 = find.arl.b0 * p0,
                m = m,
                h = ns / m,
                ns = ns))
  }
  # Case 2: initial ARL too small -- step the limit ns upward.
  else if (find.arl.b0 < arl.b0) {
    ns <- round(m * h.start , 0)
    # stops when we are at or above arl.b0
    while (find.arl.b0 < arl.b0) {
      ns <- ns + 1
      find.arl.b0 <- Bernoulli.linear.ARL(m, ns / m, p0)[firstState]
      if (verbose) cat("Undershoot: find.arl.b0 = ",find.arl.b0,
                       " ns =",ns," h =",ns/m,"\n")
      count <- count + 1
    }
    if (verbose & (count > 20)) cat("Note: ", count,
       "calls to Bernoulli.linear.ARL() were required to find h.\n")
    return(list(arl.b0 = find.arl.b0,
                arl.g0 = find.arl.b0 * p0,
                m = m,
                h = ns / m,
                ns = ns))
  }
  # We hit ARL0 exactly
  else {
    if (verbose) cat("Exact hit.\n")
    return(list(arl.b0 = find.arl.b0,
                arl.g0 = find.arl.b0 * p0,
                m = m,
                h = h.start,
                ns = round(m * h.start)))
  }
} # end findBernoulliCL.simple
| /monitoring/R/findBernoulliCL.simple.R | no_license | lhsego/sUtils | R | false | false | 2,949 | r | # Using a given value of m and a target value of h, finds the actual h
# and the resulting arl's
findBernoulliCL.simple <- function(p0, m, h.start, arl.b0=NULL, arl.g0=NULL,
head.start.state=NULL, verbose=FALSE) {
if (is.null(arl.b0) & is.null(arl.g0)) stop("\nNeed a value for arl.b0 or arl.g0.\n")
if (!is.null(arl.b0) & !is.null(arl.g0)) stop("\nNeed either arl.b0 or arl.g0, not both.\n")
if (is.null(arl.g0))
if (arl.b0 < 1/p0)
stop("\narl.b0 = ",arl.b0," < 1 * (1/p0), need larger arl.b0.\n")
if (is.null(arl.b0)) arl.b0 <- arl.g0 / p0
if (!is.null(head.start.state))
firstState <- max(1, head.start.state)
else
firstState <- 1
find.arl.b0 <- Bernoulli.linear.ARL(m, round(m * h.start , 0) / m, p0)[firstState]
if (verbose) cat("Initial: find.arl.b0 = ",find.arl.b0," ns =",
h.start*m," h =",h.start,"\n")
count <- 0
if (find.arl.b0 > arl.b0) {
ns <- round(m * h.start , 0)
# stops on first step that is strictly below arl.b0
while ((find.arl.b0 >= arl.b0) & (ns > 1)) {
ns <- ns - 1
find.arl.b0 <- Bernoulli.linear.ARL(m, ns / m, p0)[firstState]
if (verbose) cat("Overshoot: find.arl.b0 = ",find.arl.b0,
" ns =",ns," h =",ns/m,"\n")
count <- count + 1
}
# Go up 1 step and recalculate
ns <- ns + 1
find.arl.b0 <- Bernoulli.linear.ARL(m, ns / m, p0)[firstState]
if (verbose & (count > 20)) cat("Note: ", count,
"calls to Bernoulli.linear.ARL() were required to find h.\n")
return(list(arl.b0 = find.arl.b0,
arl.g0 = find.arl.b0 * p0,
m = m,
h = ns / m,
ns = ns))
}
else if (find.arl.b0 < arl.b0) {
ns <- round(m * h.start , 0)
# stops when we are at or above arl.b0
while (find.arl.b0 < arl.b0) {
ns <- ns + 1
find.arl.b0 <- Bernoulli.linear.ARL(m, ns / m, p0)[firstState]
if (verbose) cat("Undershoot: find.arl.b0 = ",find.arl.b0,
" ns =",ns," h =",ns/m,"\n")
count <- count + 1
}
if (verbose & (count > 20)) cat("Note: ", count,
"calls to Bernoulli.linear.ARL() were required to find h.\n")
return(list(arl.b0 = find.arl.b0,
arl.g0 = find.arl.b0 * p0,
m = m,
h = ns / m,
ns = ns))
}
# We hit ARL0 exactly
else {
if (verbose) cat("Exact hit.\n")
return(list(arl.b0 = find.arl.b0,
arl.g0 = find.arl.b0 * p0,
m = m,
h = h.start,
ns = round(m * h.start)))
}
} # end find.h
|
# Probability distributions
# 18 February 2020
#
# R's naming convention for distribution functions:
# d probability density function
# p cumulative probability distribution
# q quantile function (inverse of p)
# r random number generator
### command + = zoom in
### command - = zoom out
# Poisson distribution ----------------------------------------------------
# discrete, support 0 to infinity
# parameter lambda > 0 (continuous)
# constant rate parameter (observations per unit time or unit area)
library(ggplot2)
library(MASS)
# d function for probability density
hits <- 0:10
my_vec <- dpois(x=hits, lambda = 1) # one event per sampling area
qplot(x=hits,
      y=my_vec,
      geom = "col",
      color=I("black"), # black to outline the bars of the plot
      fill=I("goldenrod")) # use I for the identity function for simple colours
# shape is highest on the left side
my_vec <- dpois(x=hits, lambda = 4.4)
qplot(x=hits,
      y=my_vec,
      geom = "col",
      color=I("black"),
      fill=I("goldenrod"))
# shape is closer to symmetric but not quite; the highest probability occurs around 4
sum(my_vec) # this doesn't sum to 1 because hits only goes up to 10
# for the poisson with lambda = 2
# what is the probability that a single draw will yield x=0 ??
dpois(x=0, lambda = 2)
hits <- 0:10
my_vec <- ppois(q=hits, lambda = 2)
qplot(x=hits,
      y=my_vec,
      geom = "col",
      color=I("black"),
      fill=I("goldenrod"))
# for poisson with lambda = 2
# what is the probability that a single random draw will yield x <= 1?
# the p function is the cumulative probability function
ppois(q=1,lambda = 2)
# check by summing the two point probabilities directly:
p1 <- dpois(x=1, lambda=2)
print(p1)
p2 <- dpois(x=0, lambda = 2)
print(p2)
p1 + p2
# the q function is the inverse of p
qpois(p=0.5,lambda=2.5)
# answer is 2 because integer count
# simulate a poisson to get actual values
ran_pois <- rpois(n=1000, lambda = 2.5)
qplot(x=ran_pois,
      color=I("black"),
      fill=I("goldenrod"))
# empirical 95% interval of the simulated draws
quantile(x=ran_pois,probs = c(0.025,0.975))
# Binomial ----------------------------------------------------------------
# p = probability of dichotomous outcome
# size = number of trials
# x = possible outcomes
# outcome x is bounded between 0 and size
# density function for binomial
hits <- 0:10
my_vec <- dbinom(x=hits, size=10, prob=0.5)
qplot(x=hits,
      y=my_vec,
      geom = "col",
      color=I("black"),
      fill=I("goldenrod"))
# what is the probability of getting 5 heads out of 10 tosses?
dbinom(x=5, size=10, prob=0.5)
# the probability is not 0.5
# biased coin (prob = 0.005, heavily biased toward failure)
hits <- 0:10
my_vec <- dbinom(x=hits, size=10, prob=0.005)
qplot(x=0:10,
      y=my_vec,
      geom="col",
      col=I("black"),
      fill=I("goldenrod"))
# p function for tail probability
# probability of 5 or fewer heads out of 10 tosses
pbinom(q=5,size=10,prob=0.5)
# and of 4 or fewer heads out of 9 tosses
pbinom(q=4,size=9,prob=0.5)
# what is the 95% confidence interval for 100 trials with p = 0.7
qbinom(p=c(0.025,0.975),
       size=100,
       prob=0.7)
# how does this compare to a sample interval for real data? rbinom will give a random set of values
my_coins <- rbinom(n=50,
                   size=100,
                   prob=0.50)
qplot(x=my_coins,
      color=I("black"),
      fill=I("goldenrod"))
quantile(x=my_coins,probs=c(0.025, 0.975))
# Negative binomial -------------------------------------------------------
# number of failures in a series of Bernoulli trials
# with p = probability of success,
# before a targeted number of successes (= size); generates a distribution that is more heterogeneous ("overdispersed") than the Poisson
# Poisson
hits
# we are saying: how many failures (tails) will we get before we accumulate 5 heads
hits <- 0:40
my_vec <- dnbinom(x=hits,
                  size=5,
                  prob=0.5)
qplot(x=hits,
      y=my_vec,
      geom="col",
      color=I("black"),
      fill=I("goldenrod"))
# the geometric series is a special case where the number of successes = 1; each bar is a constant fraction of the one that came before it, with probability 1-p
my_vec <- dnbinom(x=hits,
                  size=1,
                  prob=0.1)
qplot(x=hits,
      y=my_vec,
      geom="col",
      color=I("black"),
      fill=I("goldenrod")) # each bar is 90% lower than the previous one
# alternatively, specify the mean (mu) of the distribution and size
# this gives us a Poisson with a lambda value that varies:
# the dispersion parameter (size) is the shape parameter of a gamma distribution; as it increases, the variance gets smaller
nbi_ran <- rnbinom(n=1000, size=10,mu=5)
qplot(nbi_ran,
      color=I("black"),
      fill=I("goldenrod")) | /ProbDist_02.18.2020.R | permissive | sarazenj/SarazenBio381 | R | false | false | 4,567 | r | # Probability distributions
# 18 Feburary 2020
# d probability density function
# p cumulative probability distribution
# q quantile function (inverse of p)
# r random number generator
### command + = zoom in
### command - = zoom out
# Poisson distribution ----------------------------------------------------
# discrete 0 to infinity
# parameter lamba > 0 (continuous)
# constant rate parameter (observations per unit time or unit area)
library(ggplot2)
library(MASS)
# d function for probability density
hits <- 0:10
my_vec <- dpois(x=hits, lambda = 1) # one event per sampling area
qplot(x=hits,
y=my_vec,
geom = "col",
color=I("black"), # black to outline the colors of the plot
fill=I("goldenrod")) # use I for the identity funtion for simple
# shape is highest on the left side
my_vec <- dpois(x=hits, lambda = 4.4)
qplot(x=hits,
y=my_vec,
geom = "col",
color=I("black"),
fill=I("goldenrod"))
# shape is closer to the symetric but not quite. highest prob is occuring around 4
sum(my_vec) # this doesn't sum to 1 because hits only goes till 10
# for the poisson with lambda = 2
# what is the probabilty that a single draw will yield x=0 ??
dpois(x=0, lambda = 2)
hits <- 0:10
my_vec <- ppois(q=hits, lambda = 2)
qplot(x=hits,
y=my_vec,
geom = "col",
color=I("black"),
fill=I("goldenrod"))
# for poisson with lambda = 2
# what is the probability that a single random draw will yield x <= 1?
# p function is the cummulative probabilty function
ppois(q=1,lambda = 2)
p1 <- dpois(x=1, lambda=2)
print(p1)
p2 <- dpois(x=0, lambda = 2)
print(p2)
p1 + p2
# the q function is the inverse of p
qpois(p=0.5,lambda=2.5)
# answer is 2 because integer count
# simulate a poisson to get acutal values
ran_pois <- rpois(n=1000, lambda = 2.5)
qplot(x=ran_pois,
color=I("black"),
fill=I("goldenrod"))
quantile(x=ran_pois,probs = c(0.025,0.975))
# Binomial ----------------------------------------------------------------
# p = probability of dichotomous outcome
# size = number of trials
# x = possible outcomes
# outcome x is bounded between 0 and size
# density function for binomial
hits <- 0:10
my_vec <- dbinom(x=hits, size=10, prob=0.5)
qplot(x=hits,
y=my_vec,
geom = "col",
color=I("black"),
fill=I("goldenrod"))
# what is the probability of getting 5 heads out of 10 tosses?
dbinom(x=5, size=10, prob=0.5)
# the probability is not 0.5
# biased coin
hits <- 0:10
my_vec <- dbinom(x=hits, size=10, prob=0.005)
qplot(x=0:10,
y=my_vec,
geom="col",
col=I("black"),
fill=I("goldenrod"))
# p function for tail probability
# probability of 5 or fewer heads out of 10 tosses
pbinom(q=5,size=10,prob=0.5)
pbinom(q=4,size=9,prob=0.5)
# what is the 95% confidence interval for 100 trials with p = 0.7
qbinom(p=c(0.025,0.975),
size=100,
prob=0.7)
# how does this compare to a sample interval for real data? rbiom will give a random set of values
my_coins <- rbinom(n=50,
size=100,
prob=0.50)
qplot(x=my_coins,
color=I("black"),
fill=I("goldenrod"))
quantile(x=my_coins,probs=c(0.025, 0.975))
# Negative binomial -------------------------------------------------------
# number of failures in a series of
# (Bernouli) with p= probability of success (=size)
# before a targeted number of successes (=size) generates a distribution that is more heterogenous ("overdispersed") than poisson
# Poisson
hits
# we are saying, how many failures (get tails) will you get beofre we accumulate 5 heads
hits <- 0:40
my_vec <- dnbinom(x=hits,
size=5,
prob=0.5)
qplot(x=hits,
y=my_vec,
geom="col",
color=I("black"),
fill=I("goldenrod"))
# geometric series is a special case where number of successes = 1. Each bar is a constant fraction of the one that came before it with prob 1-p
my_vec <- dnbinom(x=hits,
size=1,
prob=0.1)
qplot(x=hits,
y=my_vec,
geom="col",
color=I("black"),
fill=I("goldenrod")) # each bar is 90% lower than the previous one
# alternatively, specify mean = mu of distribution and size
# this give us a poisson with a lambda value that varies
# dispersion parameter is the shape parameter is the shape parameter from a gamma distribution as it increases, the distribution the variance gets smaller
nbi_ran <- rnbinom(n=1000, size=10,mu=5)
qplot(nbi_ran,
color=I("black"),
fill=I("goldenrod")) |
# Auto-extracted example code from the qiimer package help page for
# dist_groups().
library(qiimer)
### Name: dist_groups
### Title: Create a data frame of distances between groups of items.
### Aliases: dist_groups
### ** Examples
# Example datasets shipped with qiimer: a sample distance matrix and the
# matching sample metadata.
data(relmbeta_dist)
data(relmbeta)
# Summarise pairwise distances within/between the Diet groups; show the head.
head(dist_groups(relmbeta_dist, relmbeta$Diet))
| /data/genthat_extracted_code/qiimer/examples/dist_groups.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 238 | r | library(qiimer)
### Name: dist_groups
### Title: Create a data frame of distances between groups of items.
### Aliases: dist_groups
### ** Examples
data(relmbeta_dist)
data(relmbeta)
head(dist_groups(relmbeta_dist, relmbeta$Diet))
|
# Figure 4: a 2x2 grid of household-power time-series panels, written to
# plot4.png. Relies on common.R for readData().
source('./common.R')

# Load the prepared dataset.
power_data <- readData()

# Open a 480x480 PNG device with a transparent background and request a
# two-by-two panel layout.
png(file = "plot4.png", height = 480, width = 480)
par(mfrow = c(2, 2), bg = NA)

# Panel 1 (top left): global active power over time.
plot(Global_active_power ~ DateTime, data = power_data,
     type = "l", xlab = "", ylab = "Global Active Power")

# Panel 2 (top right): voltage over time.
plot(Voltage ~ DateTime, data = power_data,
     type = "l", xlab = "datetime", ylab = "Voltage")

# Panel 3 (bottom left): the three sub-metering series plus a legend.
plot(Sub_metering_1 ~ DateTime, data = power_data,
     type = "l", xlab = "", ylab = "Energy sub metering")
lines(Sub_metering_2 ~ DateTime, data = power_data, col = "red")
lines(Sub_metering_3 ~ DateTime, data = power_data, col = "blue")
legend("topright",
       col = c("black", "red", "blue"),
       lty = 1,
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

# Panel 4 (bottom right): global reactive power over time.
plot(Global_reactive_power ~ DateTime, data = power_data,
     type = "l", xlab = "datetime", ylab = "Global_reactive_power")

# Close the device, flushing the file to disk.
dev.off()
| /plot4.R | no_license | sorenlind/ExData_Plotting1 | R | false | false | 1,020 | r | source('./common.R')
# Read the data used for plotting (readData() is defined in ./common.R,
# which is sourced at the top of this script)
plotData <- readData()
# Open PNG device (480x480 output file)
png(file = "plot4.png", height = 480, width = 480)
# Make space for 4 plots in one (2x2 grid) and set transparent background color
par(mfrow = c(2, 2), bg = NA)
# Create the four panels; with() exposes plotData's columns by name
with(plotData, {
# Top left plot: global active power over time
plot(Global_active_power ~ DateTime, type = "l", xlab = "", ylab = "Global Active Power")
# Top right plot: voltage over time
plot(Voltage ~ DateTime, type = "l", xlab = "datetime", ylab = "Voltage")
# Bottom left plot: the three sub-metering series overlaid, plus a legend
plot(Sub_metering_1 ~ DateTime, type = "l", xlab = "", ylab = "Energy sub metering")
lines(Sub_metering_2 ~ DateTime, col = "red")
lines(Sub_metering_3 ~ DateTime, col = "blue")
legend("topright",
col=c("black", "red", "blue"),
lty = 1,
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Bottom right plot: global reactive power over time
plot(Global_reactive_power~DateTime, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
})
# Shut down the PNG device so the file is written to disk
dev.off()
|
context("test-ldpop")
test_that("ldpop throws an error", {
skip_on_cran()
expect_error(LDpop(var1 = "s3", var2 = "Rs4", pop = "YRI", r2d = "r2", token = Sys.getenv("LDLINK_TOKEN")))
})
test_that("ldpop throws an error w/ invalid var2 coord", {
skip_on_cran()
expect_error(LDpop(var1 = "chr13:32446842", var2 = "cr13:32446842", pop = "YRI", r2d = "r2", token = Sys.getenv("LDLINK_TOKEN")))
})
test_that("ldpop works", {
skip_on_cran()
expect_named(LDpop(var1 = "rs3", var2 = "rs4", pop = "YRI", r2d = "r2", token = Sys.getenv("LDLINK_TOKEN")))
})
test_that("ldpop works with upper case var1", {
skip_on_cran()
expect_named(LDpop(var1 = "rs3", var2 = "rs4", pop = "YRI", r2d = "r2", token = Sys.getenv("LDLINK_TOKEN")))
})
| /tests/testthat/test-ldpop.R | no_license | timyers/LDlinkR-1 | R | false | false | 741 | r | context("test-ldpop")
test_that("ldpop throws an error", {
skip_on_cran()
expect_error(LDpop(var1 = "s3", var2 = "Rs4", pop = "YRI", r2d = "r2", token = Sys.getenv("LDLINK_TOKEN")))
})
test_that("ldpop throws an error w/ invalid var2 coord", {
skip_on_cran()
expect_error(LDpop(var1 = "chr13:32446842", var2 = "cr13:32446842", pop = "YRI", r2d = "r2", token = Sys.getenv("LDLINK_TOKEN")))
})
test_that("ldpop works", {
skip_on_cran()
expect_named(LDpop(var1 = "rs3", var2 = "rs4", pop = "YRI", r2d = "r2", token = Sys.getenv("LDLINK_TOKEN")))
})
test_that("ldpop works with upper case var1", {
skip_on_cran()
expect_named(LDpop(var1 = "rs3", var2 = "rs4", pop = "YRI", r2d = "r2", token = Sys.getenv("LDLINK_TOKEN")))
})
|
# Logistic-regression credit-scoring workflow. Assumes `train.data` and
# `test.data` already exist in the workspace (created by an earlier script),
# with the class label `credit.rating` in column 1 — TODO confirm against
# the data-preparation step.
library(caret) # model training and evaluation
library(ROCR) # model evaluation
source("performance_plot_utils.R") # plotting metric results (project file)
## separate feature and class variables
test.feature.vars <- test.data[,-1]
test.class.var <- test.data[,1]
# build a logistic regression model on all predictors
formula.init <- "credit.rating ~ ."
formula.init <- as.formula(formula.init)
lr.model <- glm(formula=formula.init, data=train.data, family="binomial")
# view model details
summary(lr.model)
# perform and evaluate predictions: predicted probabilities rounded to 0/1
lr.predictions <- predict(lr.model, test.data, type="response")
lr.predictions <- round(lr.predictions)
# NOTE(review): recent caret versions require both arguments of
# confusionMatrix() to be factors with identical levels — verify this call
# still works on the caret version in use.
confusionMatrix(data=lr.predictions, reference=test.class.var, positive='1')
## glm specific feature selection via caret's variable importance
formula <- "credit.rating ~ ."
formula <- as.formula(formula)
control <- trainControl(method="repeatedcv", number=10, repeats=2)
model <- train(formula, data=train.data, method="glm",
trControl=control)
importance <- varImp(model, scale=FALSE)
plot(importance)
# build new model with the selected (most important) features
formula.new <- "credit.rating ~ account.balance + credit.purpose + previous.credit.payment.status
+ savings + credit.duration.months"
formula.new <- as.formula(formula.new)
lr.model.new <- glm(formula=formula.new, data=train.data, family="binomial")
# view model details
summary(lr.model.new)
# perform and evaluate predictions for the reduced model
lr.predictions.new <- predict(lr.model.new, test.data, type="response")
lr.predictions.new <- round(lr.predictions.new)
confusionMatrix(data=lr.predictions.new, reference=test.class.var, positive='1')
## model performance evaluations
# plot evaluation metric curves for the chosen ("best") model
lr.model.best <- lr.model
lr.prediction.values <- predict(lr.model.best, test.feature.vars, type="response")
predictions <- prediction(lr.prediction.values, test.class.var)
par(mfrow=c(1,2))
plot.roc.curve(predictions, title.text="LR ROC Curve")
plot.pr.curve(predictions, title.text="LR Precision/Recall Curve") | /demo/app_intro/examples/2016_RMachineLearningByExample/Ch6_PredictCredit/lr_classifier.R | permissive | stharrold/demo | R | false | false | 2,065 | r | library(caret) # model training and evaluation
library(ROCR) # model evaluation
source("performance_plot_utils.R") # plotting metric results
## separate feature and class variables
test.feature.vars <- test.data[,-1]
test.class.var <- test.data[,1]
# build a logistic regression model
formula.init <- "credit.rating ~ ."
formula.init <- as.formula(formula.init)
lr.model <- glm(formula=formula.init, data=train.data, family="binomial")
# view model details
summary(lr.model)
# perform and evaluate predictions
lr.predictions <- predict(lr.model, test.data, type="response")
lr.predictions <- round(lr.predictions)
confusionMatrix(data=lr.predictions, reference=test.class.var, positive='1')
## glm specific feature selection
formula <- "credit.rating ~ ."
formula <- as.formula(formula)
control <- trainControl(method="repeatedcv", number=10, repeats=2)
model <- train(formula, data=train.data, method="glm",
trControl=control)
importance <- varImp(model, scale=FALSE)
plot(importance)
# build new model with selected features
formula.new <- "credit.rating ~ account.balance + credit.purpose + previous.credit.payment.status
+ savings + credit.duration.months"
formula.new <- as.formula(formula.new)
lr.model.new <- glm(formula=formula.new, data=train.data, family="binomial")
# view model details
summary(lr.model.new)
# perform and evaluate predictions
lr.predictions.new <- predict(lr.model.new, test.data, type="response")
lr.predictions.new <- round(lr.predictions.new)
confusionMatrix(data=lr.predictions.new, reference=test.class.var, positive='1')
## model performance evaluations
# plot best model evaluation metric curves
lr.model.best <- lr.model
lr.prediction.values <- predict(lr.model.best, test.feature.vars, type="response")
predictions <- prediction(lr.prediction.values, test.class.var)
par(mfrow=c(1,2))
plot.roc.curve(predictions, title.text="LR ROC Curve")
plot.pr.curve(predictions, title.text="LR Precision/Recall Curve") |
context("test-check-annotation-keys.R")
test_that("check_annotation_keys returns character(0) when no invalid annotations present", {
dat1 <- data.frame()
dat2 <- data.frame(assay = "rnaSeq")
res1 <- check_annotation_keys(dat1)
res2 <- check_annotation_keys(dat2)
expect_equal(res1, character(0))
expect_equal(res2, character(0))
})
test_that("check_annotation_keys returns invalid annotation values", {
dat <- data.frame(a = 1, b = 2)
suppressMessages(res <- check_annotation_keys(dat))
expect_equal(res, names(dat))
})
test_that("check_annotation_keys provides message", {
dat <- data.frame(a = 1, b = 2)
expect_message(check_annotation_keys(dat))
})
test_that("check_annotation_keys works for File objects", {
skip_on_travis()
skip_on_cran()
library("synapser")
synLogin()
a <- synGet("syn17038064", downloadFile = FALSE)
b <- synGet("syn17038065", downloadFile = FALSE)
resa <- suppressMessages(check_annotation_keys(a))
resb <- suppressMessages(check_annotation_keys(b))
expect_equal(resa, character(0))
expect_equal(resb, "randomAnnotation")
})
test_that("check_annotation_keys works for file views", {
skip_on_travis()
skip_on_cran()
library("synapser")
synLogin()
fv <- synTableQuery("SELECT * FROM syn17038067")
res <- suppressMessages(check_annotation_keys(fv))
expect_equal(res, "randomAnnotation")
})
test_that("report_invalid_keys creates a message", {
expect_message(report_invalid_keys("foo"))
})
| /tests/testthat/test-check-annotation-keys.R | permissive | milen-sage/dccvalidator | R | false | false | 1,476 | r | context("test-check-annotation-keys.R")
test_that("check_annotation_keys returns character(0) when no invalid annotations present", {
dat1 <- data.frame()
dat2 <- data.frame(assay = "rnaSeq")
res1 <- check_annotation_keys(dat1)
res2 <- check_annotation_keys(dat2)
expect_equal(res1, character(0))
expect_equal(res2, character(0))
})
test_that("check_annotation_keys returns invalid annotation values", {
dat <- data.frame(a = 1, b = 2)
suppressMessages(res <- check_annotation_keys(dat))
expect_equal(res, names(dat))
})
test_that("check_annotation_keys provides message", {
dat <- data.frame(a = 1, b = 2)
expect_message(check_annotation_keys(dat))
})
test_that("check_annotation_keys works for File objects", {
skip_on_travis()
skip_on_cran()
library("synapser")
synLogin()
a <- synGet("syn17038064", downloadFile = FALSE)
b <- synGet("syn17038065", downloadFile = FALSE)
resa <- suppressMessages(check_annotation_keys(a))
resb <- suppressMessages(check_annotation_keys(b))
expect_equal(resa, character(0))
expect_equal(resb, "randomAnnotation")
})
test_that("check_annotation_keys works for file views", {
skip_on_travis()
skip_on_cran()
library("synapser")
synLogin()
fv <- synTableQuery("SELECT * FROM syn17038067")
res <- suppressMessages(check_annotation_keys(fv))
expect_equal(res, "randomAnnotation")
})
test_that("report_invalid_keys creates a message", {
expect_message(report_invalid_keys("foo"))
})
|
# Example 10.4 (Neamen, p. 395): BJT current-gain components.
# NOTE: `T` shadows R's TRUE alias here; kept for fidelity with the textbook.
T <- 300                # temperature (K)
k <- 8.617 * 10 ^ -5    # Boltzmann constant (eV/K)
e <- 1.6 * 10 ^ -19     # electron charge (C)
DE <- 10                # emitter minority-carrier diffusion coefficient
DB <- 25                # base minority-carrier diffusion coefficient
xB <- 0.70 * 10 ^ -4    # neutral base width (cm)
xE <- 0.50 * 10 ^ -3    # emitter width (cm)
NE <- 1 * 10 ^ 18       # emitter doping (cm^-3)
NB <- 1 * 10 ^ 16       # base doping (cm^-3)
VBE <- 0.65             # base-emitter voltage (V)
tauB0 <- 5 * 10 ^ -7
tauE0 <- 1 * 10 ^ -7
Jr0 <- 5 * 10 ^ -8      # recombination current density (A/cm^2)
pE0 <- 2.25 * 10 ^ 2    # equilibrium minority-carrier conc. in emitter
nB0 <- 2.25 * 10 ^ 4    # equilibrium minority-carrier conc. in base
LE <- 10 ^ -3           # emitter diffusion length (cm)
LB <- 3.54 * 10 ^ -3    # base diffusion length (cm)

# Emitter injection efficiency (gamma), rounded to 4 decimals.
gamma <- 1 / (1 + (pE0 * DE * LB * tanh(0.0198)) / (nB0 * DB * LE * tanh(0.050)))
gamma1 <- round(gamma, digits = 4)
cat(gamma1, "\n")

# Base transport factor (alpha_T).
alpha <- 1 / cosh(xB / LB)
alphatau <- round(alpha, digits = 4)
cat(alphatau, "\n")

# Collector saturation current density Js0.
Js0 <- (e * DB * nB0) / (LB * tanh(xB / LB))
cat(signif(Js0, digits = 3), "A/cm^2\n")

# Recombination factor (delta); 2 * 0.0259 is 2kT/e at 300 K.
z <- 1 / (1 + (Jr0 / Js0) * exp(-VBE / (2 * 0.0259)))
delta <- round(z, digits = 5)
cat(delta, "\n")

# Common-base current gain: product of the three factors above.
q <- gamma1 * alphatau * delta
z <- round(q, digits = 5)
cat(z, "\n")

# Common-emitter current gain beta = alpha / (1 - alpha).
beta1 <- z / (1 - z)
cat(round(beta1, digits = 0), "\n") | /Semiconductor_Physics_And_Devices_-_Basic_Principles_by_D_A_Neamen/CH10/EX10.4/Ex10_4.R | permissive | FOSSEE/R_TBC_Uploads | R | false | false | 818 | r | #Page 395
T = 300
k = 8.617 * 10 ^ -5
e = 1.6 * 10 ^ -19
DE = 10
DB = 25
xB = 0.70 * 10 ^ -4
xE = 0.50 * 10 ^ -3
NE = 1 * 10 ^ 18
NB = 1 * 10 ^ 16
VBE = 0.65
tauB0 = 5 * 10 ^ -7
tauE0 = 1 * 10 ^ -7
Jr0 = 5 * 10 ^ -8
pE0 = 2.25 * 10 ^ 2
nB0 = 2.25 * 10 ^ 4
LE = 10 ^ -3
LB = 3.54 * 10 ^ -3
gamma = 1 / (1 + (pE0 * DE * LB * tanh(0.0198)) / (nB0 * DB * LE * tanh(0.050)))
gamma1 = round(gamma, digits = 4)
cat(gamma1, "\n")
alpha = 1 / cosh(xB / LB)
alphatau = round(alpha, digits = 4)
cat(alphatau, "\n")
Js0 = (e * DB * nB0) / (LB * tanh(xB / LB))
cat(signif(Js0, digits = 3), "A/cm^2\n")
z = 1 / (1 + (Jr0 / Js0) * exp(-VBE / (2 * 0.0259)))
delta = round(z, digits = 5)
cat(delta, "\n")
q = gamma1 * alphatau * delta
z = round(q, digits = 5)
cat(z, "\n")
beta1 = z / (1 - z)
cat(round(beta1, digits = 0), "\n") |
### Charles Ferté
### Sage Bionetworks
### Seattle, WA
### January, 6th 2012
### script for running modelling prediction
#load the packages
library(affy)
library(survival)
library(Biobase)
library(MASS)
library(glmnet)
library(corpcor)
library(ROCR)
library(synapseClient)
library(survival)
library(risksetROC)
library(caret)
library(survcomp)
# Point to the working directory for the chosen normalization method
# (choose method among = RMA, GCRMA, MAS5, dCHIP, metaGEO, fRMA or barcode)
method= "barcode"
PATH <- "/home/cferte/FELLOW/cferte/NSCLC_MA/MATRIX_RESP_OBJECTS/"
setwd(paste(PATH,method,sep=""))
## Load the expression matrices and response objects:
## TS = training set, VS / VS2 = validation sets. The y_OS_* objects are
## used below as Surv(y_OS_TS[,1], y_OS_TS[,2]), i.e. column 1 = survival
## time and column 2 = event status — presumably overall survival; confirm
## against the script that created these .Rdata files.
load("MATRIX_TS.Rdata")
load("MATRIX_VS.Rdata")
load("MATRIX_VS2.Rdata")
load("y_TS.Rdata")
load("y_VS.Rdata")
load("y_VS2.Rdata")
load("y_OS_TS.Rdata")
load("y_OS_VS.Rdata")
load("y_OS_VS2.Rdata")
##############################################################################################
### rescale the VS according to the TS and call the new p . n matrix YSCALED
##############################################################################################
# Rescale each row of Y onto a reference distribution: every row is centered
# and scaled by its own mean/sd, then mapped to the reference per-row means
# (mean.x) and standard deviations (sd.x). mean.x and sd.x recycle down the
# rows of Y, matching the original behavior.
normalize_to_X <- function(mean.x, sd.x, Y){
  row.means <- rowMeans(Y)
  row.sds <- apply(Y, 1, sd)
  (Y - row.means) * sd.x / row.sds + mean.x
}
# Rescale the validation matrices to the training set's per-row (per-gene)
# distribution; the training set itself is left unchanged.
X <- MATRIX_TS # this is my p x n training set
mean_x <- apply(X,1,mean)  # per-row means of the training set
sd_x <- apply(X,1,sd)      # per-row standard deviations
MATRIX_TS_S <- MATRIX_TS
MATRIX_VS_S <- normalize_to_X(mean_x,sd_x,MATRIX_VS)
MATRIX_VS2_S <- normalize_to_X(mean_x,sd_x,MATRIX_VS2)
############################################################################################################################
# ElasticNet -- Cox proportional-hazards model: grid search over
# (alpha, lambda). For every grid point a glmnet Cox model is fit on the
# training set and its concordance index (C-index) is recorded on the
# training set and on both validation sets, using both the raw and the
# rescaled ("_S") validation matrices.
############################################################################################################################
# glmnet expects samples in rows, so transpose the p x n training matrix
x <- t(MATRIX_TS)
# Survival response: column 1 = time, column 2 = event status
y <- Surv(y_OS_TS[,1],y_OS_TS[,2])
# C-index accumulators, one entry appended per grid point.
# NOTE(review): grown with c() inside the loop — preallocating would be
# faster, but results are unaffected.
CI_TS <- c()
CI_VS <- c()
CI_VS2 <- c()
CI_TS_S <- c()
CI_VS_S <- c()
CI_VS2_S <- c()
# 11 alpha values (e^0 .. e^-10) and 8 lambda values (e^-4 .. e^3)
alphas <- exp(-1*seq(0,10,1))
lambdas <- exp(seq(-4,3,1))
GRID <- expand.grid(.family="cox",.alpha=alphas,.lambda=lambdas)
for(i in 1:dim(GRID)[1]){
# try() so a failing grid point does not abort the whole sweep
fit <- try(glmnet(x,y,family="cox",alpha=GRID$.alpha[i],lambda=GRID$.lambda[i]))
# NOTE(review): inherits(fit, "try-error") would be the safer test
if( class(fit) == "try-error" ){
# Failed fit: record NA in every series so indices stay aligned with GRID
CI_TS <- c(CI_TS, NA)
CI_VS <- c(CI_VS, NA)
CI_VS2 <- c(CI_VS2, NA)
CI_TS_S <- c(CI_TS_S, NA)
CI_VS_S <- c(CI_VS_S, NA)
CI_VS2_S <- c(CI_VS2_S, NA)
} else{
# Linear predictors per set, then Noether C-index vs observed survival
y_E_TS <- predict(fit,x,type="link")
CI_TS <- c(CI_TS,concordance.index(y_E_TS,y_OS_TS[,1],y_OS_TS[,2],na.rm=T,method="noether")[1])
y_E_VS <- predict(fit,t(MATRIX_VS),type="link")
CI_VS <- c(CI_VS,concordance.index(y_E_VS,y_OS_VS[,1],y_OS_VS[,2],na.rm=T,method="noether")[1])
y_E_VS2 <- predict(fit,t(MATRIX_VS2),type="link")
CI_VS2 <- c(CI_VS2,concordance.index(y_E_VS2,y_OS_VS2[,1],y_OS_VS2[,2],na.rm=T,method="noether")[1])
# Same again for the rescaled series (training set itself is not rescaled)
y_E_TS_S <- predict(fit,x,type="link")
CI_TS_S <- c(CI_TS_S,concordance.index(y_E_TS_S,y_OS_TS[,1],y_OS_TS[,2],na.rm=T,method="noether")[1])
y_E_VS_S <- predict(fit,t(MATRIX_VS_S),type="link")
CI_VS_S <- c(CI_VS_S,concordance.index(y_E_VS_S,y_OS_VS[,1],y_OS_VS[,2],na.rm=T,method="noether")[1])
y_E_VS2_S <- predict(fit,t(MATRIX_VS2_S),type="link")
CI_VS2_S <- c(CI_VS2_S,concordance.index(y_E_VS2_S,y_OS_VS2[,1],y_OS_VS2[,2],na.rm=T,method="noether")[1])
}
}
# Summarize the valid (non-NA) C-index values of each series. Each
# concordance.index() call returned a list, so the accumulators hold list
# elements; the "c.index" entries carry the actual index values.
# NOTE(review): `==F` relies on the F alias for FALSE; !is.na(...) would be
# more robust.
summary(as.numeric(CI_TS[names(CI_TS)=="c.index"][is.na(CI_TS[names(CI_TS)=="c.index"])==F]))
summary(as.numeric(CI_VS[names(CI_VS)=="c.index"][is.na(CI_VS[names(CI_VS)=="c.index"])==F]))
summary(as.numeric(CI_VS2[names(CI_VS2)=="c.index"][is.na(CI_VS2[names(CI_VS2)=="c.index"])==F]))
summary(as.numeric(CI_TS_S[names(CI_TS_S)=="c.index"][is.na(CI_TS_S[names(CI_TS_S)=="c.index"])==F]))
summary(as.numeric(CI_VS_S[names(CI_VS_S)=="c.index"][is.na(CI_VS_S[names(CI_VS_S)=="c.index"])==F]))
summary(as.numeric(CI_VS2_S[names(CI_VS2_S)=="c.index"][is.na(CI_VS2_S[names(CI_VS2_S)=="c.index"])==F]))
# Grid points that achieve the best C-index on each set
GRID[which(CI_TS==max(as.numeric(CI_TS[names(CI_TS)=="c.index"][is.na(CI_TS[names(CI_TS)=="c.index"])==F]))),]
GRID[which(CI_VS==max(as.numeric(CI_VS[names(CI_VS)=="c.index"][is.na(CI_VS[names(CI_VS)=="c.index"])==F]))),]
GRID[which(CI_VS2==max(as.numeric(CI_VS2[names(CI_VS2)=="c.index"][is.na(CI_VS2[names(CI_VS2)=="c.index"])==F]))),]
# Assemble a results table: one row per grid point
CI_TOTAL <- cbind(GRID$.alpha,GRID$.lambda,CI_TS,CI_VS,CI_VS2)
# 88 rows = 11 alpha values x 8 lambda values
rownames(CI_TOTAL)<- c(1:88)
colnames(CI_TOTAL)[1]<-"alpha"
colnames(CI_TOTAL)[2]<-"lambda"
CI_TOTAL<-as.data.frame(CI_TOTAL)
# GAL colors the plot below: blue = fit failed (NA C-index), red = succeeded
CI_TOTAL$GAL <- ifelse(is.na(CI_TOTAL$CI_TS),"blue","red")
CI_TOTAL$method <- method
# Plot the (log alpha, log lambda) grid colored by success and save as PNG
setwd("~/FELLOW/cferte/NSCLC_MA/ANALYSIS/results_elasticnet_grid/")
png(paste("GRID_",method,".png",sep=""))
plot(log(as.numeric(CI_TOTAL$alpha)),log(as.numeric(CI_TOTAL$lambda)), col=CI_TOTAL$GAL, main=paste("alpha lambda GRID for",method),xlab="log(alpha)",ylab="log(lambda)",pch=20)
dev.off()
tmp <- paste("CI_",method,sep="")
assign(tmp,CI_TOTAL)
save(list=paste("CI_",method,sep=""), file=paste("CI_",method,".Rdata",sep="")
| /ANALYSIS/Elasticnet_Cox.R | no_license | chferte/NSCLC_Sig | R | false | false | 5,153 | r | ### Charles Ferté
### Sage Bionetworks
### Seattle, WA
### January, 6th 2012
### script for running modelling prediction
#load the packages
library(affy)
library(survival)
library(Biobase)
library(MASS)
library(glmnet)
library(corpcor)
library(ROCR)
library(synapseClient)
library(survival)
library(risksetROC)
library(caret)
library(survcomp)
# point the directory (choose method among = RMA, GCRMA, MAS5, dCHIP, metaGEO, fRMA or barcode)
method= "barcode"
PATH <- "/home/cferte/FELLOW/cferte/NSCLC_MA/MATRIX_RESP_OBJECTS/"
setwd(paste(PATH,method,sep=""))
## load the matrix and response files
load("MATRIX_TS.Rdata")
load("MATRIX_VS.Rdata")
load("MATRIX_VS2.Rdata")
load("y_TS.Rdata")
load("y_VS.Rdata")
load("y_VS2.Rdata")
load("y_OS_TS.Rdata")
load("y_OS_VS.Rdata")
load("y_OS_VS2.Rdata")
##############################################################################################
### rescale the VS according to the TS and call the new p . n matrix YSCALED
##############################################################################################
normalize_to_X <- function(mean.x, sd.x, Y){
m.y <- rowMeans(Y)
sd.y <- apply(Y, 1, sd)
Y.adj <- (Y - m.y) * sd.x / sd.y + mean.x
Y.adj
}
X <- MATRIX_TS # this my p x n training set
mean_x <- apply(X,1,mean)
sd_x <- apply(X,1,sd)
MATRIX_TS_S <- MATRIX_TS
MATRIX_VS_S <- normalize_to_X(mean_x,sd_x,MATRIX_VS)
MATRIX_VS2_S <- normalize_to_X(mean_x,sd_x,MATRIX_VS2)
############################################################################################################################
############################################################################################################################
######### start ElasticNet -- COX proportional model
############################################################################################################################
############################################################################################################################
x <- t(MATRIX_TS)
y <- Surv(y_OS_TS[,1],y_OS_TS[,2])
CI_TS <- c()
CI_VS <- c()
CI_VS2 <- c()
CI_TS_S <- c()
CI_VS_S <- c()
CI_VS2_S <- c()
alphas <- exp(-1*seq(0,10,1))
lambdas <- exp(seq(-4,3,1))
GRID <- expand.grid(.family="cox",.alpha=alphas,.lambda=lambdas)
for(i in 1:dim(GRID)[1]){
fit <- try(glmnet(x,y,family="cox",alpha=GRID$.alpha[i],lambda=GRID$.lambda[i]))
if( class(fit) == "try-error" ){
CI_TS <- c(CI_TS, NA)
CI_VS <- c(CI_VS, NA)
CI_VS2 <- c(CI_VS2, NA)
CI_TS_S <- c(CI_TS_S, NA)
CI_VS_S <- c(CI_VS_S, NA)
CI_VS2_S <- c(CI_VS2_S, NA)
} else{
y_E_TS <- predict(fit,x,type="link")
CI_TS <- c(CI_TS,concordance.index(y_E_TS,y_OS_TS[,1],y_OS_TS[,2],na.rm=T,method="noether")[1])
y_E_VS <- predict(fit,t(MATRIX_VS),type="link")
CI_VS <- c(CI_VS,concordance.index(y_E_VS,y_OS_VS[,1],y_OS_VS[,2],na.rm=T,method="noether")[1])
y_E_VS2 <- predict(fit,t(MATRIX_VS2),type="link")
CI_VS2 <- c(CI_VS2,concordance.index(y_E_VS2,y_OS_VS2[,1],y_OS_VS2[,2],na.rm=T,method="noether")[1])
y_E_TS_S <- predict(fit,x,type="link")
CI_TS_S <- c(CI_TS_S,concordance.index(y_E_TS_S,y_OS_TS[,1],y_OS_TS[,2],na.rm=T,method="noether")[1])
y_E_VS_S <- predict(fit,t(MATRIX_VS_S),type="link")
CI_VS_S <- c(CI_VS_S,concordance.index(y_E_VS_S,y_OS_VS[,1],y_OS_VS[,2],na.rm=T,method="noether")[1])
y_E_VS2_S <- predict(fit,t(MATRIX_VS2_S),type="link")
CI_VS2_S <- c(CI_VS2_S,concordance.index(y_E_VS2_S,y_OS_VS2[,1],y_OS_VS2[,2],na.rm=T,method="noether")[1])
}
}
summary(as.numeric(CI_TS[names(CI_TS)=="c.index"][is.na(CI_TS[names(CI_TS)=="c.index"])==F]))
summary(as.numeric(CI_VS[names(CI_VS)=="c.index"][is.na(CI_VS[names(CI_VS)=="c.index"])==F]))
summary(as.numeric(CI_VS2[names(CI_VS2)=="c.index"][is.na(CI_VS2[names(CI_VS2)=="c.index"])==F]))
summary(as.numeric(CI_TS_S[names(CI_TS_S)=="c.index"][is.na(CI_TS_S[names(CI_TS_S)=="c.index"])==F]))
summary(as.numeric(CI_VS_S[names(CI_VS_S)=="c.index"][is.na(CI_VS_S[names(CI_VS_S)=="c.index"])==F]))
summary(as.numeric(CI_VS2_S[names(CI_VS2_S)=="c.index"][is.na(CI_VS2_S[names(CI_VS2_S)=="c.index"])==F]))
GRID[which(CI_TS==max(as.numeric(CI_TS[names(CI_TS)=="c.index"][is.na(CI_TS[names(CI_TS)=="c.index"])==F]))),]
GRID[which(CI_VS==max(as.numeric(CI_VS[names(CI_VS)=="c.index"][is.na(CI_VS[names(CI_VS)=="c.index"])==F]))),]
GRID[which(CI_VS2==max(as.numeric(CI_VS2[names(CI_VS2)=="c.index"][is.na(CI_VS2[names(CI_VS2)=="c.index"])==F]))),]
CI_TOTAL <- cbind(GRID$.alpha,GRID$.lambda,CI_TS,CI_VS,CI_VS2)
rownames(CI_TOTAL)<- c(1:88)
colnames(CI_TOTAL)[1]<-"alpha"
colnames(CI_TOTAL)[2]<-"lambda"
CI_TOTAL<-as.data.frame(CI_TOTAL)
CI_TOTAL$GAL <- ifelse(is.na(CI_TOTAL$CI_TS),"blue","red")
CI_TOTAL$method <- method
setwd("~/FELLOW/cferte/NSCLC_MA/ANALYSIS/results_elasticnet_grid/")
png(paste("GRID_",method,".png",sep=""))
plot(log(as.numeric(CI_TOTAL$alpha)),log(as.numeric(CI_TOTAL$lambda)), col=CI_TOTAL$GAL, main=paste("alpha lambda GRID for",method),xlab="log(alpha)",ylab="log(lambda)",pch=20)
dev.off()
tmp <- paste("CI_",method,sep="")
assign(tmp,CI_TOTAL)
save(list=paste("CI_",method,sep=""), file=paste("CI_",method,".Rdata",sep="")
|
install.packages("DBI")
install.packages("RMySQL")
install.packages("dplyr")
install.packages("ggplot2")
library(dplyr)
library(DBI)
library(RMySQL)
library(ggplot2)
MyDataBase <- dbConnect(
drv = RMySQL::MySQL(),
dbname = "shinydemo",
host = "shiny-demo.csa7qlmguqrf.us-east-1.rds.amazonaws.com",
username = "guest",
password = "guest")
dbListTables(MyDataBase)
dbListFields(MyDataBase, 'CountryLanguage')
DataDB <- dbGetQuery(MyDataBase, "select * from CountryLanguage")
names(DataDB)
SP <- DataDB %>% filter(Language == "Spanish")
SP.df <- as.data.frame(SP)
SP.df %>% ggplot(aes( x = CountryCode, y=Percentage, fill = IsOfficial )) +
geom_bin2d() +
coord_flip()
| /queries.R | no_license | JorgeMge/Reto_Sesion_7 | R | false | false | 688 | r | install.packages("DBI")
install.packages("RMySQL")
install.packages("dplyr")
install.packages("ggplot2")
library(dplyr)
library(DBI)
library(RMySQL)
library(ggplot2)
MyDataBase <- dbConnect(
drv = RMySQL::MySQL(),
dbname = "shinydemo",
host = "shiny-demo.csa7qlmguqrf.us-east-1.rds.amazonaws.com",
username = "guest",
password = "guest")
dbListTables(MyDataBase)
dbListFields(MyDataBase, 'CountryLanguage')
DataDB <- dbGetQuery(MyDataBase, "select * from CountryLanguage")
names(DataDB)
SP <- DataDB %>% filter(Language == "Spanish")
SP.df <- as.data.frame(SP)
SP.df %>% ggplot(aes( x = CountryCode, y=Percentage, fill = IsOfficial )) +
geom_bin2d() +
coord_flip()
|
# Class 5 - ML: regression demos on mtcars, then probability/classification
# demos on the Titanic data (teaching script).
m <- mtcars
# Variable treatment: factor version of cyl plus one dummy column per level.
m$cyl_F <- as.factor(m$cyl)
# NOTE(review): dummy() comes from the "dummies" package, which is only
# installed/loaded further down this script — run those lines first or this
# call fails in a fresh session.
m <- cbind(m, dummy(m$cyl))
# Reproducible 24/8 train/test split of the 32 rows.
set.seed(33)
va <- sample(32)
treino <- m[va[1:24],]
teste <- m[va[25:32],]
#mod <- lm(mpg~wt, data=treino)
#mod <- lm(mpg~log(wt), data=treino) # transf log-lin
#mod <- lm(mpg~poly(wt,2), data=treino) # transf polinomial
# Overfitting simulation: increasing polynomial degree.
#mod <- lm(mpg~poly(wt,3), data=treino) # transf polinomial
#mod <- lm(mpg~poly(wt,4), data=treino) # transf polinomial
#mod <- lm(mpg~poly(wt,9), data=treino) # transf polinomial
#mod <- lm(mpg~poly(wt,14), data=treino) # transf polinomial
#mod <- lm(mpg~wt+cyl, data=treino) # cyl numerico
mod <- lm(mpg~wt+cyl_F, data=treino) # cyl categorico=cyl_F
summary(mod)
# Test-set predictions and sum of squared errors.
p <- predict(mod, newdata=teste)
sse <- sum((p-teste$mpg)^2)
# Log-linearity analysis: correlation before and after the log transform.
cor(m$mpg, m$wt)
cor(m$mpg, log(m$wt))
# Visual check of the log-linear relationship.
plot(m$mpg~log(m$wt))
# Categorical-variable example.
v1 <- c(1,3,5,5)
as.factor(v1)
# Creating dummies through a package.
install.packages("dummies")
library(dummies)
dummy(as.factor(v1))
##################################
# Classifiers
# Probability theory on the Titanic training data.
tit <- read.csv("https://raw.githubusercontent.com/diogenesjusto/FIAP/master/dados/train_titanic.csv")
# Proportion of survivors.
nrow(tit[tit$Survived==1,])/nrow(tit)
# Probability analysis: counts and proportions.
table(tit$Survived)
prop.table(table(tit$Survived))
# Using SQL inside R.
install.packages("sqldf")
library(sqldf)
nt <- sqldf("select Survived,count(*) from tit group by 1")
# Joint probabilities of combined events (sex x survival).
prop.table(table(tit[,c("Sex", "Survived")]))
# Conditional-probability tree.
install.packages("party")
library(party)
# NOTE(review): Survived is numeric here, so ctree() fits a regression tree;
# convert it to a factor if a classification tree is intended.
mod <- ctree(Survived~as.factor(Sex)+Pclass+Age, data=tit)
plot(mod, type="simple")
| /SHIFT/202109/t31_aula5.R | no_license | Uemura84/FIAP | R | false | false | 1,834 | r | # Aula 5 - ML
m <- mtcars
# Tratamento das variáveis
m$cyl_F <- as.factor(m$cyl)
m <- cbind(m, dummy(m$cyl))
set.seed(33)
va <- sample(32)
treino <- m[va[1:24],]
teste <- m[va[25:32],]
#mod <- lm(mpg~wt, data=treino)
#mod <- lm(mpg~log(wt), data=treino) # transf log-lin
#mod <- lm(mpg~poly(wt,2), data=treino) # transf polinomial
# Simulação do overfitting = fenômeno do sobreajuste
#mod <- lm(mpg~poly(wt,3), data=treino) # transf polinomial
#mod <- lm(mpg~poly(wt,4), data=treino) # transf polinomial
#mod <- lm(mpg~poly(wt,9), data=treino) # transf polinomial
#mod <- lm(mpg~poly(wt,14), data=treino) # transf polinomial
#mod <- lm(mpg~wt+cyl, data=treino) # cyl numerico
mod <- lm(mpg~wt+cyl_F, data=treino) # cyl categorico=cyl_F
summary(mod)
p <- predict(mod, newdata=teste)
sse <- sum((p-teste$mpg)^2)
# Análise de log-lineariedade
cor(m$mpg, m$wt)
# aplicando o log
cor(m$mpg, log(m$wt))
# análise visual
plot(m$mpg~log(m$wt))
# Variável categórica
v1 <- c(1,3,5,5)
as.factor(v1)
# Criação de dummies atra´ves de um package
install.packages("dummies")
library(dummies)
dummy(as.factor(v1))
##################################33
# Classificadores
# Teoria de probabilidades
tit <- read.csv("https://raw.githubusercontent.com/diogenesjusto/FIAP/master/dados/train_titanic.csv")
# Contar # sobreviventes
nrow(tit[tit$Survived==1,])/nrow(tit)
# Análise probabilidades
table(tit$Survived)
prop.table(table(tit$Survived))
# Usando sql no R
install.packages("sqldf")
library(sqldf)
nt <- sqldf("select Survived,count(*) from tit group by 1")
# Probabilidades de eventos combinado
prop.table(table(tit[,c("Sex", "Survived")]))
# Árvore de probabilidades condicionais
install.packages("party")
library(party)
mod <- ctree(Survived~as.factor(Sex)+Pclass+Age, data=tit)
plot(mod, type="simple")
|
# For loop: convert popoolation2 .sync files into long-format count tables.
## Command-line arguments: args[1] is the parent directory holding the
## per-sample directories to process.
args <- commandArgs(trailingOnly = TRUE)
## Convert a .sync file into long format, filter somewhat, and keep only
## position, treatment, Cage, Generation and major/minor allele counts.
## Packages: only these two are needed for this script, and they must load
## in this order (tidyr before dplyr).
require('tidyr')
require('dplyr')
# 1) Adjust details as needed above and below the ##### banner
# 2) Requires a .sync file made by popoolation2
# 3) Most importantly for analysis: adjust the read-in and read-out names
# Read in data: big data sets.
# args[1] should be a directory containing only the directories of interest
# (made with the other sed -n script).
mydirs <- list.dirs(path = args[1], recursive = FALSE)
# With recursive = FALSE the parent path itself is not included in the result.
for (dir in mydirs){
setwd(dir)
mysyncs <- list.files(pattern=".sync")
for (sync in mysyncs){
episodic_counts <- read.table(sync)
#adjust colnames
print("data read in")
name.Columns <- c("Chromosome", "Position", "ref", "ConR1_115", "ConR2_115", "SelR2_115", "SelR1_115", "ConR1_38", "ConR2_38", "SelR1_38", "SelR2_38", "ConR1_77", "ConR2_77", "SelR1_77", "SelR2_77", "SelR1_0")
colnames(episodic_counts) <- name.Columns
#Add "replicates" of ancestor -- all are equal
episodic_counts$SelR2_0 <- episodic_counts$SelR1_0
episodic_counts$ConR1_0 <- episodic_counts$SelR1_0
episodic_counts$ConR2_0 <- episodic_counts$SelR1_0
#Need the ancestor to stay (after making long) to call major/minor alleles later
episodic_counts$Ancestor <- episodic_counts$SelR1_0
# Make long by bring populations down
print("making long")
long_episodic <- gather(episodic_counts, Population, Allele_Freq , ConR1_115:ConR2_0, factor_key=TRUE)
rm(episodic_counts)
print("removed counts")
#Error???
# All geneneric below for sync files (only real issue through file is population naming convention)
###################################################
#Seperate the allele counts into independent columns for each base
print("splitting allele freq")
Episodic_split_2 <- long_episodic %>%
separate(Allele_Freq, c("A","T","C","G","N","del"), ":")
rm(long_episodic)
print("removed long")
#Seperate the ancestor to base later things on
Episodic_split_2 <- Episodic_split_2 %>%
separate(Ancestor, c("A_0","T_0","C_0","G_0","N_0","del_0"), ":")
# as.numeric to multiple columns:
cols.num <- c("A_0", "T_0", "C_0", "G_0", "N_0", "del_0", "A", "T", "C", "G", "N", "del")
#Seems to take a long time for this step?
Episodic_split_2[cols.num] <- sapply(Episodic_split_2[cols.num],as.numeric)
#Get the sum of all the rows (all the different bases) for each population position:
print("getting row sums")
Episodic_split_2$sum <- (rowSums(Episodic_split_2[,11:16]))
#Ancestor Major_Allele and minor allele:
# Major allele of ancestor == the maximum positional count
Episodic_split_2$anc_max <- apply(Episodic_split_2[,4:9], 1, max)
# Minor is the ancestor second highest count
Episodic_split_2$anc_min <- apply(Episodic_split_2[,4:9], 1,
function(x)max(x[x!=max(x)]))
#Major / Minor Base name: match the number of anc_max with the column to call the correct base:
Episodic_split_2 <- within(Episodic_split_2, {
MajorAllele = ifelse(anc_max== Episodic_split_2[,4], "A", ifelse(anc_max== Episodic_split_2[,5], "T", ifelse(anc_max== Episodic_split_2[,6], "C",ifelse(anc_max== Episodic_split_2[,7], "G", ifelse(anc_max== Episodic_split_2[,8], "N", ifelse(anc_max== Episodic_split_2[,9], "del", "N/A" ))))))})
#Major Allele Count of evolved populations; match the Major allele with the count of certain columns for each population
Episodic_split_2 <- within(Episodic_split_2, {
Maj_count = ifelse (MajorAllele == "A", Episodic_split_2[,11], ifelse (MajorAllele == "T", Episodic_split_2[,12], ifelse (MajorAllele == "C", Episodic_split_2[,13], ifelse (MajorAllele == "G", Episodic_split_2[,14], ifelse (MajorAllele == "N", Episodic_split_2[,15], ifelse (MajorAllele == "del", Episodic_split_2[,16], "N/A"))))))})
# Same thing for minor allele: first ensure that if the sum of all counts == the Major coutn and the ancestor had no minor allele, their is no minor allele (N/A), then follow the same match of anc_min to a certain base
Episodic_split_2 <- within(Episodic_split_2, {
MinorAllele = ifelse(Maj_count==Episodic_split_2[,17] & anc_min==0, "N/A", ifelse(anc_min== Episodic_split_2[,4], "A", ifelse(anc_min== Episodic_split_2[,5], "T", ifelse(anc_min== Episodic_split_2[,6], "C",ifelse(anc_min== Episodic_split_2[,7], "G", ifelse(anc_min== Episodic_split_2[,8], "N", ifelse(anc_min== Episodic_split_2[,9], "del", "Z") ))))))})
#Minor Allele Count of the ancestreal minor allele count
Episodic_split_2 <- within(Episodic_split_2, {
Min_count = ifelse (MinorAllele == "A", Episodic_split_2[,11], ifelse (MinorAllele == "T", Episodic_split_2[,12], ifelse (MinorAllele == "C", Episodic_split_2[,13], ifelse (MinorAllele == "G", Episodic_split_2[,14], ifelse (MinorAllele == "N", Episodic_split_2[,15],ifelse (MinorAllele == "del", Episodic_split_2[,16],"N/A"))))))})
print("called major and minor alleles and counts")
# To determine the minor allele base if not specified by the ancestor (new allele brough up etc.)
#max for the population (could be the minor allele)
Episodic_split_2$maj_all <- apply(Episodic_split_2[,11:16], 1, max)
#alt== second highest count for populations
Episodic_split_2$alt_allele <- apply(Episodic_split_2[,11:16], 1,
function(x)max(x[x!=max(x)]))
print("define unknown alleles")
Episodic_split_2 <- within(Episodic_split_2, {
Min_count_2 = ifelse (Maj_count == sum, 0, ifelse(Maj_count==maj_all, alt_allele, maj_all))})
Episodic_split_2 <- within(Episodic_split_2, {
MinorAllele_base = ifelse(Min_count_2==0, "N/A", ifelse(Min_count_2== Episodic_split_2[,11], "A", ifelse(Min_count_2== Episodic_split_2[,12], "T", ifelse(Min_count_2== Episodic_split_2[,13], "C",ifelse(Min_count_2== Episodic_split_2[,14], "G", ifelse(Min_count_2== Episodic_split_2[,15], "N", ifelse(Min_count_2== Episodic_split_2[,16], "del", "Z") ))))))})
# Remove unneeded columns (6,7,8,9,10,11,13,14,15)
Episodic_split_2 <- subset(Episodic_split_2, select = -c(A_0,T_0,C_0,G_0,N_0,del_0,A,T,C,G,N,del,anc_max,anc_min, MinorAllele, Min_count, maj_all, alt_allele))
print("removed unneeded columns")
nam.col <- c("chr", "pos", "ref", "Population", "sum", "MajorAllele", "Major_count", "Minor_count", "MinorAllele")
colnames(Episodic_split_2) <- nam.col
#Remove unneccessary Columns (as needed)
#Keep them all for now (except sum) as may be needed later
#Episodic_split_2 <- subset( Episodic_split_2, select = -ref )
#Episodic_split_2 <- subset( Episodic_split_2, select = -chr)
#Episodic_split_2 <- subset( Episodic_split_2, select = -MajorAllele )
#Episodic_split_2 <- subset( Episodic_split_2, select = -MinorAllele)
Episodic_split_2<- subset( Episodic_split_2, select = -sum)
## Depends on the filter method:
print("begin filtering")
#Filter method: take the sum of each position, and must have at least 5 counts called (i.e over the 16 populations, the total of alleles called for the minor allele must be over 5)
grp <- Episodic_split_2 %>%
group_by(pos) %>%
summarise(sum=sum(Minor_count))
grp2 <- grp[which(grp$sum<=5),]
Episodic_split_2 <- Episodic_split_2[!(Episodic_split_2$pos %in% grp2$pos),]
#check that the number of obs for episodic_long2 == obs for those without 0's sum (*16 for number of "populations") (needed later as well == grp3)
#grp3 <- grp[-which(grp$sum<=5),]
rm(grp)
rm(grp2)
print("remove filter inermediates")
#################################################
#Should be all genetic above (from start specificed)
## Below depends on the population name layout etc. made above
#Split Population into Treatment, Rep, and Generation - need to do twice, different seperators (change above??)
print("seperate population to Treatment, Generation and Cage")
episodic_long <- Episodic_split_2 %>%
separate(Population, c("Treatment", "Generation"), "_")
rm(Episodic_split_2)
episodic_long <- episodic_long %>%
separate(Treatment, c("Treatment", "Cage"), "R")
cols.num <- c("Cage", "Generation", "Major_count", "Minor_count")
episodic_long[cols.num] <- sapply(episodic_long[cols.num],as.numeric)
print("Have final episodic long; now write a csv")
#will need to rename .csv files
write.csv(episodic_long, file=paste(sync, ".csv", sep=""))
print("wrote csv and now done this .sync file")
}
}
| /Analysis_after_BAM_Scripts/Sync_to_counts.R | no_license | PaulKnoops/Experimental_Evolution_Sequence_Repo | R | false | false | 9,180 | r | # For loop sync to counts
## ---------------------------------------------------------------------------
## Sync-to-counts driver: walks every directory under args[1]; for each
## popoolation2 .sync file found, reshapes the per-base counts to long format,
## calls major/minor alleles against the ancestor column (SelR1_0), filters
## positions with a low total minor-allele count, and writes the result to
## "<file>.csv" beside the input.
## ---------------------------------------------------------------------------
## need next line to call arguments:
args <- commandArgs(trailingOnly = TRUE)
## Convert a .sync file into long format, filter somewhat, and have only position, treatment, Cage, Generation and Maj/Min counts
## Packages source code: only need these two for this script (need to be this order)
# NOTE(review): require() only warns when a package is missing; library() would
# fail fast here -- confirm before changing.
require('tidyr')
require('dplyr')
#1) Need to change details as needed above and below string of #####
#2) Needs a .sync file made by popoolation2
#3) Need to change most importantly for analysis the read in and read out names
# Read in Data: Big Data Sets
#pwd a direcotry containing only the directories of interest (made with other sed -n script)
mydirs <- list.dirs(path = args[1], recursive = FALSE)
#includes that actual dir.. not with recursive = FALSE
for (dir in mydirs){
setwd(dir)
# NOTE(review): pattern is a regex, so "." matches any character; "\\.sync$"
# would be stricter -- confirm these directories only contain .sync files.
mysyncs <- list.files(pattern=".sync")
for (sync in mysyncs){
episodic_counts <- read.table(sync)
#adjust colnames
print("data read in")
name.Columns <- c("Chromosome", "Position", "ref", "ConR1_115", "ConR2_115", "SelR2_115", "SelR1_115", "ConR1_38", "ConR2_38", "SelR1_38", "SelR2_38", "ConR1_77", "ConR2_77", "SelR1_77", "SelR2_77", "SelR1_0")
colnames(episodic_counts) <- name.Columns
#Add "replicates" of ancestor -- all are equal
episodic_counts$SelR2_0 <- episodic_counts$SelR1_0
episodic_counts$ConR1_0 <- episodic_counts$SelR1_0
episodic_counts$ConR2_0 <- episodic_counts$SelR1_0
#Need the ancestor to stay (after making long) to call major/minor alleles later
episodic_counts$Ancestor <- episodic_counts$SelR1_0
# Make long by bring populations down
print("making long")
# One row per (position, population); the Ancestor column is carried along.
long_episodic <- gather(episodic_counts, Population, Allele_Freq , ConR1_115:ConR2_0, factor_key=TRUE)
rm(episodic_counts)
print("removed counts")
#Error???
# All geneneric below for sync files (only real issue through file is population naming convention)
###################################################
#Seperate the allele counts into independent columns for each base
print("splitting allele freq")
Episodic_split_2 <- long_episodic %>%
separate(Allele_Freq, c("A","T","C","G","N","del"), ":")
rm(long_episodic)
print("removed long")
#Seperate the ancestor to base later things on
Episodic_split_2 <- Episodic_split_2 %>%
separate(Ancestor, c("A_0","T_0","C_0","G_0","N_0","del_0"), ":")
# as.numeric to multiple columns:
cols.num <- c("A_0", "T_0", "C_0", "G_0", "N_0", "del_0", "A", "T", "C", "G", "N", "del")
#Seems to take a long time for this step?
Episodic_split_2[cols.num] <- sapply(Episodic_split_2[cols.num],as.numeric)
#Get the sum of all the rows (all the different bases) for each population position:
print("getting row sums")
# NOTE(review): the numeric column positions used below (4:9 = ancestor counts,
# 11:16 = population counts, 17 = sum) assume the exact column order produced
# by the two separate() calls above -- verify if any column is added or moved.
Episodic_split_2$sum <- (rowSums(Episodic_split_2[,11:16]))
#Ancestor Major_Allele and minor allele:
# Major allele of ancestor == the maximum positional count
Episodic_split_2$anc_max <- apply(Episodic_split_2[,4:9], 1, max)
# Minor is the ancestor second highest count
# NOTE(review): when all six ancestor counts are equal, x[x!=max(x)] is empty
# and max() returns -Inf with a warning -- presumably such rows never occur;
# TODO confirm.
Episodic_split_2$anc_min <- apply(Episodic_split_2[,4:9], 1,
function(x)max(x[x!=max(x)]))
#Major / Minor Base name: match the number of anc_max with the column to call the correct base:
Episodic_split_2 <- within(Episodic_split_2, {
MajorAllele = ifelse(anc_max== Episodic_split_2[,4], "A", ifelse(anc_max== Episodic_split_2[,5], "T", ifelse(anc_max== Episodic_split_2[,6], "C",ifelse(anc_max== Episodic_split_2[,7], "G", ifelse(anc_max== Episodic_split_2[,8], "N", ifelse(anc_max== Episodic_split_2[,9], "del", "N/A" ))))))})
#Major Allele Count of evolved populations; match the Major allele with the count of certain columns for each population
# NOTE(review): mixing numeric counts with the "N/A" fallback makes ifelse()
# return a character vector; the final sapply(..., as.numeric) below converts
# the kept columns back -- confirm "N/A" rows are filtered or become NA on purpose.
Episodic_split_2 <- within(Episodic_split_2, {
Maj_count = ifelse (MajorAllele == "A", Episodic_split_2[,11], ifelse (MajorAllele == "T", Episodic_split_2[,12], ifelse (MajorAllele == "C", Episodic_split_2[,13], ifelse (MajorAllele == "G", Episodic_split_2[,14], ifelse (MajorAllele == "N", Episodic_split_2[,15], ifelse (MajorAllele == "del", Episodic_split_2[,16], "N/A"))))))})
# Same thing for minor allele: first ensure that if the sum of all counts == the Major count and the ancestor had no minor allele, there is no minor allele (N/A), then follow the same match of anc_min to a certain base
Episodic_split_2 <- within(Episodic_split_2, {
MinorAllele = ifelse(Maj_count==Episodic_split_2[,17] & anc_min==0, "N/A", ifelse(anc_min== Episodic_split_2[,4], "A", ifelse(anc_min== Episodic_split_2[,5], "T", ifelse(anc_min== Episodic_split_2[,6], "C",ifelse(anc_min== Episodic_split_2[,7], "G", ifelse(anc_min== Episodic_split_2[,8], "N", ifelse(anc_min== Episodic_split_2[,9], "del", "Z") ))))))})
#Minor Allele Count of the ancestral minor allele count
Episodic_split_2 <- within(Episodic_split_2, {
Min_count = ifelse (MinorAllele == "A", Episodic_split_2[,11], ifelse (MinorAllele == "T", Episodic_split_2[,12], ifelse (MinorAllele == "C", Episodic_split_2[,13], ifelse (MinorAllele == "G", Episodic_split_2[,14], ifelse (MinorAllele == "N", Episodic_split_2[,15],ifelse (MinorAllele == "del", Episodic_split_2[,16],"N/A"))))))})
print("called major and minor alleles and counts")
# To determine the minor allele base if not specified by the ancestor (new allele brought up etc.)
#max for the population (could be the minor allele)
Episodic_split_2$maj_all <- apply(Episodic_split_2[,11:16], 1, max)
#alt== second highest count for populations
Episodic_split_2$alt_allele <- apply(Episodic_split_2[,11:16], 1,
function(x)max(x[x!=max(x)]))
print("define unknown alleles")
# 'sum' below is the data-frame column created above (within() resolves names
# in the data frame first), not base::sum.
Episodic_split_2 <- within(Episodic_split_2, {
Min_count_2 = ifelse (Maj_count == sum, 0, ifelse(Maj_count==maj_all, alt_allele, maj_all))})
Episodic_split_2 <- within(Episodic_split_2, {
MinorAllele_base = ifelse(Min_count_2==0, "N/A", ifelse(Min_count_2== Episodic_split_2[,11], "A", ifelse(Min_count_2== Episodic_split_2[,12], "T", ifelse(Min_count_2== Episodic_split_2[,13], "C",ifelse(Min_count_2== Episodic_split_2[,14], "G", ifelse(Min_count_2== Episodic_split_2[,15], "N", ifelse(Min_count_2== Episodic_split_2[,16], "del", "Z") ))))))})
# Remove unneeded columns (6,7,8,9,10,11,13,14,15)
Episodic_split_2 <- subset(Episodic_split_2, select = -c(A_0,T_0,C_0,G_0,N_0,del_0,A,T,C,G,N,del,anc_max,anc_min, MinorAllele, Min_count, maj_all, alt_allele))
print("removed unneeded columns")
nam.col <- c("chr", "pos", "ref", "Population", "sum", "MajorAllele", "Major_count", "Minor_count", "MinorAllele")
colnames(Episodic_split_2) <- nam.col
#Remove unneccessary Columns (as needed)
#Keep them all for now (except sum) as may be needed later
#Episodic_split_2 <- subset( Episodic_split_2, select = -ref )
#Episodic_split_2 <- subset( Episodic_split_2, select = -chr)
#Episodic_split_2 <- subset( Episodic_split_2, select = -MajorAllele )
#Episodic_split_2 <- subset( Episodic_split_2, select = -MinorAllele)
Episodic_split_2<- subset( Episodic_split_2, select = -sum)
## Depends on the filter method:
print("begin filtering")
#Filter method: take the sum of each position, and must have at least 5 counts called (i.e over the 16 populations, the total of alleles called for the minor allele must be over 5)
grp <- Episodic_split_2 %>%
group_by(pos) %>%
summarise(sum=sum(Minor_count))
# Positions whose total minor count is <= 5 across all populations are dropped.
grp2 <- grp[which(grp$sum<=5),]
Episodic_split_2 <- Episodic_split_2[!(Episodic_split_2$pos %in% grp2$pos),]
#check that the number of obs for episodic_long2 == obs for those without 0's sum (*16 for number of "populations") (needed later as well == grp3)
#grp3 <- grp[-which(grp$sum<=5),]
rm(grp)
rm(grp2)
print("remove filter inermediates")
#################################################
#Should be all genetic above (from start specificed)
## Below depends on the population name layout etc. made above
#Split Population into Treatment, Rep, and Generation - need to do twice, different seperators (change above??)
print("seperate population to Treatment, Generation and Cage")
# e.g. "ConR1_115" -> Treatment "ConR1", Generation "115"; then splitting on
# "R" gives Treatment "Con" and Cage "1".
episodic_long <- Episodic_split_2 %>%
separate(Population, c("Treatment", "Generation"), "_")
rm(Episodic_split_2)
episodic_long <- episodic_long %>%
separate(Treatment, c("Treatment", "Cage"), "R")
cols.num <- c("Cage", "Generation", "Major_count", "Minor_count")
episodic_long[cols.num] <- sapply(episodic_long[cols.num],as.numeric)
print("Have final episodic long; now write a csv")
#will need to rename .csv files
write.csv(episodic_long, file=paste(sync, ".csv", sep=""))
print("wrote csv and now done this .sync file")
}
}
|
#' Script for data pretreatment
#'
#' This script calls a series of data pretreatment functions for TL dating.
#' It only requires the name of the file with the TL curves and the relative error on the measurement.
#'
#' @param file.name
#' \link{character} (\bold{required}): Name of the file containing the luminescence data.
#' @param relative.error
#' \link{numeric} (with default): Relative error of the TL signals.
#' @param remove.discs
#' \link{numeric} (with default): list containing the position of the aliquots to remove.
#' @param file.parameters
#' \link{list} (with default): list containing the file parameters. See details.
#' @param aligning.parameters
#' \link{list} (with default): list containing the aligning parameters. See details.
#' @param plotting.parameters
#' \link{list} (with default): list containing the plotting parameters. See details.
#'
#' @details
#' \bold{Aligning parameters} \cr
#' The aligning parameters are: \cr
#' \describe{
#' \item{\code{peak.Tmin}}{
#' \link{numeric}: Lower boundary for looking at the peak maximum position.}
#' \item{\code{peak.Tmax}}{
#' \link{numeric}: Upper boundary for looking at the peak maximum position.}
#' \item{\code{no.testdose}}{
#' \link{logical}: If \code{TRUE}, the function will use the Lx curves rather than the Tx curves as reference for the peak maximum position.}
#' }
#'
#' \bold{Plotting parameters} \cr
#' The plotting parameters are: \cr
#' \describe{
#' \item{\code{plot.Tmin}}{
#' \link{numeric}: Lower temperature plotted.}
#' \item{\code{plot.Tmax}}{
#' \link{numeric}: Higher temperature plotted.}
#' \item{\code{no.plot}}{
#' \link{logical}: If \code{TRUE}, the results will not be plotted.}
#' }
#' See also \link{plot_TL.MAAD}. \cr
#'
#' \bold{File parameters} \cr
#' The file parameters are: \cr
#' \describe{
#' \item{\code{file.extension}}{
#' \link{character} (with default): extension of the file containing the luminescence data (.bin or .binx)}
#' \item{\code{folder.in}}{
#' \link{character} (with default): Folder containing the file with the luminescence data.}
#' \item{\code{folder.out}}{
#' \link{character} (with default): Folder that will contain the file with the new luminescence data.}
#' }
#' see also \link{mod_update.dType}.
#'
#' @return
#' This function returns a \code{\linkS4class{TLum.Analysis}} where the preheats were removed, the background subtracted and the peaks aligned.
#' It saves the result as a .binx file in the specified folder.
#' It also plots the results from the different functions called, using:
#' \link{plot_extract.TL},
#' \link{plot_remove.preheat},
#' \link{plot_substract.background} and
#' \link{plot_align.peaks}. \cr
#'
#'
#' @seealso
#' \link{read_BIN2R},
#' \link{Risoe.BINfileData2TLum.BIN.File},
#' \link{mod_extract.TL},
#' \link{mod_update.dType},
#' \link{mod_remove.aliquot},
#' \link{mod_remove.preheat},
#' \link{mod_substract.background},
#' \link{mod_align.peaks},
#' \link{write_R2BIN}.
#'
#' @author David Strebler, University of Cologne (Germany), \cr David Strebler
#'
#' @export script_TL.pretreatment
script_TL.pretreatment <- function(
  file.name,
  relative.error= 0.05,
  remove.discs=NULL,
  file.parameters=list(file.extension =".binx",
                       folder.in = "./",
                       folder.out = "./"),
  aligning.parameters=list(peak.Tmin=NULL,
                           peak.Tmax=NULL,
                           no.testdose=FALSE),
  plotting.parameters=list(plot.Tmin=0,
                           plot.Tmax=NA,
                           no.plot=FALSE)
){
  # ------------------------------------------------------------------------------
  # Integrity Check
  # ------------------------------------------------------------------------------
  if(missing(file.name)){
    stop("[script_TL.pretreatment] Error: Input 'file.name' is missing.")
  }else if(!is.character(file.name)){
    stop("[script_TL.pretreatment] Error: Input 'file.name' is not of type 'character'.")
  }
  if(!is.numeric(relative.error)){
    stop("[script_TL.pretreatment] Error: Input 'relative.error' is not of type 'numeric'.")
  }
  # Fixed: this message previously named 'plotting.parameters' by mistake.
  if(!is.list(file.parameters)){
    stop("[script_TL.pretreatment] Error: Input 'file.parameters' is not of type 'list'.")
  }
  if(!is.list(aligning.parameters)){
    stop("[script_TL.pretreatment] Error: Input 'aligning.parameters' is not of type 'list'.")
  }
  if(!is.list(plotting.parameters)){
    stop("[script_TL.pretreatment] Error: Input 'plotting.parameters' is not of type 'list'.")
  }
  # ------------------------------------------------------------------------------
  folder.out <- file.parameters$folder.out
  file.extension <- file.parameters$file.extension
  # ------------------------------------------------------------------------------
  # Check Value
  if(!is.character(folder.out)){
    # Recoverable: fall back to the working directory (hence a warning, not an error).
    warning("[script_TL.pretreatment] Warning: Input 'folder.out' is not of type 'character'; using './' instead.")
    folder.out <- "./"
  }
  if(!is.character(file.extension)){
    stop("[script_TL.pretreatment] Error: Input 'file.extension' is not of type 'character'.")
  }else if(file.extension != ".bin" && file.extension != ".binx"){
    # Fixed: an unreachable 'file.extension <- ".binx"' used to follow this stop().
    stop("[script_TL.pretreatment] Error: Input 'file.extension' is not of '.bin' or '.binx'.")
  }
  # ------------------------------------------------------------------------------
  # TL curve recovery
  data <- script_TL.import(file.name = file.name,
                           relative.error = relative.error,
                           file.parameters = file.parameters,
                           plotting.parameters = plotting.parameters)
  # Problematic aliquots removal
  if(!is.null(remove.discs)){
    data <- mod_remove.aliquot(object = data,
                               list = remove.discs)
    print(paste("Aliquot", remove.discs, "removed"))
  }
  # Preheat removal
  data <- mod_remove.preheat(object = data,
                             plotting.parameters = plotting.parameters)
  print("Preheat removed")
  # Background subtraction (function name keeps the package's historical spelling)
  data <- mod_substract.background(object = data)
  print("Background substracted")
  # Peaks alignment
  data <- mod_align.peaks(object=data,
                          aligning.parameters=aligning.parameters,
                          plotting.parameters=plotting.parameters)
  print("Peaks Shifted")
  # Saving of preliminary results; the output location is taken from
  # 'file.parameters' by script_TL.export().
  file.out <- paste("new_",file.name,sep="")
  script_TL.export(object = data,
                   file.name = file.out,
                   file.parameters = file.parameters)
  print("File saved")
  return(data)
}
| /TLdating/R/script_TL.pretreatment.R | no_license | ingted/R-Examples | R | false | false | 7,107 | r | #' Script for data pretreatment
#'
#' This script calls a series of data pretreatment functions for TL dating.
#' It only requires the name of the file with the TL curves and the relative error on the measurement.
#'
#' @param file.name
#' \link{character} (\bold{required}): Name of the file containing the luminescence data.
#' @param relative.error
#' \link{numeric} (with default): Relative error of the TL signals.
#' @param remove.discs
#' \link{numeric} (with default): list containing the position of the aliquots to remove.
#' @param file.parameters
#' \link{list} (with default): list containing the file parameters. See details.
#' @param aligning.parameters
#' \link{list} (with default): list containing the aligning parameters. See details.
#' @param plotting.parameters
#' \link{list} (with default): list containing the plotting parameters. See details.
#'
#' @details
#' \bold{Aligning parameters} \cr
#' The aligning parameters are: \cr
#' \describe{
#' \item{\code{peak.Tmin}}{
#' \link{numeric}: Lower boundary for looking at the peak maximum position.}
#' \item{\code{peak.Tmax}}{
#' \link{numeric}: Upper boundary for looking at the peak maximum position.}
#' \item{\code{no.testdose}}{
#' \link{logical}: If \code{TRUE}, the function will use the Lx curves rather the Tx curves as reference for the peak maximum position.}
#' }
#'
#' \bold{Plotting parameters} \cr
#' The plotting parameters are: \cr
#' \describe{
#' \item{\code{plot.Tmin}}{
#' \link{numeric}: Lower temperature plotted.}
#' \item{\code{plot.Tmax}}{
#' \link{numeric}: Higher temperature plotted.}
#' \item{\code{no.plot}}{
#' \link{logical}: If \code{TRUE}, the results will not be plotted.}
#' }
#' See also \link{plot_TL.MAAD}. \cr
#'
#' \bold{File parameters} \cr
#' The file parameters are: \cr
#' \describe{
#' \item{\code{file.extension}}{
#' \link{character} (with default): extension of the file containing the luminescence data (.bin or .binx)}
#' \item{\code{folder.in}}{
#' \link{character} (with default): Folder containing the file with the luminescene data.}
#' \item{\code{folder.out}}{
#' \link{character} (with default): Folder containing the file with the new luminescene data.}
#' }
#' see also \link{mod_update.dType}.
#'
#' @return
#' This function returns a \code{\linkS4class{TLum.Analysis}} where the preheats were removed, the background subtracted and the peaks aligned.
#' It saves the result as a .binx file in the specified folder.
#' It also plots the results from the different functions called, using:
#' \link{plot_extract.TL},
#' \link{plot_remove.preheat},
#' \link{plot_substract.background} and
#' \link{plot_align.peaks}. \cr
#'
#'
#' @seealso
#' \link{read_BIN2R},
#' \link{Risoe.BINfileData2TLum.BIN.File},
#' \link{mod_extract.TL},
#' \link{mod_update.dType},
#' \link{mod_remove.aliquot},
#' \link{mod_remove.preheat},
#' \link{mod_substract.background},
#' \link{mod_align.peaks},
#' \link{write_R2BIN}.
#'
#' @author David Strebler, University of Cologne (Germany), \cr David Strebler
#'
#' @export script_TL.pretreatment
script_TL.pretreatment <- function(
  file.name,
  relative.error= 0.05,
  remove.discs=NULL,
  file.parameters=list(file.extension =".binx",
                       folder.in = "./",
                       folder.out = "./"),
  aligning.parameters=list(peak.Tmin=NULL,
                           peak.Tmax=NULL,
                           no.testdose=FALSE),
  plotting.parameters=list(plot.Tmin=0,
                           plot.Tmax=NA,
                           no.plot=FALSE)
){
  # ------------------------------------------------------------------------------
  # Integrity Check
  # ------------------------------------------------------------------------------
  if(missing(file.name)){
    stop("[script_TL.pretreatment] Error: Input 'file.name' is missing.")
  }else if(!is.character(file.name)){
    stop("[script_TL.pretreatment] Error: Input 'file.name' is not of type 'character'.")
  }
  if(!is.numeric(relative.error)){
    stop("[script_TL.pretreatment] Error: Input 'relative.error' is not of type 'numeric'.")
  }
  # Fixed: this message previously named 'plotting.parameters' by mistake.
  if(!is.list(file.parameters)){
    stop("[script_TL.pretreatment] Error: Input 'file.parameters' is not of type 'list'.")
  }
  if(!is.list(aligning.parameters)){
    stop("[script_TL.pretreatment] Error: Input 'aligning.parameters' is not of type 'list'.")
  }
  if(!is.list(plotting.parameters)){
    stop("[script_TL.pretreatment] Error: Input 'plotting.parameters' is not of type 'list'.")
  }
  # ------------------------------------------------------------------------------
  folder.out <- file.parameters$folder.out
  file.extension <- file.parameters$file.extension
  # ------------------------------------------------------------------------------
  # Check Value
  if(!is.character(folder.out)){
    # Recoverable: fall back to the working directory (hence a warning, not an error).
    warning("[script_TL.pretreatment] Warning: Input 'folder.out' is not of type 'character'; using './' instead.")
    folder.out <- "./"
  }
  if(!is.character(file.extension)){
    stop("[script_TL.pretreatment] Error: Input 'file.extension' is not of type 'character'.")
  }else if(file.extension != ".bin" && file.extension != ".binx"){
    # Fixed: an unreachable 'file.extension <- ".binx"' used to follow this stop().
    stop("[script_TL.pretreatment] Error: Input 'file.extension' is not of '.bin' or '.binx'.")
  }
  # ------------------------------------------------------------------------------
  # TL curve recovery
  data <- script_TL.import(file.name = file.name,
                           relative.error = relative.error,
                           file.parameters = file.parameters,
                           plotting.parameters = plotting.parameters)
  # Problematic aliquots removal
  if(!is.null(remove.discs)){
    data <- mod_remove.aliquot(object = data,
                               list = remove.discs)
    print(paste("Aliquot", remove.discs, "removed"))
  }
  # Preheat removal
  data <- mod_remove.preheat(object = data,
                             plotting.parameters = plotting.parameters)
  print("Preheat removed")
  # Background subtraction (function name keeps the package's historical spelling)
  data <- mod_substract.background(object = data)
  print("Background substracted")
  # Peaks alignment
  data <- mod_align.peaks(object=data,
                          aligning.parameters=aligning.parameters,
                          plotting.parameters=plotting.parameters)
  print("Peaks Shifted")
  # Saving of preliminary results; the output location is taken from
  # 'file.parameters' by script_TL.export().
  file.out <- paste("new_",file.name,sep="")
  script_TL.export(object = data,
                   file.name = file.out,
                   file.parameters = file.parameters)
  print("File saved")
  return(data)
}
|
#' Create a mapping between factor/character vector and corresponding numeric values
#'
#' Requires the \pkg{gdata} package (for \code{mapLevels}); assumes
#' \pkg{data.table} is already attached by the calling script -- TODO confirm.
#'
#' @param x a \code{factor} or \code{character} vector
#' @return returns a two dimensional \code{data.table}, where the first column is the numeric value and the second column the corresponding label
map_labels = function(x) {
# Fail fast when the suggested dependency is absent.
if (!requireNamespace("gdata", quietly = TRUE))
stop("requires gdata package to be installed")
# mapLevels() returns a "levelsMap": integer level codes named by their labels.
lmap = gdata::mapLevels(x)
lab_mapping = data.table(
id = as.integer(lmap),
label = names(lmap)
)
return(lab_mapping)
} | /2021_FB/R/map_labels.R | permissive | baruuum/Replication_Code | R | false | false | 603 | r | #' Create a mapping between factor/character vector and corresponding numeric values
#'
#' @param x a \code{factor} or \code{character} vector
#' @return returns a two dimensional \code{data.table}, where the first column is the numeric value and the second column the corresponding label
map_labels = function(x) {
  # Bail out early if the suggested 'gdata' dependency is missing.
  if (!requireNamespace("gdata", quietly = TRUE))
    stop("requires gdata package to be installed")
  # mapLevels() yields a "levelsMap": integer level codes named by their labels.
  level_map = gdata::mapLevels(x)
  # Two-column lookup table: numeric code plus its corresponding label.
  mapping = data.table(
    id = as.integer(level_map),
    label = names(level_map)
  )
  return(mapping)
} |
# Human Activity Recognition database built from the recordings of 30 subjects
# performing activities of daily living (ADL) while carrying a waist-mounted smartphone with embedded inertial sensors.
# This R script run_analysis.R performs the following --
# Merges the training and the test sets to create one data set.
# Extracts only the measurements on the mean and standard deviation for each measurement.
# Uses descriptive activity names to name the activities in the data set
# Appropriately labels the data set with descriptive variable names.
# From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# This function is used to download the data
download.data = function() {
  # Download the raw UCI HAR zip archive and unpack it under ./data.
  # Ensure the target directory exists first; download.file() errors otherwise.
  if (!dir.exists("data")) {
    dir.create("data")
  }
  fileURL <- "http://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  # mode = "wb" is required on Windows: the default text mode corrupts binary
  # files such as zip archives.
  download.file(fileURL, destfile="data/UCI_HAR_data.zip", mode = "wb")
  unzip("data/UCI_HAR_data.zip", exdir="data")
}
# This function is used to merge the data sets
# Merges the training and the test sets to create one data set.
merge.datasets = function() {
# Read the raw train and test splits (measurements, activity codes, subjects).
training.x <- read.table("data/UCI HAR Dataset/train/X_train.txt")
training.y <- read.table("data/UCI HAR Dataset/train/y_train.txt")
training.subject <- read.table("data/UCI HAR Dataset/train/subject_train.txt")
tocombine.x <- read.table("data/UCI HAR Dataset/test/X_test.txt")
tocombine.y <- read.table("data/UCI HAR Dataset/test/y_test.txt")
test.subject <- read.table("data/UCI HAR Dataset/test/subject_test.txt")
# Stack training rows above test rows, keeping the three tables row-aligned.
merged.x <- rbind(training.x, tocombine.x)
merged.y <- rbind(training.y, tocombine.y)
merged.subject <- rbind(training.subject, test.subject)
# merge train and test datasets and return
list(x=merged.x, y=merged.y, subject=merged.subject)
}
# Extracts only the measurements on the mean and standard deviation for each measurement.
extract.mean.and.std = function(df) {
  # Keep only the mean() and std() feature columns of df, named after features.txt.
  features <- read.table("data/UCI HAR Dataset/features.txt")
  # grepl() is already vectorised over the feature names, so the sapply()
  # wrapper was unnecessary; fixed = TRUE matches the literal substrings
  # "mean()" / "std()" (which excludes meanFreq(), as before).  Also replaced
  # the reassignable shorthand T with TRUE.
  mean.col <- grepl("mean()", features[, 2], fixed = TRUE)
  std.col <- grepl("std()", features[, 2], fixed = TRUE)
  keep <- mean.col | std.col
  var1 <- df[, keep]
  colnames(var1) <- features[keep, 2]
  var1
}
# Uses descriptive activity names to name the activities in the data set
name.activities = function(df) {
  # Replace numeric activity codes (1-6) with their descriptive labels.
  colnames(df) <- "activity"
  activity.labels <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS",
                       "SITTING", "STANDING", "LAYING")
  # The first replacement coerces the column to character, exactly as the
  # original chain of assignments did; codes outside 1-6 are left untouched.
  for (code in seq_along(activity.labels)) {
    df$activity[df$activity == code] <- activity.labels[code]
  }
  df
}
bind.data <- function(x, y, subjects) {
  # Glue the measurement columns (x), the activity labels (y) and the subject
  # ids together column-wise into a single data frame.
  combined <- cbind(x, y, subjects)
  combined
}
create.tidy.dataset = function(df) {
# Given X values, y values and subjects, create an independent tidy dataset
# with the average of each variable for each activity and each subject.
# NOTE(review): ddply() comes from the 'plyr' package, which this script never
# loads -- confirm the caller attaches plyr before running.
# NOTE(review): the hard-coded 1:60 assumes exactly 60 measurement columns
# precede the subject/activity columns -- TODO verify against features.txt.
tidy <- ddply(df, .(subject, activity), function(x) colMeans(x[,1:60]))
tidy
}
clean.data = function() {
# Top-level driver: download -> merge -> extract -> label -> tidy -> write.
# Side effects only: writes UCI_HAR_tidy.csv / .txt to the working directory;
# the tidy data frame itself is not returned.
# Download data
download.data()
# merge training and test datasets. merge.datasets function returns a list
# of three dataframes: X, y, and subject
merged <- merge.datasets()
# Extract only the measurements of the mean and standard deviation for each
# measurement
cx <- extract.mean.and.std(merged$x)
# Name activities
cy <- name.activities(merged$y)
# Use descriptive column name for subjects
colnames(merged$subject) <- c("subject")
# Combine data frames into one
combined <- bind.data(cx, cy, merged$subject)
# Create tidy dataset
tidy <- create.tidy.dataset(combined)
# Write tidy dataset as csv
write.csv(tidy, "UCI_HAR_tidy.csv", row.names=FALSE)
# Write tidy dataset as text
write.table(tidy, "UCI_HAR_tidy.txt", row.names=FALSE)
} | /run_analysis.R | no_license | avvenkat/datasciencecoursera | R | false | false | 4,060 | r | # Human Activity Recognition database built from the recordings of 30 subjects
# performing activities of daily living (ADL) while carrying a waist-mounted smartphone with embedded inertial sensors.
# This R script run_analysis.R performs the following --
# Merges the training and the test sets to create one data set.
# Extracts only the measurements on the mean and standard deviation for each measurement.
# Uses descriptive activity names to name the activities in the data set
# Appropriately labels the data set with descriptive variable names.
# From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# This function is used to download the data
download.data = function() {
  # Fetch the UCI HAR zip archive and unpack it under ./data.
  # Create the destination directory first; download.file() fails without it.
  if (!dir.exists("data")) {
    dir.create("data")
  }
  fileURL <- "http://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  # Binary mode ("wb") prevents zip corruption on Windows, where the default
  # download mode is text.
  download.file(fileURL, destfile="data/UCI_HAR_data.zip", mode = "wb")
  unzip("data/UCI_HAR_data.zip", exdir="data")
}
# This function is used to merge the data sets
# Merges the training and the test sets to create one data set.
merge.datasets = function() {
  # Read the raw train and test splits from disk.
  train.x <- read.table("data/UCI HAR Dataset/train/X_train.txt")
  train.y <- read.table("data/UCI HAR Dataset/train/y_train.txt")
  train.subj <- read.table("data/UCI HAR Dataset/train/subject_train.txt")
  test.x <- read.table("data/UCI HAR Dataset/test/X_test.txt")
  test.y <- read.table("data/UCI HAR Dataset/test/y_test.txt")
  test.subj <- read.table("data/UCI HAR Dataset/test/subject_test.txt")
  # Stack training rows first, then test rows, keeping the three tables
  # row-aligned, and return them as a named list.
  list(
    x = rbind(train.x, test.x),
    y = rbind(train.y, test.y),
    subject = rbind(train.subj, test.subj)
  )
}
# Extracts only the measurements on the mean and standard deviation for each measurement.
extract.mean.and.std = function(df) {
  # Subset df to the mean()/std() feature columns listed in features.txt and
  # carry over their names.
  features <- read.table("data/UCI HAR Dataset/features.txt")
  # Replaced sapply() over grepl() with a single vectorised grepl() call and
  # the reassignable T shorthand with TRUE.  fixed = TRUE keeps the literal
  # "mean()" / "std()" matching (meanFreq() stays excluded).
  mean.col <- grepl("mean()", features[, 2], fixed = TRUE)
  std.col <- grepl("std()", features[, 2], fixed = TRUE)
  keep <- mean.col | std.col
  var1 <- df[, keep]
  colnames(var1) <- features[keep, 2]
  var1
}
# Uses descriptive activity names to name the activities in the data set
name.activities = function(df) {
  # Map activity codes 1..6 onto their descriptive names.
  colnames(df) <- "activity"
  labels <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS",
              "SITTING", "STANDING", "LAYING")
  # Assigning a string on the first hit coerces the whole column to character,
  # mirroring the original assignment chain; unknown codes are left as-is.
  for (i in seq_along(labels)) {
    df$activity[df$activity == i] <- labels[i]
  }
  df
}
bind.data <- function(x, y, subjects) {
  # Column-bind the mean/std measurements (x), the activity labels (y) and
  # the subject identifiers into one data frame.
  result <- cbind(x, y, subjects)
  result
}
create.tidy.dataset = function(df) {
  # Given X values, y values and subjects, create an independent tidy dataset
  # with the average of each variable for each activity and each subject.
  #
  # df: combined data frame whose first 60 columns are measurement values
  #     and which also contains "subject" and "activity" columns.
  # Returns: one row per (subject, activity) pair, containing the column
  #     means of the measurement columns.
  #
  # NOTE(review): ddply comes from the plyr package, which is not loaded
  # anywhere in this file — confirm a library(plyr) call exists upstream.
  # NOTE(review): the hard-coded 1:60 assumes exactly 60 measurement columns
  # precede subject/activity — verify against the output of
  # extract.mean.and.std before trusting this summary.
  tidy <- ddply(df, .(subject, activity), function(x) colMeans(x[,1:60]))
  tidy
}
clean.data = function() {
  # Top-level driver for the UCI HAR cleaning pipeline: download the raw
  # archive, merge train/test, keep mean/std features, label activities,
  # combine everything, summarise per subject/activity, and write the tidy
  # result to the working directory in both CSV and whitespace-delimited form.
  # Download data
  download.data()
  # merge training and test datasets. merge.datasets function returns a list
  # of three dataframes: X, y, and subject
  merged <- merge.datasets()
  # Extract only the measurements of the mean and standard deviation for each
  # measurement
  cx <- extract.mean.and.std(merged$x)
  # Name activities
  cy <- name.activities(merged$y)
  # Use descriptive column name for subjects
  colnames(merged$subject) <- c("subject")
  # Combine data frames into one
  combined <- bind.data(cx, cy, merged$subject)
  # Create tidy dataset
  tidy <- create.tidy.dataset(combined)
  # Write tidy dataset as csv
  write.csv(tidy, "UCI_HAR_tidy.csv", row.names=FALSE)
  # Write tidy dataset as text
  write.table(tidy, "UCI_HAR_tidy.txt", row.names=FALSE)
} | /man/uy.Rd | permissive | jokergoo/circlize | R | false | false | 369 | rd | \name{uy}
\name{uy}
\alias{uy}
\title{
Convert unit on y direction in data coordinate
}
\description{
Convert unit on y direction in data coordinate
}
\usage{
uy(...)
}
\arguments{
\item{...}{pass to \code{\link{convert_y}}.}
}
\details{
Please do not use this function. Use \code{\link{mm_y}}/\code{\link{cm_y}}/\code{\link{inches_y}} instead.
}
\examples{
# There is no example
NULL
}
| /man/uy.Rd | permissive | jokergoo/circlize | R | false | false | 369 | rd | \name{uy}
\alias{uy}
\title{
Convert unit on y direction in data coordinate
}
\description{
Convert unit on y direction in data coordinate
}
\usage{
uy(...)
}
\arguments{
\item{...}{pass to \code{\link{convert_y}}.}
}
\details{
Please do not use this function. Use \code{\link{mm_y}}/\code{\link{cm_y}}/\code{\link{inches_y}} instead.
}
\examples{
# There is no example
NULL
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.cloudfront_operations.R
\name{update_cloud_front_origin_access_identity}
\alias{update_cloud_front_origin_access_identity}
\title{Update an origin access identity}
\usage{
update_cloud_front_origin_access_identity(CloudFrontOriginAccessIdentityConfig,
Id, IfMatch = NULL)
}
\arguments{
\item{CloudFrontOriginAccessIdentityConfig}{[required] The identity's configuration information.}
\item{Id}{[required] The identity's id.}
\item{IfMatch}{The value of the \code{ETag} header that you received when retrieving the identity's configuration. For example: \code{E2QWRUHAPOMQZL}.}
}
\description{
Update an origin access identity.
}
\section{Accepted Parameters}{
\preformatted{update_cloud_front_origin_access_identity(
CloudFrontOriginAccessIdentityConfig = list(
CallerReference = "string",
Comment = "string"
),
Id = "string",
IfMatch = "string"
)
}
}
| /service/paws.cloudfront/man/update_cloud_front_origin_access_identity.Rd | permissive | CR-Mercado/paws | R | false | true | 959 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.cloudfront_operations.R
\name{update_cloud_front_origin_access_identity}
\alias{update_cloud_front_origin_access_identity}
\title{Update an origin access identity}
\usage{
update_cloud_front_origin_access_identity(CloudFrontOriginAccessIdentityConfig,
Id, IfMatch = NULL)
}
\arguments{
\item{CloudFrontOriginAccessIdentityConfig}{[required] The identity's configuration information.}
\item{Id}{[required] The identity's id.}
\item{IfMatch}{The value of the \code{ETag} header that you received when retrieving the identity's configuration. For example: \code{E2QWRUHAPOMQZL}.}
}
\description{
Update an origin access identity.
}
\section{Accepted Parameters}{
\preformatted{update_cloud_front_origin_access_identity(
CloudFrontOriginAccessIdentityConfig = list(
CallerReference = "string",
Comment = "string"
),
Id = "string",
IfMatch = "string"
)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_doc.R
\docType{data}
\name{locations}
\alias{locations}
\title{Location of Spots in the Mouse Olfactory Bulb Data}
\format{A data frame with the following columns:
\describe{
\item{X1}{Spots names}
\item{x}{x coordinate of the location of the spots}
\item{y}{y coordinate of the location of the spots}
}}
\source{
\url{https://doi.org/10.1126/science.aaf2403}
}
\usage{
locations
}
\description{
Location of Spots in the Mouse Olfactory Bulb Data
}
\keyword{datasets}
| /R-package/SpatialDE/man/locations.Rd | permissive | seninfobio/SpatialDE | R | false | true | 557 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_doc.R
\docType{data}
\name{locations}
\alias{locations}
\title{Location of Spots in the Mouse Olfactory Bulb Data}
\format{A data frame with the following columns:
\describe{
\item{X1}{Spots names}
\item{x}{x coordinate of the location of the spots}
\item{y}{y coordinate of the location of the spots}
}}
\source{
\url{https://doi.org/10.1126/science.aaf2403}
}
\usage{
locations
}
\description{
Location of Spots in the Mouse Olfactory Bulb Data
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generated_client.R
\name{scripts_list_sql_projects}
\alias{scripts_list_sql_projects}
\title{List the projects a scripts belongs to}
\usage{
scripts_list_sql_projects(id)
}
\arguments{
\item{id}{integer required. The ID of the resource.}
}
\value{
An array containing the following fields:
\item{id}{integer, The ID for this project.}
\item{author}{object, A list containing the following elements:
\itemize{
\item id integer, The ID of this user.
\item name string, This user's name.
\item username string, This user's username.
\item initials string, This user's initials.
\item online boolean, Whether this user is online.
}}
\item{name}{string, The name of this project.}
\item{description}{string, A description of the project}
\item{users}{array, An array containing the following fields:
\itemize{
\item id integer, The ID of this user.
\item name string, This user's name.
\item username string, This user's username.
\item initials string, This user's initials.
\item online boolean, Whether this user is online.
}}
\item{autoShare}{boolean, }
\item{createdAt}{string, }
\item{updatedAt}{string, }
\item{archived}{string, The archival status of the requested object(s).}
}
\description{
List the projects a scripts belongs to
}
| /man/scripts_list_sql_projects.Rd | permissive | wlattner/civis-r | R | false | true | 1,317 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generated_client.R
\name{scripts_list_sql_projects}
\alias{scripts_list_sql_projects}
\title{List the projects a scripts belongs to}
\usage{
scripts_list_sql_projects(id)
}
\arguments{
\item{id}{integer required. The ID of the resource.}
}
\value{
An array containing the following fields:
\item{id}{integer, The ID for this project.}
\item{author}{object, A list containing the following elements:
\itemize{
\item id integer, The ID of this user.
\item name string, This user's name.
\item username string, This user's username.
\item initials string, This user's initials.
\item online boolean, Whether this user is online.
}}
\item{name}{string, The name of this project.}
\item{description}{string, A description of the project}
\item{users}{array, An array containing the following fields:
\itemize{
\item id integer, The ID of this user.
\item name string, This user's name.
\item username string, This user's username.
\item initials string, This user's initials.
\item online boolean, Whether this user is online.
}}
\item{autoShare}{boolean, }
\item{createdAt}{string, }
\item{updatedAt}{string, }
\item{archived}{string, The archival status of the requested object(s).}
}
\description{
List the projects a scripts belongs to
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_cv.r
\name{plot_cv}
\alias{plot_cv}
\title{Plot bycatch estimation CV vs. observer coverage}
\usage{
plot_cv(te, bpue, d = 2, targetcv = 0.3, showplot = TRUE, silent = FALSE, ...)
}
\arguments{
\item{te}{an integer greater than 1. Total effort in fishery (e.g., trips
or sets).}
\item{bpue}{a positive number. Bycatch per unit effort.}
\item{d}{a number greater than or equal to 1. Dispersion index. The dispersion
index corresponds to the variance-to-mean ratio of effort-unit-level bycatch,
so \code{d = 1} corresponds to Poisson-distributed bycatch, and \code{d > 1}
to overdispersed bycatch.}
\item{targetcv}{a non-negative number less than 1. Target CV (as a proportion).
If set to 0, no corresponding minimum observer coverage will be
highlighted or returned.}
\item{showplot}{logical. If \code{FALSE}, plotting is suppressed.}
\item{silent}{logical. If \code{TRUE}, print output to terminal is suppressed.}
\item{...}{additional arguments for compatibility with Shiny.}
}
\value{
If \code{targetcv} is non-zero, a list with one component:
\item{targetoc}{minimum observer coverage in terms of percentage.}
Returned invisibly.
}
\description{
\code{plot_cv} plots projected bycatch estimation CVs vs observer
coverage, and returns minimum observer coverage needed to achieve
user-specified target CV and percentile.
}
\details{
\strong{Caveat:} \code{plot_cv} assumes that (1) observer coverage is
representative, (2) bycatch (\code{bpue}) is in terms of individuals (not
weight) per unit effort, and (3) the specified dispersion index reflects
the highest level of any hierarchical variance (e.g., using dispersion index
at trip level if greater than that at set level). Violating these assumptions
will likely result in negatively biased projections of the observer coverage
needed to meet a specified objective. More conservative (higher) projections
can be obtained by using a higher dispersion index \code{d}. Users may want
to explore uncertainty in dispersion index and in bycatch per unit effort by
varying those inputs.
}
| /man/plot_cv.Rd | no_license | kacurtis/ObsCovgTools | R | false | true | 2,146 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_cv.r
\name{plot_cv}
\alias{plot_cv}
\title{Plot bycatch estimation CV vs. observer coverage}
\usage{
plot_cv(te, bpue, d = 2, targetcv = 0.3, showplot = TRUE, silent = FALSE, ...)
}
\arguments{
\item{te}{an integer greater than 1. Total effort in fishery (e.g., trips
or sets).}
\item{bpue}{a positive number. Bycatch per unit effort.}
\item{d}{a number greater than or equal to 1. Dispersion index. The dispersion
index corresponds to the variance-to-mean ratio of effort-unit-level bycatch,
so \code{d = 1} corresponds to Poisson-distributed bycatch, and \code{d > 1}
to overdispersed bycatch.}
\item{targetcv}{a non-negative number less than 1. Target CV (as a proportion).
If set to 0, no corresponding minimum observer coverage will be
highlighted or returned.}
\item{showplot}{logical. If \code{FALSE}, plotting is suppressed.}
\item{silent}{logical. If \code{TRUE}, print output to terminal is suppressed.}
\item{...}{additional arguments for compatibility with Shiny.}
}
\value{
If \code{targetcv} is non-zero, a list with one component:
\item{targetoc}{minimum observer coverage in terms of percentage.}
Returned invisibly.
}
\description{
\code{plot_cv} plots projected bycatch estimation CVs vs observer
coverage, and returns minimum observer coverage needed to achieve
user-specified target CV and percentile.
}
\details{
\strong{Caveat:} \code{plot_cv} assumes that (1) observer coverage is
representative, (2) bycatch (\code{bpue}) is in terms of individuals (not
weight) per unit effort, and (3) the specified dispersion index reflects
the highest level of any hierarchical variance (e.g., using dispersion index
at trip level if greater than that at set level). Violating these assumptions
will likely result in negatively biased projections of the observer coverage
needed to meet a specified objective. More conservative (higher) projections
can be obtained by using a higher dispersion index \code{d}. Users may want
to explore uncertainty in dispersion index and in bycatch per unit effort by
varying those inputs.
}
|
# Define server logic
server <- function(input, output, session) {
{ # About ----
{ # Images ----
{ # Disclaimer Pics ----
output$disc_pic_1 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Laurie_Montgomery/1 (2).jpg',
height = "100%")}, delete = FALSE)
}
{ # App Basics Pics ----
output$basics_pic_1 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Brett_Seymour/1 (4).jpg',
height = "100%")}, delete = FALSE)
output$basics_pic_2 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Kenan_Chan/1 (12).jpg',
height = "100%")}, delete = FALSE)
output$basics_pic_3 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Kenan_Chan/1 (14).jpg',
height = "100%")}, delete = FALSE)
}
{ # KFMP History Pics -----
output$history_pic_1 <- renderImage({list(
src = 'www/Maps/Satellite/CHIS.png',
height = "100%")}, delete = FALSE)
output$history_pic_2 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Brett_Seymour/1 (10).jpg',
height = "100%")}, delete = FALSE)
output$history_pic_3 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Kelly_Moore/1 (1).jpg',
height = "100%")}, delete = FALSE)
}
{ # Acknowledgments Pics ----
output$ack_pic_1 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Laurie_Montgomery/1 (3).jpg',
height = "100%")}, delete = FALSE)
output$ack_pic_2 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Brett_Seymour/1 (11).jpg',
height = "100%")}, delete = FALSE)
output$ack_pic_3 <- renderImage({list(
src = 'www/Photos/Protocols/boating/boat (2).jpg',
height = "100%")}, delete = FALSE)
}
{ # Acronyms Pics ----
output$acr_pic_1 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Brett_Seymour/1 (6).jpg',
height = "100%")}, delete = FALSE)
output$acr_pic_2 <- renderImage({list(
src = 'www/Photos/Protocols/rpcs/rpcs (1).jpg',
height = "100%")}, delete = FALSE)
}
{ # Blog Pics ----
output$blog_pic_1 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Brett_Seymour/1 (5).jpg',
height = "100%")}, delete = FALSE)
}
{ # FAQ Pics ----
output$faq_pic_1 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Kenan_Chan/1 (5).jpg',
height = "100%")}, delete = FALSE)
}
}
{ # Acronyms ----
output$Acro_Table <- renderDT({
datatable(
Acronyms, rownames = FALSE,
options = list(
searching = FALSE, paging = FALSE,
ordering = TRUE, info = FALSE, scrollX = TRUE,
initComplete = JS(
"function(settings, json) {",
"$(this.api().table().header()).css({'background-color': '#3c8dbc', 'color': '#fff'});}"))) %>%
formatStyle(names(Acronyms), color = "black", backgroundColor = 'white')
})
}
}
{ # Protocols -----
protocol_Server(id = "protocol")
}
{ # Species ----
foundation_Server(id = "kelp")
foundation_Server(id = "p_urchin")
foundation_Server(id = "r_urchin")
foundation_Server(id = "r_abalone")
foundation_Server(id = "lobsta")
foundation_Server(id = "sheep")
foundation_Server(id = "sunflower")
foundation_Server(id = "giant-spined")
# Invasives
foundation_Server(id = "sargassum")
foundation_Server(id = "undaria")
# Disease
output$SSWD <- renderUI({tags$iframe(
style = "height:650px; width:100%; scrolling=yes",
src = "Handbook/Outside_Program_Guides/stars_disease_guide.pdf")
})
output$urchins <- renderUI({tags$iframe(
style = "height:650px; width:100%; scrolling=yes",
src = "Handbook/Outside_Program_Guides/urchin_disease_guide.pdf")
})
output$abalone <- renderImage({list(
src = "www/Handbook/Outside_Program_Guides/healthyVshrunken.jpg",
height = "100%")}, delete = FALSE)
species_guide_Server(id = "species")
Taxa_Server(id = "species")
}
{ # Sampling Locations ----
{ # Images ----
output$site_image1 <- renderImage({list(
src = 'www/Maps/Other/tempmap.jpg',
height = "100%")}, delete = FALSE)
output$site_image2 <- renderImage({list(
src = 'www/Photos/Protocols/site/1 (1).jpg',
height = "100%")}, delete = FALSE)
output$site_image3 <- renderImage({list(
src = "www/Photos/Protocols/boating/boat (1).jpg",
height = "100%")}, delete = FALSE)
output$site_image4 <- renderImage({list(
src = 'www/Photos/Protocols/boating/boat (4).jpg',
height = "100%")}, delete = FALSE)
output$site_image5 <- renderImage({list(
src = 'www/Photos/Protocols/boating/boat (7).jpg',
height = "100%")}, delete = FALSE)
output$site_image6 <- renderImage({list(
src = "www/Photos/Protocols/boating/boat (8).jpg",
height = "100%")}, delete = FALSE)
output$site_image7 <- renderImage({list(
src = "www/Photos/Protocols/boating/boat (6).jpg",
height = "100%")}, delete = FALSE)
}
{ # .... Leaflet Maps ----
output$Leaflet <- renderLeaflet({
leaflet() %>%
setView(lng = -119.7277, lat = 33.76416, zoom = 9) %>%
addProviderTiles(providers$Esri.OceanBasemap, group = "Ocean Base") %>%
addTiles(group = "OSM") %>%
addProviderTiles(providers$Esri, group = "ESRI") %>%
addProviderTiles(providers$Esri.WorldImagery, group = "Sat. Imagery") %>%
addProviderTiles(providers$Esri.WorldTopoMap, group = "Topography") %>%
addProviderTiles(providers$Esri.NatGeoWorldMap, group = "Nat. Geo.") %>%
addPolygons(data = mpa, color = mpa$Color, weight = 1,
fillOpacity = 0.1, opacity = 0.25, label = mpa$NAME, group = "MPA Boundaries") %>%
addPolygons(data = NPS_boundary, weight = 2, color = "green", fill = FALSE,
label = "Channel Islands National Park (CINP) Boundary", group = "CINP Boundary") %>%
addPolygons(data = CINMS_boundary, weight = 2, color = "blue", fill = FALSE,
label = "Channel Islands National Marine Sanctuary (CINMS) Boundary", group = "CINMS Boundary") %>%
addPolylines(data = GPS_Transects, group = "Transects") %>%
addCircles(radius = 1, group = "Transect End Points", color = "green",
lng = Site_Info$Start_Longitude, lat = Site_Info$Start_Latitude, label = Site_Info$Start_Label) %>%
addCircles(radius = 1, group = "Transect End Points", color = "red",
lng = Site_Info$End_Longitude, lat = Site_Info$End_Latitude, label = Site_Info$End_Label) %>%
addMarkers(data = Site_Info, label = paste(Site_Info$IslandCode, Site_Info$SiteName), group = "Site Markers") %>%
addCircleMarkers(data = Buoys_List, label = Buoys_List$DC.description, group = "Buoy Stations") %>%
addLayersControl(
baseGroups = c("Ocean Base", "OSM", "ESRI", "Sat. Imagery", "Topography", "Nat. Geo."),
overlayGroups = c("Site Markers", "Transects", "Transect End Points",
"MPA Boundaries", "CINP Boundary", "CINMS Boundary", "Buoy Stations"),
options = layersControlOptions(collapsed = TRUE)) %>%
addMeasure(position = "bottomleft", primaryLengthUnit = "meters", primaryAreaUnit = "sqmeters",
activeColor = "#3D535D", completedColor = "#7D4479")
})
}
{ # .... Static Imagery -----
Sat_Map_Site <- Site_Selector_Server(id = 'Site_Sat')
satMapCode <- reactive({
if (input$Sat_Isl_Site == "Park") {
return("CHIS")
}
else if (input$Sat_Isl_Site == "Island") {
return(dplyr::filter(Site_Info, IslandName == input$Sat_Isl)$IslandCode[1])
}
else if (input$Sat_Isl_Site == "MPA") {
return(dplyr::filter(Site_Info, Reference == TRUE, IslandName == input$Sat_MPA)$MPA_Code[1])
}
else {
return(Sat_Map_Site()$SiteCode)
}
})
output$satMap <- renderImage({
list(
src = glue("www/Maps/Satellite/{satMapCode()}.png"),
contentType = "image/png",
width = if (input$Sat_Isl_Site == "Park") {1000} else {750},
height = if (input$Sat_Isl_Site == "Park") {772.72} else {750}
)
}, deleteFile = FALSE)
map_text_filename <- reactive({
if (input$Sat_Isl_Site == 'Site') {"Text/Sites/gps_transects.md"}
else if (input$Sat_Isl_Site == 'Park') {NULL}
else {glue::glue("Text/Sites/{satMapCode()}.md")}
})
output$map_text <- renderUI({includeMarkdown(path = map_text_filename())})
site_table_data <- reactive({
if (input$Sat_Isl_Site == 'Island') {
site_data %>%
dplyr::filter(IslandName == input$Sat_Isl) %>%
dplyr::select(-IslandName)
}
else if (input$Sat_Isl_Site == 'MPA') {
site_data %>%
dplyr::filter(IslandName == input$Sat_MPA, Reference == TRUE) %>%
dplyr::select(-IslandName)
}
else if (input$Sat_Isl_Site == 'Site') {
site_data %>%
dplyr::filter(Site == Sat_Map_Site()$SiteName) %>%
dplyr::select(-IslandName)
}
})
output$Site_Table <- renderDT({
datatable(
site_table_data(), rownames = FALSE,
options = list(searching = FALSE, paging = FALSE,
ordering = TRUE, info = FALSE, scrollX = TRUE,
initComplete = JS(
"function(settings, json) {",
"$(this.api().table().header()).css({'background-color': '#3c8dbc', 'color': '#fff'});}"))) %>%
formatStyle(names(site_table_data()), color = "black", backgroundColor = 'white')
})
output$Park_Table <- renderDT({
datatable(
dplyr::select(site_data, -IslandName), rownames = FALSE, extensions = 'ColReorder',
options = list(
scrollY = "500px", scrollX = TRUE, paging = FALSE,
ordering = TRUE, info = FALSE, dom = 'Bfrtip', colReorder = TRUE,
initComplete = JS(
"function(settings, json) {",
"$(this.api().table().header()).css({'background-color': '#3c8dbc', 'color': '#fff'});}"))) %>%
formatStyle(names(dplyr::select(site_data, -IslandName)), color = "black", backgroundColor = 'white')
})
}
{ # .... Bathymetry Maps ----
Bath_Site <- reactive(
dplyr::filter(Site_Info, SiteName == input$Bath_Maps_Site)$SiteNumber)
output$Bathymetry_Map <- renderImage({
list(
src = glue::glue("www/Maps/Bathymetry/{Bath_Site()}.png"),
contentType = "image/png",
width = 1000,
height = 750
)
} , deleteFile = FALSE)
}
{ # .... ARM Maps ----
ARM_Site <- reactive(dplyr::filter(Site_Info, Isl_SiteName == input$Arm_Maps_Site)$SiteNumber)
output$ARM_Map <- renderImage({
list(src = glue("www/Maps/ARMs/{ARM_Site()}.png"),
contentType = "image/png", height = '100%')
}, deleteFile = FALSE)
}
{ # .... Site Descriptions ----
Site_Desc_Site <- reactive(dplyr::filter(Site_Info, Isl_SiteName == input$Site_Description_Site)$SiteNumber)
output$Site_Description <- renderImage({
list(src = glue::glue(
"www/Handbook/Site_Descriptions/{Site_Desc_Site()}.png"),
contentType = "image/png", height = '100%')
}, deleteFile = FALSE)
}
}
{ # Biodiversity ----
{ # Images -----
output$diversity_pic1 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Kenan_Chan/1 (3).jpg',
height = "100%")}, delete = FALSE)
output$diversity_pic2 <- renderImage({list(
src = "www/Photos/Kelp_Forest_Scenes/Kenan_Chan/1 (15).jpg",
height = "100%")}, delete = FALSE)
output$diversity_pic3 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Kenan_Chan/1 (6).jpg',
height = "100%")}, delete = FALSE)
output$diversity_pic4 <- renderImage({list(
src = "www/Photos/Kelp_Forest_Scenes/Kenan_Chan/1 (2).jpg",
height = "100%")}, delete = FALSE)
output$diversity_pic5 <- renderImage({list(
src = "www/Photos/Kelp_Forest_Scenes/Kenan_Chan/1 (1).jpg",
height = "100%")}, delete = FALSE)
}
diversity_Server(id = "richness")
diversity_Server(id = "shannon")
diversity_Server(id = "simpson")
}
{ # Community Similarity ----
{ # Images -----
output$com_pic_1 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Kenan_Chan/1 (3).jpg',
height = "100%")}, delete = FALSE)
output$com_pic_2 <- renderImage({list(
src = "www/Photos/Kelp_Forest_Scenes/Kenan_Chan/1 (15).jpg",
height = "100%")}, delete = FALSE)
}
{ # 2D ----
Two_D_data <- reactive({
if (input$radio_2D_years == "All Years (Fewer Species)"
& input$radio_2D_color == "Reserve Status") {
nMDS %>%
dplyr::filter(SurveyYear == input$slider2d_all,
Type == '2D_All') %>%
dplyr::mutate(Color = ReserveStatus)
}
else if (input$radio_2D_years == "All Years (Fewer Species)"
& input$radio_2D_color == "Island Name") {
nMDS %>%
dplyr::filter(SurveyYear == input$slider2d_all,
Type == '2D_All') %>%
dplyr::mutate(Color = IslandName)
}
else if (input$radio_2D_years == "Years > 2004 (All Species)"
& input$radio_2D_color == "Reserve Status") {
nMDS %>%
dplyr::filter(SurveyYear == input$slider2d_2005,
Type == '2D_2005') %>%
dplyr::mutate(Color = ReserveStatus)
}
else if (input$radio_2D_years == "Years > 2004 (All Species)"
& input$radio_2D_color == "Island Name") {
nMDS %>%
dplyr::filter(SurveyYear == input$slider2d_2005,
Type == '2D_2005') %>%
dplyr::mutate(Color = IslandName)
}
})
output$Two_D <- renderPlot({
ggplot(data = Two_D_data(), aes(x = `Dim 1`, y = `Dim 2`)) +
geom_point(size = 4, aes(shape = ReserveStatus, color = Color)) +
geom_text(size = 3, vjust = 2, aes(label = SiteCode)) +
# stat_ellipse(aes(color = IslandName), level = 0.95) +
# stat_stars(aes(color = ReserveStatus)) +
scale_colour_manual(values = Island_Colors) +
coord_fixed() +
scale_x_reverse() +
# coord_flip() +
labs(title = Two_D_data()$SurveyYear, color = input$radio_2D_color, shape = "Reserve Status") +
nMDS_theme()
}) %>%
shiny::bindCache(Two_D_data(), cache = cachem::cache_disk("./cache/2d-cache"))
}
{ # 3D ----
Three_D_data <- reactive({
if (input$radio_3D_years == "All Years (Fewer Species)"
& input$radio_3D_color == "Reserve Status") {
nMDS %>%
dplyr::filter(SurveyYear == input$slider3d_all,
Type == '3D_All') %>%
dplyr::mutate(Color = ReserveStatus)
}
else if (input$radio_3D_years == "All Years (Fewer Species)"
& input$radio_3D_color == "Island Name") {
nMDS %>%
dplyr::filter(SurveyYear == input$slider3d_all,
Type == '3D_All') %>%
dplyr::mutate(Color = IslandName)
}
else if (input$radio_3D_years == "Years > 2004 (All Species)"
& input$radio_3D_color == "Reserve Status") {
nMDS %>%
dplyr::filter(SurveyYear == input$slider3d_2005,
Type == '3D_2005') %>%
dplyr::mutate(Color = ReserveStatus)
}
else if (input$radio_3D_years == "Years > 2004 (All Species)"
& input$radio_3D_color == "Island Name") {
nMDS %>%
dplyr::filter(SurveyYear == input$slider3d_2005,
Type == '3D_2005') %>%
dplyr::mutate(Color = IslandName)
}
})
output$Three_D <- renderPlotly({
plotly::plot_ly(Three_D_data(), x = ~`Dim 1`, y = ~`Dim 2`, z = ~`Dim 3`,
# frame = ~SurveyYear,
text = ~SiteName, hoverinfo = "text",
color = ~Color, colors = Island_Colors) %>%
plotly::add_markers(symbol = ~ReserveStatus,
symbols = c('Inside' = "cross-open", 'Outside' = "square")) %>%
plotly::add_text(text = ~SiteCode, showlegend = FALSE) %>%
plotly::layout(title = list(text = paste(Three_D_data()$SurveyYear)),
scene = list(xaxis = list(title = 'X'),
yaxis = list(title = 'Y'),
zaxis = list(title = 'Z')))
# %>%
# plotly::animation_opts(1500, easing = "linear")
}) %>%
shiny::bindCache(Three_D_data(), cache = cachem::cache_disk("./cache/3d-cache"))
}
}
{ # Variable Importance ----
{ # Images -----
output$cucumba <- renderImage({list(
src = "www/Photos/Indicator_Species/11007.jpg",
height = "100%")}, delete = FALSE)
output$lobsta <- renderImage({list(
src = "www/Photos/Indicator_Species/8001.jpg",
height = "100%")}, delete = FALSE)
output$rose <- renderImage({list(
src = "www/Photos/Indicator_Species/6002.jpg",
height = "100%")}, delete = FALSE)
output$kelkel <- renderImage({list(
src = "www/Photos/Indicator_Species/9006.jpg",
height = "100%")}, delete = FALSE)
}
{ # Random Forest Models ----
VI_Server(id = "reserve")
VI_Server(id = "island")
}
{ # Indicator Species Analysis ----
}
}
{ # Biomass and Density ----
{ # Images ----
output$Biomass_pic_1 <- renderImage({list(
src = "www/Photos/Kelp_Forest_Scenes/Kenan_Chan/1 (4).jpg",
height = "100%")}, delete = FALSE)
output$Biomass_pic_2 <- renderImage({list(
src = "www/Photos/Kelp_Forest_Scenes/Kenan_Chan/1 (10).jpg",
height = "100%")}, delete = FALSE)
output$Biomass_pic_3 <- renderImage({list(
src = "www/Photos/Kelp_Forest_Scenes/Shaun_Wolfe/1 (1).jpg",
height = "100%")}, delete = FALSE)
output$Biomass_pic_4 <- renderImage({list(
src = "www/Photos/Kelp_Forest_Scenes/Shaun_Wolfe/1 (3).jpg",
height = "100%")}, delete = FALSE)
output$Biomass_pic_5 <- renderImage({list(
src = "www/Photos/Kelp_Forest_Scenes/Shaun_Wolfe/1 (5).jpg",
height = "100%")}, delete = FALSE)
output$Density_pic_1 <- renderImage({list(
src = "www/Photos/Kelp_Forest_Scenes/Brett_Seymour/1 (3).jpg",
height = "100%")}, delete = FALSE)
output$Density_pic_2 <- renderImage({list(
src = "www/Photos/Kelp_Forest_Scenes/Brett_Seymour/1 (6).jpg",
height = "100%")}, delete = FALSE)
output$Density_pic_3 <- renderImage({list(
src = "www/Photos/Kelp_Forest_Scenes/Brett_Seymour/1 (8).jpg",
height = "100%")}, delete = FALSE)
output$Density_pic_4 <- renderImage({list(
src = "www/Photos/Kelp_Forest_Scenes/Shaun_Wolfe/1 (4).jpg",
height = "100%")}, delete = FALSE)
}
{ # Time Series ----
Time_Server(id = "biomass")
Time_Server(id = "density")
}
{ # Ratios ----
Ratio_Server(id = 'biomass_ratio')
Ratio_Server(id = 'density_ratio')
}
{ # Map Bubbles ----
bubbles_Server(id = "biomass_bubbles")
bubbles_Server(id = "density_bubbles")
}
}
{ # Size Frequencies ----
{ # Images ----
output$Size_pic_1 <- renderImage({list(
src = "www/Photos/Protocols/nhsf/nhsf (1).jpg",
height = "100%")}, delete = FALSE)
output$Size_pic_2 <- renderImage({list(
src = "www/Photos/Protocols/nhsf/nhsf (4).jpg",
height = "100%")}, delete = FALSE)
}
{ # Box Plots ----
Site <- Site_Selector_Server(id = "sizes")
Size_Data <- reactive({
if (input$size_category == "Invertebrates") {Benthic_Sizes %>%
dplyr::filter(ScientificName != "Macrocystis pyrifera",
CommonName != "Coronado urchin", CommonName != "Chestnut Cowrie" & SurveyYear > 1990)}
else if (input$size_category == "Algae") {Benthic_Sizes %>% dplyr::filter(ScientificName == "Macrocystis pyrifera")}
else if (input$size_category == "Fish") {Fish_Sizes}
})
output$size_site_year <- renderUI({
if (input$size_site_radio == "One Site") {
Site_Selector_UI(id = "sizes")
}
else if (input$size_site_radio == "All Sites") {
tagList(
sliderInput(inputId = "size_year_slider", label = "Year:",
min = min(Size_Year_Species()$SurveyYear),
max = max(Size_Year_Species()$SurveyYear),
value = min(Size_Year_Species()$SurveyYear),
sep = "", step = 1, animate = TRUE),
h5("Animation Note: Animals with many measurements take a long time to plot. ",
"Plots are cached within a session. ",
"Run the animation once and allow all plots to complete (watch year in top left corner). ",
"Re-run to show smooth animation from cached plots.")
)
}
})
Size_Year_Species <- reactive({Size_Data() %>% dplyr::filter(CommonName == input$size_species)})
Site_Levels <- reactive({
if (input$size_year_slider < 2001) {Site_Info %>% dplyr::filter(SiteNumber < 17) %>% dplyr::arrange(Longitude)}
else if (input$size_year_slider > 2000 & input$size_year_slider < 2005) {
Site_Info %>% dplyr::filter(SiteNumber < 22) %>% dplyr::arrange(Longitude)}
else if (input$size_year_slider > 2004) {Site_Info %>% dplyr::arrange(Longitude)}
})
Size_Year_Data <- reactive({
Size_Year_Species() %>%
dplyr::filter(SurveyYear == input$size_year_slider) %>%
dplyr::mutate(SiteCode = factor(SiteCode, levels = Site_Levels()$SiteCode))
})
Size_Site_Data <- reactive(Size_Data() %>% dplyr::filter(SiteName == Site()$SiteName))
species_choice <- reactive({
if (input$size_site_radio == "One Site") {levels(factor(Size_Site_Data()$CommonName))}
else if (input$size_site_radio == "All Sites") {levels(factor(Size_Data()$CommonName))}
})
output$size_species_UI <- renderUI({
selectInput(inputId = "size_species", label = "Species:", choices = species_choice())
})
Size_Site_Data_Subset <- reactive({Size_Site_Data() %>% dplyr::filter(CommonName == input$size_species)})
# Box plots of one species' size distribution at one site across survey years,
# with mean-size points and per-year sample-count labels pinned to the axis.
output$size_site_plot <- renderPlot({
  d <- Size_Site_Data_Subset()  # evaluate the reactive once per draw
  ggplot2::ggplot() +
    ggplot2::geom_boxplot(data = d, width = 150,
                          aes(x = Date, y = Size, group = SurveyYear, color = CommonName)) +
    ggplot2::geom_point(data = d, size = 1, color = "black",
                        aes(x = Date, y = Mean_Size, group = SurveyYear)) +
    # Map the label from the layer's own data; `data$col` inside aes() can
    # misalign rows if ggplot reorders or subsets the data.
    ggplot2::geom_label(data = d, size = 3, hjust = .5, vjust = 0,
                        aes(x = Date, y = -Inf, label = Total_Count)) +
    ggplot2::geom_hline(yintercept = 0) +
    ggplot2::scale_y_continuous(limits = c(0, NA), expand = expansion(mult = c(0.1, 0))) +
    ggplot2::scale_x_date(date_labels = "%Y", breaks = unique(d$Date),
                          expand = expansion(mult = c(0.01, 0.01)),
                          limits = c(min(d$Date) - 150, max(d$Date) + 150)) +
    ggplot2::labs(title = d$ScientificName,
                  subtitle = glue("{d$IslandName} {d$SiteName}"),
                  color = "Common Name", x = "Year", y = "Size Distribution") +
    ggplot2::scale_color_manual(values = SpeciesColor, limits = force) +
    Boxplot_theme()
}) %>%
  shiny::bindCache(Size_Site_Data_Subset(), cache = cachem::cache_disk("./cache/sizes-cache"))
# Box plots of one species' sizes at every site for a single survey year;
# empty site levels are kept so the x-axis stays stable across years.
output$size_year_plot <- renderPlot({
  d <- Size_Year_Data()  # evaluate the reactive once per draw
  ggplot2::ggplot() +
    ggplot2::geom_boxplot(data = d, aes(x = SiteCode, y = Size, group = SiteCode, color = CommonName)) +
    ggplot2::geom_point(data = d, size = 1, color = "black", aes(x = SiteCode, y = Mean_Size, group = SurveyYear)) +
    # Label column mapped from the layer data itself (not an external vector).
    ggplot2::geom_label(data = d, size = 3, hjust = .5, vjust = 0, aes(x = SiteCode, y = -Inf, label = Total_Count)) +
    ggplot2::geom_hline(yintercept = 0) +
    ggplot2::scale_y_continuous(limits = c(0, NA), expand = expansion(mult = c(0.1, 0.01))) +
    ggplot2::scale_x_discrete(drop = FALSE) +
    ggplot2::labs(title = d$SurveyYear, color = "Common Name", x = NULL, y = "Size Distribution",
                  caption = "Sites arranged by longitude (west to east)") +
    ggplot2::scale_color_manual(values = SpeciesColor, limits = force) +
    Boxplot_theme()
}) %>%
  shiny::bindCache(Size_Year_Data(), cache = cachem::cache_disk("./cache/sizes-cache"))
}
{ # ARMs ----
# All ARM (Artificial Recruitment Module) size records.  The size-limit
# filter is intentionally disabled; kept commented for future use.
ARM_Data <- reactive({
  ARM_Sizes
  # %>%
  #   dplyr::filter(Size_mm == input$Size_Limit)
})
# Mode-dependent control: a site picker in one-site mode, or an animated
# year slider (bounded by the selected species' survey years) in all-sites mode.
output$arm_site_year <- renderUI({
  if (input$arm_site_radio == "One Site") {
    selectInput(inputId = "ARM_Sites",
                label = "Site:",
                # TRUE, not the reassignable shorthand T.
                choices = dplyr::arrange(dplyr::filter(Site_Info, ARMs == TRUE), Longitude)$SiteName)
  }
  else if (input$arm_site_radio == "All Sites") {
    tagList(
      sliderInput(inputId = "arm_year_slider", label = "Year:",
                  min = min(ARM_Year_Species()$SurveyYear),
                  max = max(ARM_Year_Species()$SurveyYear),
                  value = min(ARM_Year_Species()$SurveyYear),
                  sep = "", step = 1, animate = TRUE),
      h5("Animation Note: Animals with many measurements take a long time to plot. ",
         "Plots are cached within a session. ",
         "Run the animation once and allow all plots to complete (watch year in top left corner). ",
         "Re-run to show smooth animation from cached plots.")
    )
  }
})
# All ARM size records for the currently selected species.
ARM_Year_Species <- reactive({ARM_Data() %>% dplyr::filter(CommonName == input$arm_species)})
# Site ordering for the all-sites plot.  The program grew over time
# (fewer sites before 2001, more through 2004, all sites thereafter).
# The slider value is a single number, so use an else-if chain instead of
# the original vectorized `&` on a scalar condition.
ARM_Site_Levels <- reactive({
  year <- input$arm_year_slider
  if (year < 2001) {
    Site_Info %>% dplyr::filter(SiteNumber < 17) %>% dplyr::arrange(Longitude)
  } else if (year < 2005) {
    Site_Info %>% dplyr::filter(SiteNumber < 22) %>% dplyr::arrange(Longitude)
  } else {
    Site_Info %>% dplyr::arrange(Longitude)
  }
})
# One year of ARM sizes with SiteCode releveled west-to-east for plotting.
ARM_Size_Year_Data <- reactive({
  ARM_Year_Species() %>%
    dplyr::filter(SurveyYear == input$arm_year_slider) %>%
    dplyr::mutate(SiteCode = factor(SiteCode, levels = ARM_Site_Levels()$SiteCode))
})
# All ARM sizes at the selected site.
ARM_Size_Site_Data <- reactive(ARM_Data() %>% dplyr::filter(SiteName == input$ARM_Sites))
# Species drop-down options depend on mode: one site's species or all species.
arm_species_choice <- reactive({
  if (input$arm_site_radio == "One Site") {
    levels(factor(ARM_Size_Site_Data()$CommonName))
  } else if (input$arm_site_radio == "All Sites") {
    levels(factor(ARM_Data()$CommonName))
  }
})
output$arm_species_UI <- renderUI({
  selectInput(inputId = "arm_species", label = "Species:", choices = arm_species_choice())
})
# One site, one species: the data behind the ARM site-level box plot.
ARM_Size_Site_Data_Subset <- reactive({ARM_Size_Site_Data() %>% dplyr::filter(CommonName == input$arm_species)})
# ARM version of the one-site size-distribution box plot (sizes in mm).
output$arm_site_plot <- renderPlot({
  d <- ARM_Size_Site_Data_Subset()  # evaluate the reactive once per draw
  ggplot2::ggplot() +
    ggplot2::geom_boxplot(data = d, width = 150,
                          aes(x = Date, y = Size_mm, group = SurveyYear, color = CommonName)) +
    ggplot2::geom_point(data = d, size = 1, color = "black",
                        aes(x = Date, y = Mean_Size, group = SurveyYear)) +
    # Map the label from the layer's own data; `data$col` inside aes() can
    # misalign rows if ggplot reorders or subsets the data.
    ggplot2::geom_label(data = d, size = 3, hjust = .5, vjust = 0,
                        aes(x = Date, y = -Inf, label = Total_Count)) +
    ggplot2::geom_hline(yintercept = 0) +
    ggplot2::scale_y_continuous(limits = c(0, NA), expand = expansion(mult = c(0.1, 0))) +
    ggplot2::scale_x_date(date_labels = "%Y", breaks = unique(d$Date),
                          expand = expansion(mult = c(0.01, 0.01)),
                          limits = c(min(d$Date) - 150, max(d$Date) + 150)) +
    ggplot2::labs(title = d$ScientificName,
                  subtitle = glue("{d$IslandName} {d$SiteName}"),
                  color = "Common Name", x = "Year", y = "Size Distribution") +
    ggplot2::scale_color_manual(values = SpeciesColor, limits = force) +
    Boxplot_theme()
}) %>%
  shiny::bindCache(ARM_Size_Site_Data_Subset(), cache = cachem::cache_disk("./cache/sizes-cache"))
# ARM version of the all-sites, single-year box plot (sizes in mm).
output$arm_year_plot <- renderPlot({
  d <- ARM_Size_Year_Data()  # evaluate the reactive once per draw
  ggplot2::ggplot() +
    ggplot2::geom_boxplot(data = d, aes(x = SiteCode, y = Size_mm, group = SiteCode, color = CommonName)) +
    ggplot2::geom_point(data = d, size = 1, color = "black", aes(x = SiteCode, y = Mean_Size, group = SurveyYear)) +
    # Label column mapped from the layer data itself (not an external vector).
    ggplot2::geom_label(data = d, size = 3, hjust = .5, vjust = 0, aes(x = SiteCode, y = -Inf, label = Total_Count)) +
    ggplot2::geom_hline(yintercept = 0) +
    ggplot2::scale_y_continuous(limits = c(0, NA), expand = expansion(mult = c(0.1, 0.01))) +
    ggplot2::scale_x_discrete(drop = FALSE) +
    ggplot2::labs(title = d$SurveyYear, color = "Common Name", x = NULL, y = "Size Distribution",
                  caption = "Sites arranged by longitude (west to east)") +
    ggplot2::scale_color_manual(values = SpeciesColor, limits = force) +
    Boxplot_theme()
}) %>%
  shiny::bindCache(ARM_Size_Year_Data(), cache = cachem::cache_disk("./cache/sizes-cache"))
}
}
{ # Reports -----
# Annual report PDF selected by the user, shown in an embedded iframe.
output$Annual_Report <- renderUI({
tags$iframe(style="height:750px; width:100%; scrolling=yes", src = glue("Annual_Reports/{input$Report}.pdf"))
})
# Word frequencies for the chosen report year, used by the word cloud below.
Text_Data <- reactive(Text %>% dplyr::filter(Year == input$Cloud))
# Word cloud of report text; `input$cloud_n` caps the number of words drawn.
output$cloud_plot <- renderPlot(bg = "black", {
wordcloud::wordcloud(
words = Text_Data()$word,
freq = Text_Data()$n, min.freq = 1, scale = c(4, .75),
max.words = input$cloud_n, random.order = FALSE, rot.per = 0.25,
colors = brewer.pal(8, "Dark2"))
}) %>%
shiny::bindCache(input$cloud_n, Text_Data(), cache = cachem::cache_disk("./cache/word-cache"))
# Historical handbook versions, protocol reviews, and collaborative reports,
# each served as a PDF iframe keyed off its own select input.
output$Handbook <- renderUI({
tags$iframe(style="height:750px; width:100%; scrolling=yes", src = glue("Handbook/Full_Versions/{input$old_handy}.pdf"))
})
output$ReviewsOutput <- renderUI({
tags$iframe(style="height:750px; width:100%; scrolling=yes", src = glue("Handbook/Reviews/{input$reviews}.pdf"))
})
output$CollaborativeOutput <- renderUI({
tags$iframe(style="height:750px; width:100%; scrolling=yes", src = glue("Handbook/Collaborative_Reports/{input$collab}.pdf"))
})
}
}
# TODO add kelp and gorgonian species guide and protocol guide
# TODO add shell size frequency guides
| /App/server.R | no_license | cullen-molitor/KFM_Shiny_App | R | false | false | 33,879 | r |
# Define server logic
server <- function(input, output, session) {
{ # About ----
{ # Images ----
{ # Disclaimer Pics ----
output$disc_pic_1 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Laurie_Montgomery/1 (2).jpg',
height = "100%")}, delete = FALSE)
}
{ # App Basics Pics ----
output$basics_pic_1 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Brett_Seymour/1 (4).jpg',
height = "100%")}, delete = FALSE)
output$basics_pic_2 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Kenan_Chan/1 (12).jpg',
height = "100%")}, delete = FALSE)
output$basics_pic_3 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Kenan_Chan/1 (14).jpg',
height = "100%")}, delete = FALSE)
}
{ # KFMP History Pics -----
output$history_pic_1 <- renderImage({list(
src = 'www/Maps/Satellite/CHIS.png',
height = "100%")}, delete = FALSE)
output$history_pic_2 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Brett_Seymour/1 (10).jpg',
height = "100%")}, delete = FALSE)
output$history_pic_3 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Kelly_Moore/1 (1).jpg',
height = "100%")}, delete = FALSE)
}
{ # Acknowledgments Pics ----
output$ack_pic_1 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Laurie_Montgomery/1 (3).jpg',
height = "100%")}, delete = FALSE)
output$ack_pic_2 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Brett_Seymour/1 (11).jpg',
height = "100%")}, delete = FALSE)
output$ack_pic_3 <- renderImage({list(
src = 'www/Photos/Protocols/boating/boat (2).jpg',
height = "100%")}, delete = FALSE)
}
{ # Acronyms Pics ----
output$acr_pic_1 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Brett_Seymour/1 (6).jpg',
height = "100%")}, delete = FALSE)
output$acr_pic_2 <- renderImage({list(
src = 'www/Photos/Protocols/rpcs/rpcs (1).jpg',
height = "100%")}, delete = FALSE)
}
{ # Blog Pics ----
output$blog_pic_1 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Brett_Seymour/1 (5).jpg',
height = "100%")}, delete = FALSE)
}
{ # FAQ Pics ----
output$faq_pic_1 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Kenan_Chan/1 (5).jpg',
height = "100%")}, delete = FALSE)
}
}
{ # Acronyms ----
# Static table of program acronyms: no search box or paging, sortable,
# with the app's blue header theme applied via DataTables' initComplete hook.
output$Acro_Table <- renderDT({
  header_js <- JS(
    "function(settings, json) {",
    "$(this.api().table().header()).css({'background-color': '#3c8dbc', 'color': '#fff'});}")
  table_opts <- list(
    searching = FALSE, paging = FALSE,
    ordering = TRUE, info = FALSE, scrollX = TRUE,
    initComplete = header_js)
  acro_dt <- datatable(Acronyms, rownames = FALSE, options = table_opts)
  formatStyle(acro_dt, names(Acronyms), color = "black", backgroundColor = "white")
})
}
}
{ # Protocols -----
protocol_Server(id = "protocol")
}
{ # Species ----
foundation_Server(id = "kelp")
foundation_Server(id = "p_urchin")
foundation_Server(id = "r_urchin")
foundation_Server(id = "r_abalone")
foundation_Server(id = "lobsta")
foundation_Server(id = "sheep")
foundation_Server(id = "sunflower")
foundation_Server(id = "giant-spined")
# Invasives
foundation_Server(id = "sargassum")
foundation_Server(id = "undaria")
# Disease
output$SSWD <- renderUI({tags$iframe(
style = "height:650px; width:100%; scrolling=yes",
src = "Handbook/Outside_Program_Guides/stars_disease_guide.pdf")
})
output$urchins <- renderUI({tags$iframe(
style = "height:650px; width:100%; scrolling=yes",
src = "Handbook/Outside_Program_Guides/urchin_disease_guide.pdf")
})
output$abalone <- renderImage({list(
src = "www/Handbook/Outside_Program_Guides/healthyVshrunken.jpg",
height = "100%")}, delete = FALSE)
species_guide_Server(id = "species")
Taxa_Server(id = "species")
}
{ # Sampling Locations ----
{ # Images ----
output$site_image1 <- renderImage({list(
src = 'www/Maps/Other/tempmap.jpg',
height = "100%")}, delete = FALSE)
output$site_image2 <- renderImage({list(
src = 'www/Photos/Protocols/site/1 (1).jpg',
height = "100%")}, delete = FALSE)
output$site_image3 <- renderImage({list(
src = "www/Photos/Protocols/boating/boat (1).jpg",
height = "100%")}, delete = FALSE)
output$site_image4 <- renderImage({list(
src = 'www/Photos/Protocols/boating/boat (4).jpg',
height = "100%")}, delete = FALSE)
output$site_image5 <- renderImage({list(
src = 'www/Photos/Protocols/boating/boat (7).jpg',
height = "100%")}, delete = FALSE)
output$site_image6 <- renderImage({list(
src = "www/Photos/Protocols/boating/boat (8).jpg",
height = "100%")}, delete = FALSE)
output$site_image7 <- renderImage({list(
src = "www/Photos/Protocols/boating/boat (6).jpg",
height = "100%")}, delete = FALSE)
}
{ # .... Leaflet Maps ----
# Interactive Leaflet map of the monitoring sites, centered on the
# Channel Islands, with switchable base maps and toggleable overlays.
output$Leaflet <- renderLeaflet({
leaflet() %>%
setView(lng = -119.7277, lat = 33.76416, zoom = 9) %>%
# Base map choices (one visible at a time via the layers control).
addProviderTiles(providers$Esri.OceanBasemap, group = "Ocean Base") %>%
addTiles(group = "OSM") %>%
addProviderTiles(providers$Esri, group = "ESRI") %>%
addProviderTiles(providers$Esri.WorldImagery, group = "Sat. Imagery") %>%
addProviderTiles(providers$Esri.WorldTopoMap, group = "Topography") %>%
addProviderTiles(providers$Esri.NatGeoWorldMap, group = "Nat. Geo.") %>%
# Overlay polygons: MPA boundaries plus the park and sanctuary outlines.
addPolygons(data = mpa, color = mpa$Color, weight = 1,
fillOpacity = 0.1, opacity = 0.25, label = mpa$NAME, group = "MPA Boundaries") %>%
addPolygons(data = NPS_boundary, weight = 2, color = "green", fill = FALSE,
label = "Channel Islands National Park (CINP) Boundary", group = "CINP Boundary") %>%
addPolygons(data = CINMS_boundary, weight = 2, color = "blue", fill = FALSE,
label = "Channel Islands National Marine Sanctuary (CINMS) Boundary", group = "CINMS Boundary") %>%
# Transect lines with green (start) and red (end) endpoint markers.
addPolylines(data = GPS_Transects, group = "Transects") %>%
addCircles(radius = 1, group = "Transect End Points", color = "green",
lng = Site_Info$Start_Longitude, lat = Site_Info$Start_Latitude, label = Site_Info$Start_Label) %>%
addCircles(radius = 1, group = "Transect End Points", color = "red",
lng = Site_Info$End_Longitude, lat = Site_Info$End_Latitude, label = Site_Info$End_Label) %>%
# Site pins and oceanographic buoy station markers.
addMarkers(data = Site_Info, label = paste(Site_Info$IslandCode, Site_Info$SiteName), group = "Site Markers") %>%
addCircleMarkers(data = Buoys_List, label = Buoys_List$DC.description, group = "Buoy Stations") %>%
addLayersControl(
baseGroups = c("Ocean Base", "OSM", "ESRI", "Sat. Imagery", "Topography", "Nat. Geo."),
overlayGroups = c("Site Markers", "Transects", "Transect End Points",
"MPA Boundaries", "CINP Boundary", "CINMS Boundary", "Buoy Stations"),
options = layersControlOptions(collapsed = TRUE)) %>%
# Distance/area measuring tool in the bottom-left corner.
addMeasure(position = "bottomleft", primaryLengthUnit = "meters", primaryAreaUnit = "sqmeters",
activeColor = "#3D535D", completedColor = "#7D4479")
})
}
{ # .... Static Imagery -----
Sat_Map_Site <- Site_Selector_Server(id = 'Site_Sat')
# Map-file code for the current selection scope: whole park, one island,
# one MPA, or (default) the single site chosen in the selector module.
satMapCode <- reactive({
  switch(input$Sat_Isl_Site,
    "Park" = "CHIS",
    "Island" = dplyr::filter(Site_Info, IslandName == input$Sat_Isl)$IslandCode[1],
    "MPA" = dplyr::filter(Site_Info, Reference == TRUE, IslandName == input$Sat_MPA)$MPA_Code[1],
    Sat_Map_Site()$SiteCode)
})
# Serve the pre-rendered satellite map PNG for the current selection.
# The park-wide map uses a larger canvas at its native aspect ratio.
output$satMap <- renderImage({
  park_view <- input$Sat_Isl_Site == "Park"
  list(
    src = glue("www/Maps/Satellite/{satMapCode()}.png"),
    contentType = "image/png",
    width = if (park_view) 1000 else 750,
    height = if (park_view) 772.72 else 750
  )
}, deleteFile = FALSE)
# Markdown blurb shown beside the map.  Single-site mode shows the shared
# GPS-transect note; the park-wide view has no blurb (NULL); everything
# else resolves to a per-code markdown file.
map_text_filename <- reactive({
  switch(input$Sat_Isl_Site,
    'Site' = "Text/Sites/gps_transects.md",
    'Park' = NULL,
    glue::glue("Text/Sites/{satMapCode()}.md"))
})
output$map_text <- renderUI({includeMarkdown(path = map_text_filename())})
# Site metadata rows for the current map selection, with the island column
# dropped (it is implied by the selection).  "Park" yields NULL here; the
# whole-park view is served by the separate Park_Table output.  The common
# `select(-IslandName)` step is hoisted out of the original three branches.
site_table_data <- reactive({
  subset <- switch(input$Sat_Isl_Site,
    'Island' = dplyr::filter(site_data, IslandName == input$Sat_Isl),
    'MPA' = dplyr::filter(site_data, IslandName == input$Sat_MPA, Reference == TRUE),
    'Site' = dplyr::filter(site_data, Site == Sat_Map_Site()$SiteName))
  if (is.null(subset)) NULL else dplyr::select(subset, -IslandName)
})
output$Site_Table <- renderDT({
datatable(
site_table_data(), rownames = FALSE,
options = list(searching = FALSE, paging = FALSE,
ordering = TRUE, info = FALSE, scrollX = TRUE,
initComplete = JS(
"function(settings, json) {",
"$(this.api().table().header()).css({'background-color': '#3c8dbc', 'color': '#fff'});}"))) %>%
formatStyle(names(site_table_data()), color = "black", backgroundColor = 'white')
})
output$Park_Table <- renderDT({
datatable(
dplyr::select(site_data, -IslandName), rownames = FALSE, extensions = 'ColReorder',
options = list(
scrollY = "500px", scrollX = TRUE, paging = FALSE,
ordering = TRUE, info = FALSE, dom = 'Bfrtip', colReorder = TRUE,
initComplete = JS(
"function(settings, json) {",
"$(this.api().table().header()).css({'background-color': '#3c8dbc', 'color': '#fff'});}"))) %>%
formatStyle(names(dplyr::select(site_data, -IslandName)), color = "black", backgroundColor = 'white')
})
}
{ # .... Bathymetry Maps ----
Bath_Site <- reactive(
dplyr::filter(Site_Info, SiteName == input$Bath_Maps_Site)$SiteNumber)
output$Bathymetry_Map <- renderImage({
list(
src = glue::glue("www/Maps/Bathymetry/{Bath_Site()}.png"),
contentType = "image/png",
width = 1000,
height = 750
)
} , deleteFile = FALSE)
}
{ # .... ARM Maps ----
ARM_Site <- reactive(dplyr::filter(Site_Info, Isl_SiteName == input$Arm_Maps_Site)$SiteNumber)
output$ARM_Map <- renderImage({
list(src = glue("www/Maps/ARMs/{ARM_Site()}.png"),
contentType = "image/png", height = '100%')
}, deleteFile = FALSE)
}
{ # .... Site Descriptions ----
Site_Desc_Site <- reactive(dplyr::filter(Site_Info, Isl_SiteName == input$Site_Description_Site)$SiteNumber)
output$Site_Description <- renderImage({
list(src = glue::glue(
"www/Handbook/Site_Descriptions/{Site_Desc_Site()}.png"),
contentType = "image/png", height = '100%')
}, deleteFile = FALSE)
}
}
{ # Biodiversity ----
{ # Images -----
output$diversity_pic1 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Kenan_Chan/1 (3).jpg',
height = "100%")}, delete = FALSE)
output$diversity_pic2 <- renderImage({list(
src = "www/Photos/Kelp_Forest_Scenes/Kenan_Chan/1 (15).jpg",
height = "100%")}, delete = FALSE)
output$diversity_pic3 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Kenan_Chan/1 (6).jpg',
height = "100%")}, delete = FALSE)
output$diversity_pic4 <- renderImage({list(
src = "www/Photos/Kelp_Forest_Scenes/Kenan_Chan/1 (2).jpg",
height = "100%")}, delete = FALSE)
output$diversity_pic5 <- renderImage({list(
src = "www/Photos/Kelp_Forest_Scenes/Kenan_Chan/1 (1).jpg",
height = "100%")}, delete = FALSE)
}
diversity_Server(id = "richness")
diversity_Server(id = "shannon")
diversity_Server(id = "simpson")
}
{ # Community Similarity ----
{ # Images -----
output$com_pic_1 <- renderImage({list(
src = 'www/Photos/Kelp_Forest_Scenes/Kenan_Chan/1 (3).jpg',
height = "100%")}, delete = FALSE)
output$com_pic_2 <- renderImage({list(
src = "www/Photos/Kelp_Forest_Scenes/Kenan_Chan/1 (15).jpg",
height = "100%")}, delete = FALSE)
}
{ # 2D ----
# 2-D nMDS points for the selected year, with a `Color` column derived from
# the chosen grouping.  The original enumerated all four year-set / color
# combinations in near-identical branches; the filter and mutate are shared,
# so compute the varying pieces once instead.
Two_D_data <- reactive({
  all_years <- input$radio_2D_years == "All Years (Fewer Species)"
  # Pre-2005 surveys covered fewer species, so two ordinations exist; each
  # has its own year slider.
  type <- if (all_years) '2D_All' else '2D_2005'
  year <- if (all_years) input$slider2d_all else input$slider2d_2005
  nMDS %>%
    dplyr::filter(SurveyYear == year, Type == type) %>%
    dplyr::mutate(
      Color = if (input$radio_2D_color == "Reserve Status") ReserveStatus else IslandName)
})
# 2-D nMDS ordination plot: one point per site, shaped by reserve status
# and colored by the user-chosen grouping; site codes labeled below points.
output$Two_D <- renderPlot({
ggplot(data = Two_D_data(), aes(x = `Dim 1`, y = `Dim 2`)) +
geom_point(size = 4, aes(shape = ReserveStatus, color = Color)) +
geom_text(size = 3, vjust = 2, aes(label = SiteCode)) +
# Optional overlays kept for experimentation:
# stat_ellipse(aes(color = IslandName), level = 0.95) +
# stat_stars(aes(color = ReserveStatus)) +
scale_colour_manual(values = Island_Colors) +
# Equal axis scaling preserves ordination distances; x reversed for
# the preferred orientation.
coord_fixed() +
scale_x_reverse() +
# coord_flip() +
labs(title = Two_D_data()$SurveyYear, color = input$radio_2D_color, shape = "Reserve Status") +
nMDS_theme()
}) %>%
shiny::bindCache(Two_D_data(), cache = cachem::cache_disk("./cache/2d-cache"))
}
{ # 3D ----
# 3-D nMDS points for the selected year, with a `Color` column derived from
# the chosen grouping.  Mirrors Two_D_data(): the four original branches
# shared their filter/mutate, so only the varying pieces are computed.
Three_D_data <- reactive({
  all_years <- input$radio_3D_years == "All Years (Fewer Species)"
  # Two ordinations exist (full span with fewer species vs. post-2004 with
  # all species); each has its own year slider.
  type <- if (all_years) '3D_All' else '3D_2005'
  year <- if (all_years) input$slider3d_all else input$slider3d_2005
  nMDS %>%
    dplyr::filter(SurveyYear == year, Type == type) %>%
    dplyr::mutate(
      Color = if (input$radio_3D_color == "Reserve Status") ReserveStatus else IslandName)
})
# Interactive 3-D nMDS scatter (plotly): markers shaped by reserve status,
# colored by the chosen grouping, with site names on hover and codes as text.
output$Three_D <- renderPlotly({
plotly::plot_ly(Three_D_data(), x = ~`Dim 1`, y = ~`Dim 2`, z = ~`Dim 3`,
# frame = ~SurveyYear,
text = ~SiteName, hoverinfo = "text",
color = ~Color, colors = Island_Colors) %>%
plotly::add_markers(symbol = ~ReserveStatus,
symbols = c('Inside' = "cross-open", 'Outside' = "square")) %>%
plotly::add_text(text = ~SiteCode, showlegend = FALSE) %>%
plotly::layout(title = list(text = paste(Three_D_data()$SurveyYear)),
scene = list(xaxis = list(title = 'X'),
yaxis = list(title = 'Y'),
zaxis = list(title = 'Z')))
# Year-by-year animation kept for experimentation:
# %>%
# plotly::animation_opts(1500, easing = "linear")
}) %>%
shiny::bindCache(Three_D_data(), cache = cachem::cache_disk("./cache/3d-cache"))
}
}
{ # Variable Importance ----
{ # Images -----
output$cucumba <- renderImage({list(
src = "www/Photos/Indicator_Species/11007.jpg",
height = "100%")}, delete = FALSE)
output$lobsta <- renderImage({list(
src = "www/Photos/Indicator_Species/8001.jpg",
height = "100%")}, delete = FALSE)
output$rose <- renderImage({list(
src = "www/Photos/Indicator_Species/6002.jpg",
height = "100%")}, delete = FALSE)
output$kelkel <- renderImage({list(
src = "www/Photos/Indicator_Species/9006.jpg",
height = "100%")}, delete = FALSE)
}
{ # Random Forest Models ----
VI_Server(id = "reserve")
VI_Server(id = "island")
}
{ # Indicator Species Analysis ----
}
}
{ # Biomass and Density ----
{ # Images ----
output$Biomass_pic_1 <- renderImage({list(
src = "www/Photos/Kelp_Forest_Scenes/Kenan_Chan/1 (4).jpg",
height = "100%")}, delete = FALSE)
output$Biomass_pic_2 <- renderImage({list(
src = "www/Photos/Kelp_Forest_Scenes/Kenan_Chan/1 (10).jpg",
height = "100%")}, delete = FALSE)
output$Biomass_pic_3 <- renderImage({list(
src = "www/Photos/Kelp_Forest_Scenes/Shaun_Wolfe/1 (1).jpg",
height = "100%")}, delete = FALSE)
output$Biomass_pic_4 <- renderImage({list(
src = "www/Photos/Kelp_Forest_Scenes/Shaun_Wolfe/1 (3).jpg",
height = "100%")}, delete = FALSE)
output$Biomass_pic_5 <- renderImage({list(
src = "www/Photos/Kelp_Forest_Scenes/Shaun_Wolfe/1 (5).jpg",
height = "100%")}, delete = FALSE)
output$Density_pic_1 <- renderImage({list(
src = "www/Photos/Kelp_Forest_Scenes/Brett_Seymour/1 (3).jpg",
height = "100%")}, delete = FALSE)
output$Density_pic_2 <- renderImage({list(
src = "www/Photos/Kelp_Forest_Scenes/Brett_Seymour/1 (6).jpg",
height = "100%")}, delete = FALSE)
output$Density_pic_3 <- renderImage({list(
src = "www/Photos/Kelp_Forest_Scenes/Brett_Seymour/1 (8).jpg",
height = "100%")}, delete = FALSE)
output$Density_pic_4 <- renderImage({list(
src = "www/Photos/Kelp_Forest_Scenes/Shaun_Wolfe/1 (4).jpg",
height = "100%")}, delete = FALSE)
}
{ # Time Series ----
Time_Server(id = "biomass")
Time_Server(id = "density")
}
{ # Ratios ----
Ratio_Server(id = 'biomass_ratio')
Ratio_Server(id = 'density_ratio')
}
{ # Map Bubbles ----
bubbles_Server(id = "biomass_bubbles")
bubbles_Server(id = "density_bubbles")
}
}
{ # Size Frequencies ----
{ # Images ----
output$Size_pic_1 <- renderImage({list(
src = "www/Photos/Protocols/nhsf/nhsf (1).jpg",
height = "100%")}, delete = FALSE)
output$Size_pic_2 <- renderImage({list(
src = "www/Photos/Protocols/nhsf/nhsf (4).jpg",
height = "100%")}, delete = FALSE)
}
{ # Box Plots ----
Site <- Site_Selector_Server(id = "sizes")
# Raw size measurements for the chosen taxonomic category.
Size_Data <- reactive({
  category <- input$size_category
  if (category == "Invertebrates") {
    # All benthic invertebrates except giant kelp and two excluded species,
    # restricted to surveys after 1990.  (filter(a, b & c) and
    # filter(a, b, c) are equivalent; the conditions are listed separately
    # here for clarity.)
    Benthic_Sizes %>%
      dplyr::filter(ScientificName != "Macrocystis pyrifera",
                    CommonName != "Coronado urchin",
                    CommonName != "Chestnut Cowrie",
                    SurveyYear > 1990)
  } else if (category == "Algae") {
    Benthic_Sizes %>% dplyr::filter(ScientificName == "Macrocystis pyrifera")
  } else if (category == "Fish") {
    Fish_Sizes
  }
})
output$size_site_year <- renderUI({
if (input$size_site_radio == "One Site") {
Site_Selector_UI(id = "sizes")
}
else if (input$size_site_radio == "All Sites") {
tagList(
sliderInput(inputId = "size_year_slider", label = "Year:",
min = min(Size_Year_Species()$SurveyYear),
max = max(Size_Year_Species()$SurveyYear),
value = min(Size_Year_Species()$SurveyYear),
sep = "", step = 1, animate = TRUE),
h5("Animation Note: Animals with many measurements take a long time to plot. ",
"Plots are cached within a session. ",
"Run the animation once and allow all plots to complete (watch year in top left corner). ",
"Re-run to show smooth animation from cached plots.")
)
}
})
Size_Year_Species <- reactive({Size_Data() %>% dplyr::filter(CommonName == input$size_species)})
# Site ordering for the all-sites size plot.  The program grew over time
# (fewer sites before 2001, more through 2004, all sites thereafter).
# The slider value is a single number, so use an else-if chain instead of
# the original vectorized `&` on a scalar condition.
Site_Levels <- reactive({
  year <- input$size_year_slider
  if (year < 2001) {
    Site_Info %>% dplyr::filter(SiteNumber < 17) %>% dplyr::arrange(Longitude)
  } else if (year < 2005) {
    Site_Info %>% dplyr::filter(SiteNumber < 22) %>% dplyr::arrange(Longitude)
  } else {
    Site_Info %>% dplyr::arrange(Longitude)
  }
})
Size_Year_Data <- reactive({
Size_Year_Species() %>%
dplyr::filter(SurveyYear == input$size_year_slider) %>%
dplyr::mutate(SiteCode = factor(SiteCode, levels = Site_Levels()$SiteCode))
})
Size_Site_Data <- reactive(Size_Data() %>% dplyr::filter(SiteName == Site()$SiteName))
species_choice <- reactive({
if (input$size_site_radio == "One Site") {levels(factor(Size_Site_Data()$CommonName))}
else if (input$size_site_radio == "All Sites") {levels(factor(Size_Data()$CommonName))}
})
output$size_species_UI <- renderUI({
selectInput(inputId = "size_species", label = "Species:", choices = species_choice())
})
Size_Site_Data_Subset <- reactive({Size_Site_Data() %>% dplyr::filter(CommonName == input$size_species)})
# Box plots of one species' size distribution at one site across survey years,
# with mean-size points and per-year sample-count labels pinned to the axis.
output$size_site_plot <- renderPlot({
  d <- Size_Site_Data_Subset()  # evaluate the reactive once per draw
  ggplot2::ggplot() +
    ggplot2::geom_boxplot(data = d, width = 150,
                          aes(x = Date, y = Size, group = SurveyYear, color = CommonName)) +
    ggplot2::geom_point(data = d, size = 1, color = "black",
                        aes(x = Date, y = Mean_Size, group = SurveyYear)) +
    # Map the label from the layer's own data; `data$col` inside aes() can
    # misalign rows if ggplot reorders or subsets the data.
    ggplot2::geom_label(data = d, size = 3, hjust = .5, vjust = 0,
                        aes(x = Date, y = -Inf, label = Total_Count)) +
    ggplot2::geom_hline(yintercept = 0) +
    ggplot2::scale_y_continuous(limits = c(0, NA), expand = expansion(mult = c(0.1, 0))) +
    ggplot2::scale_x_date(date_labels = "%Y", breaks = unique(d$Date),
                          expand = expansion(mult = c(0.01, 0.01)),
                          limits = c(min(d$Date) - 150, max(d$Date) + 150)) +
    ggplot2::labs(title = d$ScientificName,
                  subtitle = glue("{d$IslandName} {d$SiteName}"),
                  color = "Common Name", x = "Year", y = "Size Distribution") +
    ggplot2::scale_color_manual(values = SpeciesColor, limits = force) +
    Boxplot_theme()
}) %>%
  shiny::bindCache(Size_Site_Data_Subset(), cache = cachem::cache_disk("./cache/sizes-cache"))
# Box plots of one species' sizes at every site for a single survey year;
# empty site levels are kept so the x-axis stays stable across years.
output$size_year_plot <- renderPlot({
  d <- Size_Year_Data()  # evaluate the reactive once per draw
  ggplot2::ggplot() +
    ggplot2::geom_boxplot(data = d, aes(x = SiteCode, y = Size, group = SiteCode, color = CommonName)) +
    ggplot2::geom_point(data = d, size = 1, color = "black", aes(x = SiteCode, y = Mean_Size, group = SurveyYear)) +
    # Label column mapped from the layer data itself (not an external vector).
    ggplot2::geom_label(data = d, size = 3, hjust = .5, vjust = 0, aes(x = SiteCode, y = -Inf, label = Total_Count)) +
    ggplot2::geom_hline(yintercept = 0) +
    ggplot2::scale_y_continuous(limits = c(0, NA), expand = expansion(mult = c(0.1, 0.01))) +
    ggplot2::scale_x_discrete(drop = FALSE) +
    ggplot2::labs(title = d$SurveyYear, color = "Common Name", x = NULL, y = "Size Distribution",
                  caption = "Sites arranged by longitude (west to east)") +
    ggplot2::scale_color_manual(values = SpeciesColor, limits = force) +
    Boxplot_theme()
}) %>%
  shiny::bindCache(Size_Year_Data(), cache = cachem::cache_disk("./cache/sizes-cache"))
}
{ # ARMs ----
ARM_Data <- reactive({
ARM_Sizes
# %>%
# dplyr::filter(Size_mm == input$Size_Limit)
})
# Mode-dependent control: a site picker in one-site mode, or an animated
# year slider (bounded by the selected species' survey years) in all-sites mode.
output$arm_site_year <- renderUI({
  if (input$arm_site_radio == "One Site") {
    selectInput(inputId = "ARM_Sites",
                label = "Site:",
                # TRUE, not the reassignable shorthand T.
                choices = dplyr::arrange(dplyr::filter(Site_Info, ARMs == TRUE), Longitude)$SiteName)
  }
  else if (input$arm_site_radio == "All Sites") {
    tagList(
      sliderInput(inputId = "arm_year_slider", label = "Year:",
                  min = min(ARM_Year_Species()$SurveyYear),
                  max = max(ARM_Year_Species()$SurveyYear),
                  value = min(ARM_Year_Species()$SurveyYear),
                  sep = "", step = 1, animate = TRUE),
      h5("Animation Note: Animals with many measurements take a long time to plot. ",
         "Plots are cached within a session. ",
         "Run the animation once and allow all plots to complete (watch year in top left corner). ",
         "Re-run to show smooth animation from cached plots.")
    )
  }
})
ARM_Year_Species <- reactive({ARM_Data() %>% dplyr::filter(CommonName == input$arm_species)})
# Site ordering for the ARM all-sites plot (program grew over time:
# fewer sites before 2001, more through 2004, all sites thereafter).
# The slider value is a single number, so use an else-if chain instead of
# the original vectorized `&` on a scalar condition.
ARM_Site_Levels <- reactive({
  year <- input$arm_year_slider
  if (year < 2001) {
    Site_Info %>% dplyr::filter(SiteNumber < 17) %>% dplyr::arrange(Longitude)
  } else if (year < 2005) {
    Site_Info %>% dplyr::filter(SiteNumber < 22) %>% dplyr::arrange(Longitude)
  } else {
    Site_Info %>% dplyr::arrange(Longitude)
  }
})
ARM_Size_Year_Data <- reactive({
ARM_Year_Species() %>%
dplyr::filter(SurveyYear == input$arm_year_slider) %>%
dplyr::mutate(SiteCode = factor(SiteCode, levels = ARM_Site_Levels()$SiteCode))
})
ARM_Size_Site_Data <- reactive(ARM_Data() %>% dplyr::filter(SiteName == input$ARM_Sites))
arm_species_choice <- reactive({
if (input$arm_site_radio == "One Site") {levels(factor(ARM_Size_Site_Data()$CommonName))}
else if (input$arm_site_radio == "All Sites") {levels(factor(ARM_Data()$CommonName))}
})
output$arm_species_UI <- renderUI({
selectInput(inputId = "arm_species", label = "Species:", choices = arm_species_choice())
})
ARM_Size_Site_Data_Subset <- reactive({ARM_Size_Site_Data() %>% dplyr::filter(CommonName == input$arm_species)})
# ARM version of the one-site size-distribution box plot (sizes in mm).
output$arm_site_plot <- renderPlot({
  d <- ARM_Size_Site_Data_Subset()  # evaluate the reactive once per draw
  ggplot2::ggplot() +
    ggplot2::geom_boxplot(data = d, width = 150,
                          aes(x = Date, y = Size_mm, group = SurveyYear, color = CommonName)) +
    ggplot2::geom_point(data = d, size = 1, color = "black",
                        aes(x = Date, y = Mean_Size, group = SurveyYear)) +
    # Map the label from the layer's own data; `data$col` inside aes() can
    # misalign rows if ggplot reorders or subsets the data.
    ggplot2::geom_label(data = d, size = 3, hjust = .5, vjust = 0,
                        aes(x = Date, y = -Inf, label = Total_Count)) +
    ggplot2::geom_hline(yintercept = 0) +
    ggplot2::scale_y_continuous(limits = c(0, NA), expand = expansion(mult = c(0.1, 0))) +
    ggplot2::scale_x_date(date_labels = "%Y", breaks = unique(d$Date),
                          expand = expansion(mult = c(0.01, 0.01)),
                          limits = c(min(d$Date) - 150, max(d$Date) + 150)) +
    ggplot2::labs(title = d$ScientificName,
                  subtitle = glue("{d$IslandName} {d$SiteName}"),
                  color = "Common Name", x = "Year", y = "Size Distribution") +
    ggplot2::scale_color_manual(values = SpeciesColor, limits = force) +
    Boxplot_theme()
}) %>%
  shiny::bindCache(ARM_Size_Site_Data_Subset(), cache = cachem::cache_disk("./cache/sizes-cache"))
# ARM version of the all-sites, single-year box plot (sizes in mm).
output$arm_year_plot <- renderPlot({
  d <- ARM_Size_Year_Data()  # evaluate the reactive once per draw
  ggplot2::ggplot() +
    ggplot2::geom_boxplot(data = d, aes(x = SiteCode, y = Size_mm, group = SiteCode, color = CommonName)) +
    ggplot2::geom_point(data = d, size = 1, color = "black", aes(x = SiteCode, y = Mean_Size, group = SurveyYear)) +
    # Label column mapped from the layer data itself (not an external vector).
    ggplot2::geom_label(data = d, size = 3, hjust = .5, vjust = 0, aes(x = SiteCode, y = -Inf, label = Total_Count)) +
    ggplot2::geom_hline(yintercept = 0) +
    ggplot2::scale_y_continuous(limits = c(0, NA), expand = expansion(mult = c(0.1, 0.01))) +
    ggplot2::scale_x_discrete(drop = FALSE) +
    ggplot2::labs(title = d$SurveyYear, color = "Common Name", x = NULL, y = "Size Distribution",
                  caption = "Sites arranged by longitude (west to east)") +
    ggplot2::scale_color_manual(values = SpeciesColor, limits = force) +
    Boxplot_theme()
}) %>%
  shiny::bindCache(ARM_Size_Year_Data(), cache = cachem::cache_disk("./cache/sizes-cache"))
}
}
{ # Reports -----
output$Annual_Report <- renderUI({
tags$iframe(style="height:750px; width:100%; scrolling=yes", src = glue("Annual_Reports/{input$Report}.pdf"))
})
Text_Data <- reactive(Text %>% dplyr::filter(Year == input$Cloud))
output$cloud_plot <- renderPlot(bg = "black", {
wordcloud::wordcloud(
words = Text_Data()$word,
freq = Text_Data()$n, min.freq = 1, scale = c(4, .75),
max.words = input$cloud_n, random.order = FALSE, rot.per = 0.25,
colors = brewer.pal(8, "Dark2"))
}) %>%
shiny::bindCache(input$cloud_n, Text_Data(), cache = cachem::cache_disk("./cache/word-cache"))
output$Handbook <- renderUI({
tags$iframe(style="height:750px; width:100%; scrolling=yes", src = glue("Handbook/Full_Versions/{input$old_handy}.pdf"))
})
output$ReviewsOutput <- renderUI({
tags$iframe(style="height:750px; width:100%; scrolling=yes", src = glue("Handbook/Reviews/{input$reviews}.pdf"))
})
output$CollaborativeOutput <- renderUI({
tags$iframe(style="height:750px; width:100%; scrolling=yes", src = glue("Handbook/Collaborative_Reports/{input$collab}.pdf"))
})
}
}
# TODO add kelp and gorgonian species guide and protocol guide
# TODO add shell size frequency guides
|
# Download the dslabs murders dataset into data/murders.csv
url <- "https://raw.githubusercontent.com/rafalab/dslabs/master/inst/extdata/murders.csv"
dest_file <- "data/murders.csv"
# download.file() errors if the destination directory does not exist,
# so create it first (no-op when it is already there).
if (!dir.exists(dirname(dest_file))) {
  dir.create(dirname(dest_file), recursive = TRUE)
}
download.file(url, destfile = dest_file)
| /BjarDatos-DataR.R | no_license | Franciscopan/murders | R | false | false | 179 | r | #Descargando fichero
# Download the dslabs murders dataset into data/murders.csv
url <- "https://raw.githubusercontent.com/rafalab/dslabs/master/inst/extdata/murders.csv"
dest_file <- "data/murders.csv"
# download.file() errors if the destination directory does not exist,
# so create it first (no-op when it is already there).
if (!dir.exists(dirname(dest_file))) {
  dir.create(dirname(dest_file), recursive = TRUE)
}
download.file(url, destfile = dest_file)
|
#============================ Diet =============================================
#------------------ available for SMH ---------- ------------------------------
# Deduplicate the SMH diet table and export it as CSV; also inspect MSH.
library(gemini)
# NOTE(review): lib.pa() is a gemini helper -- presumably attaches the
# project's standard package set; confirm in the gemini package.
lib.pa()
rm(list = ls())
# readg() is a gemini data loader; assumed to return the SMH diet table.
smh <- readg(smh, diet)
names(smh)
# How many fully duplicated rows are present?
sum(duplicated(smh))
smh <- smh[!duplicated(smh)]
# Per-column count of missing values (quick QC check).
apply(smh, MARGIN = 2, FUN = function(x)sum(is.na(x)))
# Export deduplicated table; NAs written as empty strings.
write.csv(smh, "H:/GEMINI/Data/SMH/Diet/smh.diet.csv", row.names = F, na = "")
# Same missingness check for the MSH diet table (not exported here).
msh <- readg(msh, diet)
apply(msh, 2, function(x)sum(is.na(x)))
| /unify_data/diet.R | no_license | yishan-guo/gemini | R | false | false | 490 | r | #============================ Diet =============================================
#------------------ available for SMH ---------- ------------------------------
# Deduplicate the SMH diet table and export it as CSV; also inspect MSH.
library(gemini)
# NOTE(review): lib.pa() is a gemini helper -- presumably attaches the
# project's standard package set; confirm in the gemini package.
lib.pa()
rm(list = ls())
# readg() is a gemini data loader; assumed to return the SMH diet table.
smh <- readg(smh, diet)
names(smh)
# How many fully duplicated rows are present?
sum(duplicated(smh))
smh <- smh[!duplicated(smh)]
# Per-column count of missing values (quick QC check).
apply(smh, MARGIN = 2, FUN = function(x)sum(is.na(x)))
# Export deduplicated table; NAs written as empty strings.
write.csv(smh, "H:/GEMINI/Data/SMH/Diet/smh.diet.csv", row.names = F, na = "")
# Same missingness check for the MSH diet table (not exported here).
msh <- readg(msh, diet)
apply(msh, 2, function(x)sum(is.na(x)))
|
# Two-panel figure illustrating the p-value of a Poisson test via the
# chi-square relationship: the Poisson CDF at count l with mean lambda.0
# equals pchisq(2*l, 2*lambda.0) (and the upper tail uses 2*(lambda.0+1) df).
rm(list = ls())
lambda.0 <- 6.5 ; l <- 2.5 ; alpha <- 0.1
# Acceptance-region bounds x.L / x.H on the chi-square/2 scale.
round(c(lambda.0 = lambda.0,
  x.L = qchisq(alpha / 2, 2 * lambda.0) / 2,
  x.H = qchisq(1 - alpha / 2, 2 * (lambda.0 + 1)) / 2, l = l,
  alpha = alpha / 2), 3)
# Lower- and upper-tail probabilities of observing l; the p-value is the
# smaller tail (compared against alpha/2 below).
(lower.p <- pchisq(2 * l, 2 * lambda.0))
(upper.p <- 1 - pchisq(2 * l, 2 * (lambda.0 + 1)))
p.value <- min(lower.p, upper.p)
round(c(lambda.0 = lambda.0, l = l, alpha = alpha / 2,
  p.value = p.value), 3)
x <- seq(0, 40, length = 201)
# lower-tailed density
# openg(4.5, 2.5)
par(mfrow = c(1, 2))
# Left panel: full density with significance and p-value regions shaded.
plot(x, dchisq(x, 2 * lambda.0) / 2, type = 'l', ylab = 'density', axes = FALSE)
axis(2)
abline(h = 0)
# lower significance polygon
x.low <- seq(0, qchisq(alpha / 2, 2 * lambda.0) / 2, length = 201)
x.poly <- c(0, x.low, x.low[201], 0)
y.low <- dchisq(x.low, 2 * lambda.0) / 2
y.poly <- c(0, y.low, 0, 0)
polygon(x.poly, y.poly, col = 'gray90')
# p-value polygon
x.low <- seq(0, l, length = 201)
x.poly <- c(0, x.low, x.low[201], 0)
y.low <- dchisq(x.low, 2 * lambda.0) / 2
y.poly <- c(0, y.low, 0, 0)
polygon(x.poly, y.poly, col = 'black')
axis(1, pos = 0, at = c(0, l, 10, 20, 30, 40),
  labels = c(0, expression(italic(l[0])), 10, 20, 30, 40))
# upper significance polygon
x.low <- seq(qchisq(1 - alpha / 2, 2 * (lambda.0 + 1)) / 2, 60, length = 201)
x.poly <- c(x.low[1], x.low[1], x.low, x.low[1])
y.low <- dchisq(x.low, 2 * (lambda.0 + 1)) / 2
y.poly <- c(0, y.low[1], y.low, 0)
polygon(x.poly, y.poly, col = 'gray90')
# Dashed curve: upper-tail density with 2*(lambda.0 + 1) df.
lines(x, dchisq(x, 2 * (lambda.0 + 1)) / 2, lty = 2)
# second panel
# Right panel: zoom near the lower tail so l0 and x.L are distinguishable.
plot(x, dchisq(x, 2 * lambda.0) / 2, type = 'l', ylab = '',
  xlim = c(1, 4.1), ylim = c(0, 0.0025), axes = FALSE)
axis(2)
abline(h = 0)
x.low <- seq(0, qchisq(alpha / 2, 2 * lambda.0) / 2, length = 201)
x.poly <- c(0, x.low, x.low[201], -0)
y.low <- dchisq(x.low, 2 * lambda.0) / 2
y.poly <- c(0, y.low, 0, 0)
polygon(x.poly, y.poly, col = 'gray90')
x.low <- seq(0, l, length = 201)
x.poly <- c(0, x.low, x.low[201], -0)
y.low <- dchisq(x.low, 2 * lambda.0) / 2
y.poly <- c(0, y.low, 0, 0)
polygon(x.poly, y.poly, col = 'black')
lines(x, dchisq(x, 2 * (lambda.0 + 1)) / 2, lty = 2)
axis(1, pos = 0, at = c(1, l, qchisq(alpha / 2, 2 * lambda.0) / 2, 4),
  labels = c(1, expression(italic(l[0])), expression(italic(x[L])), 4))
# saveg('p-value-chi-square', 4.5, 2.5)
| /scripts/ch10/p-value-chi-square.r | no_license | StefanoCiotti/MyProgectsFirst | R | false | false | 2,295 | r | rm(list = ls())
# Two-panel figure illustrating the p-value of a Poisson test via the
# chi-square relationship: the Poisson CDF at count l with mean lambda.0
# equals pchisq(2*l, 2*lambda.0) (and the upper tail uses 2*(lambda.0+1) df).
lambda.0 <- 6.5 ; l <- 2.5 ; alpha <- 0.1
# Acceptance-region bounds x.L / x.H on the chi-square/2 scale.
round(c(lambda.0 = lambda.0,
  x.L = qchisq(alpha / 2, 2 * lambda.0) / 2,
  x.H = qchisq(1 - alpha / 2, 2 * (lambda.0 + 1)) / 2, l = l,
  alpha = alpha / 2), 3)
# Lower- and upper-tail probabilities of observing l; the p-value is the
# smaller tail (compared against alpha/2 below).
(lower.p <- pchisq(2 * l, 2 * lambda.0))
(upper.p <- 1 - pchisq(2 * l, 2 * (lambda.0 + 1)))
p.value <- min(lower.p, upper.p)
round(c(lambda.0 = lambda.0, l = l, alpha = alpha / 2,
  p.value = p.value), 3)
x <- seq(0, 40, length = 201)
# lower-tailed density
# openg(4.5, 2.5)
par(mfrow = c(1, 2))
# Left panel: full density with significance and p-value regions shaded.
plot(x, dchisq(x, 2 * lambda.0) / 2, type = 'l', ylab = 'density', axes = FALSE)
axis(2)
abline(h = 0)
# lower significance polygon
x.low <- seq(0, qchisq(alpha / 2, 2 * lambda.0) / 2, length = 201)
x.poly <- c(0, x.low, x.low[201], 0)
y.low <- dchisq(x.low, 2 * lambda.0) / 2
y.poly <- c(0, y.low, 0, 0)
polygon(x.poly, y.poly, col = 'gray90')
# p-value polygon
x.low <- seq(0, l, length = 201)
x.poly <- c(0, x.low, x.low[201], 0)
y.low <- dchisq(x.low, 2 * lambda.0) / 2
y.poly <- c(0, y.low, 0, 0)
polygon(x.poly, y.poly, col = 'black')
axis(1, pos = 0, at = c(0, l, 10, 20, 30, 40),
  labels = c(0, expression(italic(l[0])), 10, 20, 30, 40))
# upper significance polygon
x.low <- seq(qchisq(1 - alpha / 2, 2 * (lambda.0 + 1)) / 2, 60, length = 201)
x.poly <- c(x.low[1], x.low[1], x.low, x.low[1])
y.low <- dchisq(x.low, 2 * (lambda.0 + 1)) / 2
y.poly <- c(0, y.low[1], y.low, 0)
polygon(x.poly, y.poly, col = 'gray90')
# Dashed curve: upper-tail density with 2*(lambda.0 + 1) df.
lines(x, dchisq(x, 2 * (lambda.0 + 1)) / 2, lty = 2)
# second panel
# Right panel: zoom near the lower tail so l0 and x.L are distinguishable.
plot(x, dchisq(x, 2 * lambda.0) / 2, type = 'l', ylab = '',
  xlim = c(1, 4.1), ylim = c(0, 0.0025), axes = FALSE)
axis(2)
abline(h = 0)
x.low <- seq(0, qchisq(alpha / 2, 2 * lambda.0) / 2, length = 201)
x.poly <- c(0, x.low, x.low[201], -0)
y.low <- dchisq(x.low, 2 * lambda.0) / 2
y.poly <- c(0, y.low, 0, 0)
polygon(x.poly, y.poly, col = 'gray90')
x.low <- seq(0, l, length = 201)
x.poly <- c(0, x.low, x.low[201], -0)
y.low <- dchisq(x.low, 2 * lambda.0) / 2
y.poly <- c(0, y.low, 0, 0)
polygon(x.poly, y.poly, col = 'black')
lines(x, dchisq(x, 2 * (lambda.0 + 1)) / 2, lty = 2)
axis(1, pos = 0, at = c(1, l, qchisq(alpha / 2, 2 * lambda.0) / 2, 4),
  labels = c(1, expression(italic(l[0])), expression(italic(x[L])), 4))
# saveg('p-value-chi-square', 4.5, 2.5)
|
## This code is to reproduce the graphs in the PDF presentations
# NOTE(review): hard-coded personal path; run from this directory or adjust.
setwd("/Users/ggmhf/Desktop/Teaching/Multilevel Short Course")
set.seed(999)
# Simulate 5 groups x 5 observations.
#   Xb: between-group component (the group's integer level of x)
#   Xw: within-group deviation around the group level
#   u:  group-level random effect; e: observation-level noise
a <- 0
Xb <- rep(1:5, each=5)
Xw <- rep(-2:2, 5)/5
x <- Xw + Xb
u <- rep(rnorm(5), each=5)/5
e <- rnorm(25)
# By construction the between slope is +1 (Xb) and the within slope is -1 (-Xw),
# so the pooled regression below mixes two opposite-signed effects.
y <- a + Xb - Xw + u + e
summary(lm(y~x))
# --- Figures BW1-BW62: pooled fit vs. group-wise fits -----------------------
# BW1: raw scatter, no grouping shown.
pdf("BW1.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, xlim=c(-1,6), ylim=c(-1,6), pch=19)
dev.off()
# BW2: adds the pooled OLS line (positive slope).
pdf("BW2.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, xlim=c(-1,6), ylim=c(-1,6), pch=19)
segments(x0=min(x), y0=min(range(predict(lm(y~x)))), x1=max(x), y1=max(range(predict(lm(y~x)))), lwd=2)
dev.off()
# BW3: same scatter colored by group.
pdf("BW3.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19)
dev.off()
# BW4: per-group OLS lines (negative within-group slopes).
pdf("BW4.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19)
by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) segments(x0=min(Z$x), y0=max(range(predict(lm(Z$y~Z$x)))), x1=max(Z$x), y1=min(range(predict(lm(Z$y~Z$x)))), col=which(unique(Z$u)==unique(u))))
dev.off()
# BW5: group lines plus large dots at each group's (mean x, mean y).
pdf("BW5.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19)
by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) segments(x0=min(Z$x), y0=max(range(predict(lm(Z$y~Z$x)))), x1=max(Z$x), y1=min(range(predict(lm(Z$y~Z$x)))), col=which(unique(Z$u)==unique(u))))
points(do.call(rbind, by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) c(mean(Z$x), mean(Z$y)))), col=unique(Xb), pch=19, cex=2)
dev.off()
# BW5b: scatter + group means, no group fit lines.
pdf("BW5b.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19)
points(do.call(rbind, by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) c(mean(Z$x), mean(Z$y)))), col=unique(Xb), pch=19, cex=2)
dev.off()
# BW5c: group means only (type="n" suppresses the raw points).
pdf("BW5c.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19, type="n")
points(do.call(rbind, by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) c(mean(Z$x), mean(Z$y)))), col=unique(Xb), pch=19, cex=2)
dev.off()
# BW6: group lines, group means, and the between-group regression line
# fitted through the group means.
pdf("BW6.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19)
by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) segments(x0=min(Z$x), y0=max(range(predict(lm(Z$y~Z$x)))), x1=max(Z$x), y1=min(range(predict(lm(Z$y~Z$x)))), col=which(unique(Z$u)==unique(u))))
points(do.call(rbind, by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) c(mean(Z$x), mean(Z$y)))), col=unique(Xb), pch=19, cex=2)
segments(x0=min(unique(Xb)), y0=min(predict(lm(as.numeric(by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) mean(Z$y)))~unique(Xb)))), x1=max(unique(Xb)), y1=max(predict(lm(as.numeric(by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) mean(Z$y)))~unique(Xb)))), lwd=2)
dev.off()
# BW6b: between-group line and means only.
pdf("BW6b.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19, type="n")
points(do.call(rbind, by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) c(mean(Z$x), mean(Z$y)))), col=unique(Xb), pch=19, cex=2)
segments(x0=min(unique(Xb)), y0=min(predict(lm(as.numeric(by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) mean(Z$y)))~unique(Xb)))), x1=max(unique(Xb)), y1=max(predict(lm(as.numeric(by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) mean(Z$y)))~unique(Xb)))), lwd=2)
dev.off()
# BW62: side-by-side comparison of pooled fit vs. group/between fits.
pdf("BW62.pdf", height=4, width=9)
par(cex.lab=1.5)
par(cex.axis=1.5)
par(mfrow=c(1,2))
plot(x, y, xlim=c(-1,6), ylim=c(-1,6), pch=19)
segments(x0=min(x), y0=min(range(predict(lm(y~x)))), x1=max(x), y1=max(range(predict(lm(y~x)))), lwd=2)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19)
by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) segments(x0=min(Z$x), y0=max(range(predict(lm(Z$y~Z$x)))), x1=max(Z$x), y1=min(range(predict(lm(Z$y~Z$x)))), col=which(unique(Z$u)==unique(u))))
points(do.call(rbind, by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) c(mean(Z$x), mean(Z$y)))), col=unique(Xb), pch=19, cex=2)
segments(x0=min(unique(Xb)), y0=min(predict(lm(as.numeric(by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) mean(Z$y)))~unique(Xb)))), x1=max(unique(Xb)), y1=max(predict(lm(as.numeric(by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) mean(Z$y)))~unique(Xb)))), lwd=2)
dev.off()
# Build a data frame with group means (xM, yM) merged in, plus the
# within-group deviations xD = x - xM and yD = y - yM.
dat <- data.frame(a, Xb, Xw, x, u, e, y)
by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) c(mean(Z$x), mean(Z$y)))
means <- by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) c(mean(Z$x), mean(Z$y)))
means <- data.frame(Xb=names(means), do.call(rbind, means))
names(means)[2:3] <- c("xM", "yM")
dat <- merge(dat, means)
dat$xD <- dat$x-dat$xM
dat$yD <- dat$y-dat$yM
# Fixed-effects (dummy) regression vs. regression on demeaned variables:
# both recover the same within-group slope on x.
summary(lm(y ~ x + as.factor(Xb), dat))
summary(lm(yD ~ xD, dat)) # same beta coefficient
# --- Figures BW7-BW14: visualizing the demeaning steps ----------------------
# BW7: scatter with group means and the y = 0 reference line.
pdf("BW7.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19)
abline(h=0, col="lightgrey")
points(do.call(rbind, by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) c(mean(Z$x), mean(Z$y)))), col=unique(Xb), pch=19, cex=2)
dev.off()
# BW8: arrows showing each group mean being pulled down to y = 0.
pdf("BW8.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19)
abline(h=0, col="lightgrey")
points(do.call(rbind, by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) c(mean(Z$x), mean(Z$y)))), col=unique(Xb), pch=19, cex=2)
by(dat, dat$Xb, function(Z) arrows(x0=min(Z$Xb), x1=min(Z$Xb), y0=min(Z$yM), y1=0, col=min(Z$Xb), lwd=2))
dev.off()
# BW9: y demeaned (yD) plotted against raw x, with group fit lines.
pdf("BW9.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(dat$x, dat$yD, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-3.5,3.5), pch=19, ylab="y", xlab="x")
abline(h=0, col="lightgrey")
points(do.call(rbind, by(dat, dat$Xb, function(Z) c(mean(Z$x), mean(Z$yD)))), col=unique(Xb), pch=19, cex=2)
by(dat, dat$Xb, function(Z) segments(x0=min(Z$x), y0=max(range(predict(lm(Z$yD~Z$x)))), x1=max(Z$x), y1=min(range(predict(lm(Z$yD~Z$x)))), col=which(unique(Z$u)==unique(u))))
dev.off()
# BW10: same, with arrows showing x means being pulled to zero.
pdf("BW10.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(dat$x, dat$yD, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-3.5,3.5), pch=19, ylab="y", xlab="x")
abline(h=0, col="lightgrey")
points(do.call(rbind, by(dat, dat$Xb, function(Z) c(mean(Z$x), mean(Z$yD)))), col=unique(Xb), pch=19, cex=2)
by(dat, dat$Xb, function(Z) segments(x0=min(Z$x), y0=max(range(predict(lm(Z$yD~Z$x)))), x1=max(Z$x), y1=min(range(predict(lm(Z$yD~Z$x)))), col=which(unique(Z$u)==unique(u))))
by(dat, dat$Xb, function(Z) arrows(x0=min(Z$Xb), x1=0, y0=(3-min(Z$Xb))/5, y1=(3-min(Z$Xb))/5, col=min(Z$Xb), lwd=2))
dev.off()
# BW11: fully demeaned data (xD vs yD); all groups now overlap at the origin.
pdf("BW11.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(dat$xD, dat$yD, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-3.5,3.5), pch=19, ylab="y", xlab="x")
abline(h=0, col="lightgrey")
by(dat, dat$Xb, function(Z) segments(x0=min(Z$x)-min(Z$Xb), y0=max(range(predict(lm(Z$yD~Z$x)))), x1=max(Z$x)-min(Z$Xb), y1=min(range(predict(lm(Z$yD~Z$x)))), col=which(unique(Z$u)==unique(u))))
points(do.call(rbind, by(dat, dat$Xb, function(Z) c(mean(Z$xD), mean(Z$yD)))), pch=19, cex=2)
dev.off()
# BW12: adds the pooled within line. NOTE(review): the slope -1.3614 is
# hard-coded from the lm(yD ~ xD) fit above; re-derive if the seed changes.
pdf("BW12.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(dat$xD, dat$yD, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-3.5,3.5), pch=19, ylab="y", xlab="x")
abline(h=0, col="lightgrey")
by(dat, dat$Xb, function(Z) segments(x0=min(Z$x)-min(Z$Xb), y0=max(range(predict(lm(Z$yD~Z$x)))), x1=max(Z$x)-min(Z$Xb), y1=min(range(predict(lm(Z$yD~Z$x)))), col=which(unique(Z$u)==unique(u))))
points(do.call(rbind, by(dat, dat$Xb, function(Z) c(mean(Z$xD), mean(Z$yD)))), pch=19, cex=2)
abline(a=0, b=-1.3614, lwd=2)
dev.off()
# BW13: demeaned scatter with only the pooled within line.
pdf("BW13.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(dat$xD, dat$yD, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-3.5,3.5), pch=19, ylab="y", xlab="x")
abline(h=0, col="lightgrey")
points(do.call(rbind, by(dat, dat$Xb, function(Z) c(mean(Z$xD), mean(Z$yD)))), pch=19, cex=2)
abline(a=0, b=-1.3614, lwd=2)
dev.off()
# BW132: side-by-side, demeaned view vs. original view with both lines.
pdf("BW132.pdf", height=4, width=9)
par(cex.lab=1.5)
par(cex.axis=1.5)
par(mfrow=c(1,2))
plot(dat$xD, dat$yD, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-3.5,3.5), pch=19, ylab="y", xlab="x")
abline(h=0, col="lightgrey")
points(do.call(rbind, by(dat, dat$Xb, function(Z) c(mean(Z$xD), mean(Z$yD)))), pch=19, cex=2)
abline(a=0, b=-1.3614, lwd=2)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19)
abline(h=0, col="lightgrey")
by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) segments(x0=min(Z$x), y0=max(range(predict(lm(Z$y~Z$x)))), x1=max(Z$x), y1=min(range(predict(lm(Z$y~Z$x)))), col=which(unique(Z$u)==unique(u))))
points(do.call(rbind, by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) c(mean(Z$x), mean(Z$y)))), col=unique(Xb), pch=19, cex=2)
segments(x0=min(unique(Xb)), y0=min(predict(lm(as.numeric(by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) mean(Z$y)))~unique(Xb)))), x1=max(unique(Xb)), y1=max(predict(lm(as.numeric(by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) mean(Z$y)))~unique(Xb)))), lwd=2)
dev.off()
# BW14: same comparison, adding the within-slope line on the original scale.
pdf("BW14.pdf", height=4, width=9)
par(cex.lab=1.5)
par(cex.axis=1.5)
par(mfrow=c(1,2))
plot(dat$xD, dat$yD, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-3.5,3.5), pch=19, ylab="y", xlab="x")
abline(h=0, col="lightgrey")
points(do.call(rbind, by(dat, dat$Xb, function(Z) c(mean(Z$xD), mean(Z$yD)))), pch=19, cex=2)
abline(a=0, b=-1.3614, lwd=2)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19)
abline(h=0, col="lightgrey")
by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) segments(x0=min(Z$x), y0=max(range(predict(lm(Z$y~Z$x)))), x1=max(Z$x), y1=min(range(predict(lm(Z$y~Z$x)))), col=which(unique(Z$u)==unique(u))))
points(do.call(rbind, by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) c(mean(Z$x), mean(Z$y)))), col=unique(Xb), pch=19, cex=2)
segments(x0=min(unique(Xb)), y0=min(predict(lm(as.numeric(by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) mean(Z$y)))~unique(Xb)))), x1=max(unique(Xb)), y1=max(predict(lm(as.numeric(by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) mean(Z$y)))~unique(Xb)))), lwd=2)
abline(a=(3+1.3614*3), b=-1.3614, lwd=2)
dev.off()
# Trento Lecture 3
# Compare coefficient estimates: demeaned OLS, dummy-variable FE, and the
# mean of the per-group slopes all recover the within-group effect of x.
lm(yD~xD, dat)$coefficients
lm(y~x + as.factor(Xb), dat)$coefficients # coefficient on x matches...
by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) lm(Z$y~Z$x)$coefficients)
colMeans(do.call(rbind, by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) lm(Z$y~Z$x)$coefficients))) # MEAN coefficient matches
# BW15: each group drawn with the COMMON within slope, anchored at its mean.
pdf("BW15.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19)
by(dat, dat$Xb, function(Z) segments(x0=min(Z$x), y0=mean(Z$y)+max(range(predict(lm(dat$yD~dat$xD)))), x1=max(Z$x), y1=mean(Z$y)+min(range(predict(lm(dat$yD~dat$xD)))), col=which(unique(Z$u)==unique(u))))
dev.off()
# BW16: overlays the groups' own OLS lines for comparison.
pdf("BW16.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19)
by(dat, dat$Xb, function(Z) segments(x0=min(Z$x), y0=mean(Z$y)+max(range(predict(lm(dat$yD~dat$xD)))), x1=max(Z$x), y1=mean(Z$y)+min(range(predict(lm(dat$yD~dat$xD)))), col=which(unique(Z$u)==unique(u))))
by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) segments(x0=min(Z$x), y0=max(range(predict(lm(Z$y~Z$x)))), x1=max(Z$x), y1=min(range(predict(lm(Z$y~Z$x)))), col=which(unique(Z$u)==unique(u))))
dev.off()
# BW17: full-length lines via abline(), per-group slope vs. common slope.
pdf("BW17.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19)
by(dat, dat$Xb, function(Z) abline(mean(Z$y)-lm(Z$yD~Z$xD)$coefficients[2]*Z$xM, lm(Z$yD~Z$xD)$coefficients[2], col=which(unique(Z$u)==unique(u))))
by(dat, dat$Xb, function(Z) abline(mean(Z$y)-lm(dat$yD~dat$xD)$coefficients[2]*Z$xM, lm(dat$yD~dat$xD)$coefficients[2], col=which(unique(Z$u)==unique(u))))
dev.off()
####### here we start a new series, illustrating random intercepts
# Rebuild the simulation with 20 groups of 5: the original 5 groups
# (same seeds as before) plus 15 new groups.
set.seed(999)
u <- rep(rnorm(5), each=5)/5
e <- rnorm(25)
set.seed(123)
Xb <- rep(rep(1:5, each=5), 4)
Xw <- rep(rep(-2:2, 5)/5, 4)
x <- Xw + Xb
u <- c(u, rep(rnorm(15), each=5)/5)
e <- c(e, rnorm(75))
y <- a + Xb - Xw + u + e
grp <- rep(1:20, each=5)
dat <- data.frame(a, Xb, Xw, x, u, e, y, grp)
# Merge in per-group means and build within-group deviations, as before
# but now keyed on grp rather than Xb.
means <- by(dat, dat$grp, function(Z) c(mean(Z$x), mean(Z$y)))
means <- data.frame(grp=names(means), do.call(rbind, means))
names(means)[2:3] <- c("xM", "yM")
dat <- merge(dat, means)
dat$xD <- dat$x-dat$xM
dat$yD <- dat$y-dat$yM
# Extend the color palette so all 20 groups get distinct colors.
palette(c(palette()[1:5], rainbow(15)))
# BW18: 20-group scatter.
pdf("BW18.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat, plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
dev.off()
# BW19: adds each group's own OLS line.
pdf("BW19.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat, plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
by(dat, dat$grp, function(Z) abline(lm(y~x, Z), col=Z$grp))
dev.off()
# Collect per-group intercept/slope estimates with confidence intervals,
# for the caterpillar-style plot below.
mods <- do.call(rbind, by(dat, dat$grp, function(Z) lm(y ~ x, Z)$coefficients))
mods <- data.frame(mods, matrix(t(do.call(rbind, by(dat, dat$grp, function(Z) confint(lm(y ~ x, Z))))), ncol=4, byrow=T))
names(mods) <- c("Intercept.est", "Slope.est", "Intercept.lo", "Intercept.hi", "Slope.lo", "Slope.hi")
mods$col <- c(palette()[1:5], rainbow(15))
mods <- mods[order(mods$Intercept.est),]
# BW20: intercepts (left) and slopes (right) with 95% CIs, sorted by intercept.
pdf("BW20.pdf")
par(cex.lab=1.25)
par(cex.axis=1.25)
par(mfrow=c(1,2))
plot(mods$Intercept.est, 1:20, pch=19, type="n", xlim=c(min(mods$Intercept.est)-10, max(mods$Intercept.est)+10), ylab="Group", xlab="Intercept", axes=F)
box("plot")
axis(1)
abline(h=seq(20), col="lightgray", lwd=0.5)
abline(v=seq(-20, 30, 10), col="lightgray", lwd=0.5)
segments(y0=1:20, y1=1:20, x0=mods$Intercept.lo, x1=mods$Intercept.hi, lwd=2, col=mods$col)
points(mods$Intercept.est, 1:20, pch=19, col=mods$col)
plot(mods$Slope.est, 1:20, pch=19, type="n", xlim=c(min(mods$Slope.est)-2, max(mods$Slope.est)+2), ylab="", xlab="Slope", axes=F)
box("plot")
axis(1)
abline(h=seq(20), col="lightgray", lwd=0.5)
abline(v=seq(-6, 6, 2), col="lightgray", lwd=0.5)
segments(y0=1:20, y1=1:20, x0=mods$Slope.lo, x1=mods$Slope.hi, lwd=2, col=mods$col)
points(mods$Slope.est, 1:20, pch=19, col=mods$col)
dev.off()
# BW21: fixed-effects (dummy) within slope drawn through (3, 3).
pdf("BW21.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat, plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
abline(a=(3-with(dat, lm(y ~ x + as.factor(grp)))$coefficients[2]*3), b=with(dat, lm(y ~ x + as.factor(grp)))$coefficients[2], lwd=5)
dev.off()
# BW22: FE slope overlaid on the per-group OLS lines.
pdf("BW22.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat, plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
by(dat, dat$grp, function(Z) abline(lm(y~x, Z), col=Z$grp))
abline(a=(3-with(dat, lm(y ~ x + as.factor(grp)))$coefficients[2]*3), b=with(dat, lm(y ~ x + as.factor(grp)))$coefficients[2], lwd=5)
dev.off()
# lmer() comes from lme4, which is never loaded anywhere in this script
# (only MASS is, further down) -- attach it before the first use.
library(lme4)
# Fitted values from a random-intercept model (within-group slope on xD,
# random intercept per grp).
dat$yMLM <- predict(lmer(y ~ xD + (1 | grp), dat))
# --- Figures BW23-BW28: random-intercept fitted lines and shrinkage ---------
# NOTE(review): yMLM requires lme4's lmer() to have been run above.
# BW23: per-group lines from the random-intercept fit (common slope).
pdf("BW23.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat, plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
by(dat, dat$grp, function(Z) abline(lm(yMLM ~ x, Z), col=Z$grp))
dev.off()
# BW24: zoom to the first five groups only.
pdf("BW24.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat[dat$grp<6,], plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
by(dat[dat$grp<6,], dat$grp[dat$grp<6], function(Z) abline(lm(yMLM ~ x, Z), col=Z$grp))
dev.off()
# BW25: adds each group's own OLS segment to show shrinkage toward the
# common slope.
pdf("BW25.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat[dat$grp<6,], plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
by(dat[dat$grp<6,], dat$grp[dat$grp<6], function(Z) abline(lm(yMLM ~ x, Z), col=Z$grp))
by(dat[dat$grp<6,], dat$grp[dat$grp<6], function(Z) segments(x0=min(Z$x), y0=max(range(predict(lm(Z$y~Z$x)))), x1=max(Z$x), y1=min(range(predict(lm(Z$y~Z$x)))), col=which(unique(Z$u)==unique(u))))
dev.off()
# BW26: compares against the pooled-within-slope segments instead.
pdf("BW26.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat[dat$grp<6,], plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
by(dat[dat$grp<6,], dat$grp[dat$grp<6], function(Z) abline(lm(yMLM ~ x, Z), col=Z$grp))
by(dat[dat$grp<6,], dat$grp[dat$grp<6], function(Z) segments(x0=min(Z$x), y0=mean(Z$y)+max(range(predict(lm(dat$yD~dat$xD)))), x1=max(Z$x), y1=mean(Z$y)+min(range(predict(lm(dat$yD~dat$xD)))), col=which(unique(Z$u)==unique(u))))
dev.off()
# BW27: all groups plus the fixed-part (within) line of the MLM.
pdf("BW27.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat, plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
by(dat, dat$grp, function(Z) abline(lm(yMLM ~ x, Z), col=Z$grp))
abline(c(fixef(lmer(y ~ xD + (1 | grp), dat))[1]-3*fixef(lmer(y ~ xD + (1 | grp), dat))[2], fixef(lmer(y ~ xD + (1 | grp), dat))[2]), lwd=5)
dev.off()
# BW28: adds group means and the between line from a model on xM.
pdf("BW28.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat, plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
by(dat, dat$grp, function(Z) abline(lm(yMLM ~ x, Z), col=Z$grp))
by(dat, dat$grp, function(Z) points(mean(Z$x), mean(Z$y), col=Z$grp, pch=19, cex=2))
abline(c(fixef(lmer(y ~ xD + (1 | grp), dat))[1]-3*fixef(lmer(y ~ xD + (1 | grp), dat))[2], fixef(lmer(y ~ xD + (1 | grp), dat))[2]), lwd=5)
abline(fixef(lmer(y ~ xM + (1 | grp), dat)), lwd=5)
dev.off()
###### now show random slopes too
# MASS provides mvrnorm() for drawing correlated random effects.
library(MASS)
# Rebuild the 20-group data exactly as before.
a <- 0
set.seed(999)
u <- rep(rnorm(5), each=5)/5
e <- rnorm(25)
set.seed(123)
Xb <- rep(rep(1:5, each=5), 4)
Xw <- rep(rep(-2:2, 5)/5, 4)
x <- Xw + Xb
u <- c(u, rep(rnorm(15), each=5)/5)
e <- c(e, rnorm(75))
y <- a + Xb - Xw + u + e
grp <- rep(1:20, each=5)
dat <- data.frame(a, Xb, Xw, x, u, e, y, grp)
means <- by(dat, dat$grp, function(Z) c(mean(Z$x), mean(Z$y)))
means <- data.frame(grp=names(means), do.call(rbind, means))
names(means)[2:3] <- c("xM", "yM")
dat <- merge(dat, means)
dat$xD <- dat$x-dat$xM
dat$yD <- dat$y-dat$yM
set.seed(123)
# Correlated random intercepts (U0) and slopes (U1), one pair per group.
U0U1 <- mvrnorm(n=20, mu=c(0,0), Sigma=matrix(c(0.2^2, 0.01, 0.01, 0.2^2), ncol=2)) # 0.5 correlation
dat <- data.frame(dat, U0U1[rep(1:nrow(U0U1), each=5),])
names(dat)[13:14] <- c("U0", "U1")
# Replace the old group effect u with U0 + U1*Xw (random intercept + slope).
dat <- within(dat, y <- y - u + U0 + U1*Xw)
# Fitted values from random-intercept and random-slope models.
# NOTE(review): lmer() requires lme4 to be attached.
dat$yMLM <- predict(lmer(y ~ xD + (1 | grp), dat))
dat$yMLMs <- predict(lmer(y ~ xD + (xD | grp), dat))
palette(c(palette()[1:5], rainbow(15)))
# --- Figures BW29a-BW33: random-slope fits ----------------------------------
# BW29a: scatter of the random-slope data.
pdf("BW29a.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat, plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
dev.off()
# BW29: per-group lines from the random-slope model fit (yMLMs).
pdf("BW29.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat, plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
by(dat, dat$grp, function(Z) abline(lm(yMLMs ~ x, Z), col=Z$grp))
dev.off()
# BW30: zoom to the first five groups.
pdf("BW30.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat[dat$grp<6,], plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
by(dat[dat$grp<6,], dat$grp[dat$grp<6], function(Z) abline(lm(yMLMs ~ x, Z), col=Z$grp))
dev.off()
# BW31: the groups' own unpooled OLS lines, for contrast.
pdf("BW31.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat, plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
by(dat, dat$grp, function(Z) abline(lm(y~x, Z), col=Z$grp))
dev.off()
# BW32: random-intercept (common-slope) lines, for contrast.
pdf("BW32.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat, plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
by(dat, dat$grp, function(Z) abline(lm(yMLM ~ x, Z), col=Z$grp))
dev.off()
# BW33: random-slope lines plus group means and the between / fixed-part
# within lines of the random-intercept model.
pdf("BW33.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat, plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
by(dat, dat$grp, function(Z) points(mean(Z$x), mean(Z$y), col=Z$grp, pch=19, cex=2))
by(dat, dat$grp, function(Z) abline(lm(yMLMs ~ x, Z), col=Z$grp))
abline(fixef(lmer(y ~ xM + (1 | grp), dat)), lwd=5)
abline(c(fixef(lmer(y ~ xD + (1 | grp), dat))[1]-3*fixef(lmer(y ~ xD + (1 | grp), dat))[2], fixef(lmer(y ~ xD + (1 | grp), dat))[2]), lwd=5)
dev.off()
# fixed effects not only "zero out" the differences in the means of x and y, but also "zero out" the slopes...
# show next:
# random intercept models
# random slope models
# shrinkage of random intercepts
# shrinkage of random slopes
# look at SEs (how they change with random slopes)
# look at variances (think about number of parameters, and the variances)
# show pictures to illustrate... overall shrinkage (use Produc data?)
# -> fit models with lmer, and MCMCglmm
# use the Produc data to show shrinkage
# then use EVS data to show what all this gets you (use GODIMP)
# add slides on the application... why AUTH, GODIMP?
# use MCMCglmm
# work with a binary outcome, and apply the binomial trick
| /Introduction_Multilevel/Longitudinal-Multilevel-short_course/Reproduce_PDF_graphics.R | no_license | cimentadaj/random-stuff | R | false | false | 19,748 | r | ## This code is to reproduce the graphs in the PDF presentations
setwd("/Users/ggmhf/Desktop/Teaching/Multilevel Short Course")
set.seed(999)
a <- 0
Xb <- rep(1:5, each=5)
Xw <- rep(-2:2, 5)/5
x <- Xw + Xb
u <- rep(rnorm(5), each=5)/5
e <- rnorm(25)
y <- a + Xb - Xw + u + e
summary(lm(y~x))
pdf("BW1.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, xlim=c(-1,6), ylim=c(-1,6), pch=19)
dev.off()
pdf("BW2.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, xlim=c(-1,6), ylim=c(-1,6), pch=19)
segments(x0=min(x), y0=min(range(predict(lm(y~x)))), x1=max(x), y1=max(range(predict(lm(y~x)))), lwd=2)
dev.off()
pdf("BW3.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19)
dev.off()
pdf("BW4.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19)
by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) segments(x0=min(Z$x), y0=max(range(predict(lm(Z$y~Z$x)))), x1=max(Z$x), y1=min(range(predict(lm(Z$y~Z$x)))), col=which(unique(Z$u)==unique(u))))
dev.off()
pdf("BW5.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19)
by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) segments(x0=min(Z$x), y0=max(range(predict(lm(Z$y~Z$x)))), x1=max(Z$x), y1=min(range(predict(lm(Z$y~Z$x)))), col=which(unique(Z$u)==unique(u))))
points(do.call(rbind, by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) c(mean(Z$x), mean(Z$y)))), col=unique(Xb), pch=19, cex=2)
dev.off()
pdf("BW5b.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19)
points(do.call(rbind, by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) c(mean(Z$x), mean(Z$y)))), col=unique(Xb), pch=19, cex=2)
dev.off()
pdf("BW5c.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19, type="n")
points(do.call(rbind, by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) c(mean(Z$x), mean(Z$y)))), col=unique(Xb), pch=19, cex=2)
dev.off()
pdf("BW6.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19)
by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) segments(x0=min(Z$x), y0=max(range(predict(lm(Z$y~Z$x)))), x1=max(Z$x), y1=min(range(predict(lm(Z$y~Z$x)))), col=which(unique(Z$u)==unique(u))))
points(do.call(rbind, by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) c(mean(Z$x), mean(Z$y)))), col=unique(Xb), pch=19, cex=2)
segments(x0=min(unique(Xb)), y0=min(predict(lm(as.numeric(by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) mean(Z$y)))~unique(Xb)))), x1=max(unique(Xb)), y1=max(predict(lm(as.numeric(by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) mean(Z$y)))~unique(Xb)))), lwd=2)
dev.off()
pdf("BW6b.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19, type="n")
points(do.call(rbind, by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) c(mean(Z$x), mean(Z$y)))), col=unique(Xb), pch=19, cex=2)
segments(x0=min(unique(Xb)), y0=min(predict(lm(as.numeric(by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) mean(Z$y)))~unique(Xb)))), x1=max(unique(Xb)), y1=max(predict(lm(as.numeric(by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) mean(Z$y)))~unique(Xb)))), lwd=2)
dev.off()
pdf("BW62.pdf", height=4, width=9)
par(cex.lab=1.5)
par(cex.axis=1.5)
par(mfrow=c(1,2))
plot(x, y, xlim=c(-1,6), ylim=c(-1,6), pch=19)
segments(x0=min(x), y0=min(range(predict(lm(y~x)))), x1=max(x), y1=max(range(predict(lm(y~x)))), lwd=2)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19)
by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) segments(x0=min(Z$x), y0=max(range(predict(lm(Z$y~Z$x)))), x1=max(Z$x), y1=min(range(predict(lm(Z$y~Z$x)))), col=which(unique(Z$u)==unique(u))))
points(do.call(rbind, by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) c(mean(Z$x), mean(Z$y)))), col=unique(Xb), pch=19, cex=2)
# --- Between/within (BW) regression illustration ---
# NOTE(review): a, Xb, Xw, x, u, e, y are simulated earlier in the file (not
# visible in this chunk); Xb appears to be the between-group component of x.
# Draw the between regression line fitted through the group means of y on Xb.
segments(x0=min(unique(Xb)), y0=min(predict(lm(as.numeric(by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) mean(Z$y)))~unique(Xb)))), x1=max(unique(Xb)), y1=max(predict(lm(as.numeric(by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) mean(Z$y)))~unique(Xb)))), lwd=2)
dev.off()
# Assemble the simulated variables into one data frame.
dat <- data.frame(a, Xb, Xw, x, u, e, y)
# Group means of x and y by Xb (printed for inspection, then stored).
by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) c(mean(Z$x), mean(Z$y)))
means <- by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) c(mean(Z$x), mean(Z$y)))
means <- data.frame(Xb=names(means), do.call(rbind, means))
names(means)[2:3] <- c("xM", "yM")
# Merge the group means back in and compute within-group deviations (demeaning).
dat <- merge(dat, means)
dat$xD <- dat$x-dat$xM
dat$yD <- dat$y-dat$yM
# Dummy-variable (fixed-effects) and demeaned regressions yield the same slope on x.
summary(lm(y ~ x + as.factor(Xb), dat))
summary(lm(yD ~ xD, dat)) # same beta coefficient
# BW7: raw data with group means highlighted.
pdf("BW7.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19)
abline(h=0, col="lightgrey")
points(do.call(rbind, by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) c(mean(Z$x), mean(Z$y)))), col=unique(Xb), pch=19, cex=2)
dev.off()
# BW8: as BW7, plus arrows dropping each group mean down to zero.
pdf("BW8.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19)
abline(h=0, col="lightgrey")
points(do.call(rbind, by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) c(mean(Z$x), mean(Z$y)))), col=unique(Xb), pch=19, cex=2)
by(dat, dat$Xb, function(Z) arrows(x0=min(Z$Xb), x1=min(Z$Xb), y0=min(Z$yM), y1=0, col=min(Z$Xb), lwd=2))
dev.off()
# BW9: x against within-group-demeaned y (yD), with per-group fit lines.
pdf("BW9.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(dat$x, dat$yD, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-3.5,3.5), pch=19, ylab="y", xlab="x")
abline(h=0, col="lightgrey")
points(do.call(rbind, by(dat, dat$Xb, function(Z) c(mean(Z$x), mean(Z$yD)))), col=unique(Xb), pch=19, cex=2)
by(dat, dat$Xb, function(Z) segments(x0=min(Z$x), y0=max(range(predict(lm(Z$yD~Z$x)))), x1=max(Z$x), y1=min(range(predict(lm(Z$yD~Z$x)))), col=which(unique(Z$u)==unique(u))))
dev.off()
# BW10: as BW9, plus arrows shifting groups horizontally toward zero.
pdf("BW10.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(dat$x, dat$yD, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-3.5,3.5), pch=19, ylab="y", xlab="x")
abline(h=0, col="lightgrey")
points(do.call(rbind, by(dat, dat$Xb, function(Z) c(mean(Z$x), mean(Z$yD)))), col=unique(Xb), pch=19, cex=2)
by(dat, dat$Xb, function(Z) segments(x0=min(Z$x), y0=max(range(predict(lm(Z$yD~Z$x)))), x1=max(Z$x), y1=min(range(predict(lm(Z$yD~Z$x)))), col=which(unique(Z$u)==unique(u))))
by(dat, dat$Xb, function(Z) arrows(x0=min(Z$Xb), x1=0, y0=(3-min(Z$Xb))/5, y1=(3-min(Z$Xb))/5, col=min(Z$Xb), lwd=2))
dev.off()
# BW11: fully demeaned data (xD vs yD) with per-group fit lines, shifted so
# each group's line sits at its demeaned x position.
pdf("BW11.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(dat$xD, dat$yD, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-3.5,3.5), pch=19, ylab="y", xlab="x")
abline(h=0, col="lightgrey")
by(dat, dat$Xb, function(Z) segments(x0=min(Z$x)-min(Z$Xb), y0=max(range(predict(lm(Z$yD~Z$x)))), x1=max(Z$x)-min(Z$Xb), y1=min(range(predict(lm(Z$yD~Z$x)))), col=which(unique(Z$u)==unique(u))))
points(do.call(rbind, by(dat, dat$Xb, function(Z) c(mean(Z$xD), mean(Z$yD)))), pch=19, cex=2)
dev.off()
# BW12: as BW11, with the pooled within-group (fixed-effects) regression line.
# NOTE(review): the slope -1.3614 is hard-coded; it presumably comes from
# lm(yD ~ xD, dat) above -- confirm it matches the current seed/simulation.
pdf("BW12.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(dat$xD, dat$yD, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-3.5,3.5), pch=19, ylab="y", xlab="x")
abline(h=0, col="lightgrey")
by(dat, dat$Xb, function(Z) segments(x0=min(Z$x)-min(Z$Xb), y0=max(range(predict(lm(Z$yD~Z$x)))), x1=max(Z$x)-min(Z$Xb), y1=min(range(predict(lm(Z$yD~Z$x)))), col=which(unique(Z$u)==unique(u))))
points(do.call(rbind, by(dat, dat$Xb, function(Z) c(mean(Z$xD), mean(Z$yD)))), pch=19, cex=2)
abline(a=0, b=-1.3614, lwd=2)
dev.off()
# BW13: demeaned data with only the pooled within line (no per-group lines).
pdf("BW13.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(dat$xD, dat$yD, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-3.5,3.5), pch=19, ylab="y", xlab="x")
abline(h=0, col="lightgrey")
points(do.call(rbind, by(dat, dat$Xb, function(Z) c(mean(Z$xD), mean(Z$yD)))), pch=19, cex=2)
abline(a=0, b=-1.3614, lwd=2)
dev.off()
# BW132: two-panel comparison -- within (demeaned) view vs. raw data with
# per-group lines and the between line through the group means.
pdf("BW132.pdf", height=4, width=9)
par(cex.lab=1.5)
par(cex.axis=1.5)
par(mfrow=c(1,2))
plot(dat$xD, dat$yD, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-3.5,3.5), pch=19, ylab="y", xlab="x")
abline(h=0, col="lightgrey")
points(do.call(rbind, by(dat, dat$Xb, function(Z) c(mean(Z$xD), mean(Z$yD)))), pch=19, cex=2)
abline(a=0, b=-1.3614, lwd=2)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19)
abline(h=0, col="lightgrey")
by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) segments(x0=min(Z$x), y0=max(range(predict(lm(Z$y~Z$x)))), x1=max(Z$x), y1=min(range(predict(lm(Z$y~Z$x)))), col=which(unique(Z$u)==unique(u))))
points(do.call(rbind, by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) c(mean(Z$x), mean(Z$y)))), col=unique(Xb), pch=19, cex=2)
segments(x0=min(unique(Xb)), y0=min(predict(lm(as.numeric(by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) mean(Z$y)))~unique(Xb)))), x1=max(unique(Xb)), y1=max(predict(lm(as.numeric(by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) mean(Z$y)))~unique(Xb)))), lwd=2)
dev.off()
# BW14: as BW132, plus the within slope drawn on the raw-data panel
# (intercept chosen so the line passes through the overall centre at x = 3).
pdf("BW14.pdf", height=4, width=9)
par(cex.lab=1.5)
par(cex.axis=1.5)
par(mfrow=c(1,2))
plot(dat$xD, dat$yD, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-3.5,3.5), pch=19, ylab="y", xlab="x")
abline(h=0, col="lightgrey")
points(do.call(rbind, by(dat, dat$Xb, function(Z) c(mean(Z$xD), mean(Z$yD)))), pch=19, cex=2)
abline(a=0, b=-1.3614, lwd=2)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19)
abline(h=0, col="lightgrey")
by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) segments(x0=min(Z$x), y0=max(range(predict(lm(Z$y~Z$x)))), x1=max(Z$x), y1=min(range(predict(lm(Z$y~Z$x)))), col=which(unique(Z$u)==unique(u))))
points(do.call(rbind, by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) c(mean(Z$x), mean(Z$y)))), col=unique(Xb), pch=19, cex=2)
segments(x0=min(unique(Xb)), y0=min(predict(lm(as.numeric(by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) mean(Z$y)))~unique(Xb)))), x1=max(unique(Xb)), y1=max(predict(lm(as.numeric(by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) mean(Z$y)))~unique(Xb)))), lwd=2)
abline(a=(3+1.3614*3), b=-1.3614, lwd=2)
dev.off()
# Trento Lecture 3
# Numeric checks: the demeaned slope equals the fixed-effects slope,
# and the mean of the per-group OLS slopes matches as well (balanced design).
lm(yD~xD, dat)$coefficients
lm(y~x + as.factor(Xb), dat)$coefficients # coefficient on x matches...
by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) lm(Z$y~Z$x)$coefficients)
colMeans(do.call(rbind, by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) lm(Z$y~Z$x)$coefficients))) # MEAN coefficient matches
# BW15: raw data with, for each group, a line through the group mean using
# the common (pooled within) slope from lm(yD ~ xD).
pdf("BW15.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19)
by(dat, dat$Xb, function(Z) segments(x0=min(Z$x), y0=mean(Z$y)+max(range(predict(lm(dat$yD~dat$xD)))), x1=max(Z$x), y1=mean(Z$y)+min(range(predict(lm(dat$yD~dat$xD)))), col=which(unique(Z$u)==unique(u))))
dev.off()
# BW16: common-slope lines overlaid with each group's own OLS line.
pdf("BW16.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19)
by(dat, dat$Xb, function(Z) segments(x0=min(Z$x), y0=mean(Z$y)+max(range(predict(lm(dat$yD~dat$xD)))), x1=max(Z$x), y1=mean(Z$y)+min(range(predict(lm(dat$yD~dat$xD)))), col=which(unique(Z$u)==unique(u))))
by(data.frame(a, Xb, Xw, x, u, e, y), Xb, function(Z) segments(x0=min(Z$x), y0=max(range(predict(lm(Z$y~Z$x)))), x1=max(Z$x), y1=min(range(predict(lm(Z$y~Z$x)))), col=which(unique(Z$u)==unique(u))))
dev.off()
# BW17: same comparison drawn with abline() -- per-group slope vs. common slope,
# each anchored at the group mean.
pdf("BW17.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
plot(x, y, col=rep(1:5, each=5), xlim=c(-1,6), ylim=c(-1,6), pch=19)
by(dat, dat$Xb, function(Z) abline(mean(Z$y)-lm(Z$yD~Z$xD)$coefficients[2]*Z$xM, lm(Z$yD~Z$xD)$coefficients[2], col=which(unique(Z$u)==unique(u))))
by(dat, dat$Xb, function(Z) abline(mean(Z$y)-lm(dat$yD~dat$xD)$coefficients[2]*Z$xM, lm(dat$yD~dat$xD)$coefficients[2], col=which(unique(Z$u)==unique(u))))
dev.off()
####### here we start a new series, illustrating random intercepts
# Simulate 20 groups of 5 observations: group effects u, noise e, and x split
# into a between part (Xb, the group number) and a within part (Xw).
set.seed(999)
u <- rep(rnorm(5), each=5)/5
e <- rnorm(25)
set.seed(123)
Xb <- rep(rep(1:5, each=5), 4)
Xw <- rep(rep(-2:2, 5)/5, 4)
x <- Xw + Xb
# Extend u and e to 20 groups (the first 5 keep the seed-999 draws).
u <- c(u, rep(rnorm(15), each=5)/5)
e <- c(e, rnorm(75))
# DGP: between slope +1, within slope -1, plus group effect and noise.
# NOTE(review): 'a' is defined earlier in the file (set to 0 further below).
y <- a + Xb - Xw + u + e
grp <- rep(1:20, each=5)
dat <- data.frame(a, Xb, Xw, x, u, e, y, grp)
# Group means, merged back in; xD/yD are within-group deviations.
means <- by(dat, dat$grp, function(Z) c(mean(Z$x), mean(Z$y)))
means <- data.frame(grp=names(means), do.call(rbind, means))
names(means)[2:3] <- c("xM", "yM")
dat <- merge(dat, means)
dat$xD <- dat$x-dat$xM
dat$yD <- dat$y-dat$yM
# Extend the palette to 20 distinct colours (one per group).
palette(c(palette()[1:5], rainbow(15)))
# BW18: raw scatter, coloured by group.
pdf("BW18.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat, plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
dev.off()
# BW19: add each group's own OLS regression line.
pdf("BW19.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat, plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
by(dat, dat$grp, function(Z) abline(lm(y~x, Z), col=Z$grp))
dev.off()
# Per-group OLS fits: intercept and slope estimates plus 95% confidence
# intervals, one row per group, used for the BW20 caterpillar plot.
mods <- do.call(rbind, by(dat, dat$grp, function(Z) lm(y ~ x, Z)$coefficients))
# confint() returns a 2x2 matrix per group; transpose/reshape so each group's
# (Intercept.lo, Intercept.hi, Slope.lo, Slope.hi) sits in one row.
# (Changed byrow=T to byrow=TRUE: T is an ordinary variable and can be
# reassigned, so TRUE is the safe spelling.)
mods <- data.frame(mods, matrix(t(do.call(rbind, by(dat, dat$grp, function(Z) confint(lm(y ~ x, Z))))), ncol=4, byrow=TRUE))
names(mods) <- c("Intercept.est", "Slope.est", "Intercept.lo", "Intercept.hi", "Slope.lo", "Slope.hi")
# One colour per group (matching the scatter plots), then sort groups by
# estimated intercept so the caterpillar plot is ordered.
mods$col <- c(palette()[1:5], rainbow(15))
mods <- mods[order(mods$Intercept.est),]
# BW20: two-panel caterpillar plot of per-group intercepts and slopes with CIs.
pdf("BW20.pdf")
par(cex.lab=1.25)
par(cex.axis=1.25)
par(mfrow=c(1,2))
# Left panel: intercepts (type="n" sets up the axes; points drawn after grid).
plot(mods$Intercept.est, 1:20, pch=19, type="n", xlim=c(min(mods$Intercept.est)-10, max(mods$Intercept.est)+10), ylab="Group", xlab="Intercept", axes=F)
box("plot")
axis(1)
abline(h=seq(20), col="lightgray", lwd=0.5)
abline(v=seq(-20, 30, 10), col="lightgray", lwd=0.5)
segments(y0=1:20, y1=1:20, x0=mods$Intercept.lo, x1=mods$Intercept.hi, lwd=2, col=mods$col)
points(mods$Intercept.est, 1:20, pch=19, col=mods$col)
# Right panel: slopes, same layout.
plot(mods$Slope.est, 1:20, pch=19, type="n", xlim=c(min(mods$Slope.est)-2, max(mods$Slope.est)+2), ylab="", xlab="Slope", axes=F)
box("plot")
axis(1)
abline(h=seq(20), col="lightgray", lwd=0.5)
abline(v=seq(-6, 6, 2), col="lightgray", lwd=0.5)
segments(y0=1:20, y1=1:20, x0=mods$Slope.lo, x1=mods$Slope.hi, lwd=2, col=mods$col)
points(mods$Slope.est, 1:20, pch=19, col=mods$col)
dev.off()
# BW21: raw data with the fixed-effects (dummy) regression slope, anchored so
# the line passes through the overall centre at x = 3.
pdf("BW21.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat, plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
abline(a=(3-with(dat, lm(y ~ x + as.factor(grp)))$coefficients[2]*3), b=with(dat, lm(y ~ x + as.factor(grp)))$coefficients[2], lwd=5)
dev.off()
# BW22: same, with each group's own OLS line overlaid.
pdf("BW22.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat, plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
by(dat, dat$grp, function(Z) abline(lm(y~x, Z), col=Z$grp))
abline(a=(3-with(dat, lm(y ~ x + as.factor(grp)))$coefficients[2]*3), b=with(dat, lm(y ~ x + as.factor(grp)))$coefficients[2], lwd=5)
dev.off()
# Random-intercept model fitted values (BLUP-shrunk group lines).
# NOTE(review): lmer()/fixef() are from lme4; no library(lme4) call is visible
# in this chunk -- confirm the package is loaded earlier in the file.
dat$yMLM <- predict(lmer(y ~ xD + (1 | grp), dat))
# BW23: random-intercept fitted lines, one per group (common slope).
pdf("BW23.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat, plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
by(dat, dat$grp, function(Z) abline(lm(yMLM ~ x, Z), col=Z$grp))
dev.off()
# BW24: zoom in on the first five groups only.
pdf("BW24.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat[dat$grp<6,], plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
by(dat[dat$grp<6,], dat$grp[dat$grp<6], function(Z) abline(lm(yMLM ~ x, Z), col=Z$grp))
dev.off()
# BW25: first five groups -- shrunk lines vs. each group's own OLS line.
pdf("BW25.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat[dat$grp<6,], plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
by(dat[dat$grp<6,], dat$grp[dat$grp<6], function(Z) abline(lm(yMLM ~ x, Z), col=Z$grp))
by(dat[dat$grp<6,], dat$grp[dat$grp<6], function(Z) segments(x0=min(Z$x), y0=max(range(predict(lm(Z$y~Z$x)))), x1=max(Z$x), y1=min(range(predict(lm(Z$y~Z$x)))), col=which(unique(Z$u)==unique(u))))
dev.off()
# BW26: first five groups -- shrunk lines vs. common fixed-effects slope lines.
pdf("BW26.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat[dat$grp<6,], plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
by(dat[dat$grp<6,], dat$grp[dat$grp<6], function(Z) abline(lm(yMLM ~ x, Z), col=Z$grp))
by(dat[dat$grp<6,], dat$grp[dat$grp<6], function(Z) segments(x0=min(Z$x), y0=mean(Z$y)+max(range(predict(lm(dat$yD~dat$xD)))), x1=max(Z$x), y1=mean(Z$y)+min(range(predict(lm(dat$yD~dat$xD)))), col=which(unique(Z$u)==unique(u))))
dev.off()
# BW27: all groups' shrunk lines plus the overall (within) fixed-effect line.
pdf("BW27.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat, plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
by(dat, dat$grp, function(Z) abline(lm(yMLM ~ x, Z), col=Z$grp))
abline(c(fixef(lmer(y ~ xD + (1 | grp), dat))[1]-3*fixef(lmer(y ~ xD + (1 | grp), dat))[2], fixef(lmer(y ~ xD + (1 | grp), dat))[2]), lwd=5)
dev.off()
# BW28: add the group means and the between regression (y on group-mean x).
pdf("BW28.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat, plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
by(dat, dat$grp, function(Z) abline(lm(yMLM ~ x, Z), col=Z$grp))
by(dat, dat$grp, function(Z) points(mean(Z$x), mean(Z$y), col=Z$grp, pch=19, cex=2))
abline(c(fixef(lmer(y ~ xD + (1 | grp), dat))[1]-3*fixef(lmer(y ~ xD + (1 | grp), dat))[2], fixef(lmer(y ~ xD + (1 | grp), dat))[2]), lwd=5)
abline(fixef(lmer(y ~ xM + (1 | grp), dat)), lwd=5)
dev.off()
###### now show random slopes too
# Re-simulate the 20-group data, then replace the scalar group effect u with
# correlated random intercepts (U0) and random slopes (U1) on the within part.
library(MASS)
a <- 0
set.seed(999)
u <- rep(rnorm(5), each=5)/5
e <- rnorm(25)
set.seed(123)
Xb <- rep(rep(1:5, each=5), 4)
Xw <- rep(rep(-2:2, 5)/5, 4)
x <- Xw + Xb
u <- c(u, rep(rnorm(15), each=5)/5)
e <- c(e, rnorm(75))
y <- a + Xb - Xw + u + e
grp <- rep(1:20, each=5)
dat <- data.frame(a, Xb, Xw, x, u, e, y, grp)
means <- by(dat, dat$grp, function(Z) c(mean(Z$x), mean(Z$y)))
means <- data.frame(grp=names(means), do.call(rbind, means))
names(means)[2:3] <- c("xM", "yM")
dat <- merge(dat, means)
dat$xD <- dat$x-dat$xM
dat$yD <- dat$y-dat$yM
set.seed(123)
# Bivariate normal (U0, U1) per group; cov 0.01 with sd 0.2 each gives
# correlation 0.01 / (0.2 * 0.2) = 0.25 (the original comment said 0.5).
U0U1 <- mvrnorm(n=20, mu=c(0,0), Sigma=matrix(c(0.2^2, 0.01, 0.01, 0.2^2), ncol=2)) # correlation 0.25
# Expand to one row per observation (5 per group) and attach as U0/U1.
dat <- data.frame(dat, U0U1[rep(1:nrow(U0U1), each=5),])
names(dat)[13:14] <- c("U0", "U1")
# Swap the old group effect u for random intercept U0 and random slope U1*Xw.
dat <- within(dat, y <- y - u + U0 + U1*Xw)
# Fitted values from random-intercept and random-slope models.
# NOTE(review): lmer() is from lme4; confirm library(lme4) is loaded earlier.
dat$yMLM <- predict(lmer(y ~ xD + (1 | grp), dat))
dat$yMLMs <- predict(lmer(y ~ xD + (xD | grp), dat))
palette(c(palette()[1:5], rainbow(15)))
# BW29a: raw scatter.
pdf("BW29a.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat, plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
dev.off()
# BW29: random-slope fitted lines per group.
pdf("BW29.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat, plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
by(dat, dat$grp, function(Z) abline(lm(yMLMs ~ x, Z), col=Z$grp))
dev.off()
# BW30: first five groups only.
pdf("BW30.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat[dat$grp<6,], plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
by(dat[dat$grp<6,], dat$grp[dat$grp<6], function(Z) abline(lm(yMLMs ~ x, Z), col=Z$grp))
dev.off()
# BW31: per-group OLS lines for comparison (no shrinkage).
pdf("BW31.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat, plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
by(dat, dat$grp, function(Z) abline(lm(y~x, Z), col=Z$grp))
dev.off()
# BW32: random-intercept (common slope) fitted lines.
pdf("BW32.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat, plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
by(dat, dat$grp, function(Z) abline(lm(yMLM ~ x, Z), col=Z$grp))
dev.off()
# BW33: random-slope lines with group means, the between line, and the
# overall within line.
pdf("BW33.pdf")
par(cex.lab=1.5)
par(cex.axis=1.5)
with(dat, plot(x, y, col=grp, xlim=c(-1,6), ylim=c(-1,6), pch=19))
by(dat, dat$grp, function(Z) points(mean(Z$x), mean(Z$y), col=Z$grp, pch=19, cex=2))
by(dat, dat$grp, function(Z) abline(lm(yMLMs ~ x, Z), col=Z$grp))
abline(fixef(lmer(y ~ xM + (1 | grp), dat)), lwd=5)
abline(c(fixef(lmer(y ~ xD + (1 | grp), dat))[1]-3*fixef(lmer(y ~ xD + (1 | grp), dat))[2], fixef(lmer(y ~ xD + (1 | grp), dat))[2]), lwd=5)
dev.off()
# fixed effects not only "zero out" the differences in the means of x and y, but also "zero out" the slopes...
# show next:
# random intercept models
# random slope models
# shrinkage of random intercepts
# shrinkage of random slopes
# look at SEs (how they change with random slopes)
# look at variances (think about number of parameters, and the variances)
# show pictures to illustrate... overall shrinkage (use Produc data?)
# -> fit models with lmer, and MCMCglmm
# use the Produc data to show shrinkage
# then use EVS data to show what all this gets you (use GODIMP)
# add slides on the application... why AUTH, GODIMP?
# use MCMCglmm
# work with a binary outcome, and apply the binomial trick
|
# Load required libraries
library(ggplot2)
library(scales)
library(grid)
library(plyr)
library(lubridate)
library(zoo)
# Set working directory
# NOTE(review): hard-coded absolute path -- works only on the author's machine.
setwd("D:/ClimData/SeaLevel")
# Read csv file (daily sea level records; no header row in the source file)
sl<-read.csv("rqd0138a.csv",header=FALSE)
# Rename columns: date parts plus sea level in millimetres
colnames(sl)<-c("year","month","day","sl_mm")
# Format date columns.
# BUG FIX: paste() defaults to sep = " ", so the original built strings like
# "1980 1 1" while the format string "%Y%m%d" contains no separators; whether
# strptime tolerates the embedded spaces is platform-dependent. Build an
# unambiguous "YYYY-MM-DD" string instead.
sl$date <- as.Date(paste(sl$year, sl$month, sl$day, sep = "-"), format = "%Y-%m-%d")
# Re-derive numeric month/year from the parsed date.
sl$month <- as.numeric(format(sl$date,"%m"))
sl$year <- as.numeric(format(sl$date,"%Y"))
# Ordered month-name factor for faceting/legends.
sl$monthf <- factor(sl$month,levels=as.character(1:12),labels=c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"),ordered=TRUE)
# Day of month and (1-based) day of year.
sl$mday <- strptime(sl$date, "%Y-%m-%d")$mday
sl$jday <- strptime(sl$date, "%Y-%m-%d")$yday+1
# Year-agnostic month-day key; as.Date() fills in the current year.
sl$daymth <- as.character(paste(sl$month,sl$day,sep="-"))
sl$daymth <-as.Date(sl$daymth,format="%m-%d")
# Classify data into the six Bangladeshi seasons.
# Note: & binds tighter than |, so each clause reads (month & day-range) | month.
sl$season <- "Season"
sl$season[sl$month == 1 & sl$mday >= 1 | sl$month == 2 & sl$mday <= 13| sl$month == 1]<-'Winter'
sl$season[sl$month == 2 & sl$mday >= 14 | sl$month == 4 & sl$mday <= 14 | sl$month == 3]<-'Spring'
sl$season[sl$month == 4 & sl$mday >= 15 | sl$month == 6 & sl$mday <= 14 | sl$month == 5]<-'Summer'
sl$season[sl$month == 6 & sl$mday >= 15 | sl$month == 8 & sl$mday <= 17 | sl$month == 7]<-'Monsoon'
sl$season[sl$month == 8 & sl$mday >= 18 | sl$month == 10 & sl$mday <= 18| sl$month == 9]<-'Autumn'
sl$season[sl$month == 10 & sl$mday >= 19 | sl$month == 12 & sl$mday <= 16| sl$month == 11]<-'Late Autumn'
# BUG FIX: the original condition also included "month == 12 & mday <= 31",
# which classified ALL of December as Winter and overwrote the 'Late Autumn'
# label just assigned to Dec 1-16. Winter is Dec 17 - Feb 13 only.
sl$season[sl$month == 12 & sl$mday >= 17 | sl$month == 1]<-'Winter'
sl$season = factor(sl$season, c("Winter", "Spring", "Summer", "Monsoon","Autumn","Late Autumn"))
## Plot Sea Level: daily observations coloured by season, with a linear trend.
hp_sl <- ggplot(sl, aes(date, sl_mm,colour=season))+
#geom_line(size=0.5)+
geom_point(shape=5,size=1)+
geom_smooth(method="lm",size=0.5,col="red")+
scale_x_date(name="\n\n\n Source: University of Hawaii Sea Level Centre / Bangladesh Inland Water Transport Authority (BIWTA) - 2014",labels=date_format("%Y"),breaks = date_breaks("2 years"))+
ylab("Milimetres (mm)\n")+
xlab("\nYear")+
theme_bw()+
ggtitle("Sea Level at Charchanga - Bangladesh (1980-2000)\n")+
theme(plot.title = element_text(lineheight=1.2, face="bold",size = 14, colour = "grey20"),
panel.border = element_rect(colour = "black",fill=F,size=1),
panel.grid.major = element_line(colour = "grey",size=0.25,linetype='longdash'),
panel.grid.minor = element_blank(),
axis.title.y=element_text(size=11,colour="grey20"),
axis.title.x=element_text(size=9,colour="grey20"),
panel.background = element_rect(fill = NA,colour = "black"))
hp_sl
# Get gradient (mm per year) from a linear fit and annotate it on the plot.
m <- lm(sl_mm~year, data=sl )
ms <- summary(m)
slope <- coef(m)[2]
lg <- list(slope = format(slope, digits=3))
# Build a plotmath expression "Gradient == <slope>" for parse = TRUE below.
eq <- substitute(italic(Gradient)==slope,lg)
eqstr <-as.character(paste(as.expression(eq),"/year"))
# Anchor the label at the top-left corner (x = -Inf as a Date, y = Inf).
hp_sl <- hp_sl + annotate(geom="text",as.Date(-Inf, origin = '1970-01-01'), y = Inf,
hjust = -0.1, vjust = 2, label = eqstr,parse = TRUE,size=3)
hp_sl
# Save plot to png
ggsave(hp_sl, file="Charchanga_SeaLevel_Plot_Seasons.png", width=10, height=6,dpi=400,unit="in",type="cairo")
# Code to produce html code of embedded sea level stations map using googleVis
# Load libraries
library(RCurl)
library(XML)
library(leafletR)
library(googleVis)
# Convert html table into data frame: scrape the UHSLC station list and keep
# the largest table on the page.
theurl <- "http://uhslc.soest.hawaii.edu/data/download/rq"
tables <- readHTMLTable(theurl)
n.rows <- unlist(lapply(tables, function(t) dim(t)[1]))
tbl <- tables[[which.max(n.rows)]]
# Keep only Bangladeshi stations.
bgd.tbl <- subset(tbl, Country =="Bangladesh")
# Factor -> numeric conversion.
# NOTE(review): as.numeric(levels(f)[f]) assumes the columns were read as
# factors; with stringsAsFactors = FALSE (default since R 4.0) levels() is
# NULL and this yields NA -- verify against the readHTMLTable() output.
bgd.tbl$Latitude <- as.numeric(levels(bgd.tbl$Latitude)[bgd.tbl$Latitude])
bgd.tbl$Longitude <- as.numeric(levels(bgd.tbl$Longitude)[bgd.tbl$Longitude])
# googleVis wants "lat:long" strings as the location variable.
google.location <- paste(bgd.tbl$Latitude, bgd.tbl$Longitude, sep = ":")
stations.google <- data.frame(bgd.tbl, google.location)
# Plot map with custom marker icons.
map <- gvisMap(data = stations.google, locationvar = "google.location",tipvar = "Location",
options=list(showTip=TRUE, enableScrollWheel=TRUE,mapType='terrain', useMapTypeControl=TRUE,width=100,height=400,
icons=paste0("{","'default': {'normal': 'http://i.imgur.com/f3q6Oaj.gif',\n",
"'selected': 'http://i.imgur.com/f3q6Oaj.gif'","}}")))
plot(map) | /SLR_Bangladesh.r | no_license | jasonjb82/Jason-and-Doug-Blog | R | false | false | 4,564 | r | # Load required libraries
library(ggplot2)
library(scales)
library(grid)
library(plyr)
library(lubridate)
library(zoo)
# Set working directory
setwd("D:/ClimData/SeaLevel")
# Read csv file
sl<-read.csv("rqd0138a.csv",header=FALSE)
# Rename columns
colnames(sl)<-c("year","month","day","sl_mm")
# Format date columns
sl$date <- as.Date(paste(sl$year,sl$month,sl$day),format="%Y%m%d")
sl$month <- as.numeric(format(sl$date,"%m"))
sl$year <- as.numeric(format(sl$date,"%Y"))
sl$monthf <- factor(sl$month,levels=as.character(1:12),labels=c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"),ordered=TRUE)
sl$mday <- strptime(sl$date, "%Y-%m-%d")$mday
sl$jday <- strptime(sl$date, "%Y-%m-%d")$yday+1
sl$daymth <- as.character(paste(sl$month,sl$day,sep="-"))
sl$daymth <-as.Date(sl$daymth,format="%m-%d")
# Classify data into seasons
sl$season <- "Season"
sl$season[sl$month == 1 & sl$mday >= 1 | sl$month == 2 & sl$mday <= 13| sl$month == 1]<-'Winter'
sl$season[sl$month == 2 & sl$mday >= 14 | sl$month == 4 & sl$mday <= 14 | sl$month == 3]<-'Spring'
sl$season[sl$month == 4 & sl$mday >= 15 | sl$month == 6 & sl$mday <= 14 | sl$month == 5]<-'Summer'
sl$season[sl$month == 6 & sl$mday >= 15 | sl$month == 8 & sl$mday <= 17 | sl$month == 7]<-'Monsoon'
sl$season[sl$month == 8 & sl$mday >= 18 | sl$month == 10 & sl$mday <= 18| sl$month == 9]<-'Autumn'
sl$season[sl$month == 10 & sl$mday >= 19 | sl$month == 12 & sl$mday <= 16| sl$month == 11]<-'Late Autumn'
sl$season[sl$month == 12 & sl$mday >= 17 | sl$month == 12 & sl$mday <= 31| sl$month == 1]<-'Winter'
sl$season = factor(sl$season, c("Winter", "Spring", "Summer", "Monsoon","Autumn","Late Autumn"))
## Plot Sea Level
hp_sl <- ggplot(sl, aes(date, sl_mm,colour=season))+
#geom_line(size=0.5)+
geom_point(shape=5,size=1)+
geom_smooth(method="lm",size=0.5,col="red")+
scale_x_date(name="\n\n\n Source: University of Hawaii Sea Level Centre / Bangladesh Inland Water Transport Authority (BIWTA) - 2014",labels=date_format("%Y"),breaks = date_breaks("2 years"))+
ylab("Milimetres (mm)\n")+
xlab("\nYear")+
theme_bw()+
ggtitle("Sea Level at Charchanga - Bangladesh (1980-2000)\n")+
theme(plot.title = element_text(lineheight=1.2, face="bold",size = 14, colour = "grey20"),
panel.border = element_rect(colour = "black",fill=F,size=1),
panel.grid.major = element_line(colour = "grey",size=0.25,linetype='longdash'),
panel.grid.minor = element_blank(),
axis.title.y=element_text(size=11,colour="grey20"),
axis.title.x=element_text(size=9,colour="grey20"),
panel.background = element_rect(fill = NA,colour = "black"))
hp_sl
# Get gradient and add to plot
m <- lm(sl_mm~year, data=sl )
ms <- summary(m)
slope <- coef(m)[2]
lg <- list(slope = format(slope, digits=3))
eq <- substitute(italic(Gradient)==slope,lg)
eqstr <-as.character(paste(as.expression(eq),"/year"))
hp_sl <- hp_sl + annotate(geom="text",as.Date(-Inf, origin = '1970-01-01'), y = Inf,
hjust = -0.1, vjust = 2, label = eqstr,parse = TRUE,size=3)
hp_sl
# Save plot to png
ggsave(hp_sl, file="Charchanga_SeaLevel_Plot_Seasons.png", width=10, height=6,dpi=400,unit="in",type="cairo")
# Code to produce html code of embedded sea level stations map using googleVis
# Load libraries
library(RCurl)
library(XML)
library(leafletR)
library(googleVis)
# Convert html table into data frame: scrape the UHSLC station list and keep
# the largest table on the page.
theurl <- "http://uhslc.soest.hawaii.edu/data/download/rq"
tables <- readHTMLTable(theurl)
n.rows <- unlist(lapply(tables, function(t) dim(t)[1]))
tbl <- tables[[which.max(n.rows)]]
# Keep only Bangladeshi stations.
bgd.tbl <- subset(tbl, Country =="Bangladesh")
# Factor -> numeric conversion.
# NOTE(review): as.numeric(levels(f)[f]) assumes the columns were read as
# factors; with stringsAsFactors = FALSE (default since R 4.0) levels() is
# NULL and this yields NA -- verify against the readHTMLTable() output.
bgd.tbl$Latitude <- as.numeric(levels(bgd.tbl$Latitude)[bgd.tbl$Latitude])
bgd.tbl$Longitude <- as.numeric(levels(bgd.tbl$Longitude)[bgd.tbl$Longitude])
# googleVis wants "lat:long" strings as the location variable.
google.location <- paste(bgd.tbl$Latitude, bgd.tbl$Longitude, sep = ":")
stations.google <- data.frame(bgd.tbl, google.location)
# Plot map with custom marker icons.
map <- gvisMap(data = stations.google, locationvar = "google.location",tipvar = "Location",
options=list(showTip=TRUE, enableScrollWheel=TRUE,mapType='terrain', useMapTypeControl=TRUE,width=100,height=400,
icons=paste0("{","'default': {'normal': 'http://i.imgur.com/f3q6Oaj.gif',\n",
"'selected': 'http://i.imgur.com/f3q6Oaj.gif'","}}")))
plot(map) |
library(here)
library(readr)
library(dplyr)
library(magrittr)
library(ggplot2)
library(readxl)
library(mikelaffr)
# OUTPUT ###############################################################################################################
# Directory for the PDF figures produced at the bottom of this script.
dir.pdfs <- here("doc/qpcr/pdfs/")
dir.create(dir.pdfs, recursive = TRUE, showWarnings = FALSE)
# INPUT ################################################################################################################
# mRNA qPCR data from 13 June 2021
mRNA.data.xlsx <- here("results/qpcr/20210702_HNP_mRNA_repeat.xlsx")
# miRNA qPCR TaqMan data from 13 June 2021
miRNA.taqman.data.xlsx <- here("results/qpcr/20210613_HNP_miRNA_TaqMan.xlsx")
# additional mRNA qPCR data from 30 July 2021
mRNA.data2.xlsx <- here("results/qpcr/20210730_HNP_mRNA.xlsx")
# GLOBALS ##############################################################################################################
# Import mRNA Data #####################################################################################################
# Raw plate export; "Undetermined" wells become NA.
df.data <- read_xlsx(mRNA.data.xlsx, sheet = 3, range = "A46:O334", na = c("", "Undetermined"))
df.data %<>%
select(Sample = `Sample Name`,
Target = `Target Name`,
Well = `Well Position`,
CT)
# Sample names encode Donor_Day_Expression_Replicate; split them out.
df.data %<>%
mutate(Donor = sapply(strsplit(Sample, "_"), `[`, 1),
Day = sapply(strsplit(Sample, "_"), `[`, 2),
Expression = sapply(strsplit(Sample, "_"), `[`, 3),
Replicate = sapply(strsplit(Sample, "_"), `[`, 4))
df.data %<>%
select(Sample,
Donor,
Day,
Expression,
Replicate,
Well,
Target,
CT)
# Ordered factor so the control condition always plots first.
df.data$Expression <- factor(df.data$Expression,
levels = c("Control", "4707"),
labels = c("pTRIPZ-Control", "pTRIPZ-4707-C"),
ordered = TRUE)
df.data$Day <- factor(df.data$Day)
df.data$Donor <- factor(df.data$Donor)
df.data$Replicate <- factor(df.data$Replicate)
df.data %<>%
mutate(Name = paste(Donor, Expression, Day, Replicate))
# Calculate mean CT values across duplicates, only retain one row per sample/target pair
df.data %<>%
group_by(Sample, Target) %>%
mutate(CT_mean = mean(CT),
CT_sd = sd(CT)) %>%
select(-CT, -Well) %>%
distinct()
# Filter out outlier samples (determined after first pass analysis of data)
# df.data %<>%
#     filter(!Sample == "D54_4707-A_3",
#            !Sample == "D88_4707-C_2",
#            !Sample == "D54_Control_1",
#            !is.na(CT_mean))
# loop over samples, calculate delta CT to ACTB
# (delta CT = target CT minus the sample's ACTB reference CT)
samples <- unique(df.data$Sample)
df.new <- tibble()
for (sample in samples) {
df.tmp <- NULL
# filter for only this sample
df.data %>%
filter(Sample == sample) -> df.tmp
# get ACTB CT value for this sample
ct.actb <- df.tmp$CT_mean[match("ACTB", df.tmp$Target)]
# calculate delta CT within these samples
df.tmp %<>%
mutate(delta_CT_ACTB = CT_mean - ct.actb)
# combine with all data
df.new <- bind_rows(df.new, df.tmp)
}
df.data <- df.new
rm(df.new, df.tmp)
# loop over donors, calculate delta delta CT to pTRIPZ-Control of that donor for each target
# (ddCT = sample dCT minus the mean control dCT within the same donor/day/target;
# fold change = 2^-ddCT, so controls average to fold change 1)
donors <- unique(df.data$Donor)
days <- unique(df.data$Day)
targets <- unique(df.data$Target)
df.tripz <- tibble()
for (donor in donors) {
df.donor <- NULL
# filter for only this donor
df.data %>%
filter(Donor == donor) -> df.donor
for (day in days) {
df.day <- NULL
# filter for only this day
df.donor %>%
filter(Day == day) -> df.day
# loop over targets
for (target in targets) {
df.target <- NULL
# filter for only this target
df.day %>%
filter(Target == target) -> df.target
# use the mean control value across the replicates for delta delta ct
delta.ct.control <- mean(df.target$delta_CT_ACTB[which("pTRIPZ-Control" == df.target$Expression)])
# calculate delta delta CT within this target and donor
df.target %<>%
mutate(delta_delta_CT_ACTB = delta_CT_ACTB - delta.ct.control)
# calculate fold change
df.target %<>%
mutate(fold_change_ACTB = 2 ^ (-delta_delta_CT_ACTB))
# combine into all data
df.tripz <- bind_rows(df.tripz, df.target)
}
}
}
df.data <- df.tripz
rm(df.donor, df.target, df.day, df.tripz)
# Repeat for EIF4A2
# loop over samples, calculate delta CT to EIF4A2
samples <- unique(df.data$Sample)
df.new <- tibble()
for (sample in samples) {
df.tmp <- NULL
# filter for only this sample
df.data %>%
filter(Sample == sample) -> df.tmp
# get the EIF4A2 CT value for this sample (second reference gene)
ct.eif4a2 <- df.tmp$CT_mean[match("EIF4A2", df.tmp$Target)]
# calculate delta CT within these samples
df.tmp %<>%
mutate(delta_CT_EIF4A2 = CT_mean - ct.eif4a2)
# combine with all data
df.new <- bind_rows(df.new, df.tmp)
}
df.data <- df.new
rm(df.new, df.tmp)
# loop over donors, calculate delta delta CT to pTRIPZ-Control of that donor for each target
# (same ddCT scheme as above, but normalised to the EIF4A2 reference instead)
donors <- unique(df.data$Donor)
days <- unique(df.data$Day)
targets <- unique(df.data$Target)
df.tripz <- tibble()
for (donor in donors) {
df.donor <- NULL
# filter for only this donor
df.data %>%
filter(Donor == donor) -> df.donor
for (day in days) {
df.day <- NULL
# filter for only this day
df.donor %>%
filter(Day == day) -> df.day
# loop over targets
for (target in targets) {
df.target <- NULL
# filter for only this target
df.day %>%
filter(Target == target) -> df.target
# use the mean control value across the replicates for delta delta ct
delta.ct.control <- mean(df.target$delta_CT_EIF4A2[which("pTRIPZ-Control" == df.target$Expression)])
# calculate delta delta CT within this target and donor
df.target %<>%
mutate(delta_delta_CT_EIF4A2 = delta_CT_EIF4A2 - delta.ct.control)
# calculate fold change
df.target %<>%
mutate(fold_change_EIF4A2 = 2 ^ (-delta_delta_CT_EIF4A2))
# combine into all data
df.tripz <- bind_rows(df.tripz, df.target)
}
}
}
df.data <- df.tripz
rm(df.donor, df.target, df.day, df.tripz)
# Import mRNA Data 2 ###################################################################################################
# Second mRNA plate (30 July 2021); same layout and tidying as the first.
df.data2 <- read_xlsx(mRNA.data2.xlsx, sheet = 3, range = "A46:O334", na = c("", "Undetermined"))
df.data2 %<>%
select(Sample = `Sample Name`,
Target = `Target Name`,
Well = `Well Position`,
CT)
# Sample names encode Donor_Day_Expression_Replicate; split them out.
df.data2 %<>%
mutate(Donor = sapply(strsplit(Sample, "_"), `[`, 1),
Day = sapply(strsplit(Sample, "_"), `[`, 2),
Expression = sapply(strsplit(Sample, "_"), `[`, 3),
Replicate = sapply(strsplit(Sample, "_"), `[`, 4))
df.data2 %<>%
select(Sample,
Donor,
Day,
Expression,
Replicate,
Well,
Target,
CT)
df.data2$Expression <- factor(df.data2$Expression,
levels = c("Control", "4707"),
labels = c("pTRIPZ-Control", "pTRIPZ-4707-C"),
ordered = TRUE)
df.data2$Day <- factor(df.data2$Day)
df.data2$Donor <- factor(df.data2$Donor)
df.data2$Replicate <- factor(df.data2$Replicate)
df.data2 %<>%
mutate(Name = paste(Donor, Expression, Day, Replicate))
# Calculate mean CT values across duplicates, only retain one row per sample/target pair
df.data2 %<>%
group_by(Sample, Target) %>%
mutate(CT_mean = mean(CT),
CT_sd = sd(CT)) %>%
select(-CT, -Well) %>%
distinct()
# loop over samples, calculate delta CT to ACTB
# (second plate; same per-sample normalisation as for df.data)
samples <- unique(df.data2$Sample)
df.new <- tibble()
for (sample in samples) {
df.tmp <- NULL
# filter for only this sample
df.data2 %>%
filter(Sample == sample) -> df.tmp
# get ACTB CT value for this sample
ct.actb <- df.tmp$CT_mean[match("ACTB", df.tmp$Target)]
# calculate delta CT within these samples
df.tmp %<>%
mutate(delta_CT_ACTB = CT_mean - ct.actb)
# combine with all data
df.new <- bind_rows(df.new, df.tmp)
}
df.data2 <- df.new
rm(df.new, df.tmp)
# loop over donors, calculate delta delta CT to pTRIPZ-Control of that donor for each target
# (second plate, ACTB-normalised ddCT and fold change)
donors <- unique(df.data2$Donor)
days <- unique(df.data2$Day)
targets <- unique(df.data2$Target)
df.tripz <- tibble()
for (donor in donors) {
df.donor <- NULL
# filter for only this donor
df.data2 %>%
filter(Donor == donor) -> df.donor
for (day in days) {
df.day <- NULL
# filter for only this day
df.donor %>%
filter(Day == day) -> df.day
# loop over targets
for (target in targets) {
df.target <- NULL
# filter for only this target
df.day %>%
filter(Target == target) -> df.target
# use the mean control value across the replicates for delta delta ct
delta.ct.control <- mean(df.target$delta_CT_ACTB[which("pTRIPZ-Control" == df.target$Expression)])
# calculate delta delta CT within this target and donor
df.target %<>%
mutate(delta_delta_CT_ACTB = delta_CT_ACTB - delta.ct.control)
# calculate fold change
df.target %<>%
mutate(fold_change_ACTB = 2 ^ (-delta_delta_CT_ACTB))
# combine into all data
df.tripz <- bind_rows(df.tripz, df.target)
}
}
}
df.data2 <- df.tripz
rm(df.donor, df.target, df.day, df.tripz)
# Repeat for EIF4A2
# loop over samples, calculate delta CT to EIF4A2 (second plate)
# BUG FIX: the original iterated over unique(df.data$Sample) -- the FIRST
# plate's sample list -- so any sample present only in df.data2 was silently
# dropped from the rebuilt df.data2. Iterate over df.data2's own samples.
samples <- unique(df.data2$Sample)
df.new <- tibble()
for (sample in samples) {
  df.tmp <- NULL
  # filter for only this sample
  df.data2 %>%
    filter(Sample == sample) -> df.tmp
  # get the EIF4A2 CT value for this sample (reference gene)
  ct.eif4a2 <- df.tmp$CT_mean[match("EIF4A2", df.tmp$Target)]
  # calculate delta CT within these samples
  df.tmp %<>%
    mutate(delta_CT_EIF4A2 = CT_mean - ct.eif4a2)
  # combine with all data
  df.new <- bind_rows(df.new, df.tmp)
}
df.data2 <- df.new
rm(df.new, df.tmp)
# loop over donors, calculate delta delta CT to pTRIPZ-Control of that donor for each target
# (second plate, EIF4A2-normalised ddCT and fold change)
donors <- unique(df.data2$Donor)
days <- unique(df.data2$Day)
targets <- unique(df.data2$Target)
df.tripz <- tibble()
for (donor in donors) {
df.donor <- NULL
# filter for only this donor
df.data2 %>%
filter(Donor == donor) -> df.donor
for (day in days) {
df.day <- NULL
# filter for only this day
df.donor %>%
filter(Day == day) -> df.day
# loop over targets
for (target in targets) {
df.target <- NULL
# filter for only this target
df.day %>%
filter(Target == target) -> df.target
# use the mean control value across the replicates for delta delta ct
delta.ct.control <- mean(df.target$delta_CT_EIF4A2[which("pTRIPZ-Control" == df.target$Expression)])
# calculate delta delta CT within this target and donor
df.target %<>%
mutate(delta_delta_CT_EIF4A2 = delta_CT_EIF4A2 - delta.ct.control)
# calculate fold change
df.target %<>%
mutate(fold_change_EIF4A2 = 2 ^ (-delta_delta_CT_EIF4A2))
# combine into all data
df.tripz <- bind_rows(df.tripz, df.target)
}
}
}
df.data2 <- df.tripz
rm(df.donor, df.target, df.day, df.tripz)
# Import TaqMan miRNA Data #############################################################################################
# miRNA TaqMan plate (13 June 2021); same sample-name encoding and tidying.
df.taqman <- read_xlsx(miRNA.taqman.data.xlsx, sheet = 3, range = "A45:O117", na = c("", "Undetermined"))
df.taqman %<>%
select(Sample = `Sample Name`,
Target = `Target Name`,
Well = `Well Position`,
CT)
df.taqman %<>%
mutate(Donor = sapply(strsplit(Sample, "_"), `[`, 1),
Day = sapply(strsplit(Sample, "_"), `[`, 2),
Expression = sapply(strsplit(Sample, "_"), `[`, 3),
Replicate = sapply(strsplit(Sample, "_"), `[`, 4))
df.taqman %<>%
select(Sample,
Donor,
Day,
Expression,
Replicate,
Well,
Target,
CT)
df.taqman$Expression <- factor(df.taqman$Expression,
levels = c("Control", "4707"),
labels = c("pTRIPZ-Control", "pTRIPZ-4707-C"),
ordered = TRUE)
df.taqman$Day <- factor(df.taqman$Day)
df.taqman$Donor <- factor(df.taqman$Donor)
df.taqman$Replicate <- factor(df.taqman$Replicate)
df.taqman %<>%
mutate(Name = paste(Donor, Expression, Day, Replicate))
# Calculate mean CT values across duplicates, only retain one row per sample/target pair
df.taqman %<>%
group_by(Sample, Target) %>%
mutate(CT_mean = mean(CT),
CT_sd = sd(CT)) %>%
select(-CT, -Well) %>%
distinct()
# loop over samples, calculate delta CT to miR-361
# (miR-361-5p serves as the endogenous control for the miRNA assays)
samples <- unique(df.taqman$Sample)
df.new <- tibble()
for (sample in samples) {
df.tmp <- NULL
# filter for only this sample
df.taqman %>%
filter(Sample == sample) -> df.tmp
# get miR-361 CT value for this sample
ct.361 <- df.tmp$CT_mean[match("miR-361-5p", df.tmp$Target)]
# calculate delta CT within these samples
df.tmp %<>%
mutate(delta_CT_miR361 = CT_mean - ct.361)
# combine with all data
df.new <- bind_rows(df.new, df.tmp)
}
df.taqman <- df.new
rm(df.new, df.tmp)
# pTRIPZ
# loop over donors, calculate delta delta CT to Control of that donor for each target
# (miR-361-normalised ddCT and fold change relative to pTRIPZ-Control)
donors <- unique(df.taqman$Donor)
days <- unique(df.taqman$Day)
targets <- unique(df.taqman$Target)
df.ptripz <- tibble()
for (donor in donors) {
df.donor <- NULL
# filter for only this donor
df.taqman %>%
filter(Donor == donor) -> df.donor
for (day in days) {
df.day <- NULL
# filter for only this day
df.donor %>%
filter(Day == day) -> df.day
# loop over targets
for (target in targets) {
df.target <- NULL
# filter for only this target
df.day %>%
filter(Target == target) -> df.target
# use the mean control value across the replicates for delta delta ct
delta.ct.control <- mean(df.target$delta_CT_miR361[which("pTRIPZ-Control" == df.target$Expression)])
# calculate delta delta CT within this target and donor
df.target %<>%
mutate(delta_delta_CT_miR361 = delta_CT_miR361 - delta.ct.control)
# calculate fold change
df.target %<>%
mutate(fold_change_miR361 = 2 ^ (-delta_delta_CT_miR361))
# combine into all data
df.ptripz <- bind_rows(df.ptripz, df.target)
}
}
}
df.taqman <- df.ptripz
rm(df.donor, df.day, df.target, df.ptripz)
# Plot #################################################################################################################
# my.theme <- theme(axis.title = element_text(size = 16),
# axis.text = element_text(size = 14),
# axis.text.x = element_text(size = 12),
# title = element_text(size = 18),
# legend.title = element_text(size = 16),
# legend.text = element_text(size = 14),
# strip.text = element_text(size = 16),
# plot.caption = element_text(size = 12))
# Open one multi-page PDF device; every plot printed below is appended to it
# as a new page until the device is closed.
pdf(paste0(dir.pdfs, "20210802_extended_mRNA.pdf"), height = 8, width = 8)
# Page: miRNA (TaqMan) fold change normalized to miR-361, one facet per target;
# dashed line at 1 marks "no change vs. control".
df.taqman %>%
ggplot(aes(y = fold_change_miR361, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85), lwd = 0.3) +
geom_point(position = position_dodge(width = 0.85)) +
facet_wrap(~Target, scales = "free_y") +
geom_hline(yintercept = 1, linetype = "dashed") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
#my.theme +
scale_color_manual(values = c("darkblue", "darkorange", "gray")) +
labs(y = "Fold Change",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days. Endogenous Control: miR-361",
title = "TaqMan: qPCR after HNP Lentivirus Transductions")
#ggsave(paste0(dir.pdfs, "D88_miRNA_fold_change.pdf"), height = 4, width = 7)
# Page: raw mean CT per TaqMan target, to sanity-check amplification levels.
df.taqman %>%
ggplot(aes(y = CT_mean, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85), lwd = 0.3) +
geom_point(position = position_dodge(width = 0.85)) +
facet_wrap(~Target) +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
#my.theme +
scale_color_manual(values = c("darkblue", "darkorange", "gray")) +
labs(y = "mean(CT)",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days.",
title = "TaqMan: qPCR after HNP Lentivirus Transductions")
#ggsave(paste0(dir.pdfs, "D88_miRNA_mean_ct.pdf"), height = 4, width = 7)
# Page: mRNA fold change normalized to ACTB, all targets (first plate).
df.data %>%
ggplot(aes(y = fold_change_ACTB, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85), lwd = 0.3) +
geom_point(position = position_dodge(width = 0.85), size = 0.6) +
facet_wrap(~Target, scales = "free_y") +
geom_hline(yintercept = 1, linetype = "dashed") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
scale_color_manual(values = c("darkblue", "darkorange")) +
labs(y = "Fold Change",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days. Endogenous Control: ACTB",
title = "qPCR after HNP Lentivirus Transductions")
# Page: same view for the second plate (df.data2).
df.data2 %>%
ggplot(aes(y = fold_change_ACTB, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85), lwd = 0.3) +
geom_point(position = position_dodge(width = 0.85), size = 0.6) +
facet_wrap(~Target, scales = "free_y") +
geom_hline(yintercept = 1, linetype = "dashed") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
scale_color_manual(values = c("darkblue", "darkorange")) +
labs(y = "Fold Change",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days. Endogenous Control: ACTB",
title = "qPCR after HNP Lentivirus Transductions")
# Page: control/reporter targets only (ACTB, EGFP, EIF4A2).
df.data %>%
filter(Target %in% c("ACTB", "EGFP", "EIF4A2")) %>%
ggplot(aes(y = fold_change_ACTB, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85)) +
geom_point(position = position_dodge(width = 0.85)) +
facet_wrap(~Target, scales = "free_y") +
geom_hline(yintercept = 1, linetype = "dashed") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
scale_color_manual(values = c("darkblue", "darkorange")) +
labs(y = "Fold Change",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days. Endogenous Control: ACTB",
title = "qPCR after HNP Lentivirus Transductions")
# Page: proliferation-related targets (HAUS*, Ki67, CCND1) in one row.
df.data %>%
filter(grepl("HAUS", Target) | grepl("Ki67", Target) | grepl("CCND1", Target)) %>%
ggplot(aes(y = fold_change_ACTB, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85)) +
geom_point(position = position_dodge(width = 0.85)) +
facet_wrap(~Target, scales = "free_y", nrow = 1) +
geom_hline(yintercept = 1, linetype = "dashed") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
scale_color_manual(values = c("darkblue", "darkorange")) +
labs(y = "Fold Change",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days. Endogenous Control: ACTB",
title = "qPCR after HNP Lentivirus Transductions")
#ggsave(paste0(dir.pdfs, "D88_fold_change_actb.pdf"), height = 5, width = 9)
# Same set of fold-change views as above, but normalized to EIF4A2 instead
# of ACTB (second endogenous control).
df.data %>%
ggplot(aes(y = fold_change_EIF4A2, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85), lwd = 0.3) +
geom_point(position = position_dodge(width = 0.85), size = 0.6) +
facet_wrap(~Target, scales = "free_y") +
geom_hline(yintercept = 1, linetype = "dashed") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
scale_color_manual(values = c("darkblue", "darkorange")) +
labs(y = "Fold Change",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days. Endogenous Control: EIF4A2",
title = "qPCR after HNP Lentivirus Transductions")
df.data2 %>%
ggplot(aes(y = fold_change_EIF4A2, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85), lwd = 0.3) +
geom_point(position = position_dodge(width = 0.85), size = 0.6) +
facet_wrap(~Target, scales = "free_y") +
geom_hline(yintercept = 1, linetype = "dashed") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
scale_color_manual(values = c("darkblue", "darkorange")) +
labs(y = "Fold Change",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days. Endogenous Control: EIF4A2",
title = "qPCR after HNP Lentivirus Transductions")
df.data %>%
filter(Target %in% c("ACTB", "EGFP", "EIF4A2")) %>%
ggplot(aes(y = fold_change_EIF4A2, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85)) +
geom_point(position = position_dodge(width = 0.85)) +
facet_wrap(~Target, scales = "free_y") +
geom_hline(yintercept = 1, linetype = "dashed") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
scale_color_manual(values = c("darkblue", "darkorange")) +
labs(y = "Fold Change",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days. Endogenous Control: EIF4A2",
title = "qPCR after HNP Lentivirus Transductions")
df.data %>%
filter(grepl("HAUS", Target) | grepl("Ki67", Target) | grepl("CCND1", Target)) %>%
ggplot(aes(y = fold_change_EIF4A2, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85)) +
geom_point(position = position_dodge(width = 0.85)) +
facet_wrap(~Target, scales = "free_y", nrow = 1) +
geom_hline(yintercept = 1, linetype = "dashed") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
scale_color_manual(values = c("darkblue", "darkorange")) +
labs(y = "Fold Change",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days. Endogenous Control: EIF4A2",
title = "qPCR after HNP Lentivirus Transductions")
#ggsave(paste0(dir.pdfs, "D88_fold_change_eif4a2_repeat.pdf"), height = 5, width = 9)
# Additional differentiation markers from the second plate; Target is
# releveled so facets appear in a fixed biological order.
df.data2 %>%
filter(grepl("Ki67", Target) | grepl("SOX2", Target) | grepl("DCX", Target) | grepl("TUJ1", Target)) %>%
mutate(Target = factor(Target, levels = c("ACTB", "EIF4A2", "Ki67", "SOX2", "DCX", "TUJ1", "PAX6", "TBR2"))) %>%
ggplot(aes(y = fold_change_EIF4A2, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85)) +
geom_point(position = position_dodge(width = 0.85)) +
facet_wrap(~Target, scales = "free_y", nrow = 1) +
geom_hline(yintercept = 1, linetype = "dashed") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
scale_color_manual(values = c("darkblue", "darkorange")) +
labs(y = "Fold Change",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days. Endogenous Control: EIF4A2",
title = "qPCR after HNP Lentivirus Transductions")
# ggsave writes the most recent plot to its own file (independent of the
# open pdf() device).
ggsave(paste0(dir.pdfs, "D88_fold_change_eif4a2_additional.pdf"), height = 5, width = 9)
# Raw mean-CT diagnostics for both plates.
df.data %>%
#filter(Target %in% c("ACTB", "EGFP", "EIF4A2")) %>%
ggplot(aes(y = CT_mean, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85), lwd = 0.3) +
geom_point(position = position_dodge(width = 0.85)) +
#geom_errorbar(aes(ymin = CT_mean - CT_sd, ymax = CT_mean + CT_sd), position = position_dodge(width = 0.85), width = 0.3, lwd = 0.3) +
facet_wrap(~Target) +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
scale_color_manual(values = c("darkblue", "darkorange")) +
labs(y = "mean(CT)",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days.",
title = "qPCR after HNP Lentivirus Transductions")
df.data %>%
#filter(grepl("HAUS", Target) | grepl("Ki67", Target)) %>%
ggplot(aes(y = CT_mean, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85), lwd = 0.3, outlier.shape = NA) +
geom_point(position = position_dodge(width = 0.85), size = 1) +
#geom_errorbar(aes(ymin = CT_mean - CT_sd, ymax = CT_mean + CT_sd), position = position_dodge(width = 0.85), width = 0.3, lwd = 0.3) +
facet_wrap(~Target, scales = "free_y") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
scale_color_manual(values = c("darkblue", "darkorange")) +
labs(y = "mean(CT)",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days.",
title = "qPCR after HNP Lentivirus Transductions")
df.data2 %>%
#filter(grepl("HAUS", Target) | grepl("Ki67", Target)) %>%
ggplot(aes(y = CT_mean, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85), lwd = 0.3, outlier.shape = NA) +
geom_point(position = position_dodge(width = 0.85), size = 1) +
#geom_errorbar(aes(ymin = CT_mean - CT_sd, ymax = CT_mean + CT_sd), position = position_dodge(width = 0.85), width = 0.3, lwd = 0.3) +
facet_wrap(~Target, scales = "free_y") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
scale_color_manual(values = c("darkblue", "darkorange")) +
labs(y = "mean(CT)",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days.",
title = "qPCR after HNP Lentivirus Transductions")
#ggsave(paste0(dir.pdfs, "D88_ct_mean_repeat.pdf"), height = 8, width = 8)
# NOTE(review): dev.off() is commented out, so the pdf() device opened at
# the start of the Plot section is never closed — the multi-page PDF on
# disk is left open/unfinished and later plots keep rendering into it.
# Confirm whether this is intentional.
#dev.off()
# Presentation ####################
library(ggpubr)
# Combine the plate-1 proliferation targets with plate 2 into one frame.
df.data %>%
filter(Target == "HAUS4_1" | Target == "CCND1") %>%
bind_rows(df.data2) -> df.data3
# Append the TaqMan miRNA results; fold_change_miR361 is renamed to
# fold_change_EIF4A2 only so both assays share a single y-variable for the
# combined faceted plot (it is still miR-361-normalized data).
df.taqman %>%
select(Sample, Donor, Day, Expression, Replicate, Target, CT_mean, CT_sd,
fold_change_EIF4A2 = fold_change_miR361) %>%
bind_rows(df.data3) -> df.data4
# Fixed facet order for the presentation figure.
df.data4 %<>%
mutate(Target = factor(Target, levels = c("ACTB", "EIF4A2", "miR-361-5p", "miR-4707-3p-C", "Ki67", "CCND1", "HAUS4_1", "PAX6", "SOX2", "DCX", "TBR2", "TUJ1")))
# Comparison pairs for stat_compare_means (currently commented out below).
my_comparisons <- list(c("pTRIPZ-Control","pTRIPZ-4707-C"))
# Draft of the time-course figure. paperBlue/paperRed and plotTheme() are
# presumably provided by the mikelaffr package loaded at the top — verify.
df.data4 %>%
filter(grepl("Ki67", Target) | grepl("SOX2", Target) | grepl("DCX", Target) | grepl("TUJ1", Target) | grepl("miR-4707-3p-C", Target) | grepl("PAX6", Target) | grepl("HAUS4_1", Target) | grepl("CCND1", Target)) %>%
#filter(Day == "Day8") %>%
ggplot(aes(y = fold_change_EIF4A2, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85)) +
geom_point(position = position_dodge(width = 0.85)) +
facet_wrap(~Target, nrow = 1) +
geom_hline(yintercept = 1, linetype = "dashed") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
scale_color_manual(values = c(paperBlue, paperRed)) +
labs(y = "Fold Change",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days. Endogenous Control: EIF4A2",
title = "qPCR after HNP Lentivirus Transductions")
#stat_compare_means(label.x=2, label.y=.25, size = 3, method = "t.test", label = "p.format")
# Final paper-styled version of the same figure, saved for figure 6.
df.data4 %>%
filter(grepl("Ki67", Target) | grepl("SOX2", Target) | grepl("DCX", Target) | grepl("TUJ1", Target) | grepl("miR-4707-3p-C", Target) | grepl("PAX6", Target) | grepl("HAUS4_1", Target) | grepl("CCND1", Target)) %>%
#filter(Day == "Day8") %>%
ggplot(aes(y = fold_change_EIF4A2, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85), outlier.shape = NA, size = 0.3) +
geom_point(position = position_dodge(width = 0.85), size = 0.3) +
facet_wrap(~Target, nrow = 1) +
geom_hline(yintercept = 1, linetype = "dashed") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1), legend.position = "bottom") +
plotTheme("figure") +
scale_color_manual(values = c(paperBlue, paperRed)) +
labs(y = "Fold Change",
x = "Day")
# dir.pdfs is re-pointed at the paper figure directory from here on.
dir.pdfs <- here("doc/paper/figure6/pdfs/")
ggsave(paste0(dir.pdfs, "D88_fold_change_time-course.pdf"), height = 2.2, width = 6.1)
# Scratch ###########################
# nanodrop values
#df.nano <- read_xlsx(here("results/qpcr/20210611_RNA_extractions.xlsx"), sheet = 1)
| /doc/qpcr/20210730_HNP_mRNA_miRNA_taqman_plot_qpcr_data.R | no_license | mikelaff/mirna-eqtl-manuscript | R | false | false | 29,128 | r |
library(here)
library(readr)
library(dplyr)
library(magrittr)
library(ggplot2)
library(readxl)
library(mikelaffr)
# OUTPUT ###############################################################################################################
# Directory receiving all PDFs produced below; created up front so later
# pdf()/ggsave() calls cannot fail on a missing path.
dir.pdfs <- here("doc/qpcr/pdfs/")
dir.create(dir.pdfs, recursive = TRUE, showWarnings = FALSE)
# INPUT ################################################################################################################
# mRNA qPCR data from 13 June 2021
mRNA.data.xlsx <- here("results/qpcr/20210702_HNP_mRNA_repeat.xlsx")
# miRNA qPCR TaqMan data from 13 June 2021
miRNA.taqman.data.xlsx <- here("results/qpcr/20210613_HNP_miRNA_TaqMan.xlsx")
# additional mRNA qPCR data from 30 July 2021
mRNA.data2.xlsx <- here("results/qpcr/20210730_HNP_mRNA.xlsx")
# GLOBALS ##############################################################################################################
# Import mRNA Data #####################################################################################################
# Read the results block of the Excel export; "Undetermined" wells (no
# amplification) become NA.
df.data <- read_xlsx(mRNA.data.xlsx, sheet = 3, range = "A46:O334", na = c("", "Undetermined"))
df.data %<>%
select(Sample = `Sample Name`,
Target = `Target Name`,
Well = `Well Position`,
CT)
# Sample names encode Donor_Day_Expression_Replicate; split them out.
df.data %<>%
mutate(Donor = sapply(strsplit(Sample, "_"), `[`, 1),
Day = sapply(strsplit(Sample, "_"), `[`, 2),
Expression = sapply(strsplit(Sample, "_"), `[`, 3),
Replicate = sapply(strsplit(Sample, "_"), `[`, 4))
df.data %<>%
select(Sample,
Donor,
Day,
Expression,
Replicate,
Well,
Target,
CT)
# Ordered factor so pTRIPZ-Control always plots/sorts before pTRIPZ-4707-C.
df.data$Expression <- factor(df.data$Expression,
levels = c("Control", "4707"),
labels = c("pTRIPZ-Control", "pTRIPZ-4707-C"),
ordered = TRUE)
df.data$Day <- factor(df.data$Day)
df.data$Donor <- factor(df.data$Donor)
df.data$Replicate <- factor(df.data$Replicate)
df.data %<>%
mutate(Name = paste(Donor, Expression, Day, Replicate))
# Calculate mean CT values across duplicates, only retain one row per sample/target pair
# (note: no ungroup() — df.data remains grouped by Sample/Target after this).
df.data %<>%
group_by(Sample, Target) %>%
mutate(CT_mean = mean(CT),
CT_sd = sd(CT)) %>%
select(-CT, -Well) %>%
distinct()
# Filter out outlier samples (determined after first pass analysis of data)
# df.data %<>%
# filter(!Sample == "D54_4707-A_3",
# !Sample == "D88_4707-C_2",
# !Sample == "D54_Control_1",
# !is.na(CT_mean))
# Delta CT: normalize every target's mean CT to the ACTB mean CT of the
# same sample (endogenous control). Replaces a per-sample filter()/
# bind_rows() loop — which grew df.new on every iteration and re-ordered
# rows by sample — with a single grouped mutate() that keeps row order.
# Samples without an ACTB row get NA, as match() did in the loop version.
df.data %<>%
  group_by(Sample) %>%
  mutate(delta_CT_ACTB = CT_mean - CT_mean[match("ACTB", Target)]) %>%
  ungroup()

# Delta delta CT: within each donor/day/target, subtract the mean
# pTRIPZ-Control delta CT, then convert to fold change (2^-ddCT).
# Same values as the original donor/day/target triple loop without the
# quadratic bind_rows() growth.
df.data %<>%
  group_by(Donor, Day, Target) %>%
  mutate(delta_delta_CT_ACTB = delta_CT_ACTB -
           mean(delta_CT_ACTB[Expression == "pTRIPZ-Control"])) %>%
  ungroup() %>%
  mutate(fold_change_ACTB = 2 ^ (-delta_delta_CT_ACTB))
# Repeat for EIF4A2 as the endogenous control.
# Delta CT vs. the EIF4A2 mean CT of the same sample; grouped mutate()
# replaces the per-sample filter()/bind_rows() loop.
df.data %<>%
  group_by(Sample) %>%
  mutate(delta_CT_EIF4A2 = CT_mean - CT_mean[match("EIF4A2", Target)]) %>%
  ungroup()

# Delta delta CT vs. the mean pTRIPZ-Control delta CT of the same
# donor/day/target, then fold change (2^-ddCT).
df.data %<>%
  group_by(Donor, Day, Target) %>%
  mutate(delta_delta_CT_EIF4A2 = delta_CT_EIF4A2 -
           mean(delta_CT_EIF4A2[Expression == "pTRIPZ-Control"])) %>%
  ungroup() %>%
  mutate(fold_change_EIF4A2 = 2 ^ (-delta_delta_CT_EIF4A2))
# Import mRNA Data 2 ###################################################################################################
# Second plate (30 July 2021); processed identically to the first.
df.data2 <- read_xlsx(mRNA.data2.xlsx, sheet = 3, range = "A46:O334", na = c("", "Undetermined"))
df.data2 %<>%
select(Sample = `Sample Name`,
Target = `Target Name`,
Well = `Well Position`,
CT)
# Sample names encode Donor_Day_Expression_Replicate; split them out.
df.data2 %<>%
mutate(Donor = sapply(strsplit(Sample, "_"), `[`, 1),
Day = sapply(strsplit(Sample, "_"), `[`, 2),
Expression = sapply(strsplit(Sample, "_"), `[`, 3),
Replicate = sapply(strsplit(Sample, "_"), `[`, 4))
df.data2 %<>%
select(Sample,
Donor,
Day,
Expression,
Replicate,
Well,
Target,
CT)
df.data2$Expression <- factor(df.data2$Expression,
levels = c("Control", "4707"),
labels = c("pTRIPZ-Control", "pTRIPZ-4707-C"),
ordered = TRUE)
df.data2$Day <- factor(df.data2$Day)
df.data2$Donor <- factor(df.data2$Donor)
df.data2$Replicate <- factor(df.data2$Replicate)
df.data2 %<>%
mutate(Name = paste(Donor, Expression, Day, Replicate))
# Calculate mean CT values across duplicates, only retain one row per sample/target pair
df.data2 %<>%
group_by(Sample, Target) %>%
mutate(CT_mean = mean(CT),
CT_sd = sd(CT)) %>%
select(-CT, -Well) %>%
distinct()
# Delta CT for the second plate: normalize every target's mean CT to the
# ACTB mean CT of the same sample. Grouped mutate() replaces the
# per-sample filter()/bind_rows() loop (growing a tibble in a loop).
df.data2 %<>%
  group_by(Sample) %>%
  mutate(delta_CT_ACTB = CT_mean - CT_mean[match("ACTB", Target)]) %>%
  ungroup()

# Delta delta CT vs. the mean pTRIPZ-Control delta CT of the same
# donor/day/target, then fold change (2^-ddCT).
df.data2 %<>%
  group_by(Donor, Day, Target) %>%
  mutate(delta_delta_CT_ACTB = delta_CT_ACTB -
           mean(delta_CT_ACTB[Expression == "pTRIPZ-Control"])) %>%
  ungroup() %>%
  mutate(fold_change_ACTB = 2 ^ (-delta_delta_CT_ACTB))
# Repeat for EIF4A2 on the second plate (df.data2).
# BUG FIX: the original loop iterated over unique(df.data$Sample) — the
# sample list of the *first* plate — while filtering df.data2. Any
# df.data2 sample absent from df.data was silently dropped from the
# result. Operating on df.data2 directly with a grouped mutate() removes
# the mismatch (and the bind_rows()-in-a-loop growth pattern).
df.data2 %<>%
  group_by(Sample) %>%
  mutate(delta_CT_EIF4A2 = CT_mean - CT_mean[match("EIF4A2", Target)]) %>%
  ungroup()

# Delta delta CT vs. the mean pTRIPZ-Control delta CT of the same
# donor/day/target, then fold change (2^-ddCT).
df.data2 %<>%
  group_by(Donor, Day, Target) %>%
  mutate(delta_delta_CT_EIF4A2 = delta_CT_EIF4A2 -
           mean(delta_CT_EIF4A2[Expression == "pTRIPZ-Control"])) %>%
  ungroup() %>%
  mutate(fold_change_EIF4A2 = 2 ^ (-delta_delta_CT_EIF4A2))
# Import TaqMan miRNA Data #############################################################################################
# TaqMan miRNA plate (13 June 2021); same wrangling as the mRNA plates.
df.taqman <- read_xlsx(miRNA.taqman.data.xlsx, sheet = 3, range = "A45:O117", na = c("", "Undetermined"))
df.taqman %<>%
select(Sample = `Sample Name`,
Target = `Target Name`,
Well = `Well Position`,
CT)
# Sample names encode Donor_Day_Expression_Replicate; split them out.
df.taqman %<>%
mutate(Donor = sapply(strsplit(Sample, "_"), `[`, 1),
Day = sapply(strsplit(Sample, "_"), `[`, 2),
Expression = sapply(strsplit(Sample, "_"), `[`, 3),
Replicate = sapply(strsplit(Sample, "_"), `[`, 4))
df.taqman %<>%
select(Sample,
Donor,
Day,
Expression,
Replicate,
Well,
Target,
CT)
df.taqman$Expression <- factor(df.taqman$Expression,
levels = c("Control", "4707"),
labels = c("pTRIPZ-Control", "pTRIPZ-4707-C"),
ordered = TRUE)
df.taqman$Day <- factor(df.taqman$Day)
df.taqman$Donor <- factor(df.taqman$Donor)
df.taqman$Replicate <- factor(df.taqman$Replicate)
df.taqman %<>%
mutate(Name = paste(Donor, Expression, Day, Replicate))
# Calculate mean CT values across duplicates, only retain one row per sample/target pair
df.taqman %<>%
group_by(Sample, Target) %>%
mutate(CT_mean = mean(CT),
CT_sd = sd(CT)) %>%
select(-CT, -Well) %>%
distinct()
# Delta CT: normalize each TaqMan target's mean CT to the miR-361-5p mean
# CT of the same sample (endogenous small-RNA control). Grouped mutate()
# replaces the per-sample filter()/bind_rows() loop.
df.taqman %<>%
  group_by(Sample) %>%
  mutate(delta_CT_miR361 = CT_mean - CT_mean[match("miR-361-5p", Target)]) %>%
  ungroup()

# pTRIPZ
# Delta delta CT vs. the mean pTRIPZ-Control delta CT of the same
# donor/day/target, then fold change (2^-ddCT). Replaces the
# donor/day/target triple loop that grew df.ptripz with bind_rows() on
# every iteration; values are identical, row order is preserved.
df.taqman %<>%
  group_by(Donor, Day, Target) %>%
  mutate(delta_delta_CT_miR361 = delta_CT_miR361 -
           mean(delta_CT_miR361[Expression == "pTRIPZ-Control"])) %>%
  ungroup() %>%
  mutate(fold_change_miR361 = 2 ^ (-delta_delta_CT_miR361))
# Plot #################################################################################################################
# my.theme <- theme(axis.title = element_text(size = 16),
# axis.text = element_text(size = 14),
# axis.text.x = element_text(size = 12),
# title = element_text(size = 18),
# legend.title = element_text(size = 16),
# legend.text = element_text(size = 14),
# strip.text = element_text(size = 16),
# plot.caption = element_text(size = 12))
# Open one multi-page PDF device; every plot printed below is appended to it
# as a new page until the device is closed.
pdf(paste0(dir.pdfs, "20210802_extended_mRNA.pdf"), height = 8, width = 8)
# Page: miRNA (TaqMan) fold change normalized to miR-361, one facet per target;
# dashed line at 1 marks "no change vs. control".
df.taqman %>%
ggplot(aes(y = fold_change_miR361, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85), lwd = 0.3) +
geom_point(position = position_dodge(width = 0.85)) +
facet_wrap(~Target, scales = "free_y") +
geom_hline(yintercept = 1, linetype = "dashed") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
#my.theme +
scale_color_manual(values = c("darkblue", "darkorange", "gray")) +
labs(y = "Fold Change",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days. Endogenous Control: miR-361",
title = "TaqMan: qPCR after HNP Lentivirus Transductions")
#ggsave(paste0(dir.pdfs, "D88_miRNA_fold_change.pdf"), height = 4, width = 7)
# Page: raw mean CT per TaqMan target, to sanity-check amplification levels.
df.taqman %>%
ggplot(aes(y = CT_mean, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85), lwd = 0.3) +
geom_point(position = position_dodge(width = 0.85)) +
facet_wrap(~Target) +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
#my.theme +
scale_color_manual(values = c("darkblue", "darkorange", "gray")) +
labs(y = "mean(CT)",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days.",
title = "TaqMan: qPCR after HNP Lentivirus Transductions")
#ggsave(paste0(dir.pdfs, "D88_miRNA_mean_ct.pdf"), height = 4, width = 7)
# Page: mRNA fold change normalized to ACTB, all targets (first plate).
df.data %>%
ggplot(aes(y = fold_change_ACTB, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85), lwd = 0.3) +
geom_point(position = position_dodge(width = 0.85), size = 0.6) +
facet_wrap(~Target, scales = "free_y") +
geom_hline(yintercept = 1, linetype = "dashed") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
scale_color_manual(values = c("darkblue", "darkorange")) +
labs(y = "Fold Change",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days. Endogenous Control: ACTB",
title = "qPCR after HNP Lentivirus Transductions")
# Page: same view for the second plate (df.data2).
df.data2 %>%
ggplot(aes(y = fold_change_ACTB, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85), lwd = 0.3) +
geom_point(position = position_dodge(width = 0.85), size = 0.6) +
facet_wrap(~Target, scales = "free_y") +
geom_hline(yintercept = 1, linetype = "dashed") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
scale_color_manual(values = c("darkblue", "darkorange")) +
labs(y = "Fold Change",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days. Endogenous Control: ACTB",
title = "qPCR after HNP Lentivirus Transductions")
# Page: control/reporter targets only (ACTB, EGFP, EIF4A2).
df.data %>%
filter(Target %in% c("ACTB", "EGFP", "EIF4A2")) %>%
ggplot(aes(y = fold_change_ACTB, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85)) +
geom_point(position = position_dodge(width = 0.85)) +
facet_wrap(~Target, scales = "free_y") +
geom_hline(yintercept = 1, linetype = "dashed") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
scale_color_manual(values = c("darkblue", "darkorange")) +
labs(y = "Fold Change",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days. Endogenous Control: ACTB",
title = "qPCR after HNP Lentivirus Transductions")
# Page: proliferation-related targets (HAUS*, Ki67, CCND1) in one row.
df.data %>%
filter(grepl("HAUS", Target) | grepl("Ki67", Target) | grepl("CCND1", Target)) %>%
ggplot(aes(y = fold_change_ACTB, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85)) +
geom_point(position = position_dodge(width = 0.85)) +
facet_wrap(~Target, scales = "free_y", nrow = 1) +
geom_hline(yintercept = 1, linetype = "dashed") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
scale_color_manual(values = c("darkblue", "darkorange")) +
labs(y = "Fold Change",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days. Endogenous Control: ACTB",
title = "qPCR after HNP Lentivirus Transductions")
#ggsave(paste0(dir.pdfs, "D88_fold_change_actb.pdf"), height = 5, width = 9)
# Same set of fold-change views as above, but normalized to EIF4A2 instead
# of ACTB (second endogenous control).
df.data %>%
ggplot(aes(y = fold_change_EIF4A2, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85), lwd = 0.3) +
geom_point(position = position_dodge(width = 0.85), size = 0.6) +
facet_wrap(~Target, scales = "free_y") +
geom_hline(yintercept = 1, linetype = "dashed") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
scale_color_manual(values = c("darkblue", "darkorange")) +
labs(y = "Fold Change",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days. Endogenous Control: EIF4A2",
title = "qPCR after HNP Lentivirus Transductions")
df.data2 %>%
ggplot(aes(y = fold_change_EIF4A2, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85), lwd = 0.3) +
geom_point(position = position_dodge(width = 0.85), size = 0.6) +
facet_wrap(~Target, scales = "free_y") +
geom_hline(yintercept = 1, linetype = "dashed") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
scale_color_manual(values = c("darkblue", "darkorange")) +
labs(y = "Fold Change",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days. Endogenous Control: EIF4A2",
title = "qPCR after HNP Lentivirus Transductions")
df.data %>%
filter(Target %in% c("ACTB", "EGFP", "EIF4A2")) %>%
ggplot(aes(y = fold_change_EIF4A2, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85)) +
geom_point(position = position_dodge(width = 0.85)) +
facet_wrap(~Target, scales = "free_y") +
geom_hline(yintercept = 1, linetype = "dashed") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
scale_color_manual(values = c("darkblue", "darkorange")) +
labs(y = "Fold Change",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days. Endogenous Control: EIF4A2",
title = "qPCR after HNP Lentivirus Transductions")
df.data %>%
filter(grepl("HAUS", Target) | grepl("Ki67", Target) | grepl("CCND1", Target)) %>%
ggplot(aes(y = fold_change_EIF4A2, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85)) +
geom_point(position = position_dodge(width = 0.85)) +
facet_wrap(~Target, scales = "free_y", nrow = 1) +
geom_hline(yintercept = 1, linetype = "dashed") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
scale_color_manual(values = c("darkblue", "darkorange")) +
labs(y = "Fold Change",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days. Endogenous Control: EIF4A2",
title = "qPCR after HNP Lentivirus Transductions")
#ggsave(paste0(dir.pdfs, "D88_fold_change_eif4a2_repeat.pdf"), height = 5, width = 9)
# Additional differentiation markers from the second plate; Target is
# releveled so facets appear in a fixed biological order.
df.data2 %>%
filter(grepl("Ki67", Target) | grepl("SOX2", Target) | grepl("DCX", Target) | grepl("TUJ1", Target)) %>%
mutate(Target = factor(Target, levels = c("ACTB", "EIF4A2", "Ki67", "SOX2", "DCX", "TUJ1", "PAX6", "TBR2"))) %>%
ggplot(aes(y = fold_change_EIF4A2, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85)) +
geom_point(position = position_dodge(width = 0.85)) +
facet_wrap(~Target, scales = "free_y", nrow = 1) +
geom_hline(yintercept = 1, linetype = "dashed") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
scale_color_manual(values = c("darkblue", "darkorange")) +
labs(y = "Fold Change",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days. Endogenous Control: EIF4A2",
title = "qPCR after HNP Lentivirus Transductions")
# ggsave writes the most recent plot to its own file (independent of the
# open pdf() device).
ggsave(paste0(dir.pdfs, "D88_fold_change_eif4a2_additional.pdf"), height = 5, width = 9)
# Raw mean CT per target and day for df.data, all facets on a shared y
# scale. The commented-out filter and error-bar layers are exploratory
# toggles kept for interactive use.
df.data %>%
#filter(Target %in% c("ACTB", "EGFP", "EIF4A2")) %>%
ggplot(aes(y = CT_mean, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85), lwd = 0.3) +
geom_point(position = position_dodge(width = 0.85)) +
#geom_errorbar(aes(ymin = CT_mean - CT_sd, ymax = CT_mean + CT_sd), position = position_dodge(width = 0.85), width = 0.3, lwd = 0.3) +
facet_wrap(~Target) +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
scale_color_manual(values = c("darkblue", "darkorange")) +
labs(y = "mean(CT)",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days.",
title = "qPCR after HNP Lentivirus Transductions")
# Same CT overview for df.data but with a free y scale per facet, smaller
# points, and boxplot outliers suppressed (every point is drawn anyway by
# the geom_point layer).
df.data %>%
#filter(grepl("HAUS", Target) | grepl("Ki67", Target)) %>%
ggplot(aes(y = CT_mean, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85), lwd = 0.3, outlier.shape = NA) +
geom_point(position = position_dodge(width = 0.85), size = 1) +
#geom_errorbar(aes(ymin = CT_mean - CT_sd, ymax = CT_mean + CT_sd), position = position_dodge(width = 0.85), width = 0.3, lwd = 0.3) +
facet_wrap(~Target, scales = "free_y") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
scale_color_manual(values = c("darkblue", "darkorange")) +
labs(y = "mean(CT)",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days.",
title = "qPCR after HNP Lentivirus Transductions")
# CT overview for the additional-target run (df.data2), free y scale per
# facet. The trailing commented-out ggsave/dev.off calls are kept as
# manual toggles for interactive export.
df.data2 %>%
#filter(grepl("HAUS", Target) | grepl("Ki67", Target)) %>%
ggplot(aes(y = CT_mean, x = Day, color = Expression)) +
geom_boxplot(position = position_dodge(width = 0.85), lwd = 0.3, outlier.shape = NA) +
geom_point(position = position_dodge(width = 0.85), size = 1) +
#geom_errorbar(aes(ymin = CT_mean - CT_sd, ymax = CT_mean + CT_sd), position = position_dodge(width = 0.85), width = 0.3, lwd = 0.3) +
facet_wrap(~Target, scales = "free_y") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
scale_color_manual(values = c("darkblue", "darkorange")) +
labs(y = "mean(CT)",
x = "Day",
caption = "Donor 88. HNP expression after 4-8 days.",
title = "qPCR after HNP Lentivirus Transductions")
#ggsave(paste0(dir.pdfs, "D88_ct_mean_repeat.pdf"), height = 8, width = 8)
#dev.off()
# Presentation ####################
library(ggpubr)
# NOTE: the original used `->` right-assignment and the magrittr compound
# pipe `%<>%`. `%<>%` is exported by magrittr but NOT re-exported by
# dplyr/ggpubr, so it fails unless library(magrittr) was attached
# explicitly; rewritten with plain `<-` assignments (same result).
#
# Combine the two qPCR runs: keep only the HAUS4_1 and CCND1 targets from
# the repeat run and append the additional-target run.
df.data3 <- df.data %>%
  filter(Target == "HAUS4_1" | Target == "CCND1") %>%
  bind_rows(df.data2)
# Add the TaqMan miRNA measurements, renaming their fold-change column
# (normalized to miR-361) to match the mRNA fold-change column name.
df.data4 <- df.taqman %>%
  select(Sample, Donor, Day, Expression, Replicate, Target, CT_mean, CT_sd,
         fold_change_EIF4A2 = fold_change_miR361) %>%
  bind_rows(df.data3)
# Fix the facet/panel order of the combined targets.
df.data4 <- df.data4 %>%
  mutate(Target = factor(Target, levels = c("ACTB", "EIF4A2", "miR-361-5p", "miR-4707-3p-C", "Ki67", "CCND1", "HAUS4_1", "PAX6", "SOX2", "DCX", "TBR2", "TUJ1")))
# Group pair for the (currently commented-out) stat_compare_means call.
my_comparisons <- list(c("pTRIPZ-Control", "pTRIPZ-4707-C"))
# Combined fold-change panel (mRNA + miRNA targets) over the time course;
# facets share one y scale here so panels are directly comparable.
df.data4 %>%
  filter(grepl("Ki67|SOX2|DCX|TUJ1|miR-4707-3p-C|PAX6|HAUS4_1|CCND1", Target)) %>%
  #filter(Day == "Day8") %>%
  ggplot(aes(x = Day, y = fold_change_EIF4A2, color = Expression)) +
  geom_boxplot(position = position_dodge(width = 0.85)) +
  geom_point(position = position_dodge(width = 0.85)) +
  facet_wrap(~Target, nrow = 1) +
  geom_hline(yintercept = 1, linetype = "dashed") +
  theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
  scale_color_manual(values = c(paperBlue, paperRed)) +
  labs(title = "qPCR after HNP Lentivirus Transductions",
       x = "Day",
       y = "Fold Change",
       caption = "Donor 88. HNP expression after 4-8 days. Endogenous Control: EIF4A2")
#stat_compare_means(label.x=2, label.y=.25, size = 3, method = "t.test", label = "p.format")
# Figure version of the combined panel: small points, outliers hidden,
# figure plot theme, legend below; saved into the figure 6 pdf directory.
df.data4 %>%
  filter(grepl("Ki67|SOX2|DCX|TUJ1|miR-4707-3p-C|PAX6|HAUS4_1|CCND1", Target)) %>%
  #filter(Day == "Day8") %>%
  ggplot(aes(x = Day, y = fold_change_EIF4A2, color = Expression)) +
  geom_boxplot(position = position_dodge(width = 0.85), outlier.shape = NA, size = 0.3) +
  geom_point(position = position_dodge(width = 0.85), size = 0.3) +
  facet_wrap(~Target, nrow = 1) +
  geom_hline(yintercept = 1, linetype = "dashed") +
  theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1), legend.position = "bottom") +
  plotTheme("figure") +
  scale_color_manual(values = c(paperBlue, paperRed)) +
  labs(x = "Day",
       y = "Fold Change")
dir.pdfs <- here("doc/paper/figure6/pdfs/")
ggsave(paste0(dir.pdfs, "D88_fold_change_time-course.pdf"), height = 2.2, width = 6.1)
# Scratch ###########################
# nanodrop values
#df.nano <- read_xlsx(here("results/qpcr/20210611_RNA_extractions.xlsx"), sheet = 1)
|
# This function returns a list composed of a matrix of NAs called Inverse and functions that
# get:() returns the content of the matrix x submitted as argument of makeCacheMatrix
# (set() of the original piece of code was not included because it was useless)
# set_inverse() assigns the inverse of the matrix to the Inverse matrix using the special operator <<-
# because Inverse is outside the environment of the function
# get_inverse() returns the matrix Inverse
# Build a caching wrapper around matrix `x`.
#
# The cache starts as an all-NA placeholder the same size as `x`; a
# still-NA placeholder is how cacheSolve() recognises that no inverse has
# been computed yet.
#
# Returns a list of accessors:
#   get()             - the original matrix
#   set_inverse(Invs) - store a computed inverse (written with `<<-`
#                       because the cache lives in the enclosing
#                       environment, not the accessor's own frame)
#   get_inverse()     - the cached inverse (all NA until set)
makeCacheMatrix <- function(x) {
  Inverse <- matrix(NA, nrow = nrow(x), ncol = ncol(x))
  list(
    get = function() x,
    set_inverse = function(Invs) Inverse <<- Invs,
    get_inverse = function() Inverse
  )
}
# This function returns the inverse of a matrix which was previously submitted as argument to the makeCacheMatrix function.
# The argument for this function is a list returned by makeCacheMatrix().
# The function first retrieves the matrix called Inverse from the list returned with makeCacheMatrix() using the function get_inverse of that list
# It then checks if the first element is NA and if not prints 'Getting inverse of matrix' and returns the matrix (and the following code is not executed)
# In case the first element of the matrix is actually an NA (meaning that the inverse of the matrix has not yet been computed), the function retrieves
# the content of the matrix originally submitted to makeCacheMatrix() and assigns it to object M. It then assigns the inverse of M to Inverse.
# It then assigns the object Inverse (in the scope of the function) to the Inverse object of the list submitted as argument, and finally returns the Inverse object.
# Return the inverse of the matrix wrapped by makeCacheMatrix(), computing
# it only on the first call and serving the cached copy afterwards.
# `x` is the accessor list returned by makeCacheMatrix().
cacheSolve <- function(x) {
# Whatever is currently cached: all NA until the first computation.
Inverse <- x$get_inverse()
if(!is.na(Inverse[1,1])) {
# [1,1] is non-NA, so an inverse was stored earlier -- cache hit.
# (Relies on the placeholder being all NA and stored inverses not being NA.)
print("Getting inverse of matrix")
return(Inverse)
}
# Cache miss: fetch the original matrix, invert it, store, and return.
M <- x$get()
Inverse <- solve(M)
x$set_inverse(Inverse)
Inverse
} | /cachematrix.R | no_license | ericmayor/ProgrammingAssignment2 | R | false | false | 1,919 | r | # This function returns a list composed of a matrix of NAs called Inverse and functions that
# get:() returns the content of the matrix x submitted as argument of makeCacheMatrix
# (set() of the original piece of code was not included because it was useless)
# set_inverse() assigns the inverse of the matrix to the Inverse matrix using the special operator <<-
# because Inverse is outside the environment of the function
# get_inverse() returns the matrix Inverse
# Build a caching wrapper around matrix `x`.
#
# Improvement: the input is validated up front, so a non-matrix or
# non-square argument fails here with a clear error instead of later,
# inside cacheSolve(), when solve() is finally called.
#
# @param x a square matrix (assumed invertible by the caller)
# @return a list of accessors:
#   get()             - the original matrix
#   set_inverse(Invs) - store a computed inverse; `<<-` writes to the
#                       cache in the enclosing environment
#   get_inverse()     - the cached inverse (all-NA placeholder until set)
makeCacheMatrix <- function(x) {
  stopifnot(is.matrix(x), nrow(x) == ncol(x))
  # All-NA placeholder: cacheSolve() treats an NA at [1, 1] as "not computed".
  Inverse <- matrix(NA, nrow = nrow(x), ncol = ncol(x))
  get <- function() x
  set_inverse <- function(Invs) {
    Inverse <<- Invs
  }
  get_inverse <- function() {
    Inverse
  }
  list(get = get, set_inverse = set_inverse,
       get_inverse = get_inverse)
}
# This function returns the inverse of a matrix which was previously submitted as argument to the makeCacheMatrix function.
# The argument for this function is a list returned by makeCacheMatrix().
# The function first retrieves the matrix called Inverse from the list returned with makeCacheMatrix() using the function get_inverse of that list
# It then checks if the first element is NA and if not prints 'Getting inverse of matrix' and returns the matrix (and the following code is not executed)
# In case the first element of the matrix is actually an NA (meaning that the inverse of the matrix has not yet been computed), the function retrieves
# the content of the matrix originally submitted to makeCacheMatrix() and assigns it to object M. It then assigns the inverse of M to Inverse.
# It then assigns the object Inverse (in the scope of the function) to the Inverse object of the list submitted as argument, and finally returns the Inverse object.
# Return the inverse of the matrix wrapped by makeCacheMatrix(), computing
# it at most once. `x` is the accessor list produced by makeCacheMatrix().
cacheSolve <- function(x) {
  cached <- x$get_inverse()
  if (is.na(cached[1, 1])) {
    # Nothing cached yet (placeholder is all NA): invert the original
    # matrix, remember the result, and hand it back.
    result <- solve(x$get())
    x$set_inverse(result)
    return(result)
  }
  # Cache hit -- announce it and reuse the stored inverse.
  print("Getting inverse of matrix")
  cached
}
# Load the example CDISC ADaM data shipped with clinUtils and extract the
# adverse-event (ADAE) and subject-level (ADSL) domains plus variable labels.
library(clinUtils)
data(dataADaMCDISCP01)
labelVars <- attr(dataADaMCDISCP01, "labelVars")
dataAE <- dataADaMCDISCP01$ADAE
dataDM <- dataADaMCDISCP01$ADSL
## example of basic sunburst:
# sunburst takes as input table with counts
library(inTextSummaryTable)
# total counts: Safety Analysis Set (patients with start date for the first treatment)
dataTotal <- subset(dataDM, RFSTDTC != "")
# compute adverse event table
# Counts are grouped hierarchically by AESOC then AEDECOD, ordered by total.
tableAE <- getSummaryStatisticsTable(
data = dataAE,
rowVar = c("AESOC", "AEDECOD"),
dataTotal = dataTotal,
rowOrder = "total",
labelVars = labelVars,
stats = getStats("count"),
# plotly treemap requires records (rows) for each group
rowVarTotalInclude = "AEDECOD",
outputType = "data.frame-base"
)
dataSunburst <- tableAE
# The count column comes back as character from the table; coerce to numeric.
dataSunburst$n <- as.numeric(dataSunburst$n)
# create plot
sunburstClinData(
data = dataSunburst,
vars = c("AESOC", "AEDECOD"),
valueVar = "n",
valueLab = "Number of patients with adverse events"
)
## example where sum(counts) of child = counts of parent
# counts of patients per arm/site
# Overall and per-site totals are included so each parent's count equals
# the sum of its children.
tableDM <- getSummaryStatisticsTable(
data = dataDM,
rowVar = c("ARM", "SITEID"),
labelVars = labelVars,
# plotly treemap requires records (rows) for each group
rowVarTotalInclude = "SITEID",
rowTotalInclude = TRUE,
outputType = "data.frame-base"
)
# Coerce the character count column to numeric for plotting.
tableDM$statN <- as.numeric(tableDM$statN)
# create the plot
# valueType = "total": sector values are taken as totals of their children.
sunburstClinData(
data = tableDM,
vars = c("ARM", "SITEID"),
valueVar = "statN", valueLab = "Counts of patients",
valueType = "total"
) | /package/clinDataReview/inst/examples/sunburstClinData-example.R | no_license | ClinicoPath/clinDataReview | R | false | false | 1,519 | r | library(clinUtils)
# Load the example CDISC data and pull out the adverse-event (ADAE) and
# subject-level (ADSL) domains together with their variable labels.
data(dataADaMCDISCP01)
labelVars <- attr(dataADaMCDISCP01, "labelVars")
dataAE <- dataADaMCDISCP01$ADAE
dataDM <- dataADaMCDISCP01$ADSL
## example of basic sunburst:
# sunburst takes as input table with counts
library(inTextSummaryTable)
# Safety Analysis Set: patients with a start date for the first treatment.
dataTotal <- subset(dataDM, RFSTDTC != "")
# Count patients per AESOC/AEDECOD; AEDECOD totals are kept as their own
# records because the plotly sunburst needs a row for every group node.
tableAE <- getSummaryStatisticsTable(
  data = dataAE,
  dataTotal = dataTotal,
  rowVar = c("AESOC", "AEDECOD"),
  rowVarTotalInclude = "AEDECOD",
  rowOrder = "total",
  stats = getStats("count"),
  labelVars = labelVars,
  outputType = "data.frame-base"
)
dataSunburst <- tableAE
# The count column is character in the table output; make it numeric.
dataSunburst[["n"]] <- as.numeric(dataSunburst[["n"]])
# Draw the sunburst over the AESOC > AEDECOD hierarchy.
sunburstClinData(
  data = dataSunburst,
  vars = c("AESOC", "AEDECOD"),
  valueVar = "n",
  valueLab = "Number of patients with adverse events"
)
## example where sum(counts) of child = counts of parent
# Patients per treatment arm and, within each arm, per site; the overall
# total and the per-site totals are included so parent counts equal the
# sum of their children.
tableDM <- getSummaryStatisticsTable(
  data = dataDM,
  rowVar = c("ARM", "SITEID"),
  rowVarTotalInclude = "SITEID",
  rowTotalInclude = TRUE,
  labelVars = labelVars,
  outputType = "data.frame-base"
)
# Coerce the character count column to numeric for plotting.
tableDM[["statN"]] <- as.numeric(tableDM[["statN"]])
# valueType = "total": sector values are treated as totals of their children.
sunburstClinData(
  data = tableDM,
  vars = c("ARM", "SITEID"),
  valueVar = "statN",
  valueLab = "Counts of patients",
  valueType = "total"
)
# Subset the full GOSSIS training data file for the session.
# Original data downloaded from https://www.kaggle.com/c/widsdatathon2020/data
library(readr)
library(dplyr)

# Load the variable dictionary and the raw training data.
data_dict <- read_csv("WiDS Datathon 2020 Dictionary.csv")
gossis <- read_csv("training_v2.csv")

# Keep identifiers, the outcome, and selected demographic / admission
# variables, then write the trimmed file back out.
gossis %>%
  select(
    encounter_id, patient_id, hospital_id, hospital_death,
    age, bmi, elective_surgery, ethnicity, gender, height, weight,
    hospital_admit_source, icu_stay_type, icu_type, pre_icu_los_days
  ) %>%
  write_csv("gossis_subset.csv")
| /dataprep.R | no_license | nuitrcs/r-first-steps | R | false | false | 548 | r | # This script subsets the full data file for the session.
# Original data downloaded from https://www.kaggle.com/c/widsdatathon2020/data
library(readr)
library(dplyr)

# Read the data dictionary and the full training table.
data_dict <- read_csv("WiDS Datathon 2020 Dictionary.csv")
gossis <- read_csv("training_v2.csv")

# Select the columns needed for the session and export the subset.
gossis %>%
  select(
    encounter_id, patient_id, hospital_id, hospital_death,
    age, bmi, elective_surgery, ethnicity, gender, height, weight,
    hospital_admit_source, icu_stay_type, icu_type, pre_icu_los_days
  ) %>%
  write_csv("gossis_subset.csv")
|
#-------------------------------------------------------------------------------
# Copyright (c) 2012 University of Illinois, NCSA.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the
# University of Illinois/NCSA Open Source License
# which accompanies this distribution, and is available at
# http://opensource.ncsa.illinois.edu/license.html
#-------------------------------------------------------------------------------
######################################################################################################
# Plot functions for PEcAn ED2 Diagnostics
#
# v1
#
# TODO: Finalize plots for various functions
#
######################################################################################################
#====================================================================================================#
# Plot mean daily output
#====================================================================================================#
# UNDER DEVELOPMENT
# Plot mean daily model output (UNDER DEVELOPMENT).
#
# Currently this stub only walks the requested years and derives, for each
# year, the first and last day-of-year of the analysis window; no plots
# are produced yet. Relies on the globals start_year, end_year, start_date
# and end_date set by the driver script; the arguments are accepted for
# the eventual implementation but are not used yet.
plot_daily <- function(model.run, in.dir, out.dir) {
  i <- 1  # NOTE(review): never used yet; kept for the stub's future loop bookkeeping
  for (year in start_year:end_year) {
    message(paste("--- PROCESSING YEAR: ", year, " ---"))
    #---------------- Generate Subset Length ----------------------------------#
    # Partial years at the ends of the run start/stop on the requested
    # calendar days; interior years cover the whole year.
    start_day <- if (year == start_year) as.numeric(format(start_date, "%j")) else 1
    end_day <- if (year == end_year) {
      as.numeric(format(end_date, "%j"))
    } else {
      as.numeric(format(as.Date(sprintf("%s-12-31", year)), "%j"))
    }
  }
} # End of plot_daily
#----------------------------------------------------------------------------------------------------#
#====================================================================================================#
# Plot mean diel function
#====================================================================================================#
# not implemented yet
#----------------------------------------------------------------------------------------------------#
#====================================================================================================#
# Plot site average fluxes (i.e. "Tower" file output)
#====================================================================================================#
site_fluxes = function(model.run,in.dir,out.dir){
#---------------- Import prescribed pheno data, if exists -----------------------------------------#
# Info: Display prescribed phenology on diagnostic plots (if present)
# May need to get rid of this as it is mostly ED specific
pheno = list.files(path=model.run,pattern="phenology")
if (length(pheno)==0) {
site_pheno=NA
}else{
pheno_data = read.delim(pheno,header=F,sep="\t",skip=1)
Yr = pheno_data[,1]
GU = 1/pheno_data[,2]
LO = 1/pheno_data[,4]
site_pheno = data.frame(Year=Yr,Greenup=GU,LeafOff=LO)
print('Site Phenology Info (DoY)')
print(site_pheno)
print("")
}
#--------------------------------------------------------------------------------------------------#
i = 1
for (year in start_year:end_year) {
message(paste("--- PROCESSING YEAR: ",year," ---"))
#---------------- Generate Subset Length --------------------------------------------------------#
if (year == start_year) {
start_day <- as.numeric(format(start_date, "%j"))
} else {
start_day = 1
}
if (year == end_year) {
end_day = as.numeric(format(end_date, "%j"))
} else {
end_day = as.numeric(format(as.Date(sprintf("%s-12-31", year)), "%j"))
}
polyx = start_day:end_day # <--- for plotting below
vals_day = out_day # <--- values written out per day, 86400/FRQFAST
hdflength = (vals_day*(1+end_day-start_day))
#---------------- Init. Arrays ------------------------------------------------------------------#
# Info: Initialize arrays for entire model run and populate with for loop (below)
GPP.AVG = rep(0,times=hdflength)
VLEAF.RESP.AVG = rep(0,times=hdflength)
LEAF.RESP.AVG = rep(0,times=hdflength)
STORAGE.RESP.AVG = rep(0,times=hdflength)
GROWTH.RESP.AVG = rep(0,times=hdflength)
ROOT.RESP.AVG = rep(0,times=hdflength)
PLANT.RESP.AVG = rep(0,times=hdflength)
HTROPH.RESP.AVG = rep(0,times=hdflength)
Reco.AVG = rep(0,times=hdflength)
NPP.AVG = rep(0,times=hdflength)
NEE.AVG = rep(0,times=hdflength)
#---------------------------------------------
# Units: [kg/m2/s]
#AVG.VAPOR.WC = rep(0,times=hdflength) # wood vapor flux.
AVG.VAPOR.LC = rep(0,times=hdflength)
AVG.VAPOR.GC = rep(0,times=hdflength)
AVG.VAPOR.AC = rep(0,times=hdflength)
AVG.TRANSP = rep(0,times=hdflength)
AVG.EVAP = rep(0,times=hdflength)
# Units [kg/kg]
AVG.CAN.SHV = rep(0,times=hdflength)
#---------------------------------------------
# Not implemented yet
#AVG.SOIL.TEMP = rep(0,times=hdflength)
#CAN.AIR.TEMP.AVG = rep(0,times=hdflength)
#SWC.AVG = rep(0,times=hdflength)
#AVG.SFCWATER.DEPTH = rep(0,times=hdflength)
#------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------#
# Info from driver script
# dates contains YYmmdd, month (num), doy. fjday (0-1)
init = dates[1,4]
total = seq(1,hdflength,1) # <--- is this unused?
reps = hdflength/vals_day # <--- this should set the total number of days of data based on
# hdf length. E.g. 48 obs per day -- 17520/48 = 365
dayfrac = rep(seq(deltaT,24,deltaT), each=1, times=reps) # <--- setup daily output rate for subset
# rep over total length (hdflength/vals)
subset = 0 # <--- initialize variable
period = c(10.0,17.0) # <--- choose which times to average over. Can make user selectable.
s = seq(period[1],period[2],deltaT)
subset = which(dayfrac >= period[1] & dayfrac <= period[2])
hours = dayfrac[dayfrac >= period[2] & dayfrac <= period[1]]
aggrlist = rep(start_day:(end_day), each=length(s)) # subset list
#---------------- Load ED2 Model Output (hdf5) --------------------------------------------------#
filename = list.files(in.dir,full.names=TRUE,
pattern=paste('.*-T-', year, '-.*.h5', sep=''))[1]
if (is.na(filename)==1) {
break
}else{
data <- hdf5load(filename, load = FALSE,tidy=TRUE) # LOAD ED2 OUTPUT
}
var_names = summary(data) # View info about vars. For debugging
if (i==1){
print(paste("Site Averaged Fluxes (ITOUTPUT) for ",year))
print(var_names) # Show variable names in log file
print("")
#print(str(data))
}
i=i+1
#------------------------------------------------------------------------------------------------#
#---------------- Get Phenology Information -----------------------------------------------------#
chk = which(site_pheno==year)
if (is.nan(mean(chk))==1) {
phenology = data.frame(-9999,-9999,-9999)
names(phenology)=c("Year","Greenup","LeafOff")
GS_LENGTH = NA
}else{
phenology = site_pheno[chk,]
GS_LENGTH = phenology[,3]-phenology[,2]
}
#------------------------------------------------------------------------------------------------#
#---------------- Generate Figures --------------------------------------------------------------#
umol2gc <- 1.0368 # convert to gC
######################## SETUP PLOT PARAMETERS ###################################################
cex = 1
labcex = 2
axiscex = 2
maincex = 2
linew = 1.3 # line width
######################## ED2 OUTPUT ##############################################################
# units: umol/m2/s
GPP.AVG = data$AVG.GPP[subset]*umol2gc
GPP.AVG.mn = aggregate(GPP.AVG,by=list(aggrlist),mean)[[2]]
GPP.AVG.ll = aggregate(GPP.AVG,by=list(aggrlist),min)[[2]]
GPP.AVG.ul = aggregate(GPP.AVG,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
# units: umol/m2/s
LEAF.RESP.AVG = data$AVG.LEAF.RESP[subset]*umol2gc
LEAF.RESP.AVG.mn = aggregate(LEAF.RESP.AVG,by=list(aggrlist),mean)[[2]]
LEAF.RESP.AVG.ll = aggregate(LEAF.RESP.AVG,by=list(aggrlist),min)[[2]]
LEAF.RESP.AVG.ul = aggregate(LEAF.RESP.AVG,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
# units: umol/m2/s
VLEAF.RESP.AVG = data$AVG.VLEAF.RESP[subset]*umol2gc
VLEAF.RESP.AVG.mn = aggregate(VLEAF.RESP.AVG,by=list(aggrlist),mean)[[2]]
VLEAF.RESP.AVG.ll = aggregate(VLEAF.RESP.AVG,by=list(aggrlist),min)[[2]]
VLEAF.RESP.AVG.ul = aggregate(VLEAF.RESP.AVG,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
# units: umol/m2/s
STORAGE.RESP.AVG = data$AVG.STORAGE.RESP[subset]*umol2gc
STORAGE.RESP.AVG.mn = aggregate(STORAGE.RESP.AVG,by=list(aggrlist),mean)[[2]]
STORAGE.RESP.AVG.ll = aggregate(STORAGE.RESP.AVG,by=list(aggrlist),min)[[2]]
STORAGE.RESP.AVG.ul = aggregate(STORAGE.RESP.AVG,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
# units: umol/m2/s
GROWTH.RESP.AVG = data$AVG.GROWTH.RESP[subset]*umol2gc
GROWTH.RESP.AVG.mn = aggregate(GROWTH.RESP.AVG,by=list(aggrlist),mean)[[2]]
GROWTH.RESP.AVG.ll = aggregate(GROWTH.RESP.AVG,by=list(aggrlist),min)[[2]]
GROWTH.RESP.AVG.ul = aggregate(GROWTH.RESP.AVG,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
# units: umol/m2/s
ROOT.RESP.AVG = data$AVG.ROOT.RESP[subset]*umol2gc
ROOT.RESP.AVG.mn = aggregate(ROOT.RESP.AVG,by=list(aggrlist),mean)[[2]]
ROOT.RESP.AVG.ll = aggregate(ROOT.RESP.AVG,by=list(aggrlist),min)[[2]]
ROOT.RESP.AVG.ul = aggregate(ROOT.RESP.AVG,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
PLANT.RESP.AVG = data$AVG.PLANT.RESP[subset] *umol2gc
PLANT.RESP.AVG.mn = aggregate(PLANT.RESP.AVG,by=list(aggrlist),mean)[[2]]
PLANT.RESP.AVG.ll = aggregate(PLANT.RESP.AVG,by=list(aggrlist),min)[[2]]
PLANT.RESP.AVG.ul = aggregate(PLANT.RESP.AVG,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
HTROPH.RESP.AVG = data$AVG.HTROPH.RESP[subset] *umol2gc
HTROPH.RESP.AVG.mn = aggregate(HTROPH.RESP.AVG,by=list(aggrlist),mean)[[2]]
HTROPH.RESP.AVG.ll = aggregate(HTROPH.RESP.AVG,by=list(aggrlist),min)[[2]]
HTROPH.RESP.AVG.ul = aggregate(HTROPH.RESP.AVG,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
Reco.AVG.mn = (PLANT.RESP.AVG.mn + HTROPH.RESP.AVG.mn)
Reco.AVG.ll = (PLANT.RESP.AVG.ll + HTROPH.RESP.AVG.ll)
Reco.AVG.ul = (PLANT.RESP.AVG.ul + HTROPH.RESP.AVG.ul)
#------------------------------------------------------------------------------------------------#
#NPP.AVG = data$AVG.NPPDAILY[subset] *umol2gc
#NPP.AVG.mn = aggregate(NPP.AVG,by=list(aggrlist),mean)[[2]]
#NPP.AVG.ll = aggregate(NPP.AVG,by=list(aggrlist),min)[[2]]
#NPP.AVG.ul = aggregate(NPP.AVG,by=list(aggrlist),max)[[2]]
NPP.AVG.mn = (GPP.AVG.mn - PLANT.RESP.AVG.mn)
NPP.AVG.ll = (GPP.AVG.ll - PLANT.RESP.AVG.ul)
NPP.AVG.ul = (GPP.AVG.ul - PLANT.RESP.AVG.ll)
#------------------------------------------------------------------------------------------------#
NEE.AVG.mn = -1*(GPP.AVG.mn - (PLANT.RESP.AVG.mn + HTROPH.RESP.AVG.mn))
NEE.AVG.ll = -1*(GPP.AVG.ll - (PLANT.RESP.AVG.ul + HTROPH.RESP.AVG.ul))
NEE.AVG.ul = -1*(GPP.AVG.ul - (PLANT.RESP.AVG.ll + HTROPH.RESP.AVG.ll))
#------------------------------------------------------------------------------------------------#
# [kg/m2/s]
#AVG.VAPOR.WC = data$AVG.VAPOR.WC[subset] #polygon wood to canopy air vapor flux
#AVG.VAPOR.WC.mn = aggregate(AVG.VAPOR.WC,by=list(aggrlist),mean)[[2]]
#AVG.VAPOR.WC.ll = aggregate(AVG.VAPOR.WC,by=list(aggrlist),min)[[2]]
#AVG.VAPOR.WC.ul = aggregate(AVG.VAPOR.WC,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
# attempt to make this backwards compatible
AVG.VAPOR.LC = tryCatch(data$AVG.VAPOR.LC[subset],finally= data$AVG.VAPOR.VC[subset])
AVG.VAPOR.LC.mn = aggregate(AVG.VAPOR.LC,by=list(aggrlist),mean)[[2]]
AVG.VAPOR.LC.ll = aggregate(AVG.VAPOR.LC,by=list(aggrlist),min)[[2]]
AVG.VAPOR.LC.ul = aggregate(AVG.VAPOR.LC,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
AVG.VAPOR.GC = data$AVG.VAPOR.GC[subset] #polygon moisture flux ground to canopy air
AVG.VAPOR.GC.mn = aggregate(AVG.VAPOR.GC,by=list(aggrlist),mean)[[2]]
AVG.VAPOR.GC.ll = aggregate(AVG.VAPOR.GC,by=list(aggrlist),min)[[2]]
AVG.VAPOR.GC.ul = aggregate(AVG.VAPOR.GC,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
AVG.VAPOR.AC = data$AVG.VAPOR.AC[subset]#polygon vapor flux atmosphere to canopy air
AVG.VAPOR.AC.mn = aggregate(AVG.VAPOR.AC,by=list(aggrlist),mean)[[2]]
AVG.VAPOR.AC.ll = aggregate(AVG.VAPOR.AC,by=list(aggrlist),min)[[2]]
AVG.VAPOR.AC.ul = aggregate(AVG.VAPOR.AC,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
AVG.TRANSP = data$AVG.TRANSP[subset]#polygon transpiration from stomata to canopy air space
AVG.TRANSP.mn = aggregate(AVG.TRANSP,by=list(aggrlist),mean)[[2]]
AVG.TRANSP.ll = aggregate(AVG.TRANSP,by=list(aggrlist),min)[[2]]
AVG.TRANSP.ul = aggregate(AVG.TRANSP,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
AVG.EVAP = data$AVG.EVAP[subset] #Polygon averaged evap/dew from ground and leaves to C
AVG.EVAP.mn = aggregate(AVG.EVAP,by=list(aggrlist),mean)[[2]]
AVG.EVAP.ll = aggregate(AVG.EVAP,by=list(aggrlist),min)[[2]]
AVG.EVAP.ul = aggregate(AVG.EVAP,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
AVG.CAN.SHV = data$AVG.CAN.SHV[subset] #Polygon Average Specific Humidity of Canopy Air
AVG.CAN.SHV.mn = aggregate(AVG.CAN.SHV,by=list(aggrlist),mean)[[2]]
AVG.CAN.SHV.ll = aggregate(AVG.CAN.SHV,by=list(aggrlist),min)[[2]]
AVG.CAN.SHV.ul = aggregate(AVG.CAN.SHV,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
#AVG.SOIL.TEMP = data$AVG.SOIL.TEMP[subset,1,9]-273.15 #Polygon Average Soil Temperature
#AVG.SOIL.TEMP.5cm = aggregate(AVG.SOIL.TEMP,by=list(aggrlist),mean)[[2]]
#AVG.SOIL.TEMP = data$AVG.SOIL.TEMP[subset,1,8]-273.15 #Polygon Average Soil Temperature
#AVG.SOIL.TEMP.10cm = aggregate(AVG.SOIL.TEMP,by=list(aggrlist),mean)[[2]]
#------------------------------------------------------------------------------------------------#
#CAN.AIR.TEMP.AVG = (data$AVG.CAN.TEMP[subset])-273.15 # convert to celcius
#SWC.AVG = data$AVG.SOIL.WATER[subset,1,9] # soil moisture at 5cm
###########################################################################################################
##################################### COMPONENT FLUXES ####################################################
pdf(paste(out.dir,"/","ED2_",year,"_Site_Avg_Fluxes.pdf",sep=""),width=12,height=11,
onefile=TRUE)
par(mfrow=c(3,2),mar=c(5,5.7,0.9,0.5),mgp=c(3.3,1.5,0),oma=c(0,0,3,0)) # B, L, T, R
#==========================================================================================================
# GPP
#==========================================================================================================
ylim = range(c(GPP.AVG.ll,GPP.AVG.ul),na.rm=TRUE) # define Y lims
plot(start_day:end_day,GPP.AVG.mn,xlab='',ylab=expression(paste(GPP," (gC",~m^{-2},")")),
ylim=ylim,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(v=phenology[,2],lty=2,lwd=1.5,col="green3")
abline(v=phenology[,3],lty=2,lwd=1.5,col="brown")
polygon(c(polyx, rev(polyx)), c(GPP.AVG.ul, rev(GPP.AVG.ll)), col="light gray", border="dark grey",lty=2)
lines(start_day:end_day,GPP.AVG.mn,lty=1,col="black")
points(start_day:end_day,GPP.AVG.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
if (is.nan(mean(chk))==0) {
legend("topleft",legend=c("Greenup","Leaf Off"),bty="n",
lty=2,lwd=1.5,col=c("green3","brown"),cex=2)
#text(37,max(GPP.AVG)-4,"GS Length:",cex=2)
#text(35,max(GPP.AVG)-5,paste(round(GS_LENGTH,2)," days",sep=""),
# cex=2 )
}
abline(h=0,lty=2,lwd=1.5,col="black")
rm(chk)
box(lwd=2.2)
#==========================================================================================================
# NPP
#==========================================================================================================
ylim = range(c(NPP.AVG.ll,NPP.AVG.ul),na.rm=TRUE) # define Y lims
plot(start_day:end_day,NPP.AVG.mn,xlab='',ylab=expression(paste(NPP," (gC",~m^{-2},")")),
pch=21,col="black", bg="black",ylim=ylim,
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx, rev(polyx)), c(NPP.AVG.ul, rev(NPP.AVG.ll)),
col="light gray", border="dark grey",lty=2)
lines(start_day:end_day,NPP.AVG.mn,lty=1,col="black")
points(start_day:end_day,NPP.AVG.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Plant Resp
#==========================================================================================================
ylim = range(c(PLANT.RESP.AVG.ll,PLANT.RESP.AVG.ul),na.rm=TRUE) # define Y lims
plot(start_day:end_day,PLANT.RESP.AVG.mn,xlab='',ylim=ylim,
ylab=expression(paste(italic(R)[a]," (gC",~m^{-2},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx, rev(polyx)), c(PLANT.RESP.AVG.ul, rev(PLANT.RESP.AVG.ll)),
col="light gray", border="dark grey",lty=2)
lines(start_day:end_day,PLANT.RESP.AVG.mn,lty=1,col="black")
points(start_day:end_day,PLANT.RESP.AVG.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Heterotrophic Resp
#==========================================================================================================
ylim = range(c(HTROPH.RESP.AVG.ll,HTROPH.RESP.AVG.ul),na.rm=TRUE) # define Y lims
plot(start_day:end_day,HTROPH.RESP.AVG.mn,xlab='',ylim=ylim,
ylab=expression(paste(italic(R)[h]," (gC",~m^{-2},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx, rev(polyx)), c(HTROPH.RESP.AVG.ul, rev(HTROPH.RESP.AVG.ll)),
col="light gray", border="dark grey",lty=2)
lines(start_day:end_day,HTROPH.RESP.AVG.mn,lty=1,col="black")
points(start_day:end_day,HTROPH.RESP.AVG.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Reco
#==========================================================================================================
ylim = range(c(Reco.AVG.ll,Reco.AVG.ul),na.rm=TRUE)
plot(start_day:end_day,Reco.AVG.mn,xlab=paste("DOY",as.character(year)),ylim=ylim,
ylab=expression(paste(italic(R)[eco.]," (gC",~m^{-2},")")),
pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx, rev(polyx)), c(Reco.AVG.ul, rev(Reco.AVG.ll)),col="light gray", border="dark grey",lty=2)
lines(start_day:end_day,Reco.AVG.mn,lty=1,col="black")
points(start_day:end_day,Reco.AVG.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# NEE
#==========================================================================================================
ylim = range(c(NEE.AVG.ll,NEE.AVG.ul),na.rm=TRUE)
plot(start_day:end_day,NEE.AVG.mn,xlab=paste("DOY",as.character(year)),ylim=ylim,
ylab=expression(paste(NEE," (gC",~m^{-2},")")),
pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx, rev(polyx)), c(NEE.AVG.ul, rev(NEE.AVG.ll)),col="light gray", border="dark grey",lty=2)
lines(start_day:end_day,NEE.AVG.mn,lty=1,col="black")
points(start_day:end_day,NEE.AVG.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
# add single title to plot
mtext("Site Component Fluxes", side=3, line=1, outer=TRUE, cex=1.5, font=2)
######################################## RESPIRATION COMPONENTS ###########################################
par(mfrow=c(3,2),mar=c(5,5.7,0.9,0.5),mgp=c(3.3,1.5,0),oma=c(0,0,3,0)) # B, L, T, R
#==========================================================================================================
# Plant resp
#==========================================================================================================
ylim = range(c(PLANT.RESP.AVG.ll,PLANT.RESP.AVG.ul),na.rm=TRUE) # define Y lims
plot(start_day:end_day,PLANT.RESP.AVG.mn,xlab='',ylim=ylim,
ylab=expression(paste(italic(R)[a]," (gC",~m^{-2},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx, rev(polyx)), c(PLANT.RESP.AVG.ul, rev(PLANT.RESP.AVG.ll)),
col="light gray", border="dark grey",lty=2)
lines(start_day:end_day,PLANT.RESP.AVG.mn,lty=1,col="black")
points(start_day:end_day,PLANT.RESP.AVG.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Leaf resp
#==========================================================================================================
ylim = range(c(LEAF.RESP.AVG.ll,LEAF.RESP.AVG.ul),na.rm=TRUE)
plot(start_day:end_day,LEAF.RESP.AVG.mn,xlab='',ylim=ylim,
ylab=expression(paste(italic(R)[leaf]," (gC",~m^{-2},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx,rev(polyx)),c(LEAF.RESP.AVG.ul,rev(LEAF.RESP.AVG.ll)),col="light gray",
border="dark grey",lty=2)
lines(start_day:end_day,LEAF.RESP.AVG.mn,lty=1,col="black")
points(start_day:end_day,LEAF.RESP.AVG.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Root resp
#==========================================================================================================
ylim = range(c(ROOT.RESP.AVG.ll,ROOT.RESP.AVG.ul),na.rm=TRUE)
plot(start_day:end_day,ROOT.RESP.AVG.mn,xlab='',ylim=ylim,
ylab=expression(paste(italic(R)[root]," (gC",~m^{-2},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx,rev(polyx)),c(ROOT.RESP.AVG.ul,rev(ROOT.RESP.AVG.ll)),col="light gray",
border="dark grey",lty=2)
lines(start_day:end_day,ROOT.RESP.AVG.mn,lty=1,col="black")
points(start_day:end_day,ROOT.RESP.AVG.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Growth Resp
#==========================================================================================================
ylim = range(c(GROWTH.RESP.AVG.ll,GROWTH.RESP.AVG.ul),na.rm=TRUE)
plot(start_day:end_day,GROWTH.RESP.AVG.mn,xlab='',ylim=ylim,
ylab=expression(paste(italic(R)[growth]," (gC",~m^{-2},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx,rev(polyx)),c(GROWTH.RESP.AVG.ul,rev(GROWTH.RESP.AVG.ll)),col="light gray",
border="dark grey",lty=2)
lines(start_day:end_day,GROWTH.RESP.AVG.mn,lty=1,col="black")
points(start_day:end_day,GROWTH.RESP.AVG.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Storage Resp
#==========================================================================================================
ylim = range(c(STORAGE.RESP.AVG.ll,STORAGE.RESP.AVG.ul),na.rm=TRUE)
plot(start_day:end_day,STORAGE.RESP.AVG.mn,xlab='',ylim=ylim,
ylab=expression(paste(italic(R)[growth]," (gC",~m^{-2},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx,rev(polyx)),c(STORAGE.RESP.AVG.ul,rev(STORAGE.RESP.AVG.ll)),col="light gray",
border="dark grey",lty=2)
lines(start_day:end_day,STORAGE.RESP.AVG.mn,lty=1,col="black")
points(start_day:end_day,STORAGE.RESP.AVG.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Vleaf resp
#==========================================================================================================
ylim = range(c(VLEAF.RESP.AVG.ll,VLEAF.RESP.AVG.ul),na.rm=TRUE)
plot(start_day:end_day,VLEAF.RESP.AVG.mn,xlab='',ylim=ylim,
ylab=expression(paste(italic(VR)[leaf]," (gC",~m^{-2},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx,rev(polyx)),c(VLEAF.RESP.AVG.ul,rev(VLEAF.RESP.AVG.ll)),col="light gray",
border="dark grey",lty=2)
lines(start_day:end_day,VLEAF.RESP.AVG.mn,lty=1,col="black")
points(start_day:end_day,VLEAF.RESP.AVG.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
# Plot title
mtext("Site Component Respiration ", side=3, line=1, outer=TRUE, cex=1.5, font=2)
########################################### Energy Balance ################################################
par(mfrow=c(3,2),mar=c(5,5.7,0.9,0.5),mgp=c(3.3,1.5,0),oma=c(0,0,3,0)) # B, L, T, R
#==========================================================================================================
# Polygon vegetation/leaf vapor flux
#==========================================================================================================
ylim = range(c(AVG.VAPOR.LC.ll,AVG.VAPOR.LC.ul),na.rm=TRUE)
plot(start_day:end_day,AVG.VAPOR.LC.mn,xlab='',ylim=ylim,
ylab=expression(paste(V.~Flux[veg~to~CAS]," (kg",~m^{-2}~s^{-1},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx,rev(polyx)),c(AVG.VAPOR.LC.ul,rev(AVG.VAPOR.LC.ll)),col="light gray",
border="dark grey",lty=2)
lines(start_day:end_day,AVG.VAPOR.LC.mn,lty=1,col="black")
points(start_day:end_day,AVG.VAPOR.LC.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Polygon moisture flux ground to canopy air
#==========================================================================================================
ylim = range(c(AVG.VAPOR.GC.ll,AVG.VAPOR.GC.ul),na.rm=TRUE)
plot(start_day:end_day,AVG.VAPOR.GC.mn,xlab='',ylim=ylim,
ylab=expression(paste(V.~Flux[ground~to~CAS]," (kg",~m^{-2}~s^{-1},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx,rev(polyx)),c(AVG.VAPOR.GC.ll,rev(AVG.VAPOR.GC.ul)),col="light gray",
border="dark grey",lty=2)
lines(start_day:end_day,AVG.VAPOR.GC.mn,lty=1,col="black")
points(start_day:end_day,AVG.VAPOR.GC.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Polygon vapor flux atmosphere to canopy air
#==========================================================================================================
ylim = range(c(AVG.VAPOR.AC.ll,AVG.VAPOR.AC.ul),na.rm=TRUE)
plot(start_day:end_day,AVG.VAPOR.AC.mn,xlab='',ylim=ylim,
ylab=expression(paste(V.~Flux[atm.~to~CAS]," (kg",~m^{-2}~s^{-1},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx,rev(polyx)),c(AVG.VAPOR.AC.ul,rev(AVG.VAPOR.AC.ll)),col="light gray",
border="dark grey",lty=2)
lines(start_day:end_day,AVG.VAPOR.AC.mn,lty=1,col="black")
points(start_day:end_day,AVG.VAPOR.AC.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Polygon transpiration from stomata to canopy air space
#==========================================================================================================
ylim = range(c(AVG.TRANSP.ll,AVG.TRANSP.ul),na.rm=TRUE)
plot(start_day:end_day,AVG.TRANSP.mn,xlab='',ylim=ylim,
ylab=expression(paste(Transpiration," (kg",~m^{-2}~s^{-1},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx,rev(polyx)),c(AVG.TRANSP.ul,rev(AVG.TRANSP.ll)),col="light gray",
border="dark grey",lty=2)
lines(start_day:end_day,AVG.TRANSP.mn,lty=1,col="black")
points(start_day:end_day,AVG.TRANSP.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Polygon averaged evap/dew from ground and leaves to CAS
#==========================================================================================================
ylim = range(c(AVG.EVAP.ll,AVG.EVAP.ul),na.rm=TRUE)
plot(start_day:end_day,AVG.EVAP.mn,xlab='',ylim=ylim,
ylab=expression(paste(Evaporation," (kg",~m^{-2}~s^{-1},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx,rev(polyx)),c(AVG.EVAP.ul,rev(AVG.EVAP.ll)),col="light gray",
border="dark grey",lty=2)
lines(start_day:end_day,AVG.EVAP.mn,lty=1,col="black")
points(start_day:end_day,AVG.EVAP.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Polygon Average Specific Humidity of Canopy Air
#==========================================================================================================
ylim = range(c(AVG.CAN.SHV.ll,AVG.CAN.SHV.ul),na.rm=TRUE)
plot(start_day:end_day,AVG.CAN.SHV.mn,xlab='',ylim=ylim,
ylab=expression(paste(Sp.Humidity[CAS]," (kg",~kg^{-1},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx,rev(polyx)),c(AVG.CAN.SHV.ul,rev(AVG.CAN.SHV.ll)),col="light gray",
border="dark grey",lty=2)
lines(start_day:end_day,AVG.CAN.SHV.mn,lty=1,col="black")
points(start_day:end_day,AVG.CAN.SHV.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Polygon wood to canopy air vapor flux
#==========================================================================================================
# ylim = range(c(AVG.VAPOR.WC.ll,AVG.VAPOR.WC.ul),na.rm=TRUE)
# plot(start_day:end_day,AVG.VAPOR.WC.mn,xlab='',ylim=ylim,
# ylab=expression(paste(italic(Vapor Flux)[wood]," (kg",~m^{-2},~s^{-1}")")),pch=21,col="black",
# bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
# polygon(c(polyx,rev(polyx)),c(AVG.VAPOR.WC.ul,rev(AVG.VAPOR.WC.ll)),col="light gray",
# border="dark grey",lty=2)
# lines(start_day:end_day,AVG.VAPOR.WC.mn,lty=1,col="black")
# points(start_day:end_day,AVG.VAPOR.WC.mn,pch=21,col="black", bg="black",
# cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
# abline(h=0,lty=2,lwd=1.5,col="black")
# box(lwd=2.2)
# Plot title
mtext("Site Vapor Fluxes ", side=3, line=1, outer=TRUE, cex=1.5, font=2)
##################################### MET ##########################################
#plot(start_day:end_day,AVG.SOIL.TEMP.5cm)
#plot(start_day:end_day,AVG.SOIL.TEMP.10cm)
#mtext("Site Soil Temperatures ", side=3, line=1, outer=TRUE, cex=1.5, font=2)
dev.off() # Close PDF
} # END for loop
}
#----------------------------------------------------------------------------------------------------#
#----------------------------------------------------------------------------------------------------#
# Plot monthly
plot_monthly = function(model.run,in.dir,out.dir){
  # Plot monthly mean ED2 output: one PDF per simulation year showing monthly
  # mean LAI (per-PFT columns are collected; currently only the first PFT is
  # drawn on an empty frame — figure content is still UNDER DEVELOPMENT).
  #
  # Args:
  #   model.run  Path to the model run directory (currently unused here).
  #   in.dir     Directory containing the ED2 "-E-" (monthly mean) HDF5 files.
  #   out.dir    Directory in which the per-year PDF files are written.
  #
  # Relies on globals set by the driver script: start_year, end_year,
  # IMONTHA/IMONTHZ (first/last month of the run), mon2mmm(), and the
  # hdf5 (hdf5load) and chron packages loaded by the caller.
  #--------------------------------------------------------------------------------------------------#
  # ED2 PFT names; the final entry ("Total") labels a derived across-PFT column.
  pft.names = c("C4 Grass","Early Tropical","Mid Tropical","Late Tropical"
               ,"C3 Grass","North Pine","South Pine","Late Conifer"
               ,"Early Temperate","Mid Temperate","Late Temperate"
               ,"C3 Pasture","C3 Crop","C4 Pasture","C4 Crop","Subtropical C3 grass ",
               "Araucaria","Total")
  n.pft = length(pft.names) - 1  # number of real PFTs (excludes "Total")
  #----------------------------------------------------------------------------------------------#
  # Loop over time.                                                                              #
  #----------------------------------------------------------------------------------------------#
  i = 1 # counter so variable names are printed to the log file only once
  for (year in start_year:end_year) {
    message(paste("--- PROCESSING YEAR: ",year," ---"))
    #----- Clip the month range at the edges of the simulation. --------------------------------#
    if (year == start_year){
      month.begin = IMONTHA
    }else{
      month.begin = 1
    } #end if
    if (year == end_year){
      month.end = IMONTHZ
    }else{
      month.end = 12
    } #end if
    n.months = as.numeric(month.end) - as.numeric(month.begin) + 1
    #----- Per-year storage: one row per month, one column per PFT plus a total column. --------#
    nplant.pft = matrix(0,nrow=n.months,ncol=n.pft+1) # not yet populated (under development)
    lai.pft    = matrix(0,nrow=n.months,ncol=n.pft+1)
    agb.pft    = matrix(0,nrow=n.months,ncol=n.pft+1)
    coh.area   = list()
    coh.age    = list()
    coh.dbh    = list()
    coh.pft    = list()
    coh.nplant = list()
    coh.height = list()
    coh.gpp    = list()
    coh.resp   = list()
    coh.npp    = list()
    # Time axis for THIS year's figure. Reset every year: the matrices above are
    # re-sized per year, so accumulating 'when' across years would leave
    # plot(x=when, y=lai.pft[,1]) with mismatched lengths after the first year.
    when = NULL
    #--------------------------------------------------------------------------------------------#
    j = 0 # row index within this year's matrices
    for (mm in month.begin:month.end) {
      j = j+1
      mth = toupper(mon2mmm(mm,lang="English")) #<--- convert month num to 3 letter name
      message(paste("-------- PROCESSING MONTH: ",mth))
      #---------------- Load ED2 Model Output (hdf5) --------------------------------------------#
      # Match the monthly mean ("-E-") file for this specific year AND month.
      # Matching on the year alone and taking [1] would silently re-load the
      # same (first) monthly file for every iteration of the month loop.
      filename = list.files(in.dir,full.names=TRUE,
                            pattern=paste('.*-E-', year, '-', sprintf('%02d', mm),
                                          '-.*.h5', sep=''))[1]
      if (is.na(filename)) {
        break # no output for this month; stop processing this year
      }else{
        data <- hdf5load(filename, load = FALSE,tidy=TRUE) # LOAD ED2 OUTPUT
      }
      # Record the month's date only after the file is known to exist, so
      # 'when' stays aligned with the rows actually filled below.
      when.now = chron(dates=paste(mm,1,year,sep="/"),times=paste(0,0,0,sep=":"))
      when = c(when,when.now)
      var_names = summary(data) # View info about vars. For debugging
      if (i==1){
        print("Mean Monthly Output Variables (IMOUTPUT)")
        print(var_names)
        print("")
      } # end of complex if/then
      #------------------------------------------------------------------------------------#
      # Get desired PFT-level variables                                                    #
      #------------------------------------------------------------------------------------#
      lai.pft[j,1:n.pft] = data$MMEAN.LAI.PFT
      agb.pft[j,1:n.pft] = data$AGB.PFT
      #------------------------------------------------------------------------------------#
      # Define the global number of patches and cohorts.                                   #
      #------------------------------------------------------------------------------------#
      npatches.global = data$NPATCHES.GLOBAL
      ncohorts.global = data$NCOHORTS.GLOBAL
      #----- Find the indices for the beginning and end of each patch. --------------------#
      ncohorts = diff(c(data$PACO.ID,ncohorts.global+1))
      aco = data$PACO.ID
      zco = data$PACO.ID + ncohorts - 1
      #------------------------------------------------------------------------------------#
      # Extend the area and age of each patch so they have the same length as the          #
      # cohort-level vectors.                                                              #
      #------------------------------------------------------------------------------------#
      coh.area[[j]] = rep(data$AREA,times=ncohorts)
      coh.age [[j]] = rep(data$AGE ,times=ncohorts)
      #----- Grab other cohort-level variables. -------------------------------------------#
      coh.pft   [[j]] = data$PFT
      coh.dbh   [[j]] = data$DBH
      coh.nplant[[j]] = data$NPLANT*coh.area[[j]] # plant density weighted by patch area
      coh.height[[j]] = data$HITE
      coh.gpp   [[j]] = data$MMEAN.GPP.CO
      coh.resp  [[j]] = ( data$MMEAN.LEAF.RESP.CO
                        + data$MMEAN.ROOT.RESP.CO
                        + data$MMEAN.GROWTH.RESP.CO
                        + data$MMEAN.STORAGE.RESP.CO
                        + data$MMEAN.VLEAF.RESP.CO )
      coh.npp   [[j]] = coh.gpp[[j]] - coh.resp[[j]] # NPP = GPP - autotrophic respiration
      #------------------------------------------------------------------------------------#
      i = i+1 # counter for printing variable names to log file
    } # end for loop for importing monthly data for year x
    #------------------------------------------------------------------------------------------#
    # Fill the across-PFT "Total" column.                                                      #
    #------------------------------------------------------------------------------------------#
    tot = n.pft + 1 # <---- total column
    agb.pft[,tot] = rowSums(agb.pft[,1:n.pft])
    lai.pft[,tot] = rowSums(lai.pft[,1:n.pft])
    pft.use = which(colSums(agb.pft) > 0) # PFTs present at any point this year
    #==========================================================================================#
    # Figures                                                                                  #
    #==========================================================================================#
    # Plot the LAI of all PFTs together.                                                       #
    #------------------------------------------------------------------------------------------#
    pdf(paste(out.dir,"/","ED2_",year,"_Monthly_Mean_Output.pdf",sep=""),width=10,height=10,
        onefile=TRUE)
    #----- Find the limits and expand the range so the legend fits. ---------------------------#
    lai.ylim = range(lai.pft,na.rm=TRUE)
    lai.ylim[2] = lai.ylim[2] + 0.2 * (lai.ylim[2] - lai.ylim[1])
    lai.title = paste("Leaf Area Index","US-WCr",sep=" - ") # <--- Site needs to be dynamic
    lai.xlab = "Month"
    lai.ylab = expression(paste("LAI (",m^{2}~m^{-2},")")) #"LAI [m2/m2]"
    plot(x=when,y=lai.pft[,1],type="n",ylim=lai.ylim,xaxt="n"
        ,main=lai.title,xlab=lai.xlab,ylab=lai.ylab)
    dev.off()
  } # end for loop
} # end of function
#----------------------------------------------------------------------------------------------------#
| /models/ed/inst/pecan.ed2.diag.plots.R | permissive | PecanProject/pecan | R | false | false | 45,221 | r | #-------------------------------------------------------------------------------
# Copyright (c) 2012 University of Illinois, NCSA.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the
# University of Illinois/NCSA Open Source License
# which accompanies this distribution, and is available at
# http://opensource.ncsa.illinois.edu/license.html
#-------------------------------------------------------------------------------
######################################################################################################
# Plot functions for PEcAn ED2 Diagnostics
#
# v1
#
# TODO: Finalize plots for various functions
#
######################################################################################################
#====================================================================================================#
# Plot mean daily output
#====================================================================================================#
# UNDER DEVELOPMENT
plot_daily = function(model.run,in.dir,out.dir){
  # Plot mean daily output (stub — UNDER DEVELOPMENT).
  # Currently this only works out the day-of-year range covered by each
  # simulation year; no data are loaded and no figures are produced yet.
  #
  # Args:
  #   model.run, in.dir, out.dir  Reserved for the eventual implementation;
  #                               presently unused.
  #
  # Relies on globals set by the driver script: start_year, end_year,
  # start_date, end_date.
  for (year in start_year:end_year) {
    message(paste("--- PROCESSING YEAR: ",year," ---"))
    #---------------- Generate Subset Length --------------------------------------------------------#
    # Partial first/last years are clipped to the run's start/end dates;
    # full years span day 1 through Dec 31 (365 or 366).
    if (year == start_year) {
      start_day = as.numeric(format(start_date, "%j"))
    } else {
      start_day = 1
    }
    if (year == end_year) {
      end_day = as.numeric(format(end_date, "%j"))
    } else {
      end_day = as.numeric(format(as.Date(sprintf("%s-12-31", year)), "%j"))
    }
  }
} # End of plot_daily
#----------------------------------------------------------------------------------------------------#
#====================================================================================================#
# Plot mean diel function
#====================================================================================================#
# not implemented yet
#----------------------------------------------------------------------------------------------------#
#====================================================================================================#
# Plot site average fluxes (i.e. "Tower" file output)
#====================================================================================================#
site_fluxes = function(model.run,in.dir,out.dir){
#---------------- Import prescribed pheno data, if exists -----------------------------------------#
# Info: Display prescribed phenology on diagnostic plots (if present)
# May need to get rid of this as it is mostly ED specific
pheno = list.files(path=model.run,pattern="phenology")
if (length(pheno)==0) {
site_pheno=NA
}else{
pheno_data = read.delim(pheno,header=F,sep="\t",skip=1)
Yr = pheno_data[,1]
GU = 1/pheno_data[,2]
LO = 1/pheno_data[,4]
site_pheno = data.frame(Year=Yr,Greenup=GU,LeafOff=LO)
print('Site Phenology Info (DoY)')
print(site_pheno)
print("")
}
#--------------------------------------------------------------------------------------------------#
i = 1
for (year in start_year:end_year) {
message(paste("--- PROCESSING YEAR: ",year," ---"))
#---------------- Generate Subset Length --------------------------------------------------------#
if (year == start_year) {
start_day <- as.numeric(format(start_date, "%j"))
} else {
start_day = 1
}
if (year == end_year) {
end_day = as.numeric(format(end_date, "%j"))
} else {
end_day = as.numeric(format(as.Date(sprintf("%s-12-31", year)), "%j"))
}
polyx = start_day:end_day # <--- for plotting below
vals_day = out_day # <--- values written out per day, 86400/FRQFAST
hdflength = (vals_day*(1+end_day-start_day))
#---------------- Init. Arrays ------------------------------------------------------------------#
# Info: Initialize arrays for entire model run and populate with for loop (below)
GPP.AVG = rep(0,times=hdflength)
VLEAF.RESP.AVG = rep(0,times=hdflength)
LEAF.RESP.AVG = rep(0,times=hdflength)
STORAGE.RESP.AVG = rep(0,times=hdflength)
GROWTH.RESP.AVG = rep(0,times=hdflength)
ROOT.RESP.AVG = rep(0,times=hdflength)
PLANT.RESP.AVG = rep(0,times=hdflength)
HTROPH.RESP.AVG = rep(0,times=hdflength)
Reco.AVG = rep(0,times=hdflength)
NPP.AVG = rep(0,times=hdflength)
NEE.AVG = rep(0,times=hdflength)
#---------------------------------------------
# Units: [kg/m2/s]
#AVG.VAPOR.WC = rep(0,times=hdflength) # wood vapor flux.
AVG.VAPOR.LC = rep(0,times=hdflength)
AVG.VAPOR.GC = rep(0,times=hdflength)
AVG.VAPOR.AC = rep(0,times=hdflength)
AVG.TRANSP = rep(0,times=hdflength)
AVG.EVAP = rep(0,times=hdflength)
# Units [kg/kg]
AVG.CAN.SHV = rep(0,times=hdflength)
#---------------------------------------------
# Not implemented yet
#AVG.SOIL.TEMP = rep(0,times=hdflength)
#CAN.AIR.TEMP.AVG = rep(0,times=hdflength)
#SWC.AVG = rep(0,times=hdflength)
#AVG.SFCWATER.DEPTH = rep(0,times=hdflength)
#------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------#
# Info from driver script
# dates contains YYmmdd, month (num), doy. fjday (0-1)
init = dates[1,4]
total = seq(1,hdflength,1) # <--- is this unused?
reps = hdflength/vals_day # <--- this should set the total number of days of data based on
# hdf length. E.g. 48 obs per day -- 17520/48 = 365
dayfrac = rep(seq(deltaT,24,deltaT), each=1, times=reps) # <--- setup daily output rate for subset
# rep over total length (hdflength/vals)
subset = 0 # <--- initialize variable
period = c(10.0,17.0) # <--- choose which times to average over. Can make user selectable.
s = seq(period[1],period[2],deltaT)
subset = which(dayfrac >= period[1] & dayfrac <= period[2])
hours = dayfrac[dayfrac >= period[2] & dayfrac <= period[1]]
aggrlist = rep(start_day:(end_day), each=length(s)) # subset list
#---------------- Load ED2 Model Output (hdf5) --------------------------------------------------#
filename = list.files(in.dir,full.names=TRUE,
pattern=paste('.*-T-', year, '-.*.h5', sep=''))[1]
if (is.na(filename)==1) {
break
}else{
data <- hdf5load(filename, load = FALSE,tidy=TRUE) # LOAD ED2 OUTPUT
}
var_names = summary(data) # View info about vars. For debugging
if (i==1){
print(paste("Site Averaged Fluxes (ITOUTPUT) for ",year))
print(var_names) # Show variable names in log file
print("")
#print(str(data))
}
i=i+1
#------------------------------------------------------------------------------------------------#
#---------------- Get Phenology Information -----------------------------------------------------#
chk = which(site_pheno==year)
if (is.nan(mean(chk))==1) {
phenology = data.frame(-9999,-9999,-9999)
names(phenology)=c("Year","Greenup","LeafOff")
GS_LENGTH = NA
}else{
phenology = site_pheno[chk,]
GS_LENGTH = phenology[,3]-phenology[,2]
}
#------------------------------------------------------------------------------------------------#
#---------------- Generate Figures --------------------------------------------------------------#
umol2gc <- 1.0368 # convert to gC
######################## SETUP PLOT PARAMETERS ###################################################
cex = 1
labcex = 2
axiscex = 2
maincex = 2
linew = 1.3 # line width
######################## ED2 OUTPUT ##############################################################
# units: umol/m2/s
GPP.AVG = data$AVG.GPP[subset]*umol2gc
GPP.AVG.mn = aggregate(GPP.AVG,by=list(aggrlist),mean)[[2]]
GPP.AVG.ll = aggregate(GPP.AVG,by=list(aggrlist),min)[[2]]
GPP.AVG.ul = aggregate(GPP.AVG,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
# units: umol/m2/s
LEAF.RESP.AVG = data$AVG.LEAF.RESP[subset]*umol2gc
LEAF.RESP.AVG.mn = aggregate(LEAF.RESP.AVG,by=list(aggrlist),mean)[[2]]
LEAF.RESP.AVG.ll = aggregate(LEAF.RESP.AVG,by=list(aggrlist),min)[[2]]
LEAF.RESP.AVG.ul = aggregate(LEAF.RESP.AVG,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
# units: umol/m2/s
VLEAF.RESP.AVG = data$AVG.VLEAF.RESP[subset]*umol2gc
VLEAF.RESP.AVG.mn = aggregate(VLEAF.RESP.AVG,by=list(aggrlist),mean)[[2]]
VLEAF.RESP.AVG.ll = aggregate(VLEAF.RESP.AVG,by=list(aggrlist),min)[[2]]
VLEAF.RESP.AVG.ul = aggregate(VLEAF.RESP.AVG,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
# units: umol/m2/s
STORAGE.RESP.AVG = data$AVG.STORAGE.RESP[subset]*umol2gc
STORAGE.RESP.AVG.mn = aggregate(STORAGE.RESP.AVG,by=list(aggrlist),mean)[[2]]
STORAGE.RESP.AVG.ll = aggregate(STORAGE.RESP.AVG,by=list(aggrlist),min)[[2]]
STORAGE.RESP.AVG.ul = aggregate(STORAGE.RESP.AVG,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
# units: umol/m2/s
GROWTH.RESP.AVG = data$AVG.GROWTH.RESP[subset]*umol2gc
GROWTH.RESP.AVG.mn = aggregate(GROWTH.RESP.AVG,by=list(aggrlist),mean)[[2]]
GROWTH.RESP.AVG.ll = aggregate(GROWTH.RESP.AVG,by=list(aggrlist),min)[[2]]
GROWTH.RESP.AVG.ul = aggregate(GROWTH.RESP.AVG,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
# units: umol/m2/s
ROOT.RESP.AVG = data$AVG.ROOT.RESP[subset]*umol2gc
ROOT.RESP.AVG.mn = aggregate(ROOT.RESP.AVG,by=list(aggrlist),mean)[[2]]
ROOT.RESP.AVG.ll = aggregate(ROOT.RESP.AVG,by=list(aggrlist),min)[[2]]
ROOT.RESP.AVG.ul = aggregate(ROOT.RESP.AVG,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
PLANT.RESP.AVG = data$AVG.PLANT.RESP[subset] *umol2gc
PLANT.RESP.AVG.mn = aggregate(PLANT.RESP.AVG,by=list(aggrlist),mean)[[2]]
PLANT.RESP.AVG.ll = aggregate(PLANT.RESP.AVG,by=list(aggrlist),min)[[2]]
PLANT.RESP.AVG.ul = aggregate(PLANT.RESP.AVG,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
HTROPH.RESP.AVG = data$AVG.HTROPH.RESP[subset] *umol2gc
HTROPH.RESP.AVG.mn = aggregate(HTROPH.RESP.AVG,by=list(aggrlist),mean)[[2]]
HTROPH.RESP.AVG.ll = aggregate(HTROPH.RESP.AVG,by=list(aggrlist),min)[[2]]
HTROPH.RESP.AVG.ul = aggregate(HTROPH.RESP.AVG,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
Reco.AVG.mn = (PLANT.RESP.AVG.mn + HTROPH.RESP.AVG.mn)
Reco.AVG.ll = (PLANT.RESP.AVG.ll + HTROPH.RESP.AVG.ll)
Reco.AVG.ul = (PLANT.RESP.AVG.ul + HTROPH.RESP.AVG.ul)
#------------------------------------------------------------------------------------------------#
#NPP.AVG = data$AVG.NPPDAILY[subset] *umol2gc
#NPP.AVG.mn = aggregate(NPP.AVG,by=list(aggrlist),mean)[[2]]
#NPP.AVG.ll = aggregate(NPP.AVG,by=list(aggrlist),min)[[2]]
#NPP.AVG.ul = aggregate(NPP.AVG,by=list(aggrlist),max)[[2]]
NPP.AVG.mn = (GPP.AVG.mn - PLANT.RESP.AVG.mn)
NPP.AVG.ll = (GPP.AVG.ll - PLANT.RESP.AVG.ul)
NPP.AVG.ul = (GPP.AVG.ul - PLANT.RESP.AVG.ll)
#------------------------------------------------------------------------------------------------#
NEE.AVG.mn = -1*(GPP.AVG.mn - (PLANT.RESP.AVG.mn + HTROPH.RESP.AVG.mn))
NEE.AVG.ll = -1*(GPP.AVG.ll - (PLANT.RESP.AVG.ul + HTROPH.RESP.AVG.ul))
NEE.AVG.ul = -1*(GPP.AVG.ul - (PLANT.RESP.AVG.ll + HTROPH.RESP.AVG.ll))
#------------------------------------------------------------------------------------------------#
# [kg/m2/s]
#AVG.VAPOR.WC = data$AVG.VAPOR.WC[subset] #polygon wood to canopy air vapor flux
#AVG.VAPOR.WC.mn = aggregate(AVG.VAPOR.WC,by=list(aggrlist),mean)[[2]]
#AVG.VAPOR.WC.ll = aggregate(AVG.VAPOR.WC,by=list(aggrlist),min)[[2]]
#AVG.VAPOR.WC.ul = aggregate(AVG.VAPOR.WC,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
# attempt to make this backwards compatible
AVG.VAPOR.LC = tryCatch(data$AVG.VAPOR.LC[subset],finally= data$AVG.VAPOR.VC[subset])
AVG.VAPOR.LC.mn = aggregate(AVG.VAPOR.LC,by=list(aggrlist),mean)[[2]]
AVG.VAPOR.LC.ll = aggregate(AVG.VAPOR.LC,by=list(aggrlist),min)[[2]]
AVG.VAPOR.LC.ul = aggregate(AVG.VAPOR.LC,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
AVG.VAPOR.GC = data$AVG.VAPOR.GC[subset] #polygon moisture flux ground to canopy air
AVG.VAPOR.GC.mn = aggregate(AVG.VAPOR.GC,by=list(aggrlist),mean)[[2]]
AVG.VAPOR.GC.ll = aggregate(AVG.VAPOR.GC,by=list(aggrlist),min)[[2]]
AVG.VAPOR.GC.ul = aggregate(AVG.VAPOR.GC,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
AVG.VAPOR.AC = data$AVG.VAPOR.AC[subset]#polygon vapor flux atmosphere to canopy air
AVG.VAPOR.AC.mn = aggregate(AVG.VAPOR.AC,by=list(aggrlist),mean)[[2]]
AVG.VAPOR.AC.ll = aggregate(AVG.VAPOR.AC,by=list(aggrlist),min)[[2]]
AVG.VAPOR.AC.ul = aggregate(AVG.VAPOR.AC,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
AVG.TRANSP = data$AVG.TRANSP[subset]#polygon transpiration from stomata to canopy air spac
AVG.TRANSP.mn = aggregate(AVG.TRANSP,by=list(aggrlist),mean)[[2]]
AVG.TRANSP.ll = aggregate(AVG.TRANSP,by=list(aggrlist),min)[[2]]
AVG.TRANSP.ul = aggregate(AVG.TRANSP,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
AVG.EVAP = data$AVG.EVAP[subset] #Polygon averaged evap/dew from ground and leaves to C
AVG.EVAP.mn = aggregate(AVG.EVAP,by=list(aggrlist),mean)[[2]]
AVG.EVAP.ll = aggregate(AVG.EVAP,by=list(aggrlist),min)[[2]]
AVG.EVAP.ul = aggregate(AVG.EVAP,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
AVG.CAN.SHV = data$AVG.CAN.SHV[subset] #Polygon Average Specific Humidity of Canopy Air
AVG.CAN.SHV.mn = aggregate(AVG.CAN.SHV,by=list(aggrlist),mean)[[2]]
AVG.CAN.SHV.ll = aggregate(AVG.CAN.SHV,by=list(aggrlist),min)[[2]]
AVG.CAN.SHV.ul = aggregate(AVG.CAN.SHV,by=list(aggrlist),max)[[2]]
#------------------------------------------------------------------------------------------------#
#AVG.SOIL.TEMP = data$AVG.SOIL.TEMP[subset,1,9]-273.15 #Polygon Average Soil Temperature
#AVG.SOIL.TEMP.5cm = aggregate(AVG.SOIL.TEMP,by=list(aggrlist),mean)[[2]]
#AVG.SOIL.TEMP = data$AVG.SOIL.TEMP[subset,1,8]-273.15 #Polygon Average Soil Temperature
#AVG.SOIL.TEMP.10cm = aggregate(AVG.SOIL.TEMP,by=list(aggrlist),mean)[[2]]
#------------------------------------------------------------------------------------------------#
#CAN.AIR.TEMP.AVG = (data$AVG.CAN.TEMP[subset])-273.15 # convert to celcius
#SWC.AVG = data$AVG.SOIL.WATER[subset,1,9] # soil moisture at 5cm
###########################################################################################################
##################################### COMPONENT FLUXES ####################################################
pdf(paste(out.dir,"/","ED2_",year,"_Site_Avg_Fluxes.pdf",sep=""),width=12,height=11,
onefile=TRUE)
par(mfrow=c(3,2),mar=c(5,5.7,0.9,0.5),mgp=c(3.3,1.5,0),oma=c(0,0,3,0)) # B, L, T, R
#==========================================================================================================
# GPP
#==========================================================================================================
ylim = range(c(GPP.AVG.ll,GPP.AVG.ul),na.rm=TRUE) # define Y lims
plot(start_day:end_day,GPP.AVG.mn,xlab='',ylab=expression(paste(GPP," (gC",~m^{-2},")")),
ylim=ylim,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(v=phenology[,2],lty=2,lwd=1.5,col="green3")
abline(v=phenology[,3],lty=2,lwd=1.5,col="brown")
polygon(c(polyx, rev(polyx)), c(GPP.AVG.ul, rev(GPP.AVG.ll)), col="light gray", border="dark grey",lty=2)
lines(start_day:end_day,GPP.AVG.mn,lty=1,col="black")
points(start_day:end_day,GPP.AVG.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
if (is.nan(mean(chk))==0) {
legend("topleft",legend=c("Greenup","Leaf Off"),bty="n",
lty=2,lwd=1.5,col=c("green3","brown"),cex=2)
#text(37,max(GPP.AVG)-4,"GS Length:",cex=2)
#text(35,max(GPP.AVG)-5,paste(round(GS_LENGTH,2)," days",sep=""),
# cex=2 )
}
abline(h=0,lty=2,lwd=1.5,col="black")
rm(chk)
box(lwd=2.2)
#==========================================================================================================
# NPP
#==========================================================================================================
ylim = range(c(NPP.AVG.ll,NPP.AVG.ul),na.rm=TRUE) # define Y lims
plot(start_day:end_day,NPP.AVG.mn,xlab='',ylab=expression(paste(NPP," (gC",~m^{-2},")")),
pch=21,col="black", bg="black",ylim=ylim,
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx, rev(polyx)), c(NPP.AVG.ul, rev(NPP.AVG.ll)),
col="light gray", border="dark grey",lty=2)
lines(start_day:end_day,NPP.AVG.mn,lty=1,col="black")
points(start_day:end_day,NPP.AVG.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Plant Resp
#==========================================================================================================
ylim = range(c(PLANT.RESP.AVG.ll,PLANT.RESP.AVG.ul),na.rm=TRUE) # define Y lims
plot(start_day:end_day,PLANT.RESP.AVG.mn,xlab='',ylim=ylim,
ylab=expression(paste(italic(R)[a]," (gC",~m^{-2},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx, rev(polyx)), c(PLANT.RESP.AVG.ul, rev(PLANT.RESP.AVG.ll)),
col="light gray", border="dark grey",lty=2)
lines(start_day:end_day,PLANT.RESP.AVG.mn,lty=1,col="black")
points(start_day:end_day,PLANT.RESP.AVG.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Heterotrophic Resp
#==========================================================================================================
ylim = range(c(HTROPH.RESP.AVG.ll,HTROPH.RESP.AVG.ul),na.rm=TRUE) # define Y lims
plot(start_day:end_day,HTROPH.RESP.AVG.mn,xlab='',ylim=ylim,
ylab=expression(paste(italic(R)[h]," (gC",~m^{-2},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx, rev(polyx)), c(HTROPH.RESP.AVG.ul, rev(HTROPH.RESP.AVG.ll)),
col="light gray", border="dark grey",lty=2)
lines(start_day:end_day,HTROPH.RESP.AVG.mn,lty=1,col="black")
points(start_day:end_day,HTROPH.RESP.AVG.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Reco
#==========================================================================================================
ylim = range(c(Reco.AVG.ll,Reco.AVG.ul),na.rm=TRUE)
plot(start_day:end_day,Reco.AVG.mn,xlab=paste("DOY",as.character(year)),ylim=ylim,
ylab=expression(paste(italic(R)[eco.]," (gC",~m^{-2},")")),
pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx, rev(polyx)), c(Reco.AVG.ul, rev(Reco.AVG.ll)),col="light gray", border="dark grey",lty=2)
lines(start_day:end_day,Reco.AVG.mn,lty=1,col="black")
points(start_day:end_day,Reco.AVG.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# NEE
#==========================================================================================================
ylim = range(c(NEE.AVG.ll,NEE.AVG.ul),na.rm=TRUE)
plot(start_day:end_day,NEE.AVG.mn,xlab=paste("DOY",as.character(year)),ylim=ylim,
ylab=expression(paste(NEE," (gC",~m^{-2},")")),
pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx, rev(polyx)), c(NEE.AVG.ul, rev(NEE.AVG.ll)),col="light gray", border="dark grey",lty=2)
lines(start_day:end_day,NEE.AVG.mn,lty=1,col="black")
points(start_day:end_day,NEE.AVG.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
# add single title to plot
mtext("Site Component Fluxes", side=3, line=1, outer=TRUE, cex=1.5, font=2)
######################################## RESPIRATION COMPONENTS ###########################################
par(mfrow=c(3,2),mar=c(5,5.7,0.9,0.5),mgp=c(3.3,1.5,0),oma=c(0,0,3,0)) # B, L, T, R
#==========================================================================================================
# Plant resp
#==========================================================================================================
ylim = range(c(PLANT.RESP.AVG.ll,PLANT.RESP.AVG.ul),na.rm=TRUE) # define Y lims
plot(start_day:end_day,PLANT.RESP.AVG.mn,xlab='',ylim=ylim,
ylab=expression(paste(italic(R)[a]," (gC",~m^{-2},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx, rev(polyx)), c(PLANT.RESP.AVG.ul, rev(PLANT.RESP.AVG.ll)),
col="light gray", border="dark grey",lty=2)
lines(start_day:end_day,PLANT.RESP.AVG.mn,lty=1,col="black")
points(start_day:end_day,PLANT.RESP.AVG.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Leaf resp
#==========================================================================================================
ylim = range(c(LEAF.RESP.AVG.ll,LEAF.RESP.AVG.ul),na.rm=TRUE)
plot(start_day:end_day,LEAF.RESP.AVG.mn,xlab='',ylim=ylim,
ylab=expression(paste(italic(R)[leaf]," (gC",~m^{-2},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx,rev(polyx)),c(LEAF.RESP.AVG.ul,rev(LEAF.RESP.AVG.ll)),col="light gray",
border="dark grey",lty=2)
lines(start_day:end_day,LEAF.RESP.AVG.mn,lty=1,col="black")
points(start_day:end_day,LEAF.RESP.AVG.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Root resp
#==========================================================================================================
ylim = range(c(ROOT.RESP.AVG.ll,ROOT.RESP.AVG.ul),na.rm=TRUE)
plot(start_day:end_day,ROOT.RESP.AVG.mn,xlab='',ylim=ylim,
ylab=expression(paste(italic(R)[root]," (gC",~m^{-2},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx,rev(polyx)),c(ROOT.RESP.AVG.ul,rev(ROOT.RESP.AVG.ll)),col="light gray",
border="dark grey",lty=2)
lines(start_day:end_day,ROOT.RESP.AVG.mn,lty=1,col="black")
points(start_day:end_day,ROOT.RESP.AVG.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Growth Resp
#==========================================================================================================
ylim = range(c(GROWTH.RESP.AVG.ll,GROWTH.RESP.AVG.ul),na.rm=TRUE)
plot(start_day:end_day,GROWTH.RESP.AVG.mn,xlab='',ylim=ylim,
ylab=expression(paste(italic(R)[growth]," (gC",~m^{-2},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx,rev(polyx)),c(GROWTH.RESP.AVG.ul,rev(GROWTH.RESP.AVG.ll)),col="light gray",
border="dark grey",lty=2)
lines(start_day:end_day,GROWTH.RESP.AVG.mn,lty=1,col="black")
points(start_day:end_day,GROWTH.RESP.AVG.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Storage Resp
#==========================================================================================================
ylim = range(c(STORAGE.RESP.AVG.ll,STORAGE.RESP.AVG.ul),na.rm=TRUE)
plot(start_day:end_day,STORAGE.RESP.AVG.mn,xlab='',ylim=ylim,
ylab=expression(paste(italic(R)[growth]," (gC",~m^{-2},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx,rev(polyx)),c(STORAGE.RESP.AVG.ul,rev(STORAGE.RESP.AVG.ll)),col="light gray",
border="dark grey",lty=2)
lines(start_day:end_day,STORAGE.RESP.AVG.mn,lty=1,col="black")
points(start_day:end_day,STORAGE.RESP.AVG.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Vleaf resp
#==========================================================================================================
ylim = range(c(VLEAF.RESP.AVG.ll,VLEAF.RESP.AVG.ul),na.rm=TRUE)
plot(start_day:end_day,VLEAF.RESP.AVG.mn,xlab='',ylim=ylim,
ylab=expression(paste(italic(VR)[leaf]," (gC",~m^{-2},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx,rev(polyx)),c(VLEAF.RESP.AVG.ul,rev(VLEAF.RESP.AVG.ll)),col="light gray",
border="dark grey",lty=2)
lines(start_day:end_day,VLEAF.RESP.AVG.mn,lty=1,col="black")
points(start_day:end_day,VLEAF.RESP.AVG.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
# Plot title
mtext("Site Component Respiration ", side=3, line=1, outer=TRUE, cex=1.5, font=2)
########################################### Energy Balance ################################################
par(mfrow=c(3,2),mar=c(5,5.7,0.9,0.5),mgp=c(3.3,1.5,0),oma=c(0,0,3,0)) # B, L, T, R
#==========================================================================================================
# Polygon vegetation/leaf vapor flux
#==========================================================================================================
ylim = range(c(AVG.VAPOR.LC.ll,AVG.VAPOR.LC.ul),na.rm=TRUE)
plot(start_day:end_day,AVG.VAPOR.LC.mn,xlab='',ylim=ylim,
ylab=expression(paste(V.~Flux[veg~to~CAS]," (kg",~m^{-2}~s^{-1},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx,rev(polyx)),c(AVG.VAPOR.LC.ul,rev(AVG.VAPOR.LC.ll)),col="light gray",
border="dark grey",lty=2)
lines(start_day:end_day,AVG.VAPOR.LC.mn,lty=1,col="black")
points(start_day:end_day,AVG.VAPOR.LC.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Polygon moisture flux ground to canopy air
#==========================================================================================================
ylim = range(c(AVG.VAPOR.GC.ll,AVG.VAPOR.GC.ul),na.rm=TRUE)
plot(start_day:end_day,AVG.VAPOR.GC.mn,xlab='',ylim=ylim,
ylab=expression(paste(V.~Flux[ground~to~CAS]," (kg",~m^{-2}~s^{-1},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx,rev(polyx)),c(AVG.VAPOR.GC.ll,rev(AVG.VAPOR.GC.ul)),col="light gray",
border="dark grey",lty=2)
lines(start_day:end_day,AVG.VAPOR.GC.mn,lty=1,col="black")
points(start_day:end_day,AVG.VAPOR.GC.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Polygon vapor flux atmosphere to canopy air
#==========================================================================================================
ylim = range(c(AVG.VAPOR.AC.ll,AVG.VAPOR.AC.ul),na.rm=TRUE)
plot(start_day:end_day,AVG.VAPOR.AC.mn,xlab='',ylim=ylim,
ylab=expression(paste(V.~Flux[atm.~to~CAS]," (kg",~m^{-2}~s^{-1},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx,rev(polyx)),c(AVG.VAPOR.AC.ul,rev(AVG.VAPOR.AC.ll)),col="light gray",
border="dark grey",lty=2)
lines(start_day:end_day,AVG.VAPOR.AC.mn,lty=1,col="black")
points(start_day:end_day,AVG.VAPOR.AC.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Polygon transpiration from stomata to canopy air spac
#==========================================================================================================
ylim = range(c(AVG.TRANSP.ll,AVG.TRANSP.ul),na.rm=TRUE)
plot(start_day:end_day,AVG.TRANSP.mn,xlab='',ylim=ylim,
ylab=expression(paste(Transpiration," (kg",~m^{-2}~s^{-1},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx,rev(polyx)),c(AVG.TRANSP.ul,rev(AVG.TRANSP.ll)),col="light gray",
border="dark grey",lty=2)
lines(start_day:end_day,AVG.TRANSP.mn,lty=1,col="black")
points(start_day:end_day,AVG.TRANSP.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Polygon averaged evap/dew from ground and leaves to C
#==========================================================================================================
ylim = range(c(AVG.EVAP.ll,AVG.EVAP.ul),na.rm=TRUE)
plot(start_day:end_day,AVG.EVAP.mn,xlab='',ylim=ylim,
ylab=expression(paste(Evaporation," (kg",~m^{-2}~s^{-1},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx,rev(polyx)),c(AVG.EVAP.ul,rev(AVG.EVAP.ll)),col="light gray",
border="dark grey",lty=2)
lines(start_day:end_day,AVG.EVAP.mn,lty=1,col="black")
points(start_day:end_day,AVG.EVAP.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Polygon Average Specific Humidity of Canopy Air
#==========================================================================================================
ylim = range(c(AVG.CAN.SHV.ll,AVG.CAN.SHV.ul),na.rm=TRUE)
plot(start_day:end_day,AVG.CAN.SHV.mn,xlab='',ylim=ylim,
ylab=expression(paste(Sp.Humidity[CAS]," (kg",~kg^{-1},")")),pch=21,col="black",
bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
polygon(c(polyx,rev(polyx)),c(AVG.CAN.SHV.ul,rev(AVG.CAN.SHV.ll)),col="light gray",
border="dark grey",lty=2)
lines(start_day:end_day,AVG.CAN.SHV.mn,lty=1,col="black")
points(start_day:end_day,AVG.CAN.SHV.mn,pch=21,col="black", bg="black",
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
abline(h=0,lty=2,lwd=1.5,col="black")
box(lwd=2.2)
#==========================================================================================================
# Polygon wood to canopy air vapor flux
#==========================================================================================================
# ylim = range(c(AVG.VAPOR.WC.ll,AVG.VAPOR.WC.ul),na.rm=TRUE)
# plot(start_day:end_day,AVG.VAPOR.WC.mn,xlab='',ylim=ylim,
# ylab=expression(paste(italic(Vapor Flux)[wood]," (kg",~m^{-2},~s^{-1}")")),pch=21,col="black",
# bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
# polygon(c(polyx,rev(polyx)),c(AVG.VAPOR.WC.ul,rev(AVG.VAPOR.WC.ll)),col="light gray",
# border="dark grey",lty=2)
# lines(start_day:end_day,AVG.VAPOR.WC.mn,lty=1,col="black")
# points(start_day:end_day,AVG.VAPOR.WC.mn,pch=21,col="black", bg="black",
# cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex)
# abline(h=0,lty=2,lwd=1.5,col="black")
# box(lwd=2.2)
# Plot title
mtext("Site Vapor Fluxes ", side=3, line=1, outer=TRUE, cex=1.5, font=2)
##################################### MET ##########################################
#plot(start_day:end_day,AVG.SOIL.TEMP.5cm)
#plot(start_day:end_day,AVG.SOIL.TEMP.10cm)
#mtext("Site Soil Temperatures ", side=3, line=1, outer=TRUE, cex=1.5, font=2)
dev.off() # Close PDF
} # END for loop
}
#----------------------------------------------------------------------------------------------------#
#----------------------------------------------------------------------------------------------------#
# Plot monthly
# Plot monthly-mean ED2 model output (one PDF per simulated year).
#
# Args:
#   model.run : run identifier (currently unused; kept so the signature matches
#               the other plot_* functions in this file)
#   in.dir    : directory holding the ED2 monthly ("-E-") hdf5 output files
#   out.dir   : directory the per-year PDF files are written to
#
# NOTE(review): depends on globals (start_year, end_year, IMONTHA, IMONTHZ)
# and on non-stdlib helpers (hdf5load, chron, mon2mmm) being attached by the
# calling script -- confirm before reusing outside that context.
plot_monthly <- function(model.run,in.dir,out.dir){
  # UNDER DEVELOPMENT
  #--------------------------------------------------------------------------------------------------#
  when = NULL  # running chron timeline over every month processed (all years)
  # ED2 PFT names; the final "Total" entry is a derived (summed) column.
  pft.names = c("C4 Grass","Early Tropical","Mid Tropical","Late Tropical"
                ,"C3 Grass","North Pine","South Pine","Late Conifer"
                ,"Early Temperate","Mid Temperate","Late Temperate"
                ,"C3 Pasture","C3 Crop","C4 Pasture","C4 Crop","Subtropical C3 grass ",
                "Araucaria","Total")
  n.pft = length(pft.names) - 1
  #--------------------------------------------------------------------------------------------------#
  i = 1 # counter: print the variable listing to the log file only once
  for (year in start_year:end_year) {
    message(paste("--- PROCESSING YEAR: ",year," ---"))
    #----- First and last simulated years may be partial (IMONTHA/IMONTHZ). ---------------------#
    if (year == start_year){
      month.begin = IMONTHA
    }else{
      month.begin = 1
    } #end if
    if (year == end_year){
      month.end = IMONTHZ
    }else{
      month.end = 12
    } #end if
    # Months processed this year.  Simplified: the previous expression
    # (-12 + month.end + (12 - month.begin + 1)) is algebraically identical.
    n.months = as.numeric(month.end) - as.numeric(month.begin) + 1
    #----- Preallocate per-month PFT matrices (last column = total). ----------------------------#
    nplant.pft = matrix(0,nrow=n.months,ncol=n.pft+1)
    lai.pft    = matrix(0,nrow=n.months,ncol=n.pft+1)
    agb.pft    = matrix(0,nrow=n.months,ncol=n.pft+1)
    #----- Cohort-level variables, one list entry per month. ------------------------------------#
    coh.area   = list()
    coh.age    = list()
    coh.dbh    = list()
    coh.pft    = list()
    coh.nplant = list()
    coh.height = list()
    coh.gpp    = list()
    coh.resp   = list()
    coh.npp    = list()
    when.year  = NULL  # chron timeline for the current year only (used for plotting)
    #--------------------------------------------------------------------------------------------#
    j = 0 # counter for month in output
    for (mm in month.begin:month.end) {
      j = j+1
      mth = toupper(mon2mmm(mm,lang="English")) #<--- convert month num to 3 letter name
      message(paste("-------- PROCESSING MONTH: ",mth))
      #---------------- Load ED2 Model Output (hdf5) --------------------------------------------#
      # BUGFIX: the old pattern matched only on the year and always took the first
      # file, so every month re-loaded the same file.  Match the month as well.
      # NOTE(review): assumes ED2 monthly files are named "<prefix>-E-YYYY-MM-...h5"
      # -- confirm against the actual output of this model version.
      filename = list.files(in.dir,full.names=TRUE,
                            pattern=paste('.*-E-',year,'-',sprintf('%2.2i',mm),'-.*.h5',sep=''))[1]
      if (is.na(filename)) {
        break # no output file for this month: stop processing this year
      }else{
        data <- hdf5load(filename, load = FALSE,tidy=TRUE) # LOAD ED2 OUTPUT
      }
      # Only extend the timelines for months that were successfully loaded.
      when.now  = chron(dates=paste(mm,1,year,sep="/"),times=paste(0,0,0,sep=":"))
      when      = c(when,when.now)
      when.year = c(when.year,when.now)
      if (i==1){
        # Print the available output variables once, as a debugging aid.
        var_names = summary(data)
        print("Mean Monthly Output Variables (IMOUTPUT)")
        print(var_names)
        print("")
      }
      #------------------------------------------------------------------------------------#
      # Get desired PFT-level variables                                                     #
      #------------------------------------------------------------------------------------#
      lai.pft[j,1:n.pft] = data$MMEAN.LAI.PFT
      message(data.frame(data$MMEAN.LAI.PFT)) # debug output
      agb.pft[j,1:n.pft] = data$AGB.PFT
      #------------------------------------------------------------------------------------#
      # Global number of patches and cohorts.                                               #
      #------------------------------------------------------------------------------------#
      npatches.global = data$NPATCHES.GLOBAL
      ncohorts.global = data$NCOHORTS.GLOBAL
      #----- Indices of the first (aco) and last (zco) cohort of each patch. --------------#
      ncohorts = diff(c(data$PACO.ID,ncohorts.global+1))
      aco = data$PACO.ID
      zco = data$PACO.ID + ncohorts - 1
      #------------------------------------------------------------------------------------#
      # Extend patch area/age so they have the same length as the cohort vectors.          #
      #------------------------------------------------------------------------------------#
      coh.area[[j]] = rep(data$AREA,times=ncohorts)
      coh.age [[j]] = rep(data$AGE ,times=ncohorts)
      #----- Grab other cohort-level variables. -------------------------------------------#
      coh.pft   [[j]] = data$PFT
      message(data$PFT) # debug output
      coh.dbh   [[j]] = data$DBH
      coh.nplant[[j]] = data$NPLANT*coh.area[[j]] # plant density weighted by patch area
      coh.height[[j]] = data$HITE
      coh.gpp   [[j]] = data$MMEAN.GPP.CO
      coh.resp  [[j]] = ( data$MMEAN.LEAF.RESP.CO
                        + data$MMEAN.ROOT.RESP.CO
                        + data$MMEAN.GROWTH.RESP.CO
                        + data$MMEAN.STORAGE.RESP.CO
                        + data$MMEAN.VLEAF.RESP.CO )
      coh.npp   [[j]] = coh.gpp[[j]] - coh.resp[[j]] # NPP = GPP - autotrophic resp.
      #------------------------------------------------------------------------------------#
      i = i+1 # counter for printing variable names to log file
    } # end for loop for importing monthly data for year x
    #------------------------------------------------------------------------------------------#
    # Fill the "Total" column and find which PFTs are present.                                 #
    #------------------------------------------------------------------------------------------#
    tot = n.pft + 1 # <---- total column
    agb.pft[,tot] = rowSums(agb.pft[,1:n.pft])
    lai.pft[,tot] = rowSums(lai.pft[,1:n.pft])
    pft.use = which(colSums(agb.pft) > 0) # PFTs with non-zero biomass
    #==========================================================================================#
    #  Figures: plot the LAI of all PFTs together.                                             #
    #==========================================================================================#
    pdf(paste(out.dir,"/","ED2_",year,"_Monthly_Mean_Output.pdf",sep=""),width=10,height=10,
        onefile=TRUE)
    #----- Expand the upper limit so the legend fits. -----------------------------------------#
    lai.ylim    = range(lai.pft,na.rm=TRUE)
    lai.ylim[2] = lai.ylim[2] + 0.2 * (lai.ylim[2] - lai.ylim[1])
    lai.title   = paste("Leaf Area Index","US-WCr",sep=" - ") # <--- Site needs to be dynamic
    lai.xlab    = "Month"
    lai.ylab    = expression(paste("LAI (",m^{2}~m^{-2},")")) #"LAI [m2/m2]"
    # BUGFIX: plot against this year's timeline (when.year).  The accumulated
    # "when" vector keeps growing across years, so its length no longer matched
    # nrow(lai.pft) after the first year.  Subsetting the rows also keeps the
    # lengths consistent when a partial year terminated the month loop early.
    plot(x=when.year,y=lai.pft[seq_along(when.year),1],type="n",ylim=lai.ylim,xaxt="n"
         ,main=lai.title,xlab=lai.xlab,ylab=lai.ylab)
    dev.off()
  } # end for loop
} # end of function
#----------------------------------------------------------------------------------------------------#
|
context("occ_facet")

test_that("occ_facet works", {
  skip_on_cran()

  # Unfiltered country facet: a named list holding a name/count table.
  res_all <- occ_facet(facet = "country")
  expect_is(res_all, "list")
  expect_named(res_all, "country")
  expect_named(res_all$country, c("name", "count"))

  # A very large facetMincount keeps only the most common countries,
  # so the filtered table must have fewer rows than the unfiltered one.
  res_min <- occ_facet(facet = "country", facetMincount = 30000000L)
  expect_is(res_min, "list")
  expect_named(res_min, "country")
  expect_named(res_min$country, c("name", "count"))
  expect_lt(NROW(res_min$country), NROW(res_all$country))
})
test_that("occ_facet paging works", {
  skip_on_cran()

  # Request three facets at once, with per-facet row limits.
  res <- occ_facet(
    facet = c("country", "basisOfRecord", "hasCoordinate"),
    country.facetLimit = 3,
    basisOfRecord.facetLimit = 6
  )
  expect_is(res, "list")
  # Facets come back in a fixed (not request) order.
  expect_equal(names(res), c("basisOfRecord", "country", "hasCoordinate"))
  # Every facet table exposes the same two columns.
  for (fct in c("country", "basisOfRecord", "hasCoordinate")) {
    expect_named(res[[fct]], c("name", "count"))
  }
  # The per-facet limits cap the number of rows returned.
  expect_equal(NROW(res$country), 3)
  expect_equal(NROW(res$basisOfRecord), 6)
})
test_that("occ_facet fails well", {
  skip_on_cran()

  # 'facet' has no default, so calling with no arguments must error.
  expect_error(
    occ_facet(),
    "argument \"facet\" is missing"
  )

  # An unrecognized facet variable yields an empty result, not an error.
  expect_equal(length(occ_facet(facet = "asdfasdf")), 0)
})
| /tests/testthat/test-occ_facet.R | permissive | MirzaCengic/rgbif | R | false | false | 1,209 | r | context("occ_facet")
test_that("occ_facet works", {
  skip_on_cran()

  # Unfiltered country facet: a named list holding a name/count table.
  res_all <- occ_facet(facet = "country")
  expect_is(res_all, "list")
  expect_named(res_all, "country")
  expect_named(res_all$country, c("name", "count"))

  # A very large facetMincount keeps only the most common countries,
  # so the filtered table must have fewer rows than the unfiltered one.
  res_min <- occ_facet(facet = "country", facetMincount = 30000000L)
  expect_is(res_min, "list")
  expect_named(res_min, "country")
  expect_named(res_min$country, c("name", "count"))
  expect_lt(NROW(res_min$country), NROW(res_all$country))
})
test_that("occ_facet paging works", {
  skip_on_cran()

  # Request three facets at once, with per-facet row limits.
  res <- occ_facet(
    facet = c("country", "basisOfRecord", "hasCoordinate"),
    country.facetLimit = 3,
    basisOfRecord.facetLimit = 6
  )
  expect_is(res, "list")
  # Facets come back in a fixed (not request) order.
  expect_equal(names(res), c("basisOfRecord", "country", "hasCoordinate"))
  # Every facet table exposes the same two columns.
  for (fct in c("country", "basisOfRecord", "hasCoordinate")) {
    expect_named(res[[fct]], c("name", "count"))
  }
  # The per-facet limits cap the number of rows returned.
  expect_equal(NROW(res$country), 3)
  expect_equal(NROW(res$basisOfRecord), 6)
})
test_that("occ_facet fails well", {
  skip_on_cran()

  # 'facet' has no default, so calling with no arguments must error.
  expect_error(
    occ_facet(),
    "argument \"facet\" is missing"
  )

  # An unrecognized facet variable yields an empty result, not an error.
  expect_equal(length(occ_facet(facet = "asdfasdf")), 0)
})
|
## wd etc ----
# library() errors immediately if a package is missing; the original
# used require(), which only returns FALSE and lets the script limp on.
library(data.table)
library(stringr)
library(lubridate)
library(zoo)
library(lightgbm)
## data: train and test ----
xtrain <- fread('../input/air_visit_data.csv')
xtest <- fread('../input/sample_submission.csv')
# align the columns: the submission id concatenates store id and date,
# so the last 10 characters are the date and everything before the
# trailing "_YYYY-MM-DD" (11 chars) is the store id -- TODO confirm format
xtest$air_store_id <- str_sub(xtest$id, 1, -12)
xtest$visit_date <- str_sub(xtest$id, -10)
xtest$id <- NULL
# format
xtrain$visit_date <- as.Date(xtrain$visit_date)
xtest$visit_date <- as.Date(xtest$visit_date)
# combine so the rolling features below are built over train and test at once
xtrain <- rbind(xtrain, xtest)
## reservations: air ----
reserve_air <- fread('../input/air_reserve.csv')
# convert to datetime
reserve_air$visit_datetime <- parse_date_time(reserve_air$visit_datetime, orders = '%Y-%m-%d H:M:S' )
reserve_air$reserve_datetime <- parse_date_time(reserve_air$reserve_datetime, orders = '%Y-%m-%d H:M:S' )
# hours between placing the reservation and the visit itself
reserve_air$time_ahead <- as.double(reserve_air$visit_datetime - reserve_air$reserve_datetime)/3600
# round to day
reserve_air$visit_date <- as.Date(reserve_air$visit_datetime)
# BUG FIX: the original assigned as.Date(visit_datetime) here, i.e. it
# overwrote reserve_datetime with the *visit* date instead of rounding
# the reservation timestamp itself.
reserve_air$reserve_datetime <- as.Date(reserve_air$reserve_datetime)
# one row per store x visit date: total reserved visitors and the mean
# lead time (in hours) of those reservations
res_air_agg <- reserve_air[ j = list(air_res_visitors = sum(reserve_visitors),
                                     air_mean_time_ahead = round(mean(time_ahead),2)) ,
                            by = list(air_store_id, visit_date)]
rm(reserve_air)
## store info: air ----
# Encode the categorical store attributes as integer codes; lightgbm is
# later told about them via the categorical_feature argument.
# (as.integer(factor(.)) already yields the level codes; the original's
# intermediate levels<- reassignment was redundant.)
xstore <- fread('../input/air_store_info.csv')
xstore$air_genre_name <- as.integer(factor(xstore$air_genre_name))
xstore$air_area_name <- as.integer(factor(xstore$air_area_name))
## date info ---
xdate <- fread('../input/date_info.csv')
xdate$day_of_week <- NULL
xdate$calendar_date <- as.Date(xdate$calendar_date)
## data aggregation ----
# The first merge keys on the columns the tables share
# (air_store_id and visit_date).
xtrain <- merge(xtrain, res_air_agg, all.x = TRUE)
xtrain <- merge(xtrain, xstore, all.x = TRUE, by = 'air_store_id' )
xtrain <- merge(xtrain, xdate, by.x = 'visit_date', by.y = 'calendar_date')
rm(res_air_agg, xstore, xdate)
# reservation features are NA where a store/date had no reservations
xtrain[is.na(xtrain)] <- 0
# xtrain <- xtrain[order(air_store_id, visit_dates)]
## FE ----
# Right-aligned rolling sum with partial windows at the series start;
# NA-safe inside the window.  Factored out of eight near-identical
# rollapply calls in the original.
roll_sum <- function(v, width) {
  rollapply(v, width = width, FUN = function(s) sum(s, na.rm = TRUE),
            partial = TRUE, fill = 0, align = 'right')
}
# holiday in the last 3 days (0/1 flag, per store)
xtrain[ , h3a := sign(roll_sum(holiday_flg, 3)), by = c('air_store_id')]
# visits: rolling sums of log1p(visitors) over several look-back windows.
# NOTE(review): the vis14/21/28/35 names do not match the actual window
# widths (39/46/60/74); names kept since only their differences are used.
xtrain[ , `:=`(vis14 = roll_sum(log1p(visitors), 39),
               vis21 = roll_sum(log1p(visitors), 46),
               vis28 = roll_sum(log1p(visitors), 60),
               vis35 = roll_sum(log1p(visitors), 74)),
        by = c('air_store_id')]
# average daily log-visits over the span each wider window adds
xtrain[ , `:=`(vLag1 = round((vis21 - vis14)/7, 2),
               vLag2 = round((vis28 - vis14)/21, 2),
               vLag3 = round((vis35 - vis14)/35, 2))]
xtrain[ , c('vis14', 'vis21', 'vis28', 'vis35') := NULL]
# reservations: rolling sums of log1p(reserved visitors)
xtrain[ , `:=`(res7  = roll_sum(log1p(air_res_visitors), 7),
               res14 = roll_sum(log1p(air_res_visitors), 14),
               res21 = roll_sum(log1p(air_res_visitors), 21),
               res28 = roll_sum(log1p(air_res_visitors), 28)),
        by = c('air_store_id')]
# separate
# Rows with visitors == 0 are the appended test rows (test visitors were
# NA and then zero-filled) -- assumes no true zero-visitor rows in train;
# TODO confirm against the raw data.
xtest <- xtrain[visitors == 0]
xtrain <- xtrain[visitors > 0]
## lgbm - validation ----
# Hold out the tail of the training period for validation.
x0 <- xtrain[visit_date <= '2017-03-09' & visit_date > '2016-04-01']
x1 <- xtrain[visit_date > '2017-03-09']
# Model log1p(visitors), i.e. an RMSLE-style target.
y0 <- log1p(x0$visitors)
y1 <- log1p(x1$visitors)
# NOTE(review): mx1/mx2 only feed the commented-out time-decay weight
# below and are otherwise unused.
mx1 <- as.integer(max(x0$visit_date) -min(x0$visit_date) )
mx2 <- as.integer(x0$visit_date -min(x0$visit_date))
# Drop identifiers and the target before building the feature matrices.
x0$visit_date <- x0$air_store_id <- x0$visitors <- NULL
x1$visit_date <- x1$air_store_id <- x1$visitors <- NULL
cat_features <- c('air_genre_name', 'air_area_name')
d0 <- lgb.Dataset(as.matrix(x0), label = y0,
categorical_feature = cat_features,
free_raw_data = TRUE)
d1 <- lgb.Dataset(as.matrix(x1), label = y1,
categorical_feature = cat_features,
free_raw_data = TRUE)
# x0$wgt <- ((1 + mx2)/(1 + mx1))^5
# NOTE(review): 'weight' below refers to a 'wgt' column that is never
# created (see commented line above); presumably a leftover from the
# disabled weighting experiment -- confirm it is ignored by lgb.train.
params <- list(objective = 'regression', metric = 'mse', max_depth = 7,
feature_fraction = 0.7,
bagging_fraction = 0.8,
min_data_in_leaf = 30,
learning_rate = 0.02,
num_threads = 4,
weight = 'wgt')
ntrx <- 1000
valids <- list(valid = d1)
model <- lgb.train(params = params, data = d0, valids = valids, nrounds = ntrx,
early_stopping_rounds = 10)
pred_val <- predict(model, as.matrix(x1))
# Reported "error" is the standard deviation of the log-scale residuals
# (close to, but not exactly, RMSE unless the residual mean is zero).
print( paste('validation error:', round(sd(pred_val - y1),4), sep = ' ' ))
# 0.5869
# Reuse the early-stopped iteration count when refitting on all data.
ntrx <- model$best_iter
## lgbm - full ----
# Refit on all training rows with the iteration count chosen by early
# stopping above, then predict the held-out test rows.
x0 <- xtrain
x1 <- xtest
y0 <- log1p(x0$visitors)
# Drop identifiers and the target before building the feature matrices.
x0$visit_date <- x0$air_store_id <- x0$visitors <- NULL
x1$visit_date <- x1$air_store_id <- x1$visitors <- NULL
cat_features <- c('air_genre_name', 'air_area_name')
d0 <- lgb.Dataset(as.matrix(x0), label = y0,
categorical_feature = cat_features,
free_raw_data = FALSE)
# Same hyperparameters as the validation fit.
# NOTE(review): 'weight' refers to a 'wgt' column that is never created;
# presumably a leftover -- confirm it is ignored by lgb.train.
params <- list(objective = 'regression', metric = 'mse', max_depth = 7,
feature_fraction = 0.7,
bagging_fraction = 0.8,
min_data_in_leaf = 30,
learning_rate = 0.02,
num_threads = 4,
weight = 'wgt')
model <- lgb.train(params = params, data = d0, nrounds = ntrx)
pred_full <- predict(model, as.matrix(x1))
# Undo the log1p transform and rebuild the "<store id>_<date>" submission id.
prx <- data.frame(id = paste(xtest$air_store_id, xtest$visit_date , sep = '_') ,
visitors = expm1(pred_full))
write.csv(prx, 'xgb_3011.csv', row.names = F, quote = F) | /recruit_restaurant/prophet.R | no_license | bweiher/projs | R | false | false | 7,038 | r | ## wd etc ----
require(data.table)
require(stringr)
require(lubridate)
require(zoo)
require(lightgbm)
## data: train and test ----
xtrain <- fread('../input/air_visit_data.csv')
xtest <- fread('../input/sample_submission.csv')
# align the columns (test has the store id and date concatenated)
xtest$air_store_id <- str_sub(xtest$id, 1,-12)
xtest$visit_date <- str_sub(xtest$id, -10)
xtest$id <- NULL
# format
xtrain$visit_date <- as.Date(xtrain$visit_date)
xtest$visit_date <- as.Date(xtest$visit_date)
# combine
xtrain <- rbind(xtrain, xtest)
## reservations: air ----
reserve_air <- fread('../input/air_reserve.csv')
# convert to datetime
reserve_air$visit_datetime <- parse_date_time(reserve_air$visit_datetime, orders = '%Y-%m-%d H:M:S' )
reserve_air$reserve_datetime <- parse_date_time(reserve_air$reserve_datetime, orders = '%Y-%m-%d H:M:S' )
# hours between placing the reservation and the visit itself
reserve_air$time_ahead <- as.double(reserve_air$visit_datetime - reserve_air$reserve_datetime)/3600
# round to day
reserve_air$visit_date <- as.Date(reserve_air$visit_datetime)
# BUG FIX: the original assigned as.Date(visit_datetime) here, i.e. it
# overwrote reserve_datetime with the *visit* date instead of rounding
# the reservation timestamp itself.
reserve_air$reserve_datetime <- as.Date(reserve_air$reserve_datetime)
# one row per store x visit date: total reserved visitors and the mean
# lead time (in hours) of those reservations
res_air_agg <- reserve_air[ j = list(air_res_visitors = sum(reserve_visitors),
                                     air_mean_time_ahead = round(mean(time_ahead),2)) ,
                            by = list(air_store_id, visit_date)]
rm(reserve_air)
## store info: air ----
xstore <- fread('../input/air_store_info.csv')
xstore$air_genre_name <- factor(xstore$air_genre_name)
levels(xstore$air_genre_name) <- 1:nlevels(xstore$air_genre_name)
xstore$air_genre_name <- as.integer(xstore$air_genre_name)
xstore$air_area_name <- factor(xstore$air_area_name)
levels(xstore$air_area_name) <- 1:nlevels(xstore$air_area_name)
xstore$air_area_name <- as.integer(xstore$air_area_name)
## date info ---
xdate <- fread('../input/date_info.csv')
xdate$day_of_week <- NULL
xdate$calendar_date <- as.Date(xdate$calendar_date)
## data aggregation ----
xtrain <- merge(xtrain, res_air_agg, all.x = T)
xtrain <- merge(xtrain, xstore, all.x = T, by = 'air_store_id' )
xtrain <- merge(xtrain, xdate, by.x = 'visit_date', by.y = 'calendar_date')
rm(res_air_agg, xstore, xdate)
xtrain[is.na(xtrain)] <- 0
# xtrain <- xtrain[order(air_store_id, visit_dates)]
## FE ----
# holiday in the last 3 days
xtrain[ , `:=`(h3a = rollapply(holiday_flg, width = 3, FUN = function(s) sign(sum(s, na.rm = T)),
partial = TRUE, fill = 0, align = 'right') ),
by = c('air_store_id')]
# visits
xtrain[ , `:=`(vis14 = rollapply(log1p(visitors), width = 39, FUN = function(s) sum(s, na.rm = T),
partial = TRUE, fill = 0, align = 'right') ) ,
by = c('air_store_id')]
xtrain[ , `:=`(vis21 = rollapply(log1p(visitors), width = 46, FUN = function(s) sum(s, na.rm = T),
partial = TRUE, fill = 0, align = 'right') ) ,
by = c('air_store_id')]
xtrain[ , `:=`(vis28 = rollapply(log1p(visitors), width = 60, FUN = function(s) sum(s, na.rm = T),
partial = TRUE, fill = 0, align = 'right') ) ,
by = c('air_store_id')]
xtrain[ , `:=`(vis35 = rollapply(log1p(visitors), width = 74, FUN = function(s) sum(s, na.rm = T),
partial = TRUE, fill = 0, align = 'right') ) ,
by = c('air_store_id')]
xtrain[ , `:=`(vLag1 = round((vis21 - vis14)/7,2))]
xtrain[ , `:=`(vLag2 = round((vis28 - vis14)/21,2))]
xtrain[ , `:=`(vLag3 = round((vis35 - vis14)/35,2))]
xtrain[ , vis14 := NULL, with = TRUE]
xtrain[ , vis21 := NULL, with = TRUE]
xtrain[ , vis28 := NULL, with = TRUE]
xtrain[ , vis35 := NULL, with = TRUE]
# reservations
xtrain[ , `:=`(res7 = rollapply(log1p(air_res_visitors), width = 7, FUN = function(s) sum(s, na.rm = T),
partial = TRUE, fill = 0, align = 'right') ) ,
by = c('air_store_id')]
xtrain[ , `:=`(res14 = rollapply(log1p(air_res_visitors), width = 14, FUN = function(s) sum(s, na.rm = T),
partial = TRUE, fill = 0, align = 'right') ) ,
by = c('air_store_id')]
xtrain[ , `:=`(res21 = rollapply(log1p(air_res_visitors), width = 21, FUN = function(s) sum(s, na.rm = T),
partial = TRUE, fill = 0, align = 'right') ) ,
by = c('air_store_id')]
xtrain[ , `:=`(res28 = rollapply(log1p(air_res_visitors), width = 28, FUN = function(s) sum(s, na.rm = T),
partial = TRUE, fill = 0, align = 'right') ) ,
by = c('air_store_id')]
# separate
xtest <- xtrain[visitors == 0]
xtrain <- xtrain[visitors > 0]
## lgbm - validation ----
x0 <- xtrain[visit_date <= '2017-03-09' & visit_date > '2016-04-01']
x1 <- xtrain[visit_date > '2017-03-09']
y0 <- log1p(x0$visitors)
y1 <- log1p(x1$visitors)
mx1 <- as.integer(max(x0$visit_date) -min(x0$visit_date) )
mx2 <- as.integer(x0$visit_date -min(x0$visit_date))
x0$visit_date <- x0$air_store_id <- x0$visitors <- NULL
x1$visit_date <- x1$air_store_id <- x1$visitors <- NULL
cat_features <- c('air_genre_name', 'air_area_name')
d0 <- lgb.Dataset(as.matrix(x0), label = y0,
categorical_feature = cat_features,
free_raw_data = TRUE)
d1 <- lgb.Dataset(as.matrix(x1), label = y1,
categorical_feature = cat_features,
free_raw_data = TRUE)
# x0$wgt <- ((1 + mx2)/(1 + mx1))^5
params <- list(objective = 'regression', metric = 'mse', max_depth = 7,
feature_fraction = 0.7,
bagging_fraction = 0.8,
min_data_in_leaf = 30,
learning_rate = 0.02,
num_threads = 4,
weight = 'wgt')
ntrx <- 1000
valids <- list(valid = d1)
model <- lgb.train(params = params, data = d0, valids = valids, nrounds = ntrx,
early_stopping_rounds = 10)
pred_val <- predict(model, as.matrix(x1))
print( paste('validation error:', round(sd(pred_val - y1),4), sep = ' ' ))
# 0.5869
ntrx <- model$best_iter
## lgbm - full ----
x0 <- xtrain
x1 <- xtest
y0 <- log1p(x0$visitors)
x0$visit_date <- x0$air_store_id <- x0$visitors <- NULL
x1$visit_date <- x1$air_store_id <- x1$visitors <- NULL
cat_features <- c('air_genre_name', 'air_area_name')
d0 <- lgb.Dataset(as.matrix(x0), label = y0,
categorical_feature = cat_features,
free_raw_data = FALSE)
params <- list(objective = 'regression', metric = 'mse', max_depth = 7,
feature_fraction = 0.7,
bagging_fraction = 0.8,
min_data_in_leaf = 30,
learning_rate = 0.02,
num_threads = 4,
weight = 'wgt')
model <- lgb.train(params = params, data = d0, nrounds = ntrx)
pred_full <- predict(model, as.matrix(x1))
prx <- data.frame(id = paste(xtest$air_store_id, xtest$visit_date , sep = '_') ,
visitors = expm1(pred_full))
write.csv(prx, 'xgb_3011.csv', row.names = F, quote = F) |
#' List of feather icons
#'
#' Simply beautiful open source icons
#'
#' @format A vector of 266 icons
#' @source \url{https://feathericons.com/}
#' @docType data
#' @keywords datasets
"icons_list"
| /R/icons.R | permissive | ColinFay/feathericons | R | false | false | 159 | r | #' List of feather icons
#'
#' Simply beautiful open source icons
#'
#' @format A vector of 266 icons
#' @source \url{https://feathericons.com/}
#' @docType data
#' @keywords datasets
"icons_list"
|
#install.packages("normalregMix_1.0.tar.gz", repos = NULL, type="source")
library(snow)
library(doParallel)
library(Rmpi)
library(normalregMix)
## Runs one EM test on a packed sample, dispatching on the dimension of X.
# 'sample' packs y (n values) followed by n row-blocks of q covariates.
PerformEMtest <- function (sample, q, an, m = 1, z = NULL, parallel) {
  # Workers may be fresh R sessions, so load dependencies locally.
  library(doParallel)
  library(normalregMix)
  testMode(TRUE) # for replication
  n <- as.integer(length(sample)/(q+1))
  y <- sample[1:n]
  if (q <= 0) {
    # No covariates: plain normal mixture EM test.
    return(normalmixMEMtest(y, m = m, z = z, an = an, crit.method = "asy",
                            parallel = parallel))
  }
  # Unpack the remaining values row-wise into an n x q covariate matrix.
  covariates <- matrix(sample[(n+1):length(sample)], nrow = n, byrow = TRUE)
  regmixMEMtest(y, covariates, m = m, z = z, an = an, crit.method = "asy",
                parallel = parallel)
}
## Returns frequency that the null H0: m=1 is rejected
# out of replications of given an and data that consists of columns of samples.
# Each column of 'data' is one packed sample (see PerformEMtest).
PerformEMtests <- function (an, data, q = 1, m = 1,
                            parallel, rmpi) {
  if (rmpi) {
    # mpi.applyLB wants a list: one packed sample (column) per element.
    ldata <- lapply(seq_len(ncol(data)), function(i) data[, i])
    out <- mpi.applyLB(ldata, PerformEMtest, q = q, an = an, m = m, z = NULL,
                       parallel = parallel)
  } else {
    out <- apply(data, 2, PerformEMtest, q = q, an = an, m = m, z = NULL,
                 parallel = parallel)
  }
  pvals <- sapply(out, "[[", "pvals")
  # Rejection frequencies at the 1% and 5% levels for K = 2 and K = 3.
  # (Computed once; the original rebuilt the identical list twice, once
  # for print() and once for the return value.)
  res <- list(reject.one.K2 = mean(pvals[2, ] < 0.01),
              reject.one.K3 = mean(pvals[3, ] < 0.01),
              reject.five.K2 = mean(pvals[2, ] < 0.05),
              reject.five.K3 = mean(pvals[3, ] < 0.05))
  print(c(list(an = an), res))
  res
}
# Returns data set of rejection frequency rate corresponding to each an,
# the value of optimal an that is closest to the 0.05 significance level, and
# the frequency of rejection according to the optimal an.
FindOptimal1vs2an <- function (phidatapair, anset, m = 1,
                               parallel = 0, rmpi = TRUE) {
  phi <- phidatapair$phi
  data <- phidatapair$data
  # NOTE(review): phi carries a 'betasets' component upstream; phi$betaset
  # relies on R's partial matching of list names -- confirm intended.
  q <- length(phi$betaset)
  # loop over each a_n
  output <- lapply(anset, PerformEMtests, data = data, q = q, m = m,
                   parallel = parallel, rmpi = rmpi)
  freqs.one.K2 <- sapply(output, "[[", "reject.one.K2")
  freqs.one.K3 <- sapply(output, "[[", "reject.one.K3")
  freqs.five.K2 <- sapply(output, "[[", "reject.five.K2")
  freqs.five.K3 <- sapply(output, "[[", "reject.five.K3")
  table <- data.frame(anset, freqs.one.K2, freqs.one.K3, freqs.five.K2, freqs.five.K3)
  colnames(table) <- c("an", "1%, K=2", "1%, K=3", "5%, K=2", "5%, K=3")
  # Pick the a_n whose 5%-level (K=2) rejection rate is closest to the
  # nominal 0.05, first match on ties.  which.min replaces the original's
  # twice-repeated which(abs(...) == min(abs(...)))[1] expression.
  best <- which.min(abs(freqs.five.K2 - 0.05))
  print(table)
  list(optimal.value = anset[best], optimal.perf = freqs.five.K2[best])
}
## Generate a column that represents a sample using phi given.
# The packed column has the form (y x_1' x_2' ... x_n')', where each
# block x_i holds the q covariates of observation i.
GenerateSample <- function(phi) {
  n <- phi$n
  betaset <- phi$betaset
  # NOTE(review): q is taken from the first element of betaset; if betaset
  # is a plain numeric coefficient vector this yields q = 1 -- confirm the
  # intended structure of phi$betaset.
  q <- 0
  if (!is.null(betaset))
    q <- length(betaset[[1]])
  if (q <= 0)
    # BUG FIX: the original print()-ed the error and kept running; raise.
    stop("in this experiment, dim(X) > 0")
  x.sample <- matrix(rnorm(n*q), nrow = n) # each row is one observation
  # y_i ~ N(x_i' beta, 1).
  # BUG FIX: the original passed the vector betaset * x.obs as 'mean';
  # rnorm(1, ...) silently uses only its first element, so covariates
  # beyond the first never affected y.  sum() gives the inner product.
  # (A dead 'y.sample <- rnorm(n)' draw was also removed; note this
  # shifts the RNG stream relative to the original script.)
  y.sample <- apply(x.sample, 1, function(x.obs)
    rnorm(1, mean = sum(betaset * x.obs), sd = 1))
  c(y.sample, c(t(x.sample)))
}
## Generate a pair of phi and data, where data is generated by replication.
GeneratePhiDataPair <- function(phi, replication) {
  phi <- as.list(phi) # coerce a possibly-atomic row into a plain list
  # Each GenerateSample() call yields one packed column; bind
  # 'replication' of them into a matrix (one sample per column).
  columns <- replicate(replication, GenerateSample(phi = phi), simplify = FALSE)
  list(phi = phi, data = do.call(cbind, columns))
}
## Create data given phiset and replication
GeneratePhiDataPairs <- function(phiset, replication = 2000) { # original paper has 10000 replications
  # One (phi, data) pair per row of phiset.
  apply(X = phiset, MARGIN = 1, FUN = GeneratePhiDataPair,
        replication = replication)
}
## Rmpi setup
print("collecting workers..")
mpi.spawn.Rslaves()
mpi.setup.rngstream()
# BUG FIX: the original broadcast 'performEMtest' (lowercase p), a name
# that does not exist; the worker function is PerformEMtest.
mpi.bcast.Robj2slave(PerformEMtest, all = TRUE)
print("workers loaded.")
## ====== BEGIN EXPERIMENT ======
## Initialization & data generation
# Model specification (per row of the table)
# dim(X) = 3
dimx <- 3
anlb <- 2.4     # lower end of the a_n grid
anub <- 2.7     # upper end of the a_n grid
ancount <- 4    # number of a_n grid points
SEED <- 333333
# init.
set.seed(SEED)
anset <- seq(anlb, anub, length.out = ancount)[1:ancount]
betaset <- rep(0.5, dimx)
# generate data
phiset <- expand.grid(n = 200)
phiset$betasets <- lapply(seq_len(nrow(phiset)), function(j) betaset)
pairs <- GeneratePhiDataPairs(phiset)
## 2. Create a row for a table.
cols <- list()
for (i in seq_along(pairs)) {
  phi <- pairs[[i]]$phi
  data <- pairs[[i]]$data
  n <- phi$n
  result <- FindOptimal1vs2an(pairs[[i]], anset = anset, m = 1)
  cols[[i]] <- list(n, result$optimal.value, result$optimal.perf)
  df <- data.frame(matrix(unlist(cols), ncol = length(cols[[1]]), byrow = TRUE))
  colnames(df) <- c("n", "optimal.value", "optimal.perf")
  print(df) # checkpoint after every phi so partial results survive a crash
}
print(df)
## ====== END EXPERIMENT ======
# Rmpi termination
mpi.close.Rslaves()
| /experiments/Table4/2ndTrial/1vs2TestFindAnCoarseX3.R | no_license | hkasahar/normalregMix | R | false | false | 5,667 | r | #install.packages("normalregMix_1.0.tar.gz", repos = NULL, type="source")
library(snow)
library(doParallel)
library(Rmpi)
library(normalregMix)
## Generates EM test result according to the dimension of X
PerformEMtest <- function (sample, q, an, m = 1, z = NULL, parallel) {
library(doParallel) # workers might need information
library(normalregMix) # workers might need information
testMode(TRUE) # for replication
n <- as.integer(length(sample)/(q+1))
y <- sample[1:n] # first n elements represents y data
if (q <= 0)
return (normalmixMEMtest(y, m = m, z = z, an = an, crit.method = "asy",
parallel = parallel))
# the other part consists of n chuck of q-length x data
x <- matrix(sample[(n+1):length(sample)], nrow = n, byrow = TRUE)
return (regmixMEMtest(y, x, m = m, z = z, an = an, crit.method = "asy",
parallel = parallel))
}
## Returns frequency that the null H0: m=1 is rejected
# out of replications of given an and data that consists of columns of samples
PerformEMtests <- function (an, data, q = 1, m = 1,
parallel, rmpi) {
if (rmpi)
{
# need to transform data (matrix) to a list first; each element is a column (y x_1' x_2' ... x_n')'
ldata <- lapply(seq_len(ncol(data)), function(i) data[,i])
out <- mpi.applyLB(ldata, PerformEMtest, q = q, an = an, m = m, z = NULL,
parallel = parallel)
}
else
out <- apply(data, 2, PerformEMtest, q = q, an = an, m = m, z = NULL,
parallel = parallel)
pvals <- sapply(out, "[[", "pvals")
print(list(an = an, reject.one.K2 = mean(pvals[2,] < 0.01), reject.one.K3 = mean(pvals[3,] < 0.01),
reject.five.K2 = mean(pvals[2,] < 0.05), reject.five.K3 = mean(pvals[3,] < 0.05)))
return (list(reject.one.K2 = mean(pvals[2,] < 0.01), reject.one.K3 = mean(pvals[3,] < 0.01),
reject.five.K2 = mean(pvals[2,] < 0.05), reject.five.K3 = mean(pvals[3,] < 0.05)))
}
# Returns data set of rejection frequency rate corresponding to each an,
# the value of optimal an that is closest to given sig. level (0.05 by default), and
# the frequency of rejection according to the optimal an.
FindOptimal1vs2an <- function (phidatapair, anset, m = 1,
parallel = 0, rmpi = TRUE) {
phi <- phidatapair$phi
data <- phidatapair$data
q <- length(phi$betaset)
# loop over each a_n.
output <- lapply(anset, PerformEMtests, data = data, q = q, m = m,
parallel = parallel, rmpi = rmpi)
freqs.one.K2 <- sapply(output, "[[", "reject.one.K2")
freqs.one.K3 <- sapply(output, "[[", "reject.one.K3")
freqs.five.K2 <- sapply(output, "[[", "reject.five.K2")
freqs.five.K3 <- sapply(output, "[[", "reject.five.K3")
# show me what you've got.
table <- data.frame(anset, freqs.one.K2, freqs.one.K3, freqs.five.K2, freqs.five.K3)
colnames(table) <- c("an", "1%, K=2", "1%, K=3", "5%, K=2", "5%, K=3")
optimal.value <- anset[which(abs(freqs.five.K2-0.05)==min(abs(freqs.five.K2-0.05)))][1]
optimal.perf <- freqs.five.K2[which(abs(freqs.five.K2-0.05)==min(abs(freqs.five.K2-0.05)))][1]
print(table)
return (list(optimal.value = optimal.value, optimal.perf = optimal.perf))
}
## Generate a column that represents a sample using phi given.
# The packed column has the form (y x_1' x_2' ... x_n')', where each
# block x_i holds the q covariates of observation i.
GenerateSample <- function(phi) {
  n <- phi$n
  betaset <- phi$betaset
  # NOTE(review): q is taken from the first element of betaset; if betaset
  # is a plain numeric coefficient vector this yields q = 1 -- confirm the
  # intended structure of phi$betaset.
  q <- 0
  if (!is.null(betaset))
    q <- length(betaset[[1]])
  if (q <= 0)
    # BUG FIX: the original print()-ed the error and kept running; raise.
    stop("in this experiment, dim(X) > 0")
  x.sample <- matrix(rnorm(n*q), nrow = n) # each row is one observation
  # y_i ~ N(x_i' beta, 1).
  # BUG FIX: the original passed the vector betaset * x.obs as 'mean';
  # rnorm(1, ...) silently uses only its first element, so covariates
  # beyond the first never affected y.  sum() gives the inner product.
  # (A dead 'y.sample <- rnorm(n)' draw was also removed; note this
  # shifts the RNG stream relative to the original script.)
  y.sample <- apply(x.sample, 1, function(x.obs)
    rnorm(1, mean = sum(betaset * x.obs), sd = 1))
  c(y.sample, c(t(x.sample)))
}
## Generate a pair of phi and data, where data is generated by replication.
GeneratePhiDataPair <- function(phi, replication) {
phi <- as.list(phi) # make it atomic.
# data is an (n replication) matrix whose column represents a sample of size n,
data <- do.call(cbind, replicate(replication, GenerateSample(phi = phi), simplify = FALSE))
return (list(phi = phi, data = data))
}
## Create data given phiset and replication
GeneratePhiDataPairs <- function(phiset, replication = 2000) { # original paper has 10000 replications
apply(phiset, 1, GeneratePhiDataPair, replication = replication) # list of (phi data)
}
## Rmpi setup
print("collecting workers..")
mpi.spawn.Rslaves()
mpi.setup.rngstream()
# BUG FIX: the original broadcast 'performEMtest' (lowercase p), a name
# that does not exist; the worker function is PerformEMtest.
mpi.bcast.Robj2slave(PerformEMtest, all = TRUE)
print("workers loaded.")
## ====== BEGIN EXPERIMENT ======
## Initialization & data generation
# Model specification (per row of the table)
# dim(X) = 3
dimx <- 3
anlb <- 2.4     # lower end of the a_n grid
anub <- 2.7     # upper end of the a_n grid
ancount <- 4    # number of a_n grid points
SEED <- 333333
# init.
set.seed(SEED)
anset <- seq(anlb, anub, length.out = ancount)[1:ancount]
betaset <- rep(0.5, dimx)
# generate data
phiset <- expand.grid(n = 200)
phiset$betasets <- lapply(seq_len(nrow(phiset)), function(j) betaset)
pairs <- GeneratePhiDataPairs(phiset)
## 2. Create a row for a table.
cols <- list()
for (i in seq_along(pairs)) {
  phi <- pairs[[i]]$phi
  data <- pairs[[i]]$data
  n <- phi$n
  result <- FindOptimal1vs2an(pairs[[i]], anset = anset, m = 1)
  cols[[i]] <- list(n, result$optimal.value, result$optimal.perf)
  df <- data.frame(matrix(unlist(cols), ncol = length(cols[[1]]), byrow = TRUE))
  colnames(df) <- c("n", "optimal.value", "optimal.perf")
  print(df) # checkpoint after every phi so partial results survive a crash
}
print(df)
## ====== END EXPERIMENT ======
# Rmpi termination
mpi.close.Rslaves()
|
################################################################################################
## Copyright (C) 2015, Constantinos Tsirogiannis and Brody Sandel.
##
## Email: analekta@gmail.com and brody.sandel@bios.au.dk
##
## This file is part of PhyloMeasures.
##
## PhyloMeasures is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## PhyloMeasures is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with PhyloMeasures. If not, see <http://www.gnu.org/licenses/>
################################################################################################
# library() errors immediately if a package is missing; the original
# used require(), which only returns FALSE and lets the script limp on.
library(ape)
library(PhyloMeasures)
tree.filename <- "test.tre"
input.tree <- read.tree(tree.filename)
# NOTE(review): 'names' shadows base::names() for the rest of the script;
# kept because the downstream loops reference this variable.
names <- input.tree$tip.label
for( k in 0:length(names))
{
all.samples = t(combn(names,k))
input.data = matrix(0,nrow = nrow(all.samples),ncol = length(names))
for( i in 1: nrow(all.samples) )
for( j in 1: length(names) )
{
if(is.element(names[j], all.samples[i,]))
input.data[i,j] = 1
}
colnames(input.data) = names
##########################################
########## Check PD functions ############
##########################################
results.pd = pd.query(input.tree, input.data, is.standardised = FALSE)
moments.pd = pd.moments(input.tree, c(k))
expectation.check = 0
deviation.check = 0
for(l in 1:length(results.pd))
expectation.check = expectation.check + results.pd[l]
expectation.check = expectation.check/length(results.pd)
for(l in 1:length(results.pd))
{
deviation = results.pd[l]-expectation.check
deviation.check = deviation.check + (deviation*deviation)
}
deviation.check = sqrt(deviation.check/length(results.pd))
if( abs(moments.pd[1] - expectation.check) > 0.01 )
stop("There is an unexpected discrepancy in the value of the PD expectation.")
if( abs(moments.pd[2] - deviation.check) > 0.01 )
stop("There is an unexpected discrepancy in the value of the PD deviation.")
##########################################
########## Check MPD functions ###########
##########################################
results.mpd = mpd.query(input.tree, input.data, is.standardised = FALSE)
moments.mpd = mpd.moments(input.tree, c(k))
expectation.check = 0
deviation.check = 0
for(l in 1:length(results.mpd))
expectation.check = expectation.check + results.mpd[l]
expectation.check = expectation.check/length(results.mpd)
for(l in 1:length(results.mpd))
{
deviation = results.mpd[l]-expectation.check
deviation.check = deviation.check + (deviation*deviation)
}
deviation.check = sqrt(deviation.check/length(results.mpd))
if( abs(moments.mpd[1] - expectation.check) > 0.01 )
stop("There is an unexpected discrepancy in the value of the MPD expectation.")
if( abs(moments.mpd[2] - deviation.check) > 0.01 )
stop("There is an unexpected discrepancy in the value of the MPD deviation.")
##########################################
######### Check MNTD functions ###########
##########################################
results.mntd = mntd.query(input.tree, input.data, is.standardised = FALSE)
moments.mntd = mntd.moments(input.tree, c(k))
expectation.check = 0
deviation.check = 0
for(l in 1:length(results.mntd))
expectation.check = expectation.check + results.mntd[l]
expectation.check = expectation.check/length(results.mntd)
for(l in 1:length(results.mntd))
{
deviation = results.mntd[l]-expectation.check
deviation.check = deviation.check + (deviation*deviation)
}
deviation.check = sqrt(deviation.check/length(results.mntd))
if( abs(moments.mntd[1] - expectation.check) > 0.01 )
stop("There is an unexpected discrepancy in the value of the MNTD expectation.")
if( abs(moments.mntd[2] - deviation.check) > 0.01 )
stop("There is an unexpected discrepancy in the value of the MNTD deviation.")
##########################################
########## Check CAC functions ###########
##########################################
my.chi = 0.76
results.cac = cac.query(input.tree, input.data, my.chi, is.standardised = FALSE)
moments.cac = cac.moments(input.tree, my.chi, c(k))
expectation.check = 0
deviation.check = 0
for(l in 1:length(results.cac))
expectation.check = expectation.check + results.cac[l]
expectation.check = expectation.check/length(results.cac)
for(l in 1:length(results.cac))
{
deviation = results.cac[l]-expectation.check
deviation.check = deviation.check + (deviation*deviation)
}
deviation.check = sqrt(deviation.check/length(results.cac))
if( abs(moments.cac[1] - expectation.check) > 0.01 )
stop("There is an unexpected discrepancy in the value of the CAC expectation.")
if( abs(sqrt(moments.cac[2]) - deviation.check) > 0.01 )
stop("There is an unexpected discrepancy in the value of the CAC deviation.")
for( h in 0: length(names) )
{
all.samples.b = t(combn(names,h))
input.data.b = matrix(0,nrow = nrow(all.samples.b),ncol = length(names))
for( i in 1:nrow(all.samples.b) )
for( j in 1: length(names) )
{
if(is.element(names[j], all.samples.b[i,]))
input.data.b[i,j] = 1
}
colnames(input.data.b) = names
input.sizes = matrix(0,nrow = 1, ncol = 2)
input.sizes[1,1] = k
input.sizes[1,2] = h
##########################################
########## Check CBL functions ###########
##########################################
results.cbl = cbl.query(input.tree, input.data, matrix.b = input.data.b, is.standardised = FALSE)
moments.cbl= cbl.moments(input.tree, input.sizes)
expectation.check = 0
deviation.check = 0
for(l in 1:nrow(results.cbl))
for(r in 1:ncol(results.cbl))
expectation.check = expectation.check + results.cbl[l,r]
expectation.check = expectation.check/(nrow(results.cbl)*ncol(results.cbl))
for(l in 1:nrow(results.cbl))
for(r in 1:ncol(results.cbl))
{
deviation = results.cbl[l,r]-expectation.check
deviation.check = deviation.check + (deviation*deviation)
}
deviation.check = sqrt(deviation.check/(nrow(results.cbl)*ncol(results.cbl)))
if( abs(moments.cbl[1] - expectation.check) > 0.01 )
stop("There is an unexpected discrepancy in the value of the CBL expectation.")
if( abs(moments.cbl[2] - deviation.check) > 0.01 )
stop("There is an unexpected discrepancy in the value of the CBL deviation.")
##########################################
########## Check CD functions ###########
##########################################
results.cd = cd.query(input.tree, input.data, matrix.b = input.data.b, is.standardised = FALSE)
moments.cd= cd.moments(input.tree, input.sizes)
expectation.check = 0
deviation.check = 0
for(l in 1:nrow(results.cd))
for(r in 1:ncol(results.cd))
expectation.check = expectation.check + results.cd[l,r]
expectation.check = expectation.check/(nrow(results.cd)*ncol(results.cd))
for(l in 1:nrow(results.cd))
for(r in 1:ncol(results.cd))
{
deviation = results.cd[l,r]-expectation.check
deviation.check = deviation.check + (deviation*deviation)
}
deviation.check = sqrt(deviation.check/(nrow(results.cd)*ncol(results.cd)))
if( abs(moments.cd[1] - expectation.check) > 0.01 )
stop("There is an unexpected discrepancy in the value of the CD expectation.")
if( abs(moments.cd[2] - deviation.check) > 0.01 )
stop("There is an unexpected discrepancy in the value of the CD deviation.")
} # for( h in 0: length(names) )
} # for( k in 1:length(names))
cat("\n")
cat("---------- All tests were completed successfully ----------")
cat("\n")
cat("\n")
| /PhyloMeasures/tests/tests.R | no_license | ingted/R-Examples | R | false | false | 8,271 | r | ################################################################################################
## Copyright (C) 2015, Constantinos Tsirogiannis and Brody Sandel.
##
## Email: analekta@gmail.com and brody.sandel@bios.au.dk
##
## This file is part of PhyloMeasures.
##
## PhyloMeasures is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## PhyloMeasures is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with PhyloMeasures. If not, see <http://www.gnu.org/licenses/>
################################################################################################
# Regression tests for the PhyloMeasures query/moments functions: for every
# possible tip-sample size k (and, for the two-sample measures, every size h
# of the second sample), the empirical mean and population standard deviation
# of the exhaustive query results must match the analytically computed
# moments within a 0.01 tolerance.
library(ape)
library(PhyloMeasures)

# Build the presence/absence matrix (one row per sample, one column per tip)
# describing all samples of `sample.size` tips drawn from `tip.names`.
build.presence.matrix <- function(tip.names, sample.size) {
  all.samples <- t(combn(tip.names, sample.size))
  presence <- matrix(0, nrow = nrow(all.samples), ncol = length(tip.names))
  for (i in seq_len(nrow(all.samples))) {
    presence[i, ] <- as.integer(tip.names %in% all.samples[i, ])
  }
  colnames(presence) <- tip.names
  presence
}

# Compare the empirical mean and population standard deviation (denominator n,
# matching the original element-wise accumulation) of `observed` -- a vector
# or matrix of query values -- against the expected `moments` (expectation
# first, deviation second). `dev.is.variance` handles measures (CAC) whose
# second moment is reported as a variance rather than a deviation.
check.moments <- function(observed, moments, label, dev.is.variance = FALSE) {
  expectation.check <- mean(observed)
  deviation.check <- sqrt(mean((observed - expectation.check)^2))
  expected.deviation <- if (dev.is.variance) sqrt(moments[2]) else moments[2]
  if (abs(moments[1] - expectation.check) > 0.01)
    stop(sprintf("There is an unexpected discrepancy in the value of the %s expectation.", label))
  if (abs(expected.deviation - deviation.check) > 0.01)
    stop(sprintf("There is an unexpected discrepancy in the value of the %s deviation.", label))
}

tree.filename <- "test.tre"
input.tree <- read.tree(tree.filename)
# `tip.names` instead of `names`, which would shadow base::names().
tip.names <- input.tree$tip.label

for (k in 0:length(tip.names)) {
  input.data <- build.presence.matrix(tip.names, k)

  ##########################################
  ###### Single-sample measure checks ######
  ##########################################
  check.moments(pd.query(input.tree, input.data, is.standardised = FALSE),
                pd.moments(input.tree, c(k)), "PD")
  check.moments(mpd.query(input.tree, input.data, is.standardised = FALSE),
                mpd.moments(input.tree, c(k)), "MPD")
  check.moments(mntd.query(input.tree, input.data, is.standardised = FALSE),
                mntd.moments(input.tree, c(k)), "MNTD")
  my.chi <- 0.76
  check.moments(cac.query(input.tree, input.data, my.chi, is.standardised = FALSE),
                cac.moments(input.tree, my.chi, c(k)), "CAC",
                dev.is.variance = TRUE)

  ##########################################
  ####### Two-sample measure checks ########
  ##########################################
  for (h in 0:length(tip.names)) {
    input.data.b <- build.presence.matrix(tip.names, h)
    input.sizes <- matrix(c(k, h), nrow = 1, ncol = 2)
    check.moments(cbl.query(input.tree, input.data, matrix.b = input.data.b, is.standardised = FALSE),
                  cbl.moments(input.tree, input.sizes), "CBL")
    check.moments(cd.query(input.tree, input.data, matrix.b = input.data.b, is.standardised = FALSE),
                  cd.moments(input.tree, input.sizes), "CD")
  } # for( h in 0: length(tip.names) )
} # for( k in 0:length(tip.names))

cat("\n")
cat("---------- All tests were completed successfully ----------")
cat("\n")
cat("\n")
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/diversity.R
\name{diversity}
\alias{diversity}
\title{Diversity Statistics}
\usage{
diversity(text.var, grouping.var = NULL)
}
\arguments{
\item{text.var}{The text variable.}
\item{grouping.var}{The grouping variables. Default \code{NULL} generates
one word list for all text. Also takes a single grouping variable or a list
of 1 or more grouping variables.}
}
\value{
Returns a dataframe of various diversity related indices for Shannon,
collision, Berger Parker and Brillouin.
}
\description{
Transcript apply diversity/richness indices.
}
\details{
These are the formulas used to calculate the indices:
\bold{Shannon index:}
\deqn{H_1(X)=-\sum\limits_{i=1}^R{p_i}\;log\;p_i}
Shannon, C. E. (1948). A mathematical theory of communication. Bell System \cr
\bold{Simpson index:}
\deqn{D=\frac{\sum_{i=1}^R{p_i}\;n_i(n_i -1)}{N(N-1)}}
Simpson, E. H. (1949). Measurement of diversity. Nature 163, p. 688 \cr
\bold{Collision entropy:}
\deqn{H_2(X)=-log\sum_{i=1}^n{p_i}^2}
Renyi, A. (1961). On measures of information and entropy. Proceedings of the
4th Berkeley Symposium on Mathematics, Statistics and Probability, 1960.
pp. 547-561. \cr
\bold{Berger Parker index:}
\deqn{D_{BP}=\frac{N_{max}}{N}}
Berger, W. H., & Parker, F. L.(1970). Diversity of planktonic Foramenifera in
deep sea sediments. Science 168, pp. 1345-1347. \cr
\bold{Brillouin index:}
\deqn{H_B=\frac{ln(N!)-\sum{ln(n_i)!}}{N}}
Magurran, A. E. (2004). Measuring biological diversity. Blackwell.
}
\examples{
\dontrun{
div.mod <- with(mraja1spl, diversity(dialogue, list(sex, died, fam.aff)))
colsplit2df(div.mod)
plot(div.mod, high = "red", low = "yellow")
plot(div.mod, high = "red", low = "yellow", values = TRUE)
}
}
\references{
\url{http://arxiv.org/abs/physics/0512106}
}
\keyword{diversity}
| /man/diversity.Rd | no_license | Maddocent/qdap | R | false | false | 1,863 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/diversity.R
\name{diversity}
\alias{diversity}
\title{Diversity Statistics}
\usage{
diversity(text.var, grouping.var = NULL)
}
\arguments{
\item{text.var}{The text variable.}
\item{grouping.var}{The grouping variables. Default \code{NULL} generates
one word list for all text. Also takes a single grouping variable or a list
of 1 or more grouping variables.}
}
\value{
Returns a dataframe of various diversity related indices for Shannon,
collision, Berger Parker and Brillouin.
}
\description{
Transcript apply diversity/richness indices.
}
\details{
These are the formulas used to calculate the indices:
\bold{Shannon index:}
\deqn{H_1(X)=-\sum\limits_{i=1}^R{p_i}\;log\;p_i}
Shannon, C. E. (1948). A mathematical theory of communication. Bell System \cr
\bold{Simpson index:}
\deqn{D=\frac{\sum_{i=1}^R{p_i}\;n_i(n_i -1)}{N(N-1)}}
Simpson, E. H. (1949). Measurement of diversity. Nature 163, p. 688 \cr
\bold{Collision entropy:}
\deqn{H_2(X)=-log\sum_{i=1}^n{p_i}^2}
Renyi, A. (1961). On measures of information and entropy. Proceedings of the
4th Berkeley Symposium on Mathematics, Statistics and Probability, 1960.
pp. 547-561. \cr
\bold{Berger Parker index:}
\deqn{D_{BP}=\frac{N_{max}}{N}}
Berger, W. H., & Parker, F. L.(1970). Diversity of planktonic Foramenifera in
deep sea sediments. Science 168, pp. 1345-1347. \cr
\bold{Brillouin index:}
\deqn{H_B=\frac{ln(N!)-\sum{ln(n_i)!}}{N}}
Magurran, A. E. (2004). Measuring biological diversity. Blackwell.
}
\examples{
\dontrun{
div.mod <- with(mraja1spl, diversity(dialogue, list(sex, died, fam.aff)))
colsplit2df(div.mod)
plot(div.mod, high = "red", low = "yellow")
plot(div.mod, high = "red", low = "yellow", values = TRUE)
}
}
\references{
\url{http://arxiv.org/abs/physics/0512106}
}
\keyword{diversity}
|
#' Ribosome P-sites position within reads.
#'
#' This function identifies the exact position of the ribosome P-site within
#' each read, determined by the localisation of its first nucleotide (see
#' \code{Details}). It returns a data table containing, for all samples and read
#' lengths: i) the percentage of reads in the whole dataset, ii) the percentage
#' of reads aligning on the start codon (if any); iii) the distance of the
#' P-site from the two extremities of the reads before and after the correction
#' step; iv) the name of the sample. Optionally, this function plots a
#' collection of read length-specific occupancy metaprofiles displaying the
#' P-site offsets computed through the process.
#'
#' @param data List of data tables from \code{\link{bamtolist}},
#' \code{\link{bedtolist}}, \code{\link{duplicates_filter}} or
#' \code{\link{length_filter}}.
#' @param flanking Integer value specifying for the selected reads the minimum
#' number of nucleotides that must flank the reference codon in both
#' directions. Default is 6.
#' @param start Logical value whether to use the translation initiation site as
#' reference codon. Default is TRUE. If FALSE, the second to last codon is
#' used instead.
#' @param extremity Either "5end", "3end" or "auto". It specifies if the
#' correction step should be based on 5' extremities ("5end") or 3'
#' extremities ("3end"). Default is "auto" i.e. the optimal extremity is
#' automatically selected.
#' @param plot Logical value whether to plot the occupancy metaprofiles
#' displaying the P-site offsets computed in both steps of the algorithm.
#' Default is FALSE.
#' @param plot_dir Character string specifying the directory where read
#' length-specific occupancy metaprofiles should be stored. If the specified
#' folder doesn't exist, it is automatically created. If NULL (the default),
#' the metaprofiles are stored in a new subfolder of the working directory,
#' called \emph{offset_plot}. This parameter is considered only if \code{plot}
#' is TRUE.
#' @param plot_format Either "png" (the default) or "pdf". This parameter
#' specifies the file format storing the length-specific occupancy
#' metaprofiles. It is considered only if \code{plot} is TRUE.
#' @param cl Integer value in [1,100] specifying a confidence level for
#' generating occupancy metaprofiles for to a sub-range of read lengths i.e.
#' for the cl% of read lengths associated to the highest signals. Default is
#' 99. This parameter is considered only if \code{plot} is TRUE.
#' @param log_file Logical value whether to generate a plain text file, called
#' \emph{best_offset.txt}, that reports the extremity used for the correction
#' step and the best offset for each sample. Default is FALSE.
#' @param log_file_dir Character string specifying the directory where the log
#' file should be saved. If the specified folder doesn't exist, it is
#' automatically created. If NULL (the default), the file is stored in the
#' working directory. This parameter is considered only if \code{log_file} is
#' TRUE.
#' @details The P-site offset (PO) is defined as the distance between the
#' extremities of a read and the first nucleotide of the P-site itself. The
#' function processes all samples separately starting from reads mapping on
#' the reference codon (either the start codon or the second to last codon,
#' see \code{start}) of any annotated coding sequences. Read lengths-specific
#' POs are inferred in two steps. First, reads mapping on the reference codon
#' are grouped according to their length, each group corresponding to a bin.
#' Reads whose extremities are too close to the reference codon are discarded
#' (see \code{flanking}). For each bin temporary 5' and 3' POs are defined as
#' the distances between the first nucleotide of the reference codon and the
#' nucleotide corresponding to the global maximum found in the profiles of the
#' 5' and the 3' end at the left and at the right of the reference codon,
#' respectively. After the identification of the P-site for all reads aligning
#' on the reference codon, the POs corresponding to each length are assigned
#' to each read of the dataset. Second, the most frequent temporary POs
#' associated to the optimal extremity (see \code{extremity}) and the
#' predominant bins are exploited as reference values for correcting the
#' temporary POs of smaller bins. Briefly, the correction step defines for
#' each length bin a new PO based on the local maximum, whose distance from
#' the reference codon is the closest to the most frequent temporary POs. For
#' further details please refer to the \strong{riboWaltz} article (available
#' \href{https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1006169}{here}).
#' @return A data table.
#' @examples
#' data(reads_list)
#'
#' ## Compute the P-site offset automatically selecting the optimal read
#' ## extremity for the correction step and not plotting any metaprofile:
#' psite(reads_list, flanking = 6, extremity="auto")
#' @import data.table
#' @import ggplot2
#' @export
psite <- function(data, flanking = 6, start = TRUE, extremity = "auto",
                  plot = FALSE, plot_dir = NULL, plot_format = "png", cl = 99,
                  log_file = FALSE, log_file_dir = NULL) {
  # Optionally initialise the plain-text log reporting, for each sample, the
  # extremity used for the correction step and the best offset.
  if (isTRUE(log_file)) {
    if (length(log_file_dir) == 0) {
      log_file_dir <- getwd()
    }
    if (!dir.exists(log_file_dir)) {
      dir.create(log_file_dir)
    }
    logpath <- paste0(log_file_dir, "/best_offset.txt")
    # Log-file column header (typo "exremity" fixed).
    cat("sample\textremity\toffset(nts)\n", file = logpath)
  }
  sample_names <- names(data)
  offset <- NULL
  for (n in sample_names) {
    cat(sprintf("processing %s\n", n))
    dt <- data[[n]]
    lev <- sort(unique(dt$length))
    # Distance of both read extremities from the reference codon: the start
    # codon (base = 0) or the second to last codon (base = -5).
    if (isTRUE(start)) {
      base <- 0
      dt[, site_dist_end5 := end5 - cds_start]
      dt[, site_dist_end3 := end3 - cds_start]
    } else {
      base <- -5
      dt[, site_dist_end5 := end5 - cds_stop - base]
      dt[, site_dist_end3 := end3 - cds_stop - base]
    }
    # Keep reads overlapping the reference codon with at least `flanking`
    # nucleotides on both sides.
    site_sub <- dt[site_dist_end5 <= -flanking & site_dist_end3 >= flanking - 1]
    minlen <- min(site_sub$length)
    maxlen <- max(site_sub$length)
    len_tab <- table(factor(site_sub$length, levels = lev))
    # Percentage of reads around the reference codon, by read length.
    offset_temp <- data.table(length = as.numeric(as.character(names(len_tab))), percentage = (as.vector(len_tab)/sum(as.vector(len_tab))) * 100)
    offset_temp[, around_site := "T"
                ][percentage == 0, around_site := "F"]
    # Temporary offset for one length bin: the distance whose occupancy is a
    # strict local maximum (greater than both neighbouring distances),
    # scanning counts in decreasing order.
    tempoff <- function(v_dist){
      ttable <- sort(table(v_dist), decreasing = TRUE)
      ttable_sr <- ttable[as.character(as.numeric(names(ttable))+1)]
      ttable_sl <- ttable[as.character(as.numeric(names(ttable))-1)]
      tsel <- rowSums(cbind(ttable > ttable_sr, ttable > ttable_sl), na.rm = TRUE)
      return(as.numeric(names(tsel[tsel == 2][1])))
    }
    offset_temp5 <- site_sub[, list(offset_from_5 = tempoff(.SD$site_dist_end5)), by = length]
    offset_temp3 <- site_sub[, list(offset_from_3 = tempoff(.SD$site_dist_end3)), by = length]
    merge_allx <- function(x, y) merge(x, y, all.x = TRUE, by = "length")
    offset_temp <- Reduce(merge_allx, list(offset_temp, offset_temp5, offset_temp3))
    # Corrected offset for one length bin: the local maximum of the
    # length-specific profile closest to the global best offset.
    adj_off <- function(dt_site, dist_site, add, bestoff){
      temp_v <- dt_site[[dist_site]]
      t <- table(factor(temp_v, levels = seq(min(temp_v) - 2, max(temp_v) + add)))
      t[1:2] <- t[3] + 1
      locmax <- as.numeric(as.character(names(t[which(diff(sign(diff(t))) == -2)]))) + 1
      adjoff <- locmax[which.min(abs(locmax - bestoff))]
      # Scalar choice: plain if/else instead of ifelse() on a scalar.
      if (length(adjoff) != 0) adjoff else bestoff
    }
    best_from5_tab <- offset_temp[!is.na(offset_from_5), list(perc = sum(percentage)), by = offset_from_5
                                  ][perc == max(perc)]
    # NOTE(review): the filter below reuses offset_from_5; confirm it should
    # not be !is.na(offset_from_3). Kept as-is to preserve behaviour.
    best_from3_tab <- offset_temp[!is.na(offset_from_5), list(perc = sum(percentage)), by = offset_from_3
                                  ][perc == max(perc)]
    # Choose the extremity for the correction step: either forced by the
    # `extremity` argument or selected automatically from the predominant
    # temporary offsets and the minimum read length.
    if(extremity == "auto" &
       ((best_from3_tab[, perc] > best_from5_tab[, perc] &
         as.numeric(best_from3_tab[, offset_from_3]) <= minlen - 2) |
        (best_from3_tab[, perc] <= best_from5_tab[, perc] &
         as.numeric(best_from5_tab[, offset_from_5]) > minlen - 1)) |
       extremity == "3end"){
      best_offset <- as.numeric(best_from3_tab[, offset_from_3])
      line_plot <- "3end"
      adj_tab <- site_sub[, list(corrected_offset_from_3 = adj_off(.SD, "site_dist_end3", 0, best_offset)), by = length]
      offset_temp <- merge(offset_temp, adj_tab, all.x = TRUE, by = "length")
      offset_temp[is.na(corrected_offset_from_3), corrected_offset_from_3 := best_offset
                  ][, corrected_offset_from_5 := -corrected_offset_from_3 + length - 1]
    } else {
      if(extremity == "auto" &
         ((best_from3_tab[, perc] <= best_from5_tab[, perc] &
           as.numeric(best_from5_tab[, offset_from_5]) <= minlen - 1) |
          (best_from3_tab[, perc] > best_from5_tab[, perc] &
           as.numeric(best_from3_tab[, offset_from_3]) > minlen - 2)) |
         extremity == "5end"){
        best_offset <- as.numeric(best_from5_tab[, offset_from_5])
        line_plot <- "5end"
        adj_tab <- site_sub[, list(corrected_offset_from_5 = adj_off(.SD, "site_dist_end5", 1, best_offset)), by = length]
        offset_temp <- merge(offset_temp, adj_tab, all.x = TRUE, by = "length")
        offset_temp[is.na(corrected_offset_from_5), corrected_offset_from_5 := best_offset
                    ][, corrected_offset_from_5 := abs(corrected_offset_from_5)
                    ][, corrected_offset_from_3 := abs(corrected_offset_from_5 - length + 1)]
      }
    }
    cat(sprintf("best offset: %i nts from the %s\n", abs(best_offset), gsub("end", "' end", line_plot)))
    if (isTRUE(log_file)) {
      cat(sprintf("%s\t%s\t%i\n", n, gsub("end", "'end", line_plot), abs(best_offset)), file = logpath, append = TRUE)
    }
    # Final per-length summary: percentages over the whole dataset.
    len_tab <- table(factor(dt$length, levels = lev))
    offset_temp[!is.na(offset_from_5), offset_from_5 := abs(offset_from_5)
                ][, total_percentage := as.numeric(format(round((as.vector(len_tab)/sum(as.vector(len_tab))) * 100, 3), nsmall=4))
                ][, percentage := as.numeric(format(round(percentage, 3), nsmall=4))
                ][, sample := n]
    setcolorder(offset_temp, c("length", "total_percentage", "percentage", "around_site", "offset_from_5", "offset_from_3", "corrected_offset_from_5", "corrected_offset_from_3", "sample"))
    if (isTRUE(start)) {
      setnames(offset_temp, c("length", "total_percentage", "start_percentage", "around_start", "offset_from_5", "offset_from_3", "corrected_offset_from_5", "corrected_offset_from_3", "sample"))
      xlab_plot <- "Distance from start (nt)"
    } else {
      setnames(offset_temp, c("length", "total_percentage", "stop_percentage", "around_stop", "offset_from_5", "offset_from_3", "corrected_offset_from_5", "corrected_offset_from_3", "sample"))
      xlab_plot <- "Distance from stop (nt)"
    }
    # Optional read length-specific occupancy metaprofiles.
    if (isTRUE(plot)) {
      # Silence warnings while plotting; the caller's previous setting is
      # restored below (the original forced warn back to 0 unconditionally).
      old_warn <- options(warn = -1)
      if (length(plot_dir) == 0) {
        dir <- getwd()
        plot_dir <- paste(dir, "/offset_plot", sep = "")
      }
      if (!dir.exists(plot_dir)) {
        dir.create(plot_dir)
      }
      # Restrict the metaprofiles to the cl% most abundant read lengths.
      minlen <- ceiling(quantile(site_sub$length, (1 - cl/100)/2))
      maxlen <- ceiling(quantile(site_sub$length, 1 - (1 - cl/100)/2))
      for (len in minlen:maxlen) {
        # Text progress bar (25 cells).
        progress <- ceiling(((len + 1 - minlen)/(maxlen - minlen + 1)) * 25)
        cat(sprintf("\rplotting %s\r", paste(paste(rep(c(" ", "<<", "-"),
            c(25 - progress, 1, progress)), collapse = ""), " ", as.character(progress*4),
            "% ", paste(rep(c("-", ">>", " "), c(progress, 1, 25 - progress)), collapse = ""), sep = "")))
        site_temp <- dt[site_dist_end5 %in% seq(-len + 1, 0) & length == len]
        site_tab5 <- data.table(table(factor(site_temp$site_dist_end5, levels = (-len + 1) : (len))))
        site_temp <- dt[site_dist_end3 %in% seq(0, len - 2) & length == len]
        site_tab3 <- data.table(table(factor(site_temp$site_dist_end3, levels = (-len) : (len - 2))))
        setnames(site_tab5, c("distance", "reads"))
        setnames(site_tab3, c("distance", "reads"))
        site_tab5[, distance := as.numeric(as.character(site_tab5$distance))
                  ][, extremity := "5' end"]
        site_tab3[, distance := as.numeric(as.character(site_tab3$distance))
                  ][, extremity := "3' end"]
        final_tab <- rbind(site_tab5[distance <= 0], site_tab3[distance >= 0])
        final_tab[, extremity := factor(extremity, levels = c("5' end", "3' end"))]
        # Dashed lines: temporary offsets; solid lines: corrected offsets;
        # dotted black lines: the sample-wide best offset.
        p <- ggplot(final_tab, aes(distance, reads, color = extremity)) +
          geom_line() +
          geom_vline(xintercept = seq(floor(min(final_tab$distance)/3) * 3, floor(max(final_tab$distance)/3) * 3, 3), linetype = 2, color = "gray90") +
          geom_vline(xintercept = 0, color = "gray50") +
          geom_vline(xintercept = - offset_temp[length == len, offset_from_5], color = "#D55E00", linetype = 2, size = 1.1) +
          geom_vline(xintercept = offset_temp[length == len, offset_from_3], color = "#56B4E9", linetype = 2, size = 1.1) +
          geom_vline(xintercept = - offset_temp[length == len, corrected_offset_from_5], color = "#D55E00", size = 1.1) +
          geom_vline(xintercept = offset_temp[length == len, corrected_offset_from_3], color = "#56B4E9", size = 1.1) +
          annotate("rect", ymin = -Inf, ymax = Inf, xmin = flanking - len, xmax = -flanking , fill = "#D55E00", alpha = 0.1) +
          annotate("rect", ymin = -Inf, ymax = Inf, xmin = flanking - 1 , xmax = len - flanking - 1, fill = "#56B4E9", alpha = 0.1) +
          labs(x = xlab_plot, y = "Number of read extremities", title = paste(n, " - length=", len, " nts", sep = ""), color= "Extremity") +
          theme_bw(base_size = 20) +
          scale_fill_discrete("") +
          theme(panel.grid.major.x = element_blank(), panel.grid.minor.x = element_blank(), strip.placement = "outside") +
          theme(plot.title = element_text(hjust = 0.5))
        if(line_plot == "3end"){
          p <- p + geom_vline(xintercept = best_offset, color = "black", linetype = 3, size = 1.1) +
            geom_vline(xintercept = best_offset - len + 1, color = "black", linetype = 3, size = 1.1)
        } else {
          p <- p + geom_vline(xintercept = best_offset, color = "black", linetype = 3, size = 1.1) +
            geom_vline(xintercept = best_offset + len - 1, color = "black", linetype = 3, size = 1.1)
        }
        p <- p +
          scale_x_continuous(limits = c(min(final_tab$distance), max(final_tab$distance)),
                             breaks = seq(floor(min(final_tab$distance)/5) * 5, floor(max(final_tab$distance)/5) * 5, 5),
                             labels = as.character(seq(floor(min(final_tab$distance)/5) * 5, floor(max(final_tab$distance)/5) * 5, 5) + base))
        subplot_dir <- paste(plot_dir, n, sep = "/")
        dir.create(subplot_dir)
        ggsave(paste(subplot_dir, "/", len, ".", plot_format, sep = ""), plot = p, width = 15, height = 5, units = "in")
      }
      cat(sprintf("\rplotting %s\n",
                  paste(paste(rep(c(" ", "<<", "-"), c(25 - progress, 1, progress)), collapse = ""), " ",
                        as.character(progress*4), "% ",
                        paste(rep(c("-", ">>", " "), c(progress, 1, 25 - progress)), collapse = ""), sep = "")))
      options(old_warn)
    }
    # Drop the temporary distance columns added (by reference) to the input.
    dt[, c("site_dist_end5", "site_dist_end3") := NULL]
    offset <- rbind(offset, offset_temp)
  }
  return(offset)
}
#' Update reads information according to the inferred P-sites.
#'
#' This function provides additional reads information according to the position
#' of the P-site identified by \code{\link{psite}}. It attaches to each data
#' table in a list four columns reporting i) the P-site position with respect to
#' the 1st nucleotide of the transcript, ii) the P-site position with respect to
#' the start and the stop codon of the annotated coding sequence (if any) and
#' iii) the region of the transcript (5' UTR, CDS, 3' UTR) that includes the
#' P-site. Please note: for transcripts not associated to any annotated CDS the
#' position of the P-site with respect to the start and the stop codon is set to
#' NA. Optionally, additional columns reporting the three nucleotides covered by
#' the P-site, the A-site and the E-site are attached, based on FASTA files or
#' BSgenome data packages containing the transcript nucleotide sequences.
#'
#' @param data List of data tables from \code{\link{bamtolist}},
#' \code{\link{bedtolist}} or \code{\link{length_filter}}.
#' @param offset Data table from \code{\link{psite}}.
#' @param site Either "psite, "asite", "esite" or a combination of these
#' strings. It specifies if additional column(s) reporting the three
#' nucleotides covered by the ribosome P-site ("psite"), A-site ("asite") and
#' E-site ("esite") should be added. Note: either \code{fastapath} or
#' \code{bsgenome} is required for this purpose. Default is NULL.
#' @param fastapath Character string specifying the FASTA file used in the
#' alignment step, including its path, name and extension. This file can
#' contain reference nucleotide sequences either of a genome assembly or of
#' all the transcripts (see \code{Details} and \code{fasta_genome}). Please
#' make sure the sequences derive from the same release of the annotation file
#' used in the \code{\link{create_annotation}} function. Note: either
#' \code{fastapath} or \code{bsgenome} is required to generate additional
#' column(s) specified by \code{site}. Default is NULL.
#' @param fasta_genome Logical value whether the FASTA file specified by
#' \code{fastapath} contains nucleotide sequences of a genome assembly. If
#' TRUE (the default), an annotation object is required (see \code{gtfpath}
#' and \code{txdb}). FALSE implies the nucleotide sequences of all the
#' transcripts is provided instead.
#' @param refseq_sep Character specifying the separator between reference
#' sequences' name and additional information to discard, stored in the
#' headers of the FASTA file specified by \code{fastapath} (if any). It might
#' be required for matching the reference sequences' identifiers reported in
#' the input list of data tables. All characters before the first occurrence
#' of the specified separator are kept. Default is NULL i.e. no string
#' splitting is performed.
#' @param bsgenome Character string specifying the BSgenome data package with
#' the genome sequences to be loaded. If not already present in the system, it
#' is automatically installed through the biocLite.R script (check the list of
#' available BSgenome data packages by running the
#' \code{\link[BSgenome]{available.genomes}} function of the BSgenome
#' package). This parameter must be coupled with an annotation object (see
#' \code{gtfpath} and \code{txdb}). Please make sure the sequences included in
#' the specified BSgenome data package are in agreement with the sequences used
#' in the alignment step. Note: either \code{fastapath} or \code{bsgenome} is
#' required to generate additional column(s) specified by \code{site}. Default
#' is NULL.
#' @param gtfpath Character string specifying the location of a GTF file,
#' including its path, name and extension. Please make sure the GTF file and
#' the sequences specified by \code{fastapath} or \code{bsgenome} derive from
#' the same release. Note that either \code{gtfpath} or \code{txdb} is
#' required if and only if nucleotide sequences of a genome assembly are
#' provided (see \code{fastapath} or \code{bsgenome}). Default is NULL.
#' @param txdb Character string specifying the TxDb annotation package to be
#' loaded. If not already present in the system, it is automatically installed
#' through the biocLite.R script (check
#' \href{http://bioconductor.org/packages/release/BiocViews.html#___TxDb}{here}
#' the list of available TxDb annotation packages). Please make sure the TxDb
#' annotation package and the sequences specified by \code{fastapath} or
#' \code{bsgenome} derive from the same release. Note that either
#' \code{gtfpath} or \code{txdb} is required if and only if nucleotide
#' sequences of a genome assembly are provided (see \code{fastapath} or
#' \code{bsgenome}). Default is NULL.
#' @param dataSource Optional character string describing the origin of the GTF
#' data file. This parameter is considered only if \code{gtfpath} is
#' specified. For more information about this parameter please refer to the
#' description of \emph{dataSource} of the
#' \code{\link[GenomicFeatures]{makeTxDbFromGFF}} function included in the
#' \code{GenomicFeatures} package.
#' @param organism Optional character string reporting the genus and species of
#' the organism of the GTF data file. This parameter is considered only if
#' \code{gtfpath} is specified. For more information about this parameter
#' please refer to the description of \emph{organism} of the
#' \code{\link[GenomicFeatures]{makeTxDbFromGFF}} function included in the
#' \code{GenomicFeatures} package.
#' @param granges Logical value whether to return a GRangesList object. Default
#' is FALSE i.e. a list of data tables (the required input for downstream
#' analyses and graphical outputs provided by riboWaltz) is returned instead.
#' @details \strong{riboWaltz} only works for read alignments based on
#' transcript coordinates. This choice is due to the main purpose of RiboSeq
#' assays to study translational events through the isolation and sequencing
#' of ribosome protected fragments. Most reads from RiboSeq are supposed to
#' map on mRNAs and not on introns and intergenic regions. Nevertheless, BAM
#' based on transcript coordinates can be generated in two ways: i) aligning
#' directly against transcript sequences; ii) aligning against standard
#' chromosome sequences, requiring the outputs to be translated in transcript
#' coordinates. The first option can be easily handled by many aligners (e.g.
#' Bowtie), given a reference FASTA file where each sequence represents a
#' transcript, from the beginning of the 5' UTR to the end of the 3' UTR. The
#' second procedure is based on reference FASTA files where each sequence
#' represents a chromosome, usually coupled with comprehensive gene annotation
#' files (GTF or GFF). The STAR aligner, with its option --quantMode
#' TranscriptomeSAM (see Chapter 6 of its
#' \href{http://labshare.cshl.edu/shares/gingeraslab/www-data/dobin/STAR/STAR.posix/doc/STARmanual.pdf}{manual}),
#' is an example of tool providing such a feature.
#' @return A list of data tables or a GRangesList object.
#' @examples
#' data(reads_list)
#' data(psite_offset)
#' data(mm81cdna)
#'
#' reads_psite_list <- psite_info(reads_list, psite_offset)
#' @import data.table
#' @export
psite_info <- function(data, offset, site = NULL, fastapath = NULL,
fasta_genome = TRUE, refseq_sep = NULL, bsgenome = NULL,
gtfpath = NULL, txdb = NULL, dataSource = NA,
organism = NA, granges = FALSE) {
# --- Argument validation ---
# 'site' must be NULL or a subset of c("psite", "asite", "esite"); when it
# is given, a source of nucleotide sequences (fastapath or bsgenome) is
# mandatory.
if(!(all(site %in% c("psite", "asite", "esite"))) & length(site) != 0){
cat("\n")
stop("parameter site must be either NULL, \"psite\", \"asite\", \"esite\" or a combination of the three strings \n\n")
} else {
if(length(site) != 0 & length(fastapath) == 0 & length(bsgenome) == 0){
cat("\n")
stop("parameter site is specified but both fastapath and bsgenome are missing \n\n")
}
}
if(length(site) != 0){
# Genome-based sequences (genome FASTA or BSgenome package) additionally
# require an annotation (GTF file or TxDb package) to reconstruct
# transcript sequences from exon coordinates.
if(((length(fastapath) != 0 & (fasta_genome == TRUE | fasta_genome == T)) |
length(bsgenome) != 0) &
length(gtfpath) == 0 & length(txdb) == 0){
cat("\n")
stop("genome annotation file not specified (both GTF path and TxDb object are missing)\n\n")
}
# When both sequence sources are supplied, fastapath takes precedence.
if(length(fastapath) != 0 & length(bsgenome) != 0){
cat("\n")
warning("both fastapath and bsgenome are specified. Only fastapath will be considered\n")
bsgenome = NULL
}
# When both annotation sources are supplied, gtfpath takes precedence.
if(length(gtfpath) != 0 & length(txdb) != 0){
cat("\n")
warning("both gtfpath and txdb are specified. Only gtfpath will be considered\n")
txdb = NULL
}
if((length(gtfpath) != 0 | length(txdb) != 0) &
((length(fastapath) == 0 & length(bsgenome) == 0) |
(length(fastapath) != 0 & (fasta_genome == FALSE | fasta_genome == F)))){
cat("\n")
warning("a genome annotation file is specified but no sequences from genome assembly are provided\n")
}
# --- Annotation loading ---
# Build a TxDb object either from the GTF file or from the (possibly
# auto-installed) TxDb annotation package.
if(length(gtfpath) != 0 | length(txdb) != 0){
if(length(gtfpath) != 0){
path_to_gtf <- gtfpath
txdbanno <- GenomicFeatures::makeTxDbFromGFF(file = path_to_gtf, format = "gtf", dataSource = dataSource, organism = organism)
} else {
if(txdb %in% rownames(installed.packages())){
library(txdb, character.only = TRUE)
} else {
# NOTE(review): biocLite is the legacy Bioconductor installer; current
# Bioconductor releases use BiocManager -- confirm target R version.
source("https://bioconductor.org/biocLite.R")
biocLite(txdb, suppressUpdates = TRUE)
library(txdb, character.only = TRUE)
}
txdbanno <- get(txdb)
}
}
# --- Sequence retrieval ---
if(length(fastapath) != 0 | length(bsgenome) != 0){
if(length(fastapath) != 0) {
if(fasta_genome == TRUE | fasta_genome == T){
# Genome FASTA: transcript sequences are stitched together from the
# exon coordinates of the TxDb annotation.
temp_sequences <- Biostrings::readDNAStringSet(fastapath, format = "fasta", use.names = TRUE)
if(length(refseq_sep) != 0){
# Keep only the part of each FASTA header preceding the separator.
names(temp_sequences) <- tstrsplit(names(temp_sequences), refseq_sep, fixed = TRUE, keep = 1)[[1]]
}
exon <- suppressWarnings(GenomicFeatures::exonsBy(txdbanno, by = "tx", use.names = TRUE))
exon <- as.data.table(exon[unique(names(exon))])
sub_exon_plus <- exon[as.character(seqnames) %in% names(temp_sequences) & strand == "+"]
# Minus-strand exons: coordinates are mirrored so the pieces can be
# extracted from the reverse-complemented chromosome sequences.
sub_exon_minus <- exon[as.character(seqnames) %in% names(temp_sequences) & strand == "-"
][, new_end := Biostrings::width(temp_sequences[as.character(seqnames)]) - start + 1
][, new_start := Biostrings::width(temp_sequences[as.character(seqnames)]) - end + 1]
seq_dt_plus <- sub_exon_plus[, nt_seq := "emp"
][, nt_seq := as.character(Biostrings::subseq(temp_sequences[as.character(seqnames)],
start = start,
end = end))
][, list(seq = paste(nt_seq, collapse = "")), by = group_name]
revcompl_temp_sequences <- Biostrings::reverseComplement(temp_sequences)
seq_dt_minus <- sub_exon_minus[, nt_seq := "emp"
][, nt_seq := as.character(Biostrings::subseq(revcompl_temp_sequences[as.character(seqnames)],
start = new_start,
end = new_end))
][, list(seq = paste(nt_seq, collapse = "")), by = group_name]
sequences <- Biostrings::DNAStringSet(c(seq_dt_plus$seq, seq_dt_minus$seq))
names(sequences) <- c(unique(sub_exon_plus$group_name), unique(sub_exon_minus$group_name))
} else {
# Transcript FASTA: sequences are used as provided.
sequences <- Biostrings::readDNAStringSet(fastapath, format = "fasta", use.names = TRUE)
if(length(refseq_sep) != 0){
names(sequences) <- tstrsplit(names(sequences), refseq_sep, fixed = TRUE, keep = 1)[[1]]
}
}
} else {
# BSgenome package: install (legacy biocLite route) and load if needed,
# then extract transcript sequences via the TxDb annotation.
if(bsgenome %in% installed.genomes()){
library(bsgenome, character.only = TRUE)
} else {
source("http://www.bioconductor.org/biocLite.R")
biocLite(bsgenome, suppressUpdates = TRUE)
library(bsgenome, character.only = TRUE)
}
sequences <- GenomicFeatures::extractTranscriptSeqs(get(bsgenome), txdbanno, use.names=T)
}
}
}
# --- Per-sample processing ---
names <- names(data)
for (n in names) {
cat(sprintf("processing %s\n", n))
dt <- data[[n]]
# Length-specific corrected offsets (from the 3' end) for this sample.
suboff <- offset[sample == n, .(length,corrected_offset_from_3)]
cat("1. adding p-site position\n")
# P-site = 3' extremity minus the length-specific offset (update join on
# read length; dt is modified by reference).
dt[suboff, on = 'length', psite := i.corrected_offset_from_3]
dt[, psite := end3 - psite]
setcolorder(dt,c("transcript", "end5", "psite", "end3", "length", "cds_start", "cds_stop"))
# P-site position relative to start and stop codons; transcripts without
# an annotated CDS (cds_stop == 0) are zeroed here and set to NA below.
dt[, psite_from_start := psite - cds_start
][cds_stop == 0, psite_from_start := 0]
dt[, psite_from_stop := psite - cds_stop
][cds_stop == 0, psite_from_stop := 0]
cat("2. adding transcript region\n")
dt[, psite_region := "5utr"
][psite_from_start >= 0 & psite_from_stop <= 0, psite_region := "cds"
][psite_from_stop > 0, psite_region := "3utr"
][cds_stop == 0, psite_region := NA]
if(length(site) != 0){
cat("3. adding nucleotide sequence(s)\n")
# Codon triplets covered by the requested sites: the A-site codon starts
# 3 nt downstream and the E-site codon 3 nt upstream of the P-site.
if("psite" %in% site){
dt[, p_site_codon := as.character(Biostrings::subseq(sequences[as.character(dt$transcript)],
start = dt$psite,
end = dt$psite + 2))]
}
if("asite" %in% site){
dt[, a_site_codon := as.character(Biostrings::subseq(sequences[as.character(dt$transcript)],
start = dt$psite + 3,
end = dt$psite + 5))]
}
if("esite" %in% site){
dt[, e_site_codon := as.character(Biostrings::subseq(sequences[as.character(dt$transcript)],
start = dt$psite - 3,
end = dt$psite - 1))]
}
}
setorder(dt, transcript, end5, end3)
# Optional conversion to GRanges: transcript names become seqnames,
# read extremities become range bounds, strand is forced to "+".
if (granges == T | granges == TRUE) {
dt <- GenomicRanges::makeGRangesFromDataFrame(dt,
keep.extra.columns = TRUE,
ignore.strand = TRUE,
seqnames.field = c("transcript"),
start.field = "end5",
end.field = "end3",
strand.field = "strand",
starts.in.df.are.0based = FALSE)
GenomicRanges::strand(dt) <- "+"
}
data[[n]] <- dt
}
if (granges == T | granges == TRUE) {
data <- GenomicRanges::GRangesList(data)
}
return(data)
}
| /R/psites.R | permissive | hudsonam/riboWaltz | R | false | false | 31,320 | r | #' Ribosome P-sites position within reads.
#'
#' This function identifies the exact position of the ribosome P-site within
#' each read, determined by the localisation of its first nucleotide (see
#' \code{Details}). It returns a data table containing, for all samples and read
#' lengths: i) the percentage of reads in the whole dataset, ii) the percentage
#' of reads aligning on the start codon (if any); iii) the distance of the
#' P-site from the two extremities of the reads before and after the correction
#' step; iv) the name of the sample. Optionally, this function plots a
#' collection of read length-specific occupancy metaprofiles displaying the
#' P-site offsets computed through the process.
#'
#' @param data List of data tables from \code{\link{bamtolist}},
#' \code{\link{bedtolist}}, \code{\link{duplicates_filter}} or
#' \code{\link{length_filter}}.
#' @param flanking Integer value specifying for the selected reads the minimum
#' number of nucleotides that must flank the reference codon in both
#' directions. Default is 6.
#' @param start Logical value whether to use the translation initiation site as
#' reference codon. Default is TRUE. If FALSE, the second to last codon is
#' used instead.
#' @param extremity Either "5end", "3end" or "auto". It specifies if the
#' correction step should be based on 5' extremities ("5end") or 3'
#' extremities ("3end"). Default is "auto" i.e. the optimal extremity is
#' automatically selected.
#' @param plot Logical value whether to plot the occupancy metaprofiles
#' displaying the P-site offsets computed in both steps of the algorithm.
#' Default is FALSE.
#' @param plot_dir Character string specifying the directory where read
#' length-specific occupancy metaprofiles should be stored. If the specified
#' folder doesn't exist, it is automatically created. If NULL (the default),
#' the metaprofiles are stored in a new subfolder of the working directory,
#' called \emph{offset_plot}. This parameter is considered only if \code{plot}
#' is TRUE.
#' @param plot_format Either "png" (the default) or "pdf". This parameter
#' specifies the file format storing the length-specific occupancy
#' metaprofiles. It is considered only if \code{plot} is TRUE.
#' @param cl Integer value in [1,100] specifying a confidence level for
#' generating occupancy metaprofiles for to a sub-range of read lengths i.e.
#' for the cl% of read lengths associated to the highest signals. Default is
#' 99. This parameter is considered only if \code{plot} is TRUE.
#' @param log_file Logical value whether to generate a plain text file, called
#' \emph{best_offset.txt}, that reports the extremity used for the correction
#' step and the best offset for each sample. Default is FALSE.
#' @param log_file_dir Character string specifying the directory where the log
#' file should be saved. If the specified folder doesn't exist, it is
#' automatically created. If NULL (the default), the file is stored in the
#' working directory. This parameter is considered only if \code{log_file} is
#' TRUE.
#' @details The P-site offset (PO) is defined as the distance between the
#' extremities of a read and the first nucleotide of the P-site itself. The
#' function processes all samples separately starting from reads mapping on
#' the reference codon (either the start codon or the second to last codon,
#' see \code{start}) of any annotated coding sequences. Read lengths-specific
#' POs are inferred in two steps. First, reads mapping on the reference codon
#' are grouped according to their length, each group corresponding to a bin.
#' Reads whose extremities are too close to the reference codon are discarded
#' (see \code{flanking}). For each bin temporary 5' and 3' POs are defined as
#' the distances between the first nucleotide of the reference codon and the
#' nucleotide corresponding to the global maximum found in the profiles of the
#' 5' and the 3' end at the left and at the right of the reference codon,
#' respectively. After the identification of the P-site for all reads aligning
#' on the reference codon, the POs corresponding to each length are assigned
#' to each read of the dataset. Second, the most frequent temporary POs
#' associated to the optimal extremity (see \code{extremity}) and the
#' predominant bins are exploited as reference values for correcting the
#' temporary POs of smaller bins. Briefly, the correction step defines for
#' each length bin a new PO based on the local maximum, whose distance from
#' the reference codon is the closest to the most frequent temporary POs. For
#' further details please refer to the \strong{riboWaltz} article (available
#' \href{https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1006169}{here}).
#' @return A data table.
#' @examples
#' data(reads_list)
#'
#' ## Compute the P-site offset automatically selecting the optimal read
#' ## extremity for the correction step and not plotting any metaprofile:
#' psite(reads_list, flanking = 6, extremity="auto")
#' @import data.table
#' @import ggplot2
#' @export
psite <- function(data, flanking = 6, start = TRUE, extremity = "auto",
                  plot = FALSE, plot_dir = NULL, plot_format = "png", cl = 99,
                  log_file = FALSE, log_file_dir = NULL) {
  # Optionally initialise the log file reporting, for each sample, the read
  # extremity exploited by the correction step and the best offset.
  if (isTRUE(log_file)) {
    if (length(log_file_dir) == 0) {
      log_file_dir <- getwd()
    }
    if (!dir.exists(log_file_dir)) {
      dir.create(log_file_dir)
    }
    logpath <- paste0(log_file_dir, "/best_offset.txt")
    # Header typo fixed: "exremity" -> "extremity".
    cat("sample\textremity\toffset(nts)\n", file = logpath)
  }
  names <- names(data)
  offset <- NULL
  for (n in names) {
    cat(sprintf("processing %s\n", n))
    dt <- data[[n]]
    lev <- sort(unique(dt$length))
    # Distances of the read extremities from the reference codon: the start
    # codon (base = 0) or the second to last codon (base = -5).
    if (isTRUE(start)) {
      base <- 0
      dt[, site_dist_end5 := end5 - cds_start]
      dt[, site_dist_end3 := end3 - cds_start]
    } else {
      base <- -5
      dt[, site_dist_end5 := end5 - cds_stop - base]
      dt[, site_dist_end3 := end3 - cds_stop - base]
    }
    # Reads overlapping the reference codon with at least 'flanking'
    # nucleotides on both sides.
    site_sub <- dt[site_dist_end5 <= -flanking & site_dist_end3 >= flanking - 1]
    minlen <- min(site_sub$length)
    maxlen <- max(site_sub$length)
    t <- table(factor(site_sub$length, levels = lev))
    # offset: percentage of selected reads per length, flagging lengths with
    # no reads around the reference codon.
    offset_temp <- data.table(length = as.numeric(as.character(names(t))), percentage = (as.vector(t)/sum(as.vector(t))) * 100)
    offset_temp[, around_site := "T"
                ][percentage == 0, around_site := "F"]
    # Temporary offset for one vector of distances: the most populated
    # distance that is also a local maximum of the occupancy profile.
    tempoff <- function(v_dist){
      ttable <- sort(table(v_dist), decreasing = TRUE)
      ttable_sr <- ttable[as.character(as.numeric(names(ttable)) + 1)]
      ttable_sl <- ttable[as.character(as.numeric(names(ttable)) - 1)]
      tsel <- rowSums(cbind(ttable > ttable_sr, ttable > ttable_sl), na.rm = TRUE)
      return(as.numeric(names(tsel[tsel == 2][1])))
    }
    offset_temp5 <- site_sub[, list(offset_from_5 = tempoff(.SD$site_dist_end5)), by = length]
    offset_temp3 <- site_sub[, list(offset_from_3 = tempoff(.SD$site_dist_end3)), by = length]
    merge_allx <- function(x, y) merge(x, y, all.x = TRUE, by = "length")
    offset_temp <- Reduce(merge_allx, list(offset_temp, offset_temp5, offset_temp3))
    # adjusted offset: for one length bin, the local maximum of the extremity
    # profile closest to the reference offset 'bestoff'; falls back to
    # 'bestoff' itself when no local maximum exists.
    adj_off <- function(dt_site, dist_site, add, bestoff){
      temp_v <- dt_site[[dist_site]]
      t <- table(factor(temp_v, levels = seq(min(temp_v) - 2, max(temp_v) + add)))
      t[1:2] <- t[3] + 1
      locmax <- as.numeric(as.character(names(t[which(diff(sign(diff(t))) == -2)]))) + 1
      adjoff <- locmax[which.min(abs(locmax - bestoff))]
      ifelse(length(adjoff) != 0, adjoff, bestoff)
    }
    # Most frequent temporary offsets computed from the two extremities.
    best_from5_tab <- offset_temp[!is.na(offset_from_5), list(perc = sum(percentage)), by = offset_from_5
                                  ][perc == max(perc)]
    # Copy-paste slip fixed: filter on offset_from_3 (the grouping column)
    # instead of offset_from_5. Both columns are NA for exactly the same
    # lengths, so the result is unchanged.
    best_from3_tab <- offset_temp[!is.na(offset_from_3), list(perc = sum(percentage)), by = offset_from_3
                                  ][perc == max(perc)]
    # Select the optimal extremity (or honour the user's explicit choice) and
    # correct the temporary offsets of all length bins accordingly.
    if (extremity == "auto" &
        ((best_from3_tab[, perc] > best_from5_tab[, perc] &
          as.numeric(best_from3_tab[, offset_from_3]) <= minlen - 2) |
         (best_from3_tab[, perc] <= best_from5_tab[, perc] &
          as.numeric(best_from5_tab[, offset_from_5]) > minlen - 1)) |
        extremity == "3end") {
      best_offset <- as.numeric(best_from3_tab[, offset_from_3])
      line_plot <- "3end"
      adj_tab <- site_sub[, list(corrected_offset_from_3 = adj_off(.SD, "site_dist_end3", 0, best_offset)), by = length]
      offset_temp <- merge(offset_temp, adj_tab, all.x = TRUE, by = "length")
      offset_temp[is.na(corrected_offset_from_3), corrected_offset_from_3 := best_offset
                  ][, corrected_offset_from_5 := -corrected_offset_from_3 + length - 1]
    } else {
      if (extremity == "auto" &
          ((best_from3_tab[, perc] <= best_from5_tab[, perc] &
            as.numeric(best_from5_tab[, offset_from_5]) <= minlen - 1) |
           (best_from3_tab[, perc] > best_from5_tab[, perc] &
            as.numeric(best_from3_tab[, offset_from_3]) > minlen - 2)) |
          extremity == "5end") {
        best_offset <- as.numeric(best_from5_tab[, offset_from_5])
        line_plot <- "5end"
        adj_tab <- site_sub[, list(corrected_offset_from_5 = adj_off(.SD, "site_dist_end5", 1, best_offset)), by = length]
        offset_temp <- merge(offset_temp, adj_tab, all.x = TRUE, by = "length")
        offset_temp[is.na(corrected_offset_from_5), corrected_offset_from_5 := best_offset
                    ][, corrected_offset_from_5 := abs(corrected_offset_from_5)
                    ][, corrected_offset_from_3 := abs(corrected_offset_from_5 - length + 1)]
      }
    }
    cat(sprintf("best offset: %i nts from the %s\n", abs(best_offset), gsub("end", "' end", line_plot)))
    if (isTRUE(log_file)) {
      cat(sprintf("%s\t%s\t%i\n", n, gsub("end", "'end", line_plot), abs(best_offset)), file = logpath, append = TRUE)
    }
    # Percentages recomputed on the whole dataset; columns renamed according
    # to the reference codon in use.
    t <- table(factor(dt$length, levels = lev))
    offset_temp[!is.na(offset_from_5), offset_from_5 := abs(offset_from_5)
                ][, total_percentage := as.numeric(format(round((as.vector(t)/sum(as.vector(t))) * 100, 3), nsmall = 4))
                ][, percentage := as.numeric(format(round(percentage, 3), nsmall = 4))
                ][, sample := n]
    setcolorder(offset_temp, c("length", "total_percentage", "percentage", "around_site", "offset_from_5", "offset_from_3", "corrected_offset_from_5", "corrected_offset_from_3", "sample"))
    if (isTRUE(start)) {
      setnames(offset_temp, c("length", "total_percentage", "start_percentage", "around_start", "offset_from_5", "offset_from_3", "corrected_offset_from_5", "corrected_offset_from_3", "sample"))
      xlab_plot <- "Distance from start (nt)"
    } else {
      setnames(offset_temp, c("length", "total_percentage", "stop_percentage", "around_stop", "offset_from_5", "offset_from_3", "corrected_offset_from_5", "corrected_offset_from_3", "sample"))
      xlab_plot <- "Distance from stop (nt)"
    }
    # plot: one occupancy metaprofile per read length in the cl% range.
    if (isTRUE(plot)) {
      # Silence warnings while plotting (e.g. dir.create on an existing
      # folder); the previous 'warn' setting is restored below instead of
      # being hard-reset to 0.
      warn_opt <- options(warn = -1)
      if (length(plot_dir) == 0) {
        dir <- getwd()
        plot_dir <- paste(dir, "/offset_plot", sep = "")
      }
      if (!dir.exists(plot_dir)) {
        dir.create(plot_dir)
      }
      minlen <- ceiling(quantile(site_sub$length, (1 - cl/100)/2))
      maxlen <- ceiling(quantile(site_sub$length, 1 - (1 - cl/100)/2))
      for (len in minlen:maxlen) {
        # Text progress bar on a single console line.
        progress <- ceiling(((len + 1 - minlen)/(maxlen - minlen + 1)) * 25)
        cat(sprintf("\rplotting %s\r", paste(paste(rep(c(" ", "<<", "-"),
            c(25 - progress, 1, progress)), collapse = ""), " ", as.character(progress*4),
            "% ", paste(rep(c("-", ">>", " "), c(progress, 1, 25 - progress)), collapse = ""), sep = "")))
        # Occupancy profiles of the 5' and 3' extremities around the
        # reference codon for the current read length.
        site_temp <- dt[site_dist_end5 %in% seq(-len + 1, 0) & length == len]
        site_tab5 <- data.table(table(factor(site_temp$site_dist_end5, levels = (-len + 1) : (len))))
        site_temp <- dt[site_dist_end3 %in% seq(0, len - 2) & length == len]
        site_tab3 <- data.table(table(factor(site_temp$site_dist_end3, levels = (-len) : (len - 2))))
        setnames(site_tab5, c("distance", "reads"))
        setnames(site_tab3, c("distance", "reads"))
        site_tab5[, distance := as.numeric(as.character(site_tab5$distance))
                  ][, extremity := "5' end"]
        site_tab3[, distance := as.numeric(as.character(site_tab3$distance))
                  ][, extremity := "3' end"]
        final_tab <- rbind(site_tab5[distance <= 0], site_tab3[distance >= 0])
        final_tab[, extremity := factor(extremity, levels = c("5' end", "3' end"))]
        # Dashed coloured lines: temporary offsets; solid coloured lines:
        # corrected offsets; dotted black lines: best offset.
        p <- ggplot(final_tab, aes(distance, reads, color = extremity)) +
          geom_line() +
          geom_vline(xintercept = seq(floor(min(final_tab$distance)/3) * 3, floor(max(final_tab$distance)/3) * 3, 3), linetype = 2, color = "gray90") +
          geom_vline(xintercept = 0, color = "gray50") +
          geom_vline(xintercept = - offset_temp[length == len, offset_from_5], color = "#D55E00", linetype = 2, size = 1.1) +
          geom_vline(xintercept = offset_temp[length == len, offset_from_3], color = "#56B4E9", linetype = 2, size = 1.1) +
          geom_vline(xintercept = - offset_temp[length == len, corrected_offset_from_5], color = "#D55E00", size = 1.1) +
          geom_vline(xintercept = offset_temp[length == len, corrected_offset_from_3], color = "#56B4E9", size = 1.1) +
          annotate("rect", ymin = -Inf, ymax = Inf, xmin = flanking - len, xmax = -flanking , fill = "#D55E00", alpha = 0.1) +
          annotate("rect", ymin = -Inf, ymax = Inf, xmin = flanking - 1 , xmax = len - flanking - 1, fill = "#56B4E9", alpha = 0.1) +
          labs(x = xlab_plot, y = "Number of read extremities", title = paste(n, " - length=", len, " nts", sep = ""), color = "Extremity") +
          theme_bw(base_size = 20) +
          scale_fill_discrete("") +
          theme(panel.grid.major.x = element_blank(), panel.grid.minor.x = element_blank(), strip.placement = "outside") +
          theme(plot.title = element_text(hjust = 0.5))
        if (line_plot == "3end") {
          p <- p + geom_vline(xintercept = best_offset, color = "black", linetype = 3, size = 1.1) +
            geom_vline(xintercept = best_offset - len + 1, color = "black", linetype = 3, size = 1.1)
        } else {
          p <- p + geom_vline(xintercept = best_offset, color = "black", linetype = 3, size = 1.1) +
            geom_vline(xintercept = best_offset + len - 1, color = "black", linetype = 3, size = 1.1)
        }
        # Shift the axis labels by 'base' so distances refer to the start
        # codon even when the second to last codon was used internally.
        p <- p +
          scale_x_continuous(limits = c(min(final_tab$distance), max(final_tab$distance)),
                             breaks = seq(floor(min(final_tab$distance)/5) * 5, floor(max(final_tab$distance)/5) * 5, 5),
                             labels = as.character(seq(floor(min(final_tab$distance)/5) * 5, floor(max(final_tab$distance)/5) * 5, 5) + base))
        subplot_dir <- paste(plot_dir, n, sep = "/")
        dir.create(subplot_dir)
        ggsave(paste(subplot_dir, "/", len, ".", plot_format, sep = ""), plot = p, width = 15, height = 5, units = "in")
      }
      cat(sprintf("\rplotting %s\n",
          paste(paste(rep(c(" ", "<<", "-"), c(25 - progress, 1, progress)), collapse = ""), " ",
                as.character(progress*4), "% ",
                paste(rep(c("-", ">>", " "), c(progress, 1, 25 - progress)), collapse = ""), sep = "")))
      # Restore the caller's previous warning setting.
      options(warn_opt)
    }
    # Drop the helper columns added by reference to the input data table and
    # accumulate the per-sample offset table.
    dt[, c("site_dist_end5", "site_dist_end3") := NULL]
    offset <- rbind(offset, offset_temp)
  }
  return(offset)
}
#' Update reads information according to the inferred P-sites.
#'
#' This function provides additional reads information according to the position
#' of the P-site identified by \code{\link{psite}}. It attaches to each data
#' table in a list four columns reporting i) the P-site position with respect to
#' the 1st nucleotide of the transcript, ii) the P-site position with respect to
#' the start and the stop codon of the annotated coding sequence (if any) and
#' iii) the region of the transcript (5' UTR, CDS, 3' UTR) that includes the
#' P-site. Please note: for transcripts not associated to any annotated CDS the
#' position of the P-site with respect to the start and the stop codon is set to
#' NA. Optionally, additional columns reporting the three nucleotides covered by
#' the P-site, the A-site and the E-site are attached, based on FASTA files or
#' BSgenome data packages containing the transcript nucleotide sequences.
#'
#' @param data List of data tables from \code{\link{bamtolist}},
#' \code{\link{bedtolist}} or \code{\link{length_filter}}.
#' @param offset Data table from \code{\link{psite}}.
#' @param site Either "psite, "asite", "esite" or a combination of these
#' strings. It specifies if additional column(s) reporting the three
#' nucleotides covered by the ribosome P-site ("psite"), A-site ("asite") and
#' E-site ("esite") should be added. Note: either \code{fastapath} or
#' \code{bsgenome} is required for this purpose. Default is NULL.
#' @param fastapath Character string specifying the FASTA file used in the
#' alignment step, including its path, name and extension. This file can
#' contain reference nucleotide sequences either of a genome assembly or of
#' all the transcripts (see \code{Details} and \code{fasta_genome}). Please
#' make sure the sequences derive from the same release of the annotation file
#' used in the \code{\link{create_annotation}} function. Note: either
#' \code{fastapath} or \code{bsgenome} is required to generate additional
#' column(s) specified by \code{site}. Default is NULL.
#' @param fasta_genome Logical value whether the FASTA file specified by
#' \code{fastapath} contains nucleotide sequences of a genome assembly. If
#' TRUE (the default), an annotation object is required (see \code{gtfpath}
#' and \code{txdb}). FALSE implies the nucleotide sequences of all the
#' transcripts is provided instead.
#' @param refseq_sep Character specifying the separator between reference
#' sequences' name and additional information to discard, stored in the
#' headers of the FASTA file specified by \code{fastapath} (if any). It might
#' be required for matching the reference sequences' identifiers reported in
#' the input list of data tables. All characters before the first occurrence
#' of the specified separator are kept. Default is NULL i.e. no string
#' splitting is performed.
#' @param bsgenome Character string specifying the BSgenome data package with
#' the genome sequences to be loaded. If not already present in the system, it
#' is automatically installed through the biocLite.R script (check the list of
#' available BSgenome data packages by running the
#' \code{\link[BSgenome]{available.genomes}} function of the BSgenome
#' package). This parameter must be coupled with an annotation object (see
#' \code{gtfpath} and \code{txdb}). Please make sure the sequences included in
#' the specified BSgenome data package are in agreement with the sequences used
#' in the alignment step. Note: either \code{fastapath} or \code{bsgenome} is
#' required to generate additional column(s) specified by \code{site}. Default
#' is NULL.
#' @param gtfpath Character string specifying the location of a GTF file,
#' including its path, name and extension. Please make sure the GTF file and
#' the sequences specified by \code{fastapath} or \code{bsgenome} derive from
#' the same release. Note that either \code{gtfpath} or \code{txdb} is
#' required if and only if nucleotide sequences of a genome assembly are
#' provided (see \code{fastapath} or \code{bsgenome}). Default is NULL.
#' @param txdb Character string specifying the TxDb annotation package to be
#' loaded. If not already present in the system, it is automatically installed
#' through the biocLite.R script (check
#' \href{http://bioconductor.org/packages/release/BiocViews.html#___TxDb}{here}
#' the list of available TxDb annotation packages). Please make sure the TxDb
#' annotation package and the sequences specified by \code{fastapath} or
#' \code{bsgenome} derive from the same release. Note that either
#' \code{gtfpath} or \code{txdb} is required if and only if nucleotide
#' sequences of a genome assembly are provided (see \code{fastapath} or
#' \code{bsgenome}). Default is NULL.
#' @param dataSource Optional character string describing the origin of the GTF
#' data file. This parameter is considered only if \code{gtfpath} is
#' specified. For more information about this parameter please refer to the
#' description of \emph{dataSource} of the
#' \code{\link[GenomicFeatures]{makeTxDbFromGFF}} function included in the
#' \code{GenomicFeatures} package.
#' @param organism Optional character string reporting the genus and species of
#' the organism of the GTF data file. This parameter is considered only if
#' \code{gtfpath} is specified. For more information about this parameter
#' please refer to the description of \emph{organism} of the
#' \code{\link[GenomicFeatures]{makeTxDbFromGFF}} function included in the
#' \code{GenomicFeatures} package.
#' @param granges Logical value whether to return a GRangesList object. Default
#' is FALSE i.e. a list of data tables (the required input for downstream
#' analyses and graphical outputs provided by riboWaltz) is returned instead.
#' @details \strong{riboWaltz} only works for read alignments based on
#' transcript coordinates. This choice is due to the main purpose of RiboSeq
#' assays to study translational events through the isolation and sequencing
#' of ribosome protected fragments. Most reads from RiboSeq are supposed to
#' map on mRNAs and not on introns and intergenic regions. Nevertheless, BAM
#' based on transcript coordinates can be generated in two ways: i) aligning
#' directly against transcript sequences; ii) aligning against standard
#' chromosome sequences, requiring the outputs to be translated in transcript
#' coordinates. The first option can be easily handled by many aligners (e.g.
#' Bowtie), given a reference FASTA file where each sequence represents a
#' transcript, from the beginning of the 5' UTR to the end of the 3' UTR. The
#' second procedure is based on reference FASTA files where each sequence
#' represents a chromosome, usually coupled with comprehensive gene annotation
#' files (GTF or GFF). The STAR aligner, with its option --quantMode
#' TranscriptomeSAM (see Chapter 6 of its
#' \href{http://labshare.cshl.edu/shares/gingeraslab/www-data/dobin/STAR/STAR.posix/doc/STARmanual.pdf}{manual}),
#' is an example of tool providing such a feature.
#' @return A list of data tables or a GRangesList object.
#' @examples
#' data(reads_list)
#' data(psite_offset)
#' data(mm81cdna)
#'
#' reads_psite_list <- psite_info(reads_list, psite_offset)
#' @import data.table
#' @export
psite_info <- function(data, offset, site = NULL, fastapath = NULL,
                       fasta_genome = TRUE, refseq_sep = NULL, bsgenome = NULL,
                       gtfpath = NULL, txdb = NULL, dataSource = NA,
                       organism = NA, granges = FALSE) {
  ## ---- Argument validation --------------------------------------------
  ## 'site' must be NULL or any combination of "psite", "asite", "esite".
  if(!(all(site %in% c("psite", "asite", "esite"))) & length(site) != 0){
    cat("\n")
    stop("parameter site must be either NULL, \"psite\", \"asite\", \"esite\" or a combination of the three strings \n\n")
  } else {
    ## Codon extraction requires transcript sequences from either a FASTA
    ## file or a BSgenome package.
    if(length(site) != 0 & length(fastapath) == 0 & length(bsgenome) == 0){
      cat("\n")
      stop("parameter site is specified but both fastapath and bsgenome are missing \n\n")
    }
  }
  if(length(site) != 0){
    ## A genome-level FASTA (or a BSgenome) also needs an annotation
    ## (GTF path or TxDb) to assemble transcript sequences from exons.
    if(((length(fastapath) != 0 & (fasta_genome == TRUE | fasta_genome == T)) |
        length(bsgenome) != 0) &
       length(gtfpath) == 0 & length(txdb) == 0){
      cat("\n")
      stop("genome annotation file not specified (both GTF path and TxDb object are missing)\n\n")
    }
    ## fastapath wins over bsgenome when both are supplied.
    if(length(fastapath) != 0 & length(bsgenome) != 0){
      cat("\n")
      warning("both fastapath and bsgenome are specified. Only fastapath will be considered\n")
      bsgenome = NULL
    }
    ## gtfpath wins over txdb when both are supplied.
    if(length(gtfpath) != 0 & length(txdb) != 0){
      cat("\n")
      warning("both gtfpath and txdb are specified. Only gtfpath will be considered\n")
      txdb = NULL
    }
    if((length(gtfpath) != 0 | length(txdb) != 0) &
       ((length(fastapath) == 0 & length(bsgenome) == 0) |
        (length(fastapath) != 0 & (fasta_genome == FALSE | fasta_genome == F)))){
      cat("\n")
      warning("a genome annotation file is specified but no sequences from genome assembly are provided\n")
    }
    ## ---- Build the TxDb annotation object -----------------------------
    if(length(gtfpath) != 0 | length(txdb) != 0){
      if(length(gtfpath) != 0){
        path_to_gtf <- gtfpath
        txdbanno <- GenomicFeatures::makeTxDbFromGFF(file = path_to_gtf, format = "gtf", dataSource = dataSource, organism = organism)
      } else {
        ## Install the named TxDb annotation package on the fly if missing.
        ## NOTE(review): biocLite() is deprecated upstream in favour of
        ## BiocManager::install(); left unchanged here.
        if(txdb %in% rownames(installed.packages())){
          library(txdb, character.only = TRUE)
        } else {
          source("https://bioconductor.org/biocLite.R")
          biocLite(txdb, suppressUpdates = TRUE)
          library(txdb, character.only = TRUE)
        }
        txdbanno <- get(txdb)
      }
    }
    ## ---- Load / assemble transcript sequences -------------------------
    if(length(fastapath) != 0 | length(bsgenome) != 0){
      if(length(fastapath) != 0) {
        if(fasta_genome == TRUE | fasta_genome == T){
          ## Genome FASTA: stitch per-exon sequences into one sequence per
          ## transcript, handling the two strands separately.
          temp_sequences <- Biostrings::readDNAStringSet(fastapath, format = "fasta", use.names = TRUE)
          if(length(refseq_sep) != 0){
            ## Keep only the part of each FASTA header before the separator.
            names(temp_sequences) <- tstrsplit(names(temp_sequences), refseq_sep, fixed = TRUE, keep = 1)[[1]]
          }
          exon <- suppressWarnings(GenomicFeatures::exonsBy(txdbanno, by = "tx", use.names = TRUE))
          exon <- as.data.table(exon[unique(names(exon))])
          sub_exon_plus <- exon[as.character(seqnames) %in% names(temp_sequences) & strand == "+"]
          ## Minus strand: recompute exon coordinates relative to the
          ## reverse complement of the chromosome sequence.
          sub_exon_minus <- exon[as.character(seqnames) %in% names(temp_sequences) & strand == "-"
                                 ][, new_end := Biostrings::width(temp_sequences[as.character(seqnames)]) - start + 1
                                   ][, new_start := Biostrings::width(temp_sequences[as.character(seqnames)]) - end + 1]
          seq_dt_plus <- sub_exon_plus[, nt_seq := "emp"
                                       ][, nt_seq := as.character(Biostrings::subseq(temp_sequences[as.character(seqnames)],
                                                                                     start = start,
                                                                                     end = end))
                                         ][, list(seq = paste(nt_seq, collapse = "")), by = group_name]
          revcompl_temp_sequences <- Biostrings::reverseComplement(temp_sequences)
          seq_dt_minus <- sub_exon_minus[, nt_seq := "emp"
                                         ][, nt_seq := as.character(Biostrings::subseq(revcompl_temp_sequences[as.character(seqnames)],
                                                                                       start = new_start,
                                                                                       end = new_end))
                                           ][, list(seq = paste(nt_seq, collapse = "")), by = group_name]
          sequences <- Biostrings::DNAStringSet(c(seq_dt_plus$seq, seq_dt_minus$seq))
          names(sequences) <- c(unique(sub_exon_plus$group_name), unique(sub_exon_minus$group_name))
        } else {
          ## Transcriptome FASTA: sequences are already one per transcript.
          sequences <- Biostrings::readDNAStringSet(fastapath, format = "fasta", use.names = TRUE)
          if(length(refseq_sep) != 0){
            names(sequences) <- tstrsplit(names(sequences), refseq_sep, fixed = TRUE, keep = 1)[[1]]
          }
        }
      } else {
        ## BSgenome route: install the genome package if necessary, then
        ## extract transcript sequences via the annotation.
        if(bsgenome %in% installed.genomes()){
          library(bsgenome, character.only = TRUE)
        } else {
          source("http://www.bioconductor.org/biocLite.R")
          biocLite(bsgenome, suppressUpdates = TRUE)
          library(bsgenome, character.only = TRUE)
        }
        sequences <- GenomicFeatures::extractTranscriptSeqs(get(bsgenome), txdbanno, use.names=T)
      }
    }
  }
  ## ---- Per-sample processing -------------------------------------------
  names <- names(data)
  for (n in names) {
    cat(sprintf("processing %s\n", n))
    dt <- data[[n]]
    ## Offsets for this sample, keyed by read length.
    suboff <- offset[sample == n, .(length,corrected_offset_from_3)]
    cat("1. adding p-site position\n")
    ## P-site = 3' end minus the length-specific corrected offset.
    dt[suboff, on = 'length', psite := i.corrected_offset_from_3]
    dt[, psite := end3 - psite]
    setcolorder(dt,c("transcript", "end5", "psite", "end3", "length", "cds_start", "cds_stop"))
    ## Distances from annotated start/stop codons; zeroed for transcripts
    ## without an annotated CDS (cds_stop == 0).
    dt[, psite_from_start := psite - cds_start
       ][cds_stop == 0, psite_from_start := 0]
    dt[, psite_from_stop := psite - cds_stop
       ][cds_stop == 0, psite_from_stop := 0]
    cat("2. adding transcript region\n")
    ## Classify the P-site into 5' UTR / CDS / 3' UTR (NA for non-coding).
    dt[, psite_region := "5utr"
       ][psite_from_start >= 0 & psite_from_stop <= 0, psite_region := "cds"
         ][psite_from_stop > 0, psite_region := "3utr"
           ][cds_stop == 0, psite_region := NA]
    if(length(site) != 0){
      cat("3. adding nucleotide sequence(s)\n")
      ## Codons: P-site spans psite..psite+2; A-site is one codon
      ## downstream, E-site one codon upstream.
      if("psite" %in% site){
        dt[, p_site_codon := as.character(Biostrings::subseq(sequences[as.character(dt$transcript)],
                                                             start = dt$psite,
                                                             end = dt$psite + 2))]
      }
      if("asite" %in% site){
        dt[, a_site_codon := as.character(Biostrings::subseq(sequences[as.character(dt$transcript)],
                                                             start = dt$psite + 3,
                                                             end = dt$psite + 5))]
      }
      if("esite" %in% site){
        dt[, e_site_codon := as.character(Biostrings::subseq(sequences[as.character(dt$transcript)],
                                                             start = dt$psite - 3,
                                                             end = dt$psite - 1))]
      }
    }
    setorder(dt, transcript, end5, end3)
    if (granges == T | granges == TRUE) {
      ## Convert to GRanges: transcript becomes the seqname, strand is
      ## forced to "+" (coordinates are transcript-relative).
      dt <- GenomicRanges::makeGRangesFromDataFrame(dt,
                                                    keep.extra.columns = TRUE,
                                                    ignore.strand = TRUE,
                                                    seqnames.field = c("transcript"),
                                                    start.field = "end5",
                                                    end.field = "end3",
                                                    strand.field = "strand",
                                                    starts.in.df.are.0based = FALSE)
      GenomicRanges::strand(dt) <- "+"
    }
    data[[n]] <- dt
  }
  if (granges == T | granges == TRUE) {
    data <- GenomicRanges::GRangesList(data)
  }
  return(data)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ruth_aaron_pairs.R
\name{ruth_aaron_pairs}
\alias{ruth_aaron_pairs}
\title{Find Ruth-Aaron Pairs of Integers}
\usage{
ruth_aaron_pairs(min, max, distinct = FALSE)
}
\arguments{
\item{min}{an integer representing the minimum number to check.}
\item{max}{an integer representing the maximum number to check.}
\item{distinct}{a logical indicating whether to consider repeating prime
factors or only distinct prime number factors.}
}
\value{
A List of integer pairs.
}
\description{
Find pairs of consecutive integers whose prime factors sum to the same
value. For example, (5, 6) is a Ruth-Aaron pair because the prime factors
of each integer sum to 5: \eqn{5 = 2 + 3}{5 == 2 + 3}.
}
\author{
Paul Egeler, MS
}
| /man/ruth_aaron_pairs.Rd | permissive | Ironholds/primes | R | false | true | 766 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ruth_aaron_pairs.R
\name{ruth_aaron_pairs}
\alias{ruth_aaron_pairs}
\title{Find Ruth-Aaron Pairs of Integers}
\usage{
ruth_aaron_pairs(min, max, distinct = FALSE)
}
\arguments{
\item{min}{an integer representing the minimum number to check.}
\item{max}{an integer representing the maximum number to check.}
\item{distinct}{a logical indicating whether to consider repeating prime
factors or only distinct prime number factors.}
}
\value{
A List of integer pairs.
}
\description{
Find pairs of consecutive integers whose prime factors sum to the same
value. For example, (5, 6) is a Ruth-Aaron pair because the prime factors
of each integer sum to 5: \eqn{5 = 2 + 3}{5 == 2 + 3}.
}
\author{
Paul Egeler, MS
}
|
context("basic assumption")

# Use library() rather than require(): require() merely warns (and returns
# FALSE) when a package is missing, so a missing dependency would surface
# later as a confusing downstream error instead of failing fast here.
library(quanteda)
library(magrittr)
test_that("simplest example", {
  # Round-trip a minimal three-document dfm (with one docvar) through the
  # JSON representation and check it reconstructs exactly.
  texts <- c('i love you', 'you love me', 'i hate you')
  input_corpus <- quanteda::corpus(texts,
                                   docvars = data.frame(sentiment = c(1, 1, 0)))
  input_dfm <- quanteda::dfm(input_corpus)
  export_resdtmf(input_dfm, "example.json")
  roundtrip_dfm <- import_resdtmf("example.json")
  unlink("example.json")
  expect_equal(input_dfm, roundtrip_dfm)
})
test_that("complicated example", {
  # Round-trip the built-in inaugural-address corpus, coercing the factor
  # docvar to character first so equality holds after reconstruction.
  inaugural_dfm <- dfm(data_corpus_inaugural)
  docvars(inaugural_dfm, "Party") <- as.character(docvars(inaugural_dfm, "Party"))
  json_path <- "inaug_dfm.json"
  export_resdtmf(inaugural_dfm, json_path)
  recon_dfm <- import_resdtmf(json_path)
  unlink(json_path)
  expect_equal(inaugural_dfm, recon_dfm)
})
| /tests/testthat/test-basic_assumption.R | permissive | chainsawriot/resdtmf | R | false | false | 800 | r | context("basic assumption")
# Use library() rather than require(): require() merely warns (and returns
# FALSE) when a package is missing, so failures would surface later and
# less clearly.
library(quanteda)
library(magrittr)
test_that("simplest example", {
  # Round-trip a minimal three-document dfm (with one docvar) through the
  # JSON representation and check it reconstructs exactly.
  texts <- c('i love you', 'you love me', 'i hate you')
  input_corpus <- quanteda::corpus(texts,
                                   docvars = data.frame(sentiment = c(1, 1, 0)))
  input_dfm <- quanteda::dfm(input_corpus)
  export_resdtmf(input_dfm, "example.json")
  roundtrip_dfm <- import_resdtmf("example.json")
  unlink("example.json")
  expect_equal(input_dfm, roundtrip_dfm)
})
test_that("complicated example", {
  # Round-trip the built-in inaugural-address corpus, coercing the factor
  # docvar to character first so equality holds after reconstruction.
  inaugural_dfm <- dfm(data_corpus_inaugural)
  docvars(inaugural_dfm, "Party") <- as.character(docvars(inaugural_dfm, "Party"))
  json_path <- "inaug_dfm.json"
  export_resdtmf(inaugural_dfm, json_path)
  recon_dfm <- import_resdtmf(json_path)
  unlink(json_path)
  expect_equal(inaugural_dfm, recon_dfm)
})
|
#
# code: Flotim Technical Report Plots
#
# github: WWF-ConsEvidence/MPAMystery/2_Social/TechnicalReports/SBS/Plots
# --- Duplicate all code from "2_Social" onward, to maintain file structure for sourced code
#
# author: Kelly Claborn, clabornkelly@gmail.com
# created: November 2017
# modified: Amari Bauer, June 2019
#
#
# ---- inputs ----
# 1) Source Flotim.TR.Datasets.R
# - Dependencies: Flotim.TR.SigTest.R
# After_Calculate_BigFive.R
# Calculate_BigFive.R
#
# ---- code sections ----
# 1) DEFINE MPA-SPECIFIC PLOTTING DATA FRAMES
# 2) AGE/GENDER PLOT
# 3) STATUS PLOTS
# 4) TREND PLOTS
# 5) ANNEX PLOTS
# 6) WRITE TO .PNG
#
#
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- SECTION 1: DEFINE MPA-SPECIFIC PLOTTING DATA FRAMES ----
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# Load dependency scripts: calculations, significance tests, and the
# plotting-format datasets this script draws from.
# NOTE(review): these are hard-coded, user-specific absolute paths; consider
# relative paths (or here::here()) so the script runs on other machines.
source("C:/Users/bauer-intern/Dropbox/MPAMystery/MyWork/SBS_TechReport_Calculations.R")
source("C:/Users/bauer-intern/Dropbox/MPAMystery/MyWork/Flotim.TechReport.SigTest.2019.R")
source("C:/Users/bauer-intern/Dropbox/MPAMystery/MyWork/Flotim.TechReport.Datasets.2019.R")
# How many households (MPAID 16, treatment group) actually answered each of
# the fishing-related questions, per settlement and interview year:
# 'total' is the number of households interviewed, the 'actual*' columns
# count non-NA responses to each question.
HHData %>%
  filter(MPAID == 16,
         Treatment == 1) %>%
  group_by(SettlementName, InterviewYear) %>%
  summarise(total = length(HouseholdID),
            actualfishfreq = sum(!is.na(FreqFish)),
            actualfreqsale = sum(!is.na(FreqSaleFish)),
            actualpercentinc = sum(!is.na(PercentIncFish)),
            actualmajfishtech = sum(!is.na(MajFishTechnique)),
            actualproteinfish = sum(!is.na(PercentProteinFish)),
            actualeatfish = sum(!is.na(FreqEatFish))) %>%
  View()
# ---- 1.2 Define significance labels and (x,y) coordinates for plots ----

# gridExtra supplies grid.arrange()/arrangeGrob() used to compose figures below.
library(gridExtra)

# Significance asterisks per settlement and variable, derived from the
# settlement-level p-values computed in the sourced SigTest script.
Flotim.statusplot.asterisks <-
  define.statusplot.asterisks(Flotim.ContData.Techreport.status.PLOTFORMAT[,c("SettlementName","FS.pval",
                                                                              "MA.pval","MT.pval","PA.pval",
                                                                              "SE.pval", "TimeMarket.pval",
                                                                              "Unwell.pval")])

# (x, y) plotting positions for the asterisks defined above.
Flotim.statusplot.sigpos <-
  define.statusplot.asterisk.pos(Flotim.ContData.Techreport.status.PLOTFORMAT,
                                 Flotim.statusplot.asterisks)

# ---- 1.3 Define Flotim-specific plot labels, with significance asterisks ----

# Monitoring-year axis labels for annex (reversed order) and trend plots.
Flotim.annexplot.monitoryear.labs <- rev(define.year.monitoryear.column(Flotim.AnnexContData.Techreport.PLOTFORMAT))
Flotim.trendplot.monitoryear.labs <- (define.year.monitoryear.column(Flotim.AnnexContData.Techreport.PLOTFORMAT))

# y-axis labels for continuous trend plots, with asterisks appended where
# the trend test was significant.
Flotim.conttrendplot.ylabs <-
  define.conttrendplot.ylabels.withasterisks(Flotim.TrendContData.Techreport.PLOTFORMAT
                                             [is.na(Flotim.TrendContData.Techreport.PLOTFORMAT$MonitoringYear),
                                              c("FSMean","MAMean","PAMean","MTMean",
                                                "SEMean","TimeMarketMean","UnwellMean")])

# y-axis labels for proportional trend plots (same asterisk convention).
proportional.variables.plotlabs <-colnames(propdata.trend.test.Flotim)
Flotim.proptrendplot.ylabs <-
  define.proptrendplot.ylabels.withasterisks(propdata.trend.test.Flotim)

# One ggplot labs() object per trend plot, keyed by variable.
Flotim.trendplot.labs <- list(FS=labs(y=as.character(Flotim.conttrendplot.ylabs["FSMean"]),x="Monitoring Year"),
                              MA=labs(y=as.character(Flotim.conttrendplot.ylabs["MAMean"]),x="Monitoring Year"),
                              MT=labs(y=as.character(Flotim.conttrendplot.ylabs["MTMean"]),x="Monitoring Year"),
                              PA=labs(y=as.character(Flotim.conttrendplot.ylabs["PAMean"]),x="Monitoring Year"),
                              SE=labs(y=as.character(Flotim.conttrendplot.ylabs["SEMean"]),x="Monitoring Year"),
                              Market=labs(y=as.character(Flotim.conttrendplot.ylabs["TimeMarketMean"]),
                                          x="Monitoring Year"),
                              Unwell=labs(y=as.character(Flotim.conttrendplot.ylabs["UnwellMean"]),x="Monitoring Year"),
                              Gender=labs(y="Gender (% head of household)",x="Monitoring Year"),
                              Religion=labs(y="Religion (% households)",x="Monitoring Year"),
                              PrimaryOcc=labs(y=as.character(Flotim.proptrendplot.ylabs["Primary occupation (% households)"]),x="Monitoring Year"),
                              FreqFish=labs(y=as.character(Flotim.proptrendplot.ylabs["Frequency of fishing (% households)"]),x="Monitoring Year"),
                              FreqSellFish=labs(y=as.character(Flotim.proptrendplot.ylabs["Frequency of selling at least some catch (% households)"]),x="Monitoring Year"),
                              IncFish=labs(y=as.character(Flotim.proptrendplot.ylabs["Income from fishing in past 6 months (% households)"]),x="Monitoring Year"),
                              FishTech=labs(y=as.character(Flotim.proptrendplot.ylabs["Fishing technique most often used in past 6 months (% households)"]),x="Monitoring Year"),
                              ChildFS=labs(y=as.character(Flotim.proptrendplot.ylabs["Child hunger (% households)"]),x="Monitoring Year"),
                              Protein=labs(y=as.character(Flotim.proptrendplot.ylabs["Dietary protein from fish in past 6 months (% households)"]),x="Monitoring Year"))

# Settlement-name labels for annex plots; row 3 is blanked out
# (presumably a spacer row — TODO confirm against annex.sigvals.Flotim).
Flotim.annexplot.settnames <-
  define.annexplot.settname.labels(annex.sigvals.Flotim)
Flotim.annexplot.settnames[3,] <- rep("",length(Flotim.annexplot.settnames[3,]))
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- SECTION 2: AGE/GENDER PLOTS ----
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- 2.1 3 Year ----

# Population pyramid for the 2017 (3-year post-baseline) round: one
# horizontal bar per age category, split by gender; legend suppressed here
# (a shared legend is composed separately below).
Flotim.age.gender.3Year <-
  melt(Flotim.AgeGender,
       id.vars = "AgeCat",
       measure.vars = c("Female.3Year", "Male.3Year")) %>%
  ggplot() +
  geom_bar(aes(x = AgeCat, y = value, fill = variable),
           stat = "identity", width = 0.75,
           colour = "#505050", size = 0.15) +
  scale_y_continuous(expand = c(0, 0), limits = c(-10, 10),
                     labels = abs(seq(-10, 10, 5))) +
  scale_fill_manual(name = "",
                    labels = c("Female", "Male"),
                    values = c("Female.3Year" = alpha("#7FCDBB", 0.95),
                               "Male.3Year" = alpha("#253494", 0.95))) +
  coord_flip() +
  age.gender.plot.theme +
  plot.guides.techreport +
  labs(x = "Age", y = "2017 Population distribution (% of individuals by gender)") +
  theme(legend.position = "none")

Flotim.age.gender.3Year
# ---- 2.2 Baseline ----

# Population pyramid for the 2014 baseline round, mirroring the 3-year plot.
Flotim.age.gender.Baseline <-
  melt(Flotim.AgeGender,
       id.vars = "AgeCat",
       measure.vars = c("Female.Baseline", "Male.Baseline")) %>%
  ggplot() +
  geom_bar(aes(x = AgeCat, y = value, fill = variable),
           stat = "identity", width = 0.75,
           colour = "#505050", size = 0.15) +
  scale_y_continuous(expand = c(0, 0), limits = c(-10, 10),
                     labels = abs(seq(-10, 10, 5))) +
  scale_fill_manual(name = "",
                    labels = c("Female", "Male"),
                    values = c("Female.Baseline" = alpha("#7FCDBB", 0.95),
                               "Male.Baseline" = alpha("#253494", 0.95))) +
  coord_flip() +
  age.gender.plot.theme +
  plot.guides.techreport +
  labs(x = "Age", y = "2014 Population distribution (% of individuals by gender)") +
  theme(legend.position = "none")

Flotim.age.gender.Baseline
# Build a throwaway version of the pyramid solely to harvest its legend,
# then stack legend + both pyramids (2017 on top, 2014 below) into a single
# composed figure.
Flotim.agegender.legend.plot <-
  melt(Flotim.AgeGender,
       id.vars = "AgeCat",
       measure.vars = c("Female.3Year", "Male.3Year")) %>%
  ggplot() +
  geom_bar(aes(x = AgeCat, y = value, fill = variable),
           stat = "identity", width = 0.75,
           colour = "#505050", size = 0.15) +
  scale_y_continuous(expand = c(0, 0), limits = c(-10, 10),
                     name = "",
                     labels = abs(seq(-10, 10, 5))) +
  scale_fill_manual(name = "",
                    values = c("Female.3Year" = alpha("#7FCDBB", 0.95),
                               "Male.3Year" = alpha("#253494", 0.95)),
                    labels = c("Female", "Male")) +
  coord_flip() +
  plot.guides.techreport +
  theme(legend.justification = "right")

Flotim.agegender.legend.plot

Flotim.agegender.legend <- g_legend(Flotim.agegender.legend.plot)

Flotim.age.gender.plot <-
  grid.arrange(Flotim.agegender.legend,
               arrangeGrob(Flotim.age.gender.3Year,
                           Flotim.age.gender.Baseline,
                           ncol = 1),
               nrow = 2, heights = c(0.35, 10))
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- SECTION 3: STATUS PLOTS ----
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- 3.1 Continuous data plots ----

# - FOOD SECURITY
# Mean household food-security score per settlement with standard-error bars,
# horizontal reference lines at the category thresholds (1.56, 4.02), and
# significance asterisks. A dummy settlement row appends an empty bar slot so
# the category labels fit at the top of the flipped plot.
# FIX: show.legend uses FALSE rather than F (T/F are reassignable variables).
Flotim.fs.statusplot <-
  rbind.data.frame(Flotim.ContData.Techreport.status.PLOTFORMAT,
                   cbind.data.frame(SettlementID=NA,SettlementName=" ",
                                    matrix(rep(NA,22),ncol=22,
                                           dimnames=list(NULL,
                                                         colnames(Flotim.ContData.Techreport.status.PLOTFORMAT)[3:24])),
                                    SettLevel="Dummy")) %>%
  ggplot(aes(x=SettlementName)) +
  geom_hline(aes(yintercept=1.56),size=0.25,colour="#505050") +
  geom_hline(aes(yintercept=4.02),size=0.25,colour="#505050") +
  geom_bar(aes(y=FSMean,
               fill=SettLevel),
           stat="identity",
           position="dodge",
           width=0.75,
           show.legend=FALSE) +
  geom_errorbar(aes(ymin=FSMean-FSErr,
                    ymax=FSMean+FSErr,
                    colour=SettLevel),
                width=0.25,
                size=0.5,
                show.legend=FALSE) +
  geom_vline(aes(xintercept=3),
             linetype=2,
             size=0.35,
             colour="#505050") +
  # settlement-level significance asterisks
  geom_text(data=Flotim.statusplot.sigpos,
            aes(x=SettlementName,
                y=FS),
            label=Flotim.statusplot.asterisks$FS,
            nudge_x=-0.07,
            nudge_y=-0.1,
            size=rel(4),
            colour=errcols.status["NotDummy"]) +
  geom_text(data=Flotim.statusplot.sigpos,
            aes(x=SettlementName,y=FS.ref),
            label=Flotim.statusplot.asterisks$FS.ref,
            size=rel(3),
            nudge_x=0.02,
            fontface="bold.italic",
            colour=errcols.status["NotDummy"]) +
  # category labels centred within each threshold band
  geom_text(aes(x=length(SettlementName),y=(0.5*(6.06-4.02))+4.02,label="Food secure"),
            size=rel(2.5),lineheight=0.8,fontface="bold.italic",colour="#505050") +
  geom_text(aes(x=length(SettlementName),y=(0.5*(4.02-1.56))+1.56,label="Food insecure\nwithout hunger"),
            size=rel(2.5),lineheight=0.8,fontface="bold.italic",colour="#505050") +
  geom_text(aes(x=length(SettlementName),y=0.5*1.56,label="Food insecure\nwith hunger"),
            size=rel(2.5),lineheight=0.8,fontface="bold.italic",colour="#505050") +
  scale_y_continuous(expand=c(0,0),
                     limits=c(0,6.06)) +
  scale_fill_manual(values=fillcols.status) +
  scale_colour_manual(values=errcols.status) +
  coord_flip() + Statusplot.labs["FS"] +
  theme(axis.ticks=element_blank(),
        panel.background=element_rect(fill="white",
                                      colour="#909090"),
        panel.border=element_rect(fill=NA,
                                  size=0.25,
                                  colour="#C0C0C0"),
        panel.grid.major.x=element_blank(),
        panel.grid.major.y=element_blank(),
        axis.title=element_text(size=10,
                                angle=0,
                                face="bold",
                                colour="#303030"),
        axis.text=element_text(size=8,
                               angle=0,
                               colour="#303030"))

Flotim.fs.statusplot
# - MATERIAL ASSETS
# Mean household material assets per settlement with SE bars and
# significance asterisks.
# FIX: show.legend and na.rm use FALSE/TRUE rather than F/T.
Flotim.ma.statusplot <- ggplot(data=Flotim.ContData.Techreport.status.PLOTFORMAT,
                               aes(x=SettlementName)) +
  geom_bar(aes(y=MAMean,
               fill=SettLevel),
           stat="identity",
           position="dodge",
           width=0.75,
           show.legend=FALSE) +
  geom_errorbar(aes(ymin=MAMean-MAErr,
                    ymax=MAMean+MAErr,
                    colour=SettLevel),
                width=0.25,
                size=0.5,
                show.legend=FALSE) +
  geom_vline(aes(xintercept=3),
             linetype=2,
             size=0.35,
             colour="#505050") +
  geom_text(data=Flotim.statusplot.sigpos,
            aes(x=SettlementName,
                y=MA),
            label=Flotim.statusplot.asterisks$MA,
            nudge_x=-0.07,
            nudge_y=0.28,
            size=rel(4),
            colour=errcols.status["NotDummy"]) +
  geom_text(data=Flotim.statusplot.sigpos,
            aes(x=SettlementName,y=MA.ref),
            label=Flotim.statusplot.asterisks$MA.ref,
            size=rel(3),
            nudge_x=0.02,
            fontface="bold.italic",
            colour=errcols.status["NotDummy"]) +
  # upper limit = max mean + max SE + 3% headroom
  scale_y_continuous(expand=c(0,0),
                     limits=c(0,max(Flotim.ContData.Techreport.status.PLOTFORMAT$MAMean,na.rm=TRUE)+
                                max(Flotim.ContData.Techreport.status.PLOTFORMAT$MAErr,na.rm=TRUE)+
                                0.03*max(Flotim.ContData.Techreport.status.PLOTFORMAT$MAMean,na.rm=TRUE))) +
  scale_fill_manual(values=fillcols.status) +
  scale_colour_manual(values=errcols.status) +
  coord_flip() + Statusplot.labs["MA"] + plot.theme

Flotim.ma.statusplot
# - PLACE ATTACHMENT
# Mean place-attachment score (fixed 0-5 scale) per settlement with SE bars
# and significance asterisks.
# FIX: show.legend uses FALSE rather than F.
Flotim.pa.statusplot <- ggplot(data=Flotim.ContData.Techreport.status.PLOTFORMAT,
                               aes(x=SettlementName)) +
  geom_bar(aes(y=PAMean,
               fill=SettLevel),
           stat="identity",
           position="dodge",
           width=0.75,
           show.legend=FALSE) +
  geom_errorbar(aes(ymin=PAMean-PAErr,
                    ymax=PAMean+PAErr,
                    colour=SettLevel),
                width=0.25,
                size=0.5,
                show.legend=FALSE) +
  geom_vline(aes(xintercept=3),
             linetype=2,
             size=0.35,
             colour="#505050") +
  geom_text(data=Flotim.statusplot.sigpos,
            aes(x=SettlementName,
                y=PA),
            label=Flotim.statusplot.asterisks$PA,
            nudge_x=-0.07,
            nudge_y=0.07,
            size=rel(4),
            colour=errcols.status["NotDummy"]) +
  geom_text(data=Flotim.statusplot.sigpos,
            aes(x=SettlementName,y=PA.ref),
            label=Flotim.statusplot.asterisks$PA.ref,
            size=rel(3),
            nudge_x=0.02,
            fontface="bold.italic",
            colour=errcols.status["NotDummy"]) +
  scale_y_continuous(expand=c(0,0),
                     limits=c(0,5)) +
  scale_fill_manual(values=fillcols.status) +
  scale_colour_manual(values=errcols.status) +
  coord_flip() + Statusplot.labs["PA"] + plot.theme

Flotim.pa.statusplot
# - MARINE TENURE
# Mean marine-tenure score (fixed 0-5 scale) per settlement with SE bars and
# significance asterisks (asterisk y offset by 5% of the value).
# FIX: show.legend uses FALSE rather than F.
Flotim.mt.statusplot <- ggplot(data=Flotim.ContData.Techreport.status.PLOTFORMAT,
                               aes(x=SettlementName)) +
  geom_bar(aes(y=MTMean,
               fill=SettLevel),
           stat="identity",
           position="dodge",
           width=0.75,
           show.legend=FALSE) +
  geom_errorbar(aes(ymin=MTMean-MTErr,
                    ymax=MTMean+MTErr,
                    colour=SettLevel),
                width=0.25,
                size=0.5,
                show.legend=FALSE) +
  geom_vline(aes(xintercept=3),
             linetype=2,
             size=0.35,
             colour="#505050") +
  geom_text(data=Flotim.statusplot.sigpos,
            aes(x=SettlementName,
                y=MT+(0.05*MT)),
            label=Flotim.statusplot.asterisks$MT,
            nudge_x=-0.07,
            size=rel(4),
            colour=errcols.status["NotDummy"]) +
  geom_text(data=Flotim.statusplot.sigpos,
            aes(x=SettlementName,y=MT.ref),
            label=Flotim.statusplot.asterisks$MT.ref,
            size=rel(3),
            nudge_x=0.02,
            fontface="bold.italic",
            colour=errcols.status["NotDummy"]) +
  scale_y_continuous(expand=c(0,0),
                     limits=c(0,5)) +
  scale_fill_manual(values=fillcols.status) +
  scale_colour_manual(values=errcols.status) +
  coord_flip() + Statusplot.labs["MT"] + plot.theme

Flotim.mt.statusplot
# - SCHOOL ENROLLMENT
# Mean proportion of school-aged children enrolled, per settlement, with SE
# bars and significance asterisks; y axis formatted as percentages.
# FIX: show.legend uses FALSE rather than F.
Flotim.se.statusplot <- ggplot(data=Flotim.ContData.Techreport.status.PLOTFORMAT,
                               aes(x=SettlementName)) +
  geom_bar(aes(y=SEMean,
               fill=SettLevel),
           stat="identity",
           position="dodge",
           width=0.75,
           show.legend=FALSE) +
  geom_errorbar(aes(ymin=SEMean-SEErr,
                    ymax=SEMean+SEErr,
                    colour=SettLevel),
                width=0.25,
                size=0.5,
                show.legend=FALSE) +
  geom_vline(aes(xintercept=3),
             linetype=2,
             size=0.35,
             colour="#505050") +
  geom_text(data=Flotim.statusplot.sigpos,
            aes(x=SettlementName,
                y=SE),
            label=Flotim.statusplot.asterisks$SE,
            nudge_x=-0.07,
            size=rel(4),
            colour=errcols.status["NotDummy"]) +
  geom_text(data=Flotim.statusplot.sigpos,
            aes(x=SettlementName,y=SE.ref),
            label=Flotim.statusplot.asterisks$SE.ref,
            size=rel(3),
            nudge_x=0.02,
            fontface="bold.italic",
            colour=errcols.status["NotDummy"]) +
  scale_y_continuous(expand=c(0,0),
                     labels=scales::percent_format(),
                     limits=c(0,1.1)) +
  scale_fill_manual(values=fillcols.status) +
  scale_colour_manual(values=errcols.status) +
  coord_flip() + Statusplot.labs["SE"] + plot.theme

Flotim.se.statusplot
# - TIME TO MARKET
# Mean travel time to market per settlement with SE bars and significance
# asterisks.
# FIX: show.legend and na.rm use FALSE/TRUE rather than F/T.
Flotim.time.statusplot <- ggplot(data=Flotim.ContData.Techreport.status.PLOTFORMAT,
                                 aes(x=SettlementName)) +
  geom_bar(aes(y=TimeMarketMean,
               fill=SettLevel),
           stat="identity",
           position="dodge",
           width=0.75,
           show.legend=FALSE) +
  geom_errorbar(aes(ymin=TimeMarketMean-TimeMarketErr,
                    ymax=TimeMarketMean+TimeMarketErr,
                    colour=SettLevel),
                width=0.25,
                size=0.5,
                show.legend=FALSE) +
  geom_vline(aes(xintercept=3),
             linetype=2,
             size=0.35,
             colour="#505050") +
  geom_text(data=Flotim.statusplot.sigpos,
            aes(x=SettlementName,
                y=Market),
            label=Flotim.statusplot.asterisks$Market,
            nudge_x=-0.07,
            nudge_y=0.07,
            size=rel(4),
            colour=errcols.status["NotDummy"]) +
  geom_text(data=Flotim.statusplot.sigpos,
            aes(x=SettlementName,y=Market.ref),
            label=Flotim.statusplot.asterisks$Market.ref,
            size=rel(3),
            fontface="bold.italic",
            colour=errcols.status["NotDummy"]) +
  # upper limit = max mean + max SE + 3% headroom
  scale_y_continuous(expand=c(0,0),
                     limits=c(0,max(Flotim.ContData.Techreport.status.PLOTFORMAT$TimeMarketMean,na.rm=TRUE)+
                                max(Flotim.ContData.Techreport.status.PLOTFORMAT$TimeMarketErr,na.rm=TRUE)+
                                0.03*max(Flotim.ContData.Techreport.status.PLOTFORMAT$TimeMarketMean,na.rm=TRUE))) +
  scale_fill_manual(values=fillcols.status) +
  scale_colour_manual(values=errcols.status) +
  coord_flip() + Statusplot.labs["Time"] + plot.theme

Flotim.time.statusplot
# - DAYS UNWELL
# Mean number of days unwell per settlement with SE bars and significance
# asterisks.
# FIX: show.legend and na.rm use FALSE/TRUE rather than F/T.
Flotim.unwell.statusplot <- ggplot(data=Flotim.ContData.Techreport.status.PLOTFORMAT,
                                   aes(x=SettlementName)) +
  geom_bar(aes(y=UnwellMean,
               fill=SettLevel),
           stat="identity",
           position="dodge",
           width=0.75,
           show.legend=FALSE) +
  geom_errorbar(aes(ymin=UnwellMean-UnwellErr,
                    ymax=UnwellMean+UnwellErr,
                    colour=SettLevel),
                width=0.25,
                size=0.5,
                show.legend=FALSE) +
  geom_vline(aes(xintercept=3),
             linetype=2,
             size=0.35,
             colour="#505050") +
  geom_text(data=Flotim.statusplot.sigpos,
            aes(x=SettlementName,
                y=Unwell),
            label=Flotim.statusplot.asterisks$Unwell,
            nudge_x=-0.07,
            nudge_y=-0.1,
            size=rel(4),
            colour=errcols.status["NotDummy"]) +
  geom_text(data=Flotim.statusplot.sigpos,
            aes(x=SettlementName,y=Unwell.ref),
            label=Flotim.statusplot.asterisks$Unwell.ref,
            size=rel(3),
            nudge_x=0.02,
            fontface="bold.italic",
            colour=errcols.status["NotDummy"]) +
  # upper limit = max mean + max SE + 3% headroom
  scale_y_continuous(expand=c(0,0),
                     limits=c(0,max(Flotim.ContData.Techreport.status.PLOTFORMAT$UnwellMean,na.rm=TRUE)+
                                max(Flotim.ContData.Techreport.status.PLOTFORMAT$UnwellErr,na.rm=TRUE)+
                                0.03*max(Flotim.ContData.Techreport.status.PLOTFORMAT$UnwellMean,na.rm=TRUE))) +
  scale_fill_manual(values=fillcols.status) +
  scale_colour_manual(values=errcols.status) +
  coord_flip() + Statusplot.labs["Unwell"] + plot.theme

Flotim.unwell.statusplot
# ---- 3.2 Proportional data plots ----
# Each plot below is a flipped, 100%-stacked bar chart of per-settlement
# answer proportions. NOTE(review): the dashed line at x = 3 presumably
# separates settlement groupings — confirm against the dataset ordering.

# - GENDER OF HEAD OF HOUSEHOLD
Flotim.gender.statusplot <-
  melt(Flotim.PropData.Techreport.status.PLOTFORMAT,
       id.vars="SettlementName",measure.vars=c("HHH.female","HHH.male")) %>%
  ggplot(aes(x=SettlementName,
             y=value)) +
  geom_bar(aes(fill=variable),
           stat="identity",
           position="fill",
           width=0.75,
           size=0.15,
           colour="#505050") +
  geom_vline(aes(xintercept=3),
             linetype=2,
             size=0.35,
             colour="#505050") +
  scale_y_continuous(expand=c(0,0),
                     labels=scales::percent_format()) +
  scale_fill_manual(name="",
                    values=multianswer.fillcols.status[["Gender"]],
                    labels=c("Female","Male")) +
  coord_flip() + plot.theme + Statusplot.labs["Gender"] + plot.guides.techreport

Flotim.gender.statusplot

# - RELIGION
# Custom guides() here lay the three-category legend out horizontally,
# with categories in reverse stacking order.
Flotim.religion.statusplot <-
  melt(Flotim.PropData.Techreport.status.PLOTFORMAT,
       id.vars="SettlementName",measure.vars=c("Percent.Rel.Other","Percent.Rel.Muslim","Percent.Rel.Christian")) %>%
  ggplot(aes(x=SettlementName,
             y=value)) +
  geom_bar(aes(fill=variable),
           stat="identity",
           position="fill",
           width=0.75,
           size=0.15,
           colour="#505050") +
  geom_vline(aes(xintercept=3),
             linetype=2,
             size=0.35,
             colour="#505050") +
  scale_y_continuous(expand=c(0,0),
                     labels=scales::percent_format()) +
  scale_fill_manual(name="",
                    values=multianswer.fillcols.status[["Religion"]],
                    labels=c("Other","Muslim","Christian")) +
  coord_flip() + plot.theme + Statusplot.labs["Religion"] +
  guides(fill=guide_legend(label.vjust=0.5,
                           label.theme=element_text(size=rel(9),
                                                    angle=0,
                                                    colour="#505050",
                                                    lineheight=0.75),
                           direction="horizontal",
                           ncol=3,
                           title.position="left",
                           label.position="right",
                           keywidth=unit(0.75,"cm"),
                           keyheight=unit(0.5,"cm"),
                           reverse=T))

Flotim.religion.statusplot
# - PRIMARY OCCUPATION
# Share of households per settlement by primary occupation category.
Flotim.primaryocc.statusplot <-
  melt(Flotim.PropData.Techreport.status.PLOTFORMAT,
       id.vars="SettlementName",measure.vars=c("Percent.PrimaryOcc.Other",
                                               "Percent.PrimaryOcc.WageLabor",
                                               "Percent.PrimaryOcc.Tourism",
                                               "Percent.PrimaryOcc.Fish","Percent.PrimaryOcc.HarvestForest", "Percent.PrimaryOcc.Farm")) %>%
  ggplot(aes(x=SettlementName,y=value,fill=variable)) +
  geom_bar(stat="identity",
           position="fill",
           width=0.75,
           size=0.15,
           colour="#505050") +
  geom_vline(aes(xintercept=3),
             linetype=2,
             size=0.35,
             colour="#505050") +
  scale_y_continuous(expand=c(0,0),
                     labels=scales::percent_format()) +
  scale_fill_manual(name="",
                    values=multianswer.fillcols.status[["PrimaryOcc"]],
                    labels=c("Other","Other Wage Labor","Tourism",
                             "Fishing","Harvest Forest Products", "Farming")) +
  coord_flip() + plot.theme + Statusplot.labs["PrimaryOcc"] + plot.guides.techreport

Flotim.primaryocc.statusplot

# - FISHING FREQUENCY
# Share of households per settlement by how often they fish.
Flotim.freqfish.statusplot <-
  melt(Flotim.PropData.Techreport.status.PLOTFORMAT,
       id.vars="SettlementName",measure.vars=c("Prop.Fish.MoreFewTimesWk","Prop.Fish.FewTimesPerWk",
                                               "Prop.Fish.FewTimesPerMo","Prop.Fish.FewTimesPer6Mo",
                                               "Prop.Fish.AlmostNever")) %>%
  ggplot(aes(x=SettlementName,y=value,fill=variable)) +
  geom_bar(stat="identity",
           position="fill",
           width=0.75,
           size=0.15,
           colour="#505050") +
  geom_vline(aes(xintercept=3),
             linetype=2,
             size=0.35,
             colour="#505050") +
  scale_y_continuous(expand=c(0,0),
                     labels=scales::percent_format()) +
  scale_fill_manual(name="",
                    values=multianswer.fillcols.status[["FreqFish"]],
                    labels=c("More than a few times per week","A few times per week",
                             "A few times per month","A few times per six months",
                             "Once every six months")) +
  coord_flip() + plot.theme + Statusplot.labs["FreqFish"] + plot.guides.techreport

Flotim.freqfish.statusplot

# - SELL FISH FREQUENCY
# Share of households per settlement by how often they sell part of the catch.
Flotim.freqsellfish.statusplot <-
  melt(Flotim.PropData.Techreport.status.PLOTFORMAT,
       id.vars="SettlementName",measure.vars=c("Prop.SellFish.MoreFewTimesWk","Prop.SellFish.FewTimesPerWk",
                                               "Prop.SellFish.FewTimesPerMo","Prop.SellFish.FewTimesPer6Mo",
                                               "Prop.SellFish.AlmostNever")) %>%
  ggplot(aes(x=SettlementName,y=value,fill=variable)) +
  geom_bar(stat="identity",
           position="fill",
           width=0.75,
           size=0.15,
           colour="#505050") +
  geom_vline(aes(xintercept=3),
             linetype=2,
             size=0.35,
             colour="#505050") +
  scale_y_continuous(expand=c(0,0),
                     labels=scales::percent_format()) +
  scale_fill_manual(name="",
                    values=multianswer.fillcols.status[["FreqSellFish"]],
                    labels=c("More than a few times per week","A few times per week",
                             "A few times per month","A few times per six months",
                             "Once every six months")) +
  coord_flip() + plot.theme + Statusplot.labs["FreqSellFish"] + plot.guides.techreport

Flotim.freqsellfish.statusplot
# - INCOME FROM FISHING
# Share of households per settlement by fraction of income from fishing.
Flotim.incfish.statusplot <-
  melt(Flotim.PropData.Techreport.status.PLOTFORMAT,
       id.vars="SettlementName",measure.vars=c("Prop.IncFish.All","Prop.IncFish.Most",
                                               "Prop.IncFish.Half","Prop.IncFish.Some",
                                               "Prop.IncFish.None")) %>%
  ggplot(aes(x=SettlementName,y=value,fill=variable)) +
  geom_bar(stat="identity",
           position="fill",
           width=0.75,
           size=0.15,
           colour="#505050") +
  geom_vline(aes(xintercept=3),
             linetype=2,
             size=0.35,
             colour="#505050") +
  scale_y_continuous(expand=c(0,0),
                     labels=scales::percent_format()) +
  scale_fill_manual(name="",
                    values=multianswer.fillcols.status[["IncFish"]],
                    labels=c("All","Most","About half","Some","None")) +
  coord_flip() + plot.theme + Statusplot.labs["IncFish"] + plot.guides.techreport

Flotim.incfish.statusplot

# - FISHING TECHNIQUE
# Share of households per settlement by most-used fishing technique.
Flotim.fishtech.statusplot <-
  melt(Flotim.PropData.Techreport.status.PLOTFORMAT,
       id.vars="SettlementName",measure.vars=c("Prop.FishTech.MobileLine","Prop.FishTech.StatLine",
                                               "Prop.FishTech.MobileNet","Prop.FishTech.StatNet",
                                               "Prop.FishTech.ByHand")) %>%
  ggplot(aes(x=SettlementName,y=value,fill=variable)) +
  geom_bar(stat="identity",
           position="fill",
           width=0.75,
           size=0.15,
           colour="#505050") +
  geom_vline(aes(xintercept=3),
             linetype=2,
             size=0.35,
             colour="#505050") +
  scale_y_continuous(expand=c(0,0),
                     labels=scales::percent_format()) +
  scale_fill_manual(name="",
                    values=multianswer.fillcols.status[["FishTech"]],
                    labels=c("Mobile line","Stationary line",
                             "Mobile net","Stationary net","Fishing by hand")) +
  coord_flip() + plot.theme + Statusplot.labs["FishTech"] + plot.guides.techreport

Flotim.fishtech.statusplot

# - CHILDHOOD FOOD SECURITY
# Share of households per settlement with/without evidence of child hunger.
Flotim.childfs.statusplot <-
  melt(Flotim.PropData.Techreport.status.PLOTFORMAT,
       id.vars="SettlementName",measure.vars=c("Child.FS.yes","Child.FS.no")) %>%
  ggplot(aes(x=SettlementName,
             y=value)) +
  geom_bar(aes(fill=variable),
           stat="identity",
           position="fill",
           width=0.75,
           size=0.15,
           colour="#505050") +
  geom_vline(aes(xintercept=3),
             linetype=2,
             size=0.35,
             colour="#505050") +
  scale_y_continuous(expand=c(0,0),
                     labels=scales::percent_format()) +
  scale_fill_manual(name="",
                    values=multianswer.fillcols.status[["ChildFS"]],
                    labels=c("Evidence of child hunger","No evidence of child hunger")) +
  coord_flip() + plot.theme + Statusplot.labs["ChildFS"] + plot.guides.techreport

Flotim.childfs.statusplot
# - PROTEIN FROM FISH
# Share of households per settlement by fraction of dietary protein from fish.
Flotim.proteinfish.statusplot <-
  melt(Flotim.PropData.Techreport.status.PLOTFORMAT,
       id.vars="SettlementName",measure.vars=c("ProteinFish.All","ProteinFish.Most",
                                               "ProteinFish.Half","ProteinFish.Some",
                                               "ProteinFish.None")) %>%
  ggplot(aes(x=SettlementName,y=value,fill=variable)) +
  geom_bar(stat="identity",
           position="fill",
           width=0.75,
           size=0.15,
           colour="#505050") +
  geom_vline(aes(xintercept=3),
             linetype=2,
             size=0.35,
             colour="#505050") +
  scale_y_continuous(expand=c(0,0),
                     labels=scales::percent_format()) +
  scale_fill_manual(name="",
                    values=multianswer.fillcols.status[["Protein"]],
                    labels=c("All","Most","About half","Some","None")) +
  coord_flip() + plot.theme + Statusplot.labs["FishProtein"] + plot.guides.techreport

Flotim.proteinfish.statusplot

# - CATEGORICAL FOOD SECURITY
# Share of households per settlement in each food-security category.
Flotim.FSCategorical.statusplot <-
  melt(Flotim.PropData.Techreport.status.PLOTFORMAT,
       id.vars="SettlementName",measure.vars=c("Percent.FoodInsecure.YesHunger", "Percent.FoodInsecure.NoHunger", "Percent.FoodSecure")) %>%
  ggplot(aes(x=SettlementName,y=value,fill=variable)) +
  geom_bar(stat="identity",
           position="fill",
           width=0.75,
           size=0.15,
           colour="#505050") +
  geom_vline(aes(xintercept=3),
             linetype=2,
             size=0.35,
             colour="#505050") +
  scale_y_continuous(expand=c(0,0),
                     labels=scales::percent_format()) +
  scale_fill_manual(name="",
                    values=multianswer.fillcols.status[["FSCategorical"]],
                    labels=c("Food insecure with hunger", "Food insecure without hunger","Food secure" )) +
  coord_flip() + plot.theme + Statusplot.labs["FSCategorical"] + plot.guides.techreport

Flotim.FSCategorical.statusplot
# ADULT EDUCATION
# 100% stacked horizontal bars of adult educational attainment per settlement,
# six levels ordered highest -> none; data come from the settlement-by-settlement
# (SBS) proportion table. Labels pair positionally with measure.vars.
# Dashed line at x=3: row divider on the flipped axis (presumably separates
# control/reference rows -- TODO confirm). Not printed here, unlike siblings.
Flotim.AdultEduc.statusplot <-
melt(Flotim.SBSPropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("AdultEducHigher", "AdultEducSec", "AdultEducMid",
"AdultEducPrim", "AdultEducPre", "AdultEducNone")) %>%
ggplot(aes(x=SettlementName,y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["AdultEducation"]],
labels=c("Further or higher education","High school education","Middle school education","Primary school education","Pre-school education", "No formal education")) +
coord_flip() + plot.theme + Statusplot.labs["AdultEduc"] + plot.guides.techreport
# HOUSEHOLD HEAD EDUCATION
# Same layout as the adult-education plot above, restricted to household heads
# (HHHEduc* columns). Labels pair positionally with measure.vars order.
Flotim.HHHEduc.statusplot <-
melt(Flotim.SBSPropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("HHHEducHigher", "HHHEducSec", "HHHEducMid",
"HHHEducPrim", "HHHEducPre", "HHHEducNone")) %>%
ggplot(aes(x=SettlementName,y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["HHHEducation"]],
labels=c("Further or higher education","High school education","Middle school education","Primary school education","Pre-school education", "No formal education")) +
coord_flip() + plot.theme + Statusplot.labs["HHHEduc"] + plot.guides.techreport
# ECONOMIC STATUS
# 100% stacked horizontal bars of perceived household economic status change,
# five ordered categories (much better -> much worse).
# NOTE(review): "Econ.Status.Slighly.Worse" is misspelled ("Slighly") but must
# match the column name in the source data frame exactly -- fix only together
# with the upstream column name.
Flotim.econ.statusplot <-
melt(Flotim.SBSPropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("Econ.Status.Much.Better","Econ.Status.Slightly.Better",
"Econ.Status.Neutral","Econ.Status.Slighly.Worse",
"Econ.Status.Much.Worse")) %>%
ggplot(aes(x=SettlementName,y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["EconStatus"]],
labels=c("Much better","Slightly better","Neither better or worse","Slightly worse","Much worse")) +
coord_flip() + plot.theme + Statusplot.labs["EconStatus"] + plot.guides.techreport
# RULES
# Dodged horizontal bars: per-settlement percentage of respondents reporting
# rules for important habitats vs. important species (y is a 0-100 percentage,
# not a proportion, hence limits=c(0,100) and no percent formatter).
# FIX: scale_fill_manual() labels are assigned positionally in factor-level
# order, and melt() sets levels in measure.vars order (PropRuleHab first,
# PropRuleSpp second). The original labels c("Important species",
# "Important habitats") therefore captioned the habitat series as "species"
# and vice versa; the label order is swapped here to match the data columns.
Flotim.rules.statusplot <-
melt(Flotim.SBSPropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("PropRuleHab", "PropRuleSpp")) %>%
ggplot(aes(x=SettlementName,y=value,fill=variable)) +
geom_bar(stat="identity",
position="dodge",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand = c(0, 0), limits=c(0,100)) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["PropRules"]],
labels=c("Important habitats","Important species")) +
coord_flip() + plot.theme + Statusplot.labs["Rules"] + plot.guides.techreport
# PARTICIPATION IN DECISION-MAKING
# Dodged horizontal bars (0-100 percentages) of participation in four MPA
# decision processes; control settlements are excluded, so the row divider
# moves to x=2 instead of the x=3 used elsewhere.
# NOTE(review): labels pair positionally with measure.vars; confirm that
# ParticipateOrg -> "Design of MPA management body" and ParticipateEstablish ->
# "Design of MPA-managing organization" is the intended pairing (the two
# captions are easy to transpose).
Flotim.participation.statusplot <-
melt(Flotim.SBSPropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("ParticipateRules","ParticipateBnd","ParticipateOrg", "ParticipateEstablish")) %>%
filter(., SettlementName!= "Control\nSettlements") %>%
ggplot(aes(x=SettlementName,y=value,fill=variable)) +
geom_bar(stat="identity",
position="dodge",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=2),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand = c(0, 0), limits=c(0,100)) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["Participate"]],
labels=c("Setting appropriation rules", "MPA boundary delineation", "Design of MPA management body", "Design of MPA-managing organization")) +
coord_flip() + plot.theme + Statusplot.labs["Participation"] + plot.guides.techreport
# - MEMBER OF MARINE RESOURCE ORGANIZATION
# 100% stacked horizontal bars: membership in a marine-resource organization
# per settlement. Labels pair positionally with measure.vars
# (Member.No -> "Non-member", Member.Yes -> "Member").
Flotim.member.statusplot <-
melt(Flotim.SBSPropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("Member.No","Member.Yes")) %>%
ggplot(aes(x=SettlementName,
y=value)) +
geom_bar(aes(fill=variable),
stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["Member"]],
labels=c("Non-member","Member")) +
coord_flip() + plot.theme + Statusplot.labs["Member"] + plot.guides.techreport
# - MEETING ATTENDANCE
# 100% stacked horizontal bars: among organization members, those who have vs.
# have not attended a meeting. Labels pair positionally with measure.vars
# (Meeting.No first, Meeting.Yes second).
Flotim.meeting.statusplot <-
melt(Flotim.SBSPropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("Prop.Member.Yes.Meeting.No", "Prop.Member.Yes.Meeting.Yes")) %>%
ggplot(aes(x=SettlementName,
y=value)) +
geom_bar(aes(fill=variable),
stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["Attendance"]],
labels=c("Have not attended a meeting","Attended a meeting")) +
coord_flip() + plot.theme + Statusplot.labs["Attendance"] + plot.guides.techreport
# - ILLNESS
# 100% stacked horizontal bars: households reporting an illness/injury vs. not,
# per settlement.
# FIX: scale_fill_manual() labels are assigned positionally in factor-level
# order, and melt() sets levels in measure.vars order: Percent.Not.Ill first,
# Percent.Ill second. The original labels c("Ill or injured ","Not Ill or
# injured") were inverted (the not-ill series was captioned "Ill or injured"),
# contradicting the positional convention used by every sibling two-level plot
# (member, meeting, childfs). The labels are reordered to match the columns;
# a stray trailing space in "Ill or injured " is also removed.
Flotim.illness.statusplot <-
melt(Flotim.SBSPropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("Percent.Not.Ill", "Percent.Ill")) %>%
ggplot(aes(x=SettlementName,
y=value)) +
geom_bar(aes(fill=variable),
stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["Illness"]],
labels=c("Not ill or injured","Ill or injured")) +
coord_flip() + plot.theme + Statusplot.labs["Ill"] + plot.guides.techreport
# MARINE RESOUCE CONFLICT
# 100% stacked horizontal bars of perceived change in social conflict over
# marine resources, five ordered categories (greatly decreased -> greatly
# increased). Labels pair positionally with measure.vars.
# NOTE(review): legend text "Greatly Increased" is capitalized inconsistently
# with "Greatly decreased" -- cosmetic, user-visible; normalize if desired.
Flotim.conflict.statusplot <-
melt(Flotim.SBSPropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("Percent.GreatlyDecreased.SocConflict","Percent.Decreased.SocConflict",
"Percent.Same.SocConflict","Percent.Increased.SocConflict",
"Percent.GreatlyIncreased.SocConflict")) %>%
ggplot(aes(x=SettlementName,y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["SocialConflict"]],
labels=c("Greatly decreased","Decreased","Neither increased or decreased","Increased","Greatly Increased")) +
coord_flip() + plot.theme + Statusplot.labs["Conflict"] + plot.guides.techreport
# NUMBER OF LOCAL THREATS
# 100% stacked horizontal bars of the number of distinct local marine threats
# reported per household, binned 5+/4/3/2/1/0. Labels pair positionally with
# measure.vars order.
Flotim.NumThreat.statusplot <-
melt(Flotim.SBSPropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("Threat.Minimum.Five","Threat.Four", "Threat.Three",
"Threat.Two","Threat.One","Threat.None")) %>%
ggplot(aes(x=SettlementName,y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["NumThreats"]],
labels=c("More than five threats","Four threats","Three threats","Two threats","One threat", "No threats")) +
coord_flip() + plot.theme + Statusplot.labs["NumLocalThreats"] + plot.guides.techreport
# - THREAT TYPES
# 100% stacked horizontal bars of the mix of reported threat types per
# settlement; uses the dedicated Flotim.Threat.Types.PLOTFORMAT table.
# Labels mirror the measure.vars order one-for-one.
Flotim.ThreatType.statusplot <-
melt(Flotim.Threat.Types.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("Other", "OtherMarineUses", "NaturalProcesses", "HabitatLoss",
"ClimateChange", "IllegalFishing", "DestructiveFishing", "Pollution")) %>%
ggplot(aes(x=SettlementName,y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["ThreatType"]],
labels=c("Other", "Other marine resource uses", "Natural processes", "Habitat loss",
"Climate change", "Illegal fishing", "Destructive fishing", "Pollution")) +
coord_flip() + plot.theme + Statusplot.labs["ThreatTypes"] + plot.guides.techreport
# - Number of Ethnicities
# Simple horizontal bars of the count of ethnic groups per settlement.
# fill="NotDummy" is a constant mapped through fillcols.status (named palette);
# the legend is suppressed. Upper y-limit adds 3% headroom above the maximum.
Flotim.ethnicity.statusplot <- ggplot(data=Flotim.SBSPropData.Techreport.status.PLOTFORMAT,
aes(x=SettlementName)) +
geom_bar(aes(y=Num.EthnicGroups,
fill="NotDummy"),
stat="identity",
position="dodge",
width=0.75,
show.legend=F) +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
limits=c(0,max(Flotim.SBSPropData.Techreport.status.PLOTFORMAT$Num.EthnicGroups,na.rm=T) +
0.03*max(Flotim.SBSPropData.Techreport.status.PLOTFORMAT$Num.EthnicGroups,na.rm=T))) +
scale_fill_manual(values=fillcols.status) +
scale_colour_manual(values=errcols.status) +
coord_flip() + Statusplot.labs["Ethnicity"] + plot.theme
# - Contribution
# Horizontal bars of (monetary) contribution per settlement; comma-formatted
# y axis. The upper limit is 2.5x the maximum (max + 1.5*max) -- presumably
# deliberate headroom for annotations; TODO confirm it is not a typo for the
# 3% headroom pattern used by the sibling plots.
Flotim.contribution.statusplot <- ggplot(data=Flotim.SBSPropData.Techreport.status.PLOTFORMAT,
aes(x=SettlementName)) +
geom_bar(aes(y=Contribution,
fill="NotDummy"),
stat="identity",
position="dodge",
width=0.75,
show.legend=F) +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
limits=c(0,max(Flotim.SBSPropData.Techreport.status.PLOTFORMAT$Contribution,na.rm=T) +
1.5* max(Flotim.SBSPropData.Techreport.status.PLOTFORMAT$Contribution,na.rm=T)), labels = scales::comma) +
scale_fill_manual(values=fillcols.status) +
scale_colour_manual(values=errcols.status) +
coord_flip() + Statusplot.labs["Contribution"] + plot.theme
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- SECTION 4: TREND PLOTS ----
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- 4.1 Continuous data plots ----
# - FOOD SECURITY
# Mean food-security score per monitoring year with error bars, on the USDA
# scale. Horizontal reference lines at 1.56 and 4.02 are the category cut
# points; the three geom_text() calls label the bands.
# NOTE(review): the text x positions use length(MonitoringYear), which equals
# the number of data rows -- correct only if the trend table has exactly one
# row per monitoring year (TODO confirm).
# An inline theme() is used instead of plot.theme so both major gridline sets
# can be blanked for the banded background.
Flotim.fs.trendplot <-
ggplot(Flotim.TrendContData.Techreport.PLOTFORMAT
[!is.na(Flotim.TrendContData.Techreport.PLOTFORMAT$MonitoringYear),]) +
geom_hline(aes(yintercept=1.56),size=0.25,colour="#505050") +
geom_hline(aes(yintercept=4.02),size=0.25,colour="#505050") +
geom_bar(aes(x=MonitoringYear,
y=FSMean),
fill=fillcols.trend,
stat="identity",
position="dodge",
width=0.65) +
geom_errorbar(aes(ymin=FSMean-FSErr,
ymax=FSMean+FSErr,
x=MonitoringYear),
colour=errcols.trend,
width=0.15,
size=0.5,
position=position_dodge(width=1)) +
geom_text(aes(x=length(MonitoringYear)+0.46,y=(0.5*(6.06-4.02))+4.02,label="Food secure"),
size=rel(2.5),lineheight=0.8,fontface="bold.italic",colour="#505050") +
geom_text(aes(x=length(MonitoringYear)+0.46,y=(0.5*(4.02-1.56))+1.56,label="Food insecure\nwithout hunger"),
size=rel(2.5),lineheight=0.8,fontface="bold.italic",colour="#505050") +
geom_text(aes(x=length(MonitoringYear)+0.46,y=0.5*1.56,label="Food insecure\nwith hunger"),
size=rel(2.5),lineheight=0.8,fontface="bold.italic",colour="#505050") +
scale_y_continuous(expand=c(0,0),
limits=c(0,6.06)) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
coord_flip() + Flotim.trendplot.labs["FS"] + theme(axis.ticks=element_blank(),
panel.background=element_rect(fill="white",
colour="#909090"),
panel.border=element_rect(fill=NA,
size=0.25,
colour="#C0C0C0"),
panel.grid.major.x=element_blank(),
panel.grid.major.y=element_blank(),
axis.title=element_text(size=10,
angle=0,
face="bold",
colour="#303030"),
axis.text=element_text(size=8,
angle=0,
colour="#303030"))
Flotim.fs.trendplot
# - MATERIAL ASSETS
# Mean material-assets score per monitoring year with error bars; rows with a
# missing MonitoringYear are dropped. Upper y-limit = max mean + max error +
# 3% headroom.
Flotim.ma.trendplot <-
ggplot(data=Flotim.TrendContData.Techreport.PLOTFORMAT
[!is.na(Flotim.TrendContData.Techreport.PLOTFORMAT$MonitoringYear),],
aes(x=MonitoringYear)) +
geom_bar(aes(y=MAMean),
fill=fillcols.trend,
stat="identity",
position="dodge",
width=0.65) +
geom_errorbar(aes(ymin=MAMean-MAErr,
ymax=MAMean+MAErr),
colour=errcols.trend,
width=0.15,
size=0.5,
position=position_dodge(width=1)) +
scale_y_continuous(expand=c(0,0),
limits=c(0,max(Flotim.TrendContData.Techreport.PLOTFORMAT$MAMean,na.rm=T)+
max(Flotim.TrendContData.Techreport.PLOTFORMAT$MAErr,na.rm=T)+
0.03*max(Flotim.TrendContData.Techreport.PLOTFORMAT$MAMean,na.rm=T))) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
coord_flip() + Flotim.trendplot.labs["MA"] + plot.theme
Flotim.ma.trendplot
# - PLACE ATTACHMENT
# Mean place-attachment score per monitoring year with error bars; fixed 0-5
# scale (Likert-style index).
Flotim.pa.trendplot <-
ggplot(data=Flotim.TrendContData.Techreport.PLOTFORMAT
[!is.na(Flotim.TrendContData.Techreport.PLOTFORMAT$MonitoringYear),],
aes(x=MonitoringYear)) +
geom_bar(aes(y=PAMean),
fill=fillcols.trend,
stat="identity",
position="dodge",
width=0.65) +
geom_errorbar(aes(ymin=PAMean-PAErr,
ymax=PAMean+PAErr),
colour=errcols.trend,
width=0.15,
size=0.5,
position=position_dodge(width=1)) +
scale_y_continuous(expand=c(0,0),
limits=c(0,5)) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
coord_flip() + Flotim.trendplot.labs["PA"] + plot.theme
Flotim.pa.trendplot
# - MARINE TENURE
# Mean marine-tenure score per monitoring year with error bars; fixed 0-5
# scale, same layout as the place-attachment plot.
Flotim.mt.trendplot <-
ggplot(data=Flotim.TrendContData.Techreport.PLOTFORMAT
[!is.na(Flotim.TrendContData.Techreport.PLOTFORMAT$MonitoringYear),],
aes(x=MonitoringYear)) +
geom_bar(aes(y=MTMean),
fill=fillcols.trend,
stat="identity",
position="dodge",
width=0.65) +
geom_errorbar(aes(ymin=MTMean-MTErr,
ymax=MTMean+MTErr),
colour=errcols.trend,
width=0.15,
size=0.5,
position=position_dodge(width=1)) +
scale_y_continuous(expand=c(0,0),
limits=c(0,5)) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
coord_flip() + Flotim.trendplot.labs["MT"] + plot.theme
Flotim.mt.trendplot
# - SCHOOL ENROLLMENT
# Mean school-enrollment rate per monitoring year with error bars; SEMean is a
# proportion in [0,1], displayed as a percentage.
Flotim.se.trendplot <-
ggplot(data=Flotim.TrendContData.Techreport.PLOTFORMAT
[!is.na(Flotim.TrendContData.Techreport.PLOTFORMAT$MonitoringYear),],
aes(x=MonitoringYear)) +
geom_bar(aes(y=SEMean),
fill=fillcols.trend,
stat="identity",
position="dodge",
width=0.65) +
geom_errorbar(aes(ymin=SEMean-SEErr,
ymax=SEMean+SEErr),
colour=errcols.trend,
width=0.15,
size=0.5,
position=position_dodge(width=1)) +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format(),
limits=c(0,1)) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
coord_flip() + Flotim.trendplot.labs["SE"] + plot.theme
Flotim.se.trendplot
# - TIME TO MARKET
# Mean travel time to market per monitoring year with error bars; upper
# y-limit = max mean + max error + 3% headroom.
Flotim.time.trendplot <-
ggplot(data=Flotim.TrendContData.Techreport.PLOTFORMAT
[!is.na(Flotim.TrendContData.Techreport.PLOTFORMAT$MonitoringYear),],
aes(x=MonitoringYear)) +
geom_bar(aes(y=TimeMarketMean),
fill=fillcols.trend,
stat="identity",
position="dodge",
width=0.65) +
geom_errorbar(aes(ymin=TimeMarketMean-TimeMarketErr,
ymax=TimeMarketMean+TimeMarketErr),
colour=errcols.trend,
width=0.15,
size=0.5,
position=position_dodge(width=1)) +
scale_y_continuous(expand=c(0,0),
limits=c(0,max(Flotim.TrendContData.Techreport.PLOTFORMAT$TimeMarketMean,na.rm=T)+
max(Flotim.TrendContData.Techreport.PLOTFORMAT$TimeMarketErr,na.rm=T)+
0.03*max(Flotim.TrendContData.Techreport.PLOTFORMAT$TimeMarketMean,na.rm=T))) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
coord_flip() + Flotim.trendplot.labs["Market"] + plot.theme
Flotim.time.trendplot
# - DAYS UNWELL
# Mean days-unwell per monitoring year with error bars; upper y-limit =
# max mean + max error + 3% headroom.
Flotim.unwell.trendplot <-
ggplot(data=Flotim.TrendContData.Techreport.PLOTFORMAT
[!is.na(Flotim.TrendContData.Techreport.PLOTFORMAT$MonitoringYear),],
aes(x=MonitoringYear)) +
geom_bar(aes(y=UnwellMean),
fill=fillcols.trend,
stat="identity",
position="dodge",
width=0.65) +
geom_errorbar(aes(ymin=UnwellMean-UnwellErr,
ymax=UnwellMean+UnwellErr),
colour=errcols.trend,
width=0.15,
size=0.5,
position=position_dodge(width=1)) +
scale_y_continuous(expand=c(0,0),
limits=c(0,max(Flotim.TrendContData.Techreport.PLOTFORMAT$UnwellMean,na.rm=T)+
max(Flotim.TrendContData.Techreport.PLOTFORMAT$UnwellErr,na.rm=T)+
0.03*max(Flotim.TrendContData.Techreport.PLOTFORMAT$UnwellMean,na.rm=T))) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
coord_flip() + Flotim.trendplot.labs["Unwell"] + plot.theme
Flotim.unwell.trendplot
# ---- 4.2 Proportional data plots ----
# - GENDER OF HEAD OF HOUSEHOLD
# 100% stacked horizontal bars of female- vs male-headed households by
# monitoring year (trend report).
# FIX: the original mapped aes(x=rev(MonitoringYear)). Calling rev() inside
# aes() reverses the x vector row-wise without reversing y/fill, so each
# proportion was plotted against the mirrored year (data mispairing), while
# the axis labels stayed in level order. Mapping MonitoringYear directly
# restores correct pairing and matches the sibling trend plots below; to
# reverse the displayed order, use scale_x_discrete(limits=rev) instead.
Flotim.gender.trendplot <-
melt(Flotim.TrendPropData.Techreport.PLOTFORMAT,
id.vars="MonitoringYear",measure.vars=c("HHH.female","HHH.male")) %>%
ggplot(aes(x=MonitoringYear,y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.65,
size=0.15,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["Gender"]],
labels=c("Female","Male")) +
coord_flip() + Flotim.trendplot.labs["Gender"] + plot.theme + plot.guides.techreport
Flotim.gender.trendplot
# - RELIGION
# 100% stacked horizontal bars of household religion by monitoring year, with
# a custom horizontal legend (reverse=T so the legend reads left-to-right in
# display order).
# FIX: the original mapped aes(x=rev(MonitoringYear)). rev() inside aes()
# reverses the x vector without reversing y/fill, so each proportion was
# attributed to the mirrored year. Mapping MonitoringYear directly restores
# correct year/value pairing; use scale_x_discrete(limits=rev) if a reversed
# axis order is wanted.
Flotim.religion.trendplot <-
melt(Flotim.TrendPropData.Techreport.PLOTFORMAT,
id.vars="MonitoringYear",measure.vars=c("Percent.Rel.Other","Percent.Rel.Muslim","Percent.Rel.Christian")) %>%
ggplot(aes(x=MonitoringYear,
y=value)) +
geom_bar(aes(fill=variable),
stat="identity",
position="fill",
width=0.65,
size=0.15,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["Religion"]],
labels=c("Other","Muslim","Christian")) +
coord_flip() + plot.theme + Flotim.trendplot.labs["Religion"] +
guides(fill=guide_legend(label.vjust=0.5,
label.theme=element_text(size=rel(9),
angle=0,
colour="#505050",
lineheight=0.75),
direction="horizontal",
ncol=3,
title.position="left",
label.position="right",
keywidth=unit(0.75,"cm"),
keyheight=unit(0.5,"cm"),
reverse=T))
Flotim.religion.trendplot
# - PRIMARY OCCUPATION
# 100% stacked horizontal bars of primary occupation by monitoring year, six
# categories. Labels pair positionally with measure.vars order.
Flotim.primaryocc.trendplot <-
melt(Flotim.TrendPropData.Techreport.PLOTFORMAT,
id.vars="MonitoringYear",measure.vars=c("Percent.PrimaryOcc.Other","Percent.PrimaryOcc.WageLabor",
"Percent.PrimaryOcc.Tourism","Percent.PrimaryOcc.Fish",
"Percent.PrimaryOcc.HarvestForest","Percent.PrimaryOcc.Farm")) %>%
ggplot(aes(x=(MonitoringYear),y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.65,
size=0.15,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["PrimaryOcc"]],
labels=c("Other","Other Wage Labor","Tourism",
"Fishing","Harvest Forest Products","Farming")) +
coord_flip() + plot.theme + Flotim.trendplot.labs["PrimaryOcc"] + plot.guides.techreport
Flotim.primaryocc.trendplot
# USED TO CHECK DISTRIBUTION OF SECONDARY OCCUPATIONS
# Diagnostic plot (not part of the standard report label set -- axis labels are
# supplied inline via labs()). 100% stacked horizontal bars of secondary
# occupation by monitoring year.
# FIX: the original mapped aes(x=rev(MonitoringYear)). rev() inside aes()
# reverses the x vector without reversing y/fill, so each proportion was
# attributed to the mirrored year. Mapping MonitoringYear directly restores
# correct pairing and matches the primary-occupation plot above.
Flotim.Secondaryocc.trendplot <-
melt(Flotim.TrendPropData.Techreport.PLOTFORMAT,
id.vars="MonitoringYear",measure.vars=c("Percent.SecondaryOcc.Other","Percent.SecondaryOcc.WageLabor",
"Percent.SecondaryOcc.Tourism","Percent.SecondaryOcc.Fish",
"Percent.SecondaryOcc.HarvestForest","Percent.SecondaryOcc.Farm")) %>%
ggplot(aes(x=MonitoringYear,y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.65,
size=0.15,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["SecondaryOcc"]],
labels=c("Other","Other Wage Labor","Tourism",
"Fishing","Harvest Forest Products","Farming")) +
coord_flip() + plot.theme + labs(y="Secondary occupation (% households)",x="Monitoring Year") + plot.guides.techreport
Flotim.Secondaryocc.trendplot
# - FISHING FREQUENCY
# 100% stacked horizontal bars of household fishing frequency by monitoring
# year, five ordered categories. Labels pair positionally with measure.vars.
# NOTE(review): "Prop.Fish.AlmostNever" is labeled "Once every six months" --
# confirm that caption matches the survey response wording.
Flotim.freqfish.trendplot <-
melt(Flotim.TrendPropData.Techreport.PLOTFORMAT,
id.vars="MonitoringYear",measure.vars=c("Prop.Fish.MoreFewTimesWk","Prop.Fish.FewTimesPerWk",
"Prop.Fish.FewTimesPerMo","Prop.Fish.FewTimesPer6Mo",
"Prop.Fish.AlmostNever")) %>%
ggplot(aes(x=(MonitoringYear),y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.65,
size=0.15,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["FreqFish"]],
labels=c("More than a few times per week","A few times per week",
"A few times per month","A few times per six months",
"Once every six months")) +
coord_flip() + plot.theme + Flotim.trendplot.labs["FreqFish"] + plot.guides.techreport
Flotim.freqfish.trendplot
# - SELL FISH FREQUENCY
# 100% stacked horizontal bars of how often households sell fish, by
# monitoring year; same category structure as the fishing-frequency plot.
Flotim.freqsellfish.trendplot <-
melt(Flotim.TrendPropData.Techreport.PLOTFORMAT,
id.vars="MonitoringYear",measure.vars=c("Prop.SellFish.MoreFewTimesWk","Prop.SellFish.FewTimesPerWk",
"Prop.SellFish.FewTimesPerMo","Prop.SellFish.FewTimesPer6Mo",
"Prop.SellFish.AlmostNever")) %>%
ggplot(aes(x=(MonitoringYear),y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.65,
size=0.15,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["FreqSellFish"]],
labels=c("More than a few times per week","A few times per week",
"A few times per month","A few times per six months",
"Once every six months")) +
coord_flip() + plot.theme + Flotim.trendplot.labs["FreqSellFish"] + plot.guides.techreport
Flotim.freqsellfish.trendplot
# - INCOME FROM FISHING
# 100% stacked horizontal bars of the share of household income from fishing,
# by monitoring year (All -> None). Labels pair positionally with measure.vars.
Flotim.incfish.trendplot <-
melt(Flotim.TrendPropData.Techreport.PLOTFORMAT,
id.vars="MonitoringYear",measure.vars=c("Prop.IncFish.All","Prop.IncFish.Most",
"Prop.IncFish.Half","Prop.IncFish.Some",
"Prop.IncFish.None")) %>%
ggplot(aes(x=(MonitoringYear),y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.65,
size=0.15,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["IncFish"]],
labels=c("All","Most","About half","Some","None")) +
coord_flip() + plot.theme + Flotim.trendplot.labs["IncFish"] + plot.guides.techreport
Flotim.incfish.trendplot
# - FISHING TECHNIQUE
# 100% stacked horizontal bars of the primary fishing technique by monitoring
# year, five categories. Labels pair positionally with measure.vars.
Flotim.fishtech.trendplot <-
melt(Flotim.TrendPropData.Techreport.PLOTFORMAT,
id.vars="MonitoringYear",measure.vars=c("Prop.FishTech.MobileLine","Prop.FishTech.StatLine",
"Prop.FishTech.MobileNet","Prop.FishTech.StatNet",
"Prop.FishTech.ByHand")) %>%
ggplot(aes(x=(MonitoringYear),y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.65,
size=0.15,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["FishTech"]],
labels=c("Mobile line","Stationary line",
"Mobile net","Stationary net","Fishing by hand")) +
coord_flip() + plot.theme + Flotim.trendplot.labs["FishTech"] + plot.guides.techreport
Flotim.fishtech.trendplot
# - CHILDHOOD FOOD SECURITY
# 100% stacked horizontal bars of child-hunger evidence by monitoring year;
# trend counterpart of Flotim.childfs.statusplot. Labels pair positionally
# with measure.vars (Child.FS.yes first).
Flotim.childfs.trendplot <-
melt(Flotim.TrendPropData.Techreport.PLOTFORMAT,
id.vars="MonitoringYear",measure.vars=c("Child.FS.yes","Child.FS.no")) %>%
ggplot(aes(x=(MonitoringYear),
y=value)) +
geom_bar(aes(fill=variable),
stat="identity",
position="fill",
width=0.65,
size=0.15,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["ChildFS"]],
labels=c("Evidence of child hunger","No evidence of child hunger")) +
coord_flip() + plot.theme + Flotim.trendplot.labs["ChildFS"] + plot.guides.techreport
Flotim.childfs.trendplot
# - PROTEIN FROM FISH
# 100% stacked horizontal bars of protein-from-fish categories by monitoring
# year; trend counterpart of Flotim.proteinfish.statusplot.
# NOTE(review): uses labs key "Protein" while the status plot uses
# "FishProtein" -- the two label lists are distinct objects, so this may be
# intentional; confirm the key exists in Flotim.trendplot.labs.
Flotim.proteinfish.trendplot <-
melt(Flotim.TrendPropData.Techreport.PLOTFORMAT,
id.vars="MonitoringYear",measure.vars=c("ProteinFish.All","ProteinFish.Most",
"ProteinFish.Half","ProteinFish.Some",
"ProteinFish.None")) %>%
ggplot(aes(x=(MonitoringYear),y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.65,
size=0.15,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["Protein"]],
labels=c("All","Most","About half","Some","None")) +
coord_flip() + plot.theme + Flotim.trendplot.labs["Protein"] + plot.guides.techreport
Flotim.proteinfish.trendplot
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- SECTION 5: ANNEX PLOTS ----
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- 5.1 Food security -----
# Per-settlement food-security means by monitoring year (alpha encodes year,
# lighter = earlier), with error bars coloured by SettLevel. A dummy all-NA
# row (SettlementName " ") is appended so the flipped axis reserves a blank
# top row for the food-security band labels; the matrix() fill assumes
# columns 4:17 of the annex table are the 14 numeric mean/error columns --
# TODO confirm if the table layout changes. Reference lines at 1.56 / 4.02
# are the USDA category cut points, labelled by the geom_text() calls.
# Inline theme() (instead of plot.theme) blanks both gridline sets and pins
# the legend to the top-right.
Flotim.fs.annexplot <-
rbind.data.frame(Flotim.AnnexContData.Techreport.PLOTFORMAT,
cbind.data.frame(MonitoringYear=NA,SettlementID=NA,SettlementName=" ",
matrix(rep(NA,14),ncol=14,
dimnames=list(NULL,
colnames(Flotim.AnnexContData.Techreport.PLOTFORMAT)[4:17])),
SettLevel="Dummy")) %>%
ggplot() +
geom_hline(aes(yintercept=1.56),size=0.25,colour="#505050") +
geom_hline(aes(yintercept=4.02),size=0.25,colour="#505050") +
geom_bar(aes(x=SettlementName,
y=FSMean,
alpha=MonitoringYear),
stat="identity",
position="dodge",
fill=fillcols.trend,
width=0.75,
size=0.15,
colour="#505050") +
geom_errorbar(aes(x=SettlementName,
ymin=FSMean-FSErr,
ymax=FSMean+FSErr,
colour=SettLevel,
alpha=MonitoringYear),
width=0.25,
size=0.5,
position=position_dodge(width=0.75),
show.legend=F) +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
geom_text(aes(x=length(unique(SettlementName)),y=(0.5*(6.06-4.02))+4.02,label="Food secure"),
size=rel(2.5),lineheight=0.8,fontface="bold.italic",colour="#505050") +
geom_text(aes(x=length(unique(SettlementName)),y=(0.5*(4.02-1.56))+1.56,label="Food insecure\nwithout hunger"),
size=rel(2.5),lineheight=0.8,fontface="bold.italic",colour="#505050") +
geom_text(aes(x=length(unique(SettlementName)),y=0.5*1.56,label="Food insecure\nwith hunger"),
size=rel(2.5),lineheight=0.8,fontface="bold.italic",colour="#505050") +
scale_alpha_manual(name="",
values=c(0.3,0.6,1),
labels=(Flotim.annexplot.monitoryear.labs),
na.translate=FALSE) +
scale_fill_manual(values=fillcols.status) +
scale_colour_manual(values=errcols.status) +
scale_x_discrete(labels=c(Flotim.annexplot.settnames[,"FS"]," "),
na.value=" ") +
scale_y_continuous(expand=c(0,0),
limits=c(0,6.06)) +
coord_flip() + Statusplot.labs["FS"] + plot.guides.techreport + theme(axis.ticks=element_blank(),
panel.background=element_rect(fill="white",
colour="#909090"),
panel.border=element_rect(fill=NA,
size=0.25,
colour="#C0C0C0"),
panel.grid.major.x=element_blank(),
panel.grid.major.y=element_blank(),
axis.title=element_text(size=10,
angle=0,
face="bold",
colour="#303030"),
axis.text=element_text(size=8,
angle=0,
colour="#303030"),
legend.position="top",
legend.justification="right",
legend.box.spacing=unit(0.1,"cm"))
Flotim.fs.annexplot
# ---- 5.2 Material assets -----
# Per-settlement material-assets means by monitoring year (alpha encodes year);
# error-bar colour keyed by SettLevel, legend suppressed. Dashed line at x=3:
# row divider on the flipped axis. Upper y-limit = max mean + max error + 3%.
Flotim.ma.annexplot <-
ggplot(data=Flotim.AnnexContData.Techreport.PLOTFORMAT,
aes(x=SettlementName,
y=MAMean)) +
geom_bar(aes(alpha=MonitoringYear),
stat="identity",
position="dodge",
fill=fillcols.trend,
width=0.75,
size=0.15,
colour="#505050") +
geom_errorbar(aes(ymin=MAMean-MAErr,
ymax=MAMean+MAErr,
colour=SettLevel,
alpha=MonitoringYear),
width=0.25,
size=0.5,
position=position_dodge(width=0.75),
show.legend=F) +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_alpha_manual(name="",
values=c(0.3,0.6,1),
labels=Flotim.annexplot.monitoryear.labs,
na.translate=FALSE) +
scale_fill_manual(values=fillcols.status) +
scale_colour_manual(values=errcols.status) +
scale_x_discrete(labels=Flotim.annexplot.settnames[,"MA"]) +
scale_y_continuous(expand=c(0,0),
limits=c(0,max(Flotim.AnnexContData.Techreport.PLOTFORMAT$MAMean,na.rm=T)+
max(Flotim.AnnexContData.Techreport.PLOTFORMAT$MAErr,na.rm=T)+
0.03*max(Flotim.AnnexContData.Techreport.PLOTFORMAT$MAMean,na.rm=T))) +
coord_flip() + Statusplot.labs["MA"] + plot.guides.techreport + plot.theme
Flotim.ma.annexplot
# ---- 5.3 Place attachment -----
# Per-settlement place-attachment means by monitoring year; same layout as the
# material-assets annex plot, fixed 0-5 scale.
Flotim.pa.annexplot <-
ggplot(data=Flotim.AnnexContData.Techreport.PLOTFORMAT,
aes(x=SettlementName,
y=PAMean)) +
geom_bar(aes(alpha=MonitoringYear),
stat="identity",
position="dodge",
fill=fillcols.trend,
width=0.75,
size=0.15,
colour="#505050") +
geom_errorbar(aes(ymin=PAMean-PAErr,
ymax=PAMean+PAErr,
colour=SettLevel,
alpha=MonitoringYear),
width=0.25,
size=0.5,
position=position_dodge(width=0.75),
show.legend=F) +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_alpha_manual(name="",
values=c(0.3,0.6,1),
labels=Flotim.annexplot.monitoryear.labs,
na.translate=FALSE) +
scale_fill_manual(values=fillcols.status) +
scale_colour_manual(values=errcols.status) +
scale_x_discrete(labels=Flotim.annexplot.settnames[,"PA"]) +
scale_y_continuous(expand=c(0,0),
limits=c(0,5)) +
coord_flip() + Statusplot.labs["PA"] + plot.guides.techreport + plot.theme
Flotim.pa.annexplot
# ---- 5.4 Marine tenure -----
# Per-settlement marine-tenure means by monitoring year; same layout as the
# place-attachment annex plot, fixed 0-5 scale.
Flotim.mt.annexplot <-
ggplot(data=Flotim.AnnexContData.Techreport.PLOTFORMAT,
aes(x=SettlementName,
y=MTMean)) +
geom_bar(aes(alpha=MonitoringYear),
stat="identity",
position="dodge",
fill=fillcols.trend,
width=0.75,
size=0.15,
colour="#505050") +
geom_errorbar(aes(ymin=MTMean-MTErr,
ymax=MTMean+MTErr,
colour=SettLevel,
alpha=MonitoringYear),
width=0.25,
size=0.5,
position=position_dodge(width=0.75),
show.legend=F) +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_alpha_manual(name="",
values=c(0.3,0.6,1),
labels=Flotim.annexplot.monitoryear.labs,
na.translate=FALSE) +
scale_fill_manual(values=fillcols.status) +
scale_colour_manual(values=errcols.status) +
scale_x_discrete(labels=Flotim.annexplot.settnames[,"MT"]) +
scale_y_continuous(expand=c(0,0),
limits=c(0,5)) +
coord_flip() + Statusplot.labs["MT"] + plot.guides.techreport + plot.theme
Flotim.mt.annexplot
# ---- 5.5 School enrollment -----
# Per-settlement school-enrollment means by monitoring year; SEMean is a
# proportion, formatted as a percentage (no fixed upper limit, unlike the
# trend plot's limits=c(0,1) -- presumably intentional; confirm).
Flotim.se.annexplot <-
ggplot(data=Flotim.AnnexContData.Techreport.PLOTFORMAT,
aes(x=SettlementName,
y=SEMean)) +
geom_bar(aes(alpha=MonitoringYear),
stat="identity",
position="dodge",
fill=fillcols.trend,
width=0.75,
size=0.15,
colour="#505050") +
geom_errorbar(aes(ymin=SEMean-SEErr,
ymax=SEMean+SEErr,
colour=SettLevel,
alpha=MonitoringYear),
width=0.25,
size=0.5,
position=position_dodge(width=0.75),
show.legend=F) +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_alpha_manual(name="",
values=c(0.3,0.6,1),
labels=Flotim.annexplot.monitoryear.labs,
na.translate=FALSE) +
scale_fill_manual(values=fillcols.status) +
scale_colour_manual(values=errcols.status) +
scale_x_discrete(labels=Flotim.annexplot.settnames[,"SE"]) +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
coord_flip() + Statusplot.labs["SE"] + plot.guides.techreport + plot.theme
Flotim.se.annexplot
# ---- 5.6 Time to market -----
# Per-settlement mean travel time to market by monitoring year; upper y-limit
# = max mean + max error + 3% headroom.
Flotim.time.annexplot <-
ggplot(data=Flotim.AnnexContData.Techreport.PLOTFORMAT,
aes(x=SettlementName,
y=TimeMarketMean)) +
geom_bar(aes(alpha=MonitoringYear),
stat="identity",
position="dodge",
fill=fillcols.trend,
width=0.75,
size=0.15,
colour="#505050") +
geom_errorbar(aes(ymin=TimeMarketMean-TimeMarketErr,
ymax=TimeMarketMean+TimeMarketErr,
colour=SettLevel,
alpha=MonitoringYear),
width=0.25,
size=0.5,
position=position_dodge(width=0.75),
show.legend=F) +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_alpha_manual(name="",
values=c(0.3,0.6,1),
labels=Flotim.annexplot.monitoryear.labs,
na.translate=FALSE) +
scale_fill_manual(values=fillcols.status) +
scale_colour_manual(values=errcols.status) +
scale_x_discrete(labels=Flotim.annexplot.settnames[,"TimeMarket"]) +
scale_y_continuous(expand=c(0,0),
limits=c(0,max(Flotim.AnnexContData.Techreport.PLOTFORMAT$TimeMarketMean,na.rm=T)+
max(Flotim.AnnexContData.Techreport.PLOTFORMAT$TimeMarketErr,na.rm=T)+
0.03*max(Flotim.AnnexContData.Techreport.PLOTFORMAT$TimeMarketMean,na.rm=T))) +
coord_flip() + Statusplot.labs["Time"] + plot.guides.techreport + plot.theme
Flotim.time.annexplot
# ---- 5.7 Days unwell -----
# Annex plot: mean days unwell per settlement, one dodged bar per monitoring
# year (alpha = year); error bars are mean +/- 1 error value.
Flotim.unwell.annexplot <-
ggplot(data=Flotim.AnnexContData.Techreport.PLOTFORMAT,
aes(x=SettlementName,
y=UnwellMean)) +
geom_bar(aes(alpha=MonitoringYear),
stat="identity",
position="dodge",
fill=fillcols.trend,
width=0.75,
size=0.15,
colour="#505050") +
geom_errorbar(aes(ymin=UnwellMean-UnwellErr,
ymax=UnwellMean+UnwellErr,
colour=SettLevel,
alpha=MonitoringYear),
width=0.25,
size=0.5,
position=position_dodge(width=0.75),
show.legend=F) +
# dashed separator after the 3rd axis row (matches the blanked label row)
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_alpha_manual(name="",
values=c(0.3,0.6,1),
labels=Flotim.annexplot.monitoryear.labs,
na.translate=FALSE) +
# NOTE(review): fill is not a mapped aesthetic here, so this scale looks like a no-op -- confirm.
scale_fill_manual(values=fillcols.status) +
scale_colour_manual(values=errcols.status) +
scale_x_discrete(labels=Flotim.annexplot.settnames[,"Unwell"]) +
# upper y limit = max mean + max error + 3% headroom so error bars fit
scale_y_continuous(expand=c(0,0),
limits=c(0,max(Flotim.AnnexContData.Techreport.PLOTFORMAT$UnwellMean,na.rm=T)+
max(Flotim.AnnexContData.Techreport.PLOTFORMAT$UnwellErr,na.rm=T)+
0.03*max(Flotim.AnnexContData.Techreport.PLOTFORMAT$UnwellMean,na.rm=T))) +
coord_flip() + Statusplot.labs["Unwell"] + plot.guides.techreport + plot.theme
Flotim.unwell.annexplot
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- SECTION 6: WRITE TO .PNG ----
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# Build the date-stamped output directory path ONCE, then create it.
# (The original computed the identical paste() twice -- for dir.create()
# and for FigureFileName -- so editing one copy but not the other would
# silently write figures into a non-existent folder.)
# NOTE(review): sep="_" after the trailing "/" yields ".../Products/_YYYY_MM_DD";
# this matches the original output path and is preserved on purpose.
FigureFileName <- paste("C:/Users/HP/Dropbox/Products/",
format(Sys.Date(),format="%Y_%m_%d"),sep="_")
dir.create(FigureFileName)
# Helper: render a single ggplot object to <FigureFileName>/<filename> as a
# 400dpi .png.  Status/trend figures use the default 4in x 6in page; annex
# figures pass height=7.5, width=7.5.  (Replaces ~50 copy-pasted
# png()/plot()/dev.off() trios; file names and plot objects are unchanged.)
save.figure <- function(filename, figure, height=4, width=6) {
png(paste(FigureFileName, filename, sep="/"),
units="in", height=height, width=width, res=400)
plot(figure)
dev.off()
}
# ---- 6.1 Food security ----
save.figure("FS.trend.png", Flotim.fs.trendplot)
save.figure("FS.annex.png", Flotim.fs.annexplot, height=7.5, width=7.5)
save.figure("FS.status.png", Flotim.fs.statusplot)
# ---- 6.2 Material assets ----
save.figure("MA.status.png", Flotim.ma.statusplot)
save.figure("MA.trend.png", Flotim.ma.trendplot)
save.figure("MA.annex.png", Flotim.ma.annexplot, height=7.5, width=7.5)
# ---- 6.3 Place attachment ----
save.figure("PA.status.png", Flotim.pa.statusplot)
save.figure("PA.trend.png", Flotim.pa.trendplot)
save.figure("PA.annex.png", Flotim.pa.annexplot, height=7.5, width=7.5)
# ---- 6.4 Marine tenure ----
save.figure("MT.status.png", Flotim.mt.statusplot)
save.figure("MT.trend.png", Flotim.mt.trendplot)
save.figure("MT.annex.png", Flotim.mt.annexplot, height=7.5, width=7.5)
# ---- 6.5 School enrollment ----
save.figure("SE.status.png", Flotim.se.statusplot)
save.figure("SE.trend.png", Flotim.se.trendplot)
save.figure("SE.annex.png", Flotim.se.annexplot, height=7.5, width=7.5)
# ---- 6.6 Time to market ----
save.figure("TimeMarket.status.png", Flotim.time.statusplot)
save.figure("TimeMarket.trend.png", Flotim.time.trendplot)
save.figure("TimeMarket.annex.png", Flotim.time.annexplot, height=7.5, width=7.5)
# ---- 6.7 Days unwell ----
save.figure("DaysUnwell.status.png", Flotim.unwell.statusplot)
save.figure("DaysUnwell.trend.png", Flotim.unwell.trendplot)
save.figure("DaysUnwell.annex.png", Flotim.unwell.annexplot, height=7.5, width=7.5)
# ---- 6.8 Gender of head of household ----
save.figure("Gender.status.png", Flotim.gender.statusplot)
save.figure("Gender.trend.png", Flotim.gender.trendplot)
# ---- 6.9 Religion ----
save.figure("Religion.status.png", Flotim.religion.statusplot)
save.figure("Religion.trend.png", Flotim.religion.trendplot)
# ---- 6.10 Primary occupation ----
save.figure("PrimaryOcc.status.png", Flotim.primaryocc.statusplot)
save.figure("PrimaryOcc.trend.png", Flotim.primaryocc.trendplot)
# ---- 6.11 Secondary occupation ----
# (was mislabeled "6.15" in the original; file names unchanged)
save.figure("SecondaryOcc.status.png", Flotim.Secondaryocc.statusplot)
save.figure("SecondaryOcc.trend.png", Flotim.Secondaryocc.trendplot)
# ---- 6.12 Fishing frequency ----
save.figure("FreqFish.status.png", Flotim.freqfish.statusplot)
save.figure("FreqFish.trend.png", Flotim.freqfish.trendplot)
# ---- 6.13 Fish sale frequency ----
save.figure("FreqSellFish.status.png", Flotim.freqsellfish.statusplot)
save.figure("FreqSellFish.trend.png", Flotim.freqsellfish.trendplot)
# ---- 6.14 Income from fishing ----
save.figure("IncFish.status.png", Flotim.incfish.statusplot)
save.figure("IncFish.trend.png", Flotim.incfish.trendplot)
# ---- 6.15 Fishing technique ----
save.figure("FishTech.status.png", Flotim.fishtech.statusplot)
save.figure("FishTech.trend.png", Flotim.fishtech.trendplot)
# ---- 6.16 Childhood food security ----
save.figure("ChildFS.status.png", Flotim.childfs.statusplot)
save.figure("ChildFS.trend.png", Flotim.childfs.trendplot)
# ---- 6.17 Protein from fish ----
save.figure("FishProtein.status.png", Flotim.proteinfish.statusplot)
save.figure("FishProtein.trend.png", Flotim.proteinfish.trendplot)
# ---- 6.18 Age/Gender ----
# The combined age/gender figure is a grid object (built with grid.arrange),
# so it must be drawn with grid.draw() rather than plot(), on a taller page.
library(grid)
png(paste(FigureFileName,"Age.gender.png",sep="/"),
units="in",height=10,width=4,res=400)
grid.newpage()
grid.draw(Flotim.age.gender.plot)
dev.off()
# ---- 6.19 Number ethnic groups ----
save.figure("Num.Ethnic.png", Flotim.ethnic.statusplot)
| /xx_Archive/3_Products/Status_trends/SBS_Flotim/Flotim.TechReport.Plots.2019.R | no_license | WWF-ConsEvidence/MPAMystery | R | false | false | 86,159 | r | #
# code: Flotim Technical Report Plots
#
# github: WWF-ConsEvidence/MPAMystery/2_Social/TechnicalReports/SBS/Plots
# --- Duplicate all code from "2_Social" onward, to maintain file structure for sourced code
#
# author: Kelly Claborn, clabornkelly@gmail.com
# created: November 2017
# modified: Amari Bauer, June 2019
#
#
# ---- inputs ----
# 1) Source Flotim.TR.Datasets.R
# - Dependencies: Flotim.TR.SigTest.R
# After_Calculate_BigFive.R
# Calculate_BigFive.R
#
# ---- code sections ----
# 1) DEFINE MPA-SPECIFIC PLOTTING DATA FRAMES
# 2) AGE/GENDER PLOT
# 3) STATUS PLOTS
# 4) TREND PLOTS
# 5) ANNEX PLOTS
# 6) WRITE TO .PNG
#
#
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- SECTION 1: DEFINE MPA-SPECIFIC PLOTTING DATA FRAMES ----
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# Load the calculation, significance-test, and dataset scripts this plot
# script depends on (project-local absolute paths).
source("C:/Users/bauer-intern/Dropbox/MPAMystery/MyWork/SBS_TechReport_Calculations.R")
source("C:/Users/bauer-intern/Dropbox/MPAMystery/MyWork/Flotim.TechReport.SigTest.2019.R")
source("C:/Users/bauer-intern/Dropbox/MPAMystery/MyWork/Flotim.TechReport.Datasets.2019.R")
# Response-rate check (interactive): for treatment households in MPA 16,
# count how many actually answered each fishing-related question, per
# settlement and interview year.  sum(!is.na(x)) counts non-missing
# responses -- the idiomatic equivalent of length(x[!is.na(x)]).
HHData %>%
filter(MPAID==16, Treatment==1) %>%
group_by(SettlementName,InterviewYear) %>%
summarise(total=length(HouseholdID),
actualfishfreq=sum(!is.na(FreqFish)),
actualfreqsale=sum(!is.na(FreqSaleFish)),
actualpercentinc=sum(!is.na(PercentIncFish)),
actualmajfishtech=sum(!is.na(MajFishTechnique)),
actualproteinfish=sum(!is.na(PercentProteinFish)),
actualeatfish=sum(!is.na(FreqEatFish))) %>% View()
# ---- 1.2 Define significance labels and (x,y) coordinates for plots ----
library(gridExtra)
# Significance asterisks for the status plots, derived from the per-variable
# p-value columns (project helper from the sourced SigTest script).
Flotim.statusplot.asterisks <-
define.statusplot.asterisks(Flotim.ContData.Techreport.status.PLOTFORMAT[,c("SettlementName","FS.pval",
"MA.pval","MT.pval","PA.pval",
"SE.pval", "TimeMarket.pval",
"Unwell.pval")])
# (x,y) positions at which those asterisks are drawn.
Flotim.statusplot.sigpos <-
define.statusplot.asterisk.pos(Flotim.ContData.Techreport.status.PLOTFORMAT,
Flotim.statusplot.asterisks)
# ---- 1.3 Define Flotim-specific plot labels, with significance asterisks ----
# Annex legends list years newest-first (rev()); trend legends oldest-first.
Flotim.annexplot.monitoryear.labs <- rev(define.year.monitoryear.column(Flotim.AnnexContData.Techreport.PLOTFORMAT))
Flotim.trendplot.monitoryear.labs <- (define.year.monitoryear.column(Flotim.AnnexContData.Techreport.PLOTFORMAT))
# y-axis labels for the continuous trend plots, built from the rows with
# missing MonitoringYear (asterisks appended by the helper).
Flotim.conttrendplot.ylabs <-
define.conttrendplot.ylabels.withasterisks(Flotim.TrendContData.Techreport.PLOTFORMAT
[is.na(Flotim.TrendContData.Techreport.PLOTFORMAT$MonitoringYear),
c("FSMean","MAMean","PAMean","MTMean",
"SEMean","TimeMarketMean","UnwellMean")])
proportional.variables.plotlabs <-colnames(propdata.trend.test.Flotim)
Flotim.proptrendplot.ylabs <-
define.proptrendplot.ylabels.withasterisks(propdata.trend.test.Flotim)
# One labs() object per trend plot, keyed by variable abbreviation.
Flotim.trendplot.labs <- list(FS=labs(y=as.character(Flotim.conttrendplot.ylabs["FSMean"]),x="Monitoring Year"),
MA=labs(y=as.character(Flotim.conttrendplot.ylabs["MAMean"]),x="Monitoring Year"),
MT=labs(y=as.character(Flotim.conttrendplot.ylabs["MTMean"]),x="Monitoring Year"),
PA=labs(y=as.character(Flotim.conttrendplot.ylabs["PAMean"]),x="Monitoring Year"),
SE=labs(y=as.character(Flotim.conttrendplot.ylabs["SEMean"]),x="Monitoring Year"),
Market=labs(y=as.character(Flotim.conttrendplot.ylabs["TimeMarketMean"]),
x="Monitoring Year"),
Unwell=labs(y=as.character(Flotim.conttrendplot.ylabs["UnwellMean"]),x="Monitoring Year"),
Gender=labs(y="Gender (% head of household)",x="Monitoring Year"),
Religion=labs(y="Religion (% households)",x="Monitoring Year"),
PrimaryOcc=labs(y=as.character(Flotim.proptrendplot.ylabs["Primary occupation (% households)"]),x="Monitoring Year"),
FreqFish=labs(y=as.character(Flotim.proptrendplot.ylabs["Frequency of fishing (% households)"]),x="Monitoring Year"),
FreqSellFish=labs(y=as.character(Flotim.proptrendplot.ylabs["Frequency of selling at least some catch (% households)"]),x="Monitoring Year"),
IncFish=labs(y=as.character(Flotim.proptrendplot.ylabs["Income from fishing in past 6 months (% households)"]),x="Monitoring Year"),
FishTech=labs(y=as.character(Flotim.proptrendplot.ylabs["Fishing technique most often used in past 6 months (% households)"]),x="Monitoring Year"),
ChildFS=labs(y=as.character(Flotim.proptrendplot.ylabs["Child hunger (% households)"]),x="Monitoring Year"),
Protein=labs(y=as.character(Flotim.proptrendplot.ylabs["Dietary protein from fish in past 6 months (% households)"]),x="Monitoring Year"))
# Per-settlement axis labels for the annex plots.
Flotim.annexplot.settnames <-
define.annexplot.settname.labels(annex.sigvals.Flotim)
# Blank out the 3rd label row -- presumably the axis row used as a visual
# divider (the plots also draw a dashed vline at xintercept=3); confirm.
Flotim.annexplot.settnames[3,] <- rep("",length(Flotim.annexplot.settnames[3,]))
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- SECTION 2: AGE/GENDER PLOTS ----
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- 2.1 3 Year ----
# Population-pyramid style bar chart for the 3-year (2017) monitoring data:
# one bar per age category, split by gender.  The symmetric (-10,10) limits
# with abs() tick labels suggest one gender is stored as negative values in
# Flotim.AgeGender -- confirm against the dataset script.
Flotim.age.gender.3Year <-
melt(Flotim.AgeGender,id.vars="AgeCat",measure.vars=c("Female.3Year","Male.3Year")) %>%
ggplot() +
geom_bar(aes(x=AgeCat,
y=value,
fill=variable),
stat="identity",
width=0.75,
colour="#505050",
size=0.15) +
scale_y_continuous(expand=c(0,0),
limits=c(-10,10),
labels=abs(seq(-10,10,5))) +
scale_fill_manual(name="",
labels=c("Female","Male"),
values=c("Female.3Year"=alpha("#7FCDBB",0.95),
"Male.3Year"=alpha("#253494",0.95)))+
coord_flip() + age.gender.plot.theme + plot.guides.techreport + labs(x="Age",y="2017 Population distribution (% of individuals by gender)")+
theme(legend.position="none")
Flotim.age.gender.3Year
# ---- 2.2 Baseline ----
# Same pyramid for the baseline (2014) monitoring data.
Flotim.age.gender.Baseline <-
melt(Flotim.AgeGender,id.vars="AgeCat",measure.vars=c("Female.Baseline","Male.Baseline")) %>%
ggplot() +
geom_bar(aes(x=AgeCat,
y=value,
fill=variable),
stat="identity",
width=0.75,
colour="#505050",
size=0.15) +
scale_y_continuous(expand=c(0,0),
limits=c(-10,10),
labels=abs(seq(-10,10,5))) +
scale_fill_manual(name="",
labels=c("Female","Male"),
values=c("Female.Baseline"=alpha("#7FCDBB",0.95),
"Male.Baseline"=alpha("#253494",0.95)))+
coord_flip() + age.gender.plot.theme + plot.guides.techreport + labs(x="Age",y="2014 Population distribution (% of individuals by gender)")+
theme(legend.position="none")
Flotim.age.gender.Baseline
# Throwaway copy of the 3-year plot WITH a visible legend; only its legend
# grob is extracted (via g_legend) for the combined figure below.
Flotim.agegender.legend.plot <-
melt(Flotim.AgeGender,id.vars="AgeCat",measure.vars=c("Female.3Year","Male.3Year")) %>%
ggplot() +
geom_bar(aes(x=AgeCat,
y=value,
fill=variable),
stat="identity",
width=0.75,
colour="#505050",
size=0.15) +
scale_y_continuous(expand=c(0,0),
limits=c(-10,10),
name="",
labels=abs(seq(-10,10,5))) +
scale_fill_manual(name="",
values=c("Female.3Year"=alpha("#7FCDBB",0.95),
"Male.3Year"=alpha("#253494",0.95)),
labels=c("Female","Male")) +
coord_flip() + plot.guides.techreport + theme(legend.justification="right")
Flotim.agegender.legend.plot
Flotim.agegender.legend <- g_legend(Flotim.agegender.legend.plot)
# Combined figure: shared legend on top, 2017 pyramid above 2014 pyramid.
# Result is a grid object (drawn later with grid.draw, not plot).
Flotim.age.gender.plot <-
grid.arrange(Flotim.agegender.legend,
arrangeGrob(
Flotim.age.gender.3Year,
Flotim.age.gender.Baseline,ncol=1),nrow=2,heights=c(0.35,10))
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- SECTION 3: STATUS PLOTS ----
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- 3.1 Continuous data plots ----
# - FOOD SECURITY
# Status plot: household food security score per settlement.  A blank
# "Dummy" settlement row (SettlementName " ") is appended so the three food
# security category labels (drawn with geom_text at x=length(SettlementName))
# get their own axis row.  Horizontal lines at 1.56 and 4.02 are the
# category thresholds separating "with hunger" / "without hunger" / "secure";
# 6.06 is the axis maximum.
Flotim.fs.statusplot <-
rbind.data.frame(Flotim.ContData.Techreport.status.PLOTFORMAT,
cbind.data.frame(SettlementID=NA,SettlementName=" ",
matrix(rep(NA,22),ncol=22,
dimnames=list(NULL,
colnames(Flotim.ContData.Techreport.status.PLOTFORMAT)[3:24])),
SettLevel="Dummy")) %>%
ggplot(aes(x=SettlementName)) +
geom_hline(aes(yintercept=1.56),size=0.25,colour="#505050") +
geom_hline(aes(yintercept=4.02),size=0.25,colour="#505050") +
geom_bar(aes(y=FSMean,
fill=SettLevel),
stat="identity",
position="dodge",
width=0.75,
show.legend=F) +
geom_errorbar(aes(ymin=FSMean-FSErr,
ymax=FSMean+FSErr,
colour=SettLevel),
width=0.25,
size=0.5,
show.legend=F) +
# dashed divider after the 3rd axis row
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
# significance asterisks next to each settlement's bar
geom_text(data=Flotim.statusplot.sigpos,
aes(x=SettlementName,
y=FS),
label=Flotim.statusplot.asterisks$FS,
nudge_x=-0.07,
nudge_y=-0.1,
size=rel(4),
colour=errcols.status["NotDummy"]) +
geom_text(data=Flotim.statusplot.sigpos,
aes(x=SettlementName,y=FS.ref),
label=Flotim.statusplot.asterisks$FS.ref,
size=rel(3),
nudge_x=0.02,
fontface="bold.italic",
colour=errcols.status["NotDummy"]) +
# category labels centered within each threshold band, on the dummy row
geom_text(aes(x=length(SettlementName),y=(0.5*(6.06-4.02))+4.02,label="Food secure"),
size=rel(2.5),lineheight=0.8,fontface="bold.italic",colour="#505050") +
geom_text(aes(x=length(SettlementName),y=(0.5*(4.02-1.56))+1.56,label="Food insecure\nwithout hunger"),
size=rel(2.5),lineheight=0.8,fontface="bold.italic",colour="#505050") +
geom_text(aes(x=length(SettlementName),y=0.5*1.56,label="Food insecure\nwith hunger"),
size=rel(2.5),lineheight=0.8,fontface="bold.italic",colour="#505050") +
scale_y_continuous(expand=c(0,0),
limits=c(0,6.06)) +
scale_fill_manual(values=fillcols.status) +
scale_colour_manual(values=errcols.status) +
# inline theme (not plot.theme) so the dummy row/threshold labels render cleanly
coord_flip() + Statusplot.labs["FS"] + theme(axis.ticks=element_blank(),
panel.background=element_rect(fill="white",
colour="#909090"),
panel.border=element_rect(fill=NA,
size=0.25,
colour="#C0C0C0"),
panel.grid.major.x=element_blank(),
panel.grid.major.y=element_blank(),
axis.title=element_text(size=10,
angle=0,
face="bold",
colour="#303030"),
axis.text=element_text(size=8,
angle=0,
colour="#303030"))
Flotim.fs.statusplot
# - MATERIAL ASSETS
# Status plot: mean material assets per settlement with +/- 1 error bars,
# significance asterisks, and the dashed divider after the 3rd axis row.
Flotim.ma.statusplot <- ggplot(data=Flotim.ContData.Techreport.status.PLOTFORMAT,
aes(x=SettlementName)) +
geom_bar(aes(y=MAMean,
fill=SettLevel),
stat="identity",
position="dodge",
width=0.75,
show.legend=F) +
geom_errorbar(aes(ymin=MAMean-MAErr,
ymax=MAMean+MAErr,
colour=SettLevel),
width=0.25,
size=0.5,
show.legend=F) +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
geom_text(data=Flotim.statusplot.sigpos,
aes(x=SettlementName,
y=MA),
label=Flotim.statusplot.asterisks$MA,
nudge_x=-0.07,
nudge_y=0.28,
size=rel(4),
colour=errcols.status["NotDummy"]) +
geom_text(data=Flotim.statusplot.sigpos,
aes(x=SettlementName,y=MA.ref),
label=Flotim.statusplot.asterisks$MA.ref,
size=rel(3),
nudge_x=0.02,
fontface="bold.italic",
colour=errcols.status["NotDummy"]) +
# upper y limit = max mean + max error + 3% headroom
scale_y_continuous(expand=c(0,0),
limits=c(0,max(Flotim.ContData.Techreport.status.PLOTFORMAT$MAMean,na.rm=T)+
max(Flotim.ContData.Techreport.status.PLOTFORMAT$MAErr,na.rm=T)+
0.03*max(Flotim.ContData.Techreport.status.PLOTFORMAT$MAMean,na.rm=T))) +
scale_fill_manual(values=fillcols.status) +
scale_colour_manual(values=errcols.status) +
coord_flip() + Statusplot.labs["MA"] + plot.theme
Flotim.ma.statusplot
# - PLACE ATTACHMENT
# Status plot: mean place attachment per settlement (fixed 0-5 scale).
Flotim.pa.statusplot <- ggplot(data=Flotim.ContData.Techreport.status.PLOTFORMAT,
aes(x=SettlementName)) +
geom_bar(aes(y=PAMean,
fill=SettLevel),
stat="identity",
position="dodge",
width=0.75,
show.legend=F) +
geom_errorbar(aes(ymin=PAMean-PAErr,
ymax=PAMean+PAErr,
colour=SettLevel),
width=0.25,
size=0.5,
show.legend=F) +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
geom_text(data=Flotim.statusplot.sigpos,
aes(x=SettlementName,
y=PA),
label=Flotim.statusplot.asterisks$PA,
nudge_x=-0.07,
nudge_y=0.07,
size=rel(4),
colour=errcols.status["NotDummy"]) +
geom_text(data=Flotim.statusplot.sigpos,
aes(x=SettlementName,y=PA.ref),
label=Flotim.statusplot.asterisks$PA.ref,
size=rel(3),
nudge_x=0.02,
fontface="bold.italic",
colour=errcols.status["NotDummy"]) +
scale_y_continuous(expand=c(0,0),
limits=c(0,5)) +
scale_fill_manual(values=fillcols.status) +
scale_colour_manual(values=errcols.status) +
coord_flip() + Statusplot.labs["PA"] + plot.theme
Flotim.pa.statusplot
# - MARINE TENURE
# Status plot: mean marine tenure per settlement (fixed 0-5 scale).
Flotim.mt.statusplot <- ggplot(data=Flotim.ContData.Techreport.status.PLOTFORMAT,
aes(x=SettlementName)) +
geom_bar(aes(y=MTMean,
fill=SettLevel),
stat="identity",
position="dodge",
width=0.75,
show.legend=F) +
geom_errorbar(aes(ymin=MTMean-MTErr,
ymax=MTMean+MTErr,
colour=SettLevel),
width=0.25,
size=0.5,
show.legend=F) +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
# asterisk y-position scaled 5% past the bar rather than a fixed nudge
geom_text(data=Flotim.statusplot.sigpos,
aes(x=SettlementName,
y=MT+(0.05*MT)),
label=Flotim.statusplot.asterisks$MT,
nudge_x=-0.07,
size=rel(4),
colour=errcols.status["NotDummy"]) +
geom_text(data=Flotim.statusplot.sigpos,
aes(x=SettlementName,y=MT.ref),
label=Flotim.statusplot.asterisks$MT.ref,
size=rel(3),
nudge_x=0.02,
fontface="bold.italic",
colour=errcols.status["NotDummy"]) +
scale_y_continuous(expand=c(0,0),
limits=c(0,5)) +
scale_fill_manual(values=fillcols.status) +
scale_colour_manual(values=errcols.status) +
coord_flip() + Statusplot.labs["MT"] + plot.theme
Flotim.mt.statusplot
# - SCHOOL ENROLLMENT
# Status plot: school enrollment rate per settlement (percent axis, capped
# at 110% so error bars on near-100% settlements still fit).
Flotim.se.statusplot <- ggplot(data=Flotim.ContData.Techreport.status.PLOTFORMAT,
aes(x=SettlementName)) +
geom_bar(aes(y=SEMean,
fill=SettLevel),
stat="identity",
position="dodge",
width=0.75,
show.legend=F) +
geom_errorbar(aes(ymin=SEMean-SEErr,
ymax=SEMean+SEErr,
colour=SettLevel),
width=0.25,
size=0.5,
show.legend=F) +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
geom_text(data=Flotim.statusplot.sigpos,
aes(x=SettlementName,
y=SE),
label=Flotim.statusplot.asterisks$SE,
nudge_x=-0.07,
size=rel(4),
colour=errcols.status["NotDummy"]) +
geom_text(data=Flotim.statusplot.sigpos,
aes(x=SettlementName,y=SE.ref),
label=Flotim.statusplot.asterisks$SE.ref,
size=rel(3),
nudge_x=0.02,
fontface="bold.italic",
colour=errcols.status["NotDummy"]) +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format(),
limits=c(0,1.1)) +
scale_fill_manual(values=fillcols.status) +
scale_colour_manual(values=errcols.status) +
coord_flip() + Statusplot.labs["SE"] + plot.theme
Flotim.se.statusplot
# - TIME TO MARKET
# Status plot: mean time to market per settlement; upper y limit is
# data-driven (max mean + max error + 3% headroom).
Flotim.time.statusplot <- ggplot(data=Flotim.ContData.Techreport.status.PLOTFORMAT,
aes(x=SettlementName)) +
geom_bar(aes(y=TimeMarketMean,
fill=SettLevel),
stat="identity",
position="dodge",
width=0.75,
show.legend=F) +
geom_errorbar(aes(ymin=TimeMarketMean-TimeMarketErr,
ymax=TimeMarketMean+TimeMarketErr,
colour=SettLevel),
width=0.25,
size=0.5,
show.legend=F) +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
geom_text(data=Flotim.statusplot.sigpos,
aes(x=SettlementName,
y=Market),
label=Flotim.statusplot.asterisks$Market,
nudge_x=-0.07,
nudge_y=0.07,
size=rel(4),
colour=errcols.status["NotDummy"]) +
geom_text(data=Flotim.statusplot.sigpos,
aes(x=SettlementName,y=Market.ref),
label=Flotim.statusplot.asterisks$Market.ref,
size=rel(3),
fontface="bold.italic",
colour=errcols.status["NotDummy"]) +
scale_y_continuous(expand=c(0,0),
limits=c(0,max(Flotim.ContData.Techreport.status.PLOTFORMAT$TimeMarketMean,na.rm=T)+
max(Flotim.ContData.Techreport.status.PLOTFORMAT$TimeMarketErr,na.rm=T)+
0.03*max(Flotim.ContData.Techreport.status.PLOTFORMAT$TimeMarketMean,na.rm=T))) +
scale_fill_manual(values=fillcols.status) +
scale_colour_manual(values=errcols.status) +
coord_flip() + Statusplot.labs["Time"] + plot.theme
Flotim.time.statusplot
# - DAYS UNWELL
# Status plot: mean days unwell per settlement; same data-driven y limit.
Flotim.unwell.statusplot <- ggplot(data=Flotim.ContData.Techreport.status.PLOTFORMAT,
aes(x=SettlementName)) +
geom_bar(aes(y=UnwellMean,
fill=SettLevel),
stat="identity",
position="dodge",
width=0.75,
show.legend=F) +
geom_errorbar(aes(ymin=UnwellMean-UnwellErr,
ymax=UnwellMean+UnwellErr,
colour=SettLevel),
width=0.25,
size=0.5,
show.legend=F) +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
geom_text(data=Flotim.statusplot.sigpos,
aes(x=SettlementName,
y=Unwell),
label=Flotim.statusplot.asterisks$Unwell,
nudge_x=-0.07,
nudge_y=-0.1,
size=rel(4),
colour=errcols.status["NotDummy"]) +
geom_text(data=Flotim.statusplot.sigpos,
aes(x=SettlementName,y=Unwell.ref),
label=Flotim.statusplot.asterisks$Unwell.ref,
size=rel(3),
nudge_x=0.02,
fontface="bold.italic",
colour=errcols.status["NotDummy"]) +
scale_y_continuous(expand=c(0,0),
limits=c(0,max(Flotim.ContData.Techreport.status.PLOTFORMAT$UnwellMean,na.rm=T)+
max(Flotim.ContData.Techreport.status.PLOTFORMAT$UnwellErr,na.rm=T)+
0.03*max(Flotim.ContData.Techreport.status.PLOTFORMAT$UnwellMean,na.rm=T))) +
scale_fill_manual(values=fillcols.status) +
scale_colour_manual(values=errcols.status) +
coord_flip() + Statusplot.labs["Unwell"] + plot.theme
Flotim.unwell.statusplot
# ---- 3.2 Proportional data plots ----
# Each plot below melts per-settlement proportion columns to long format and
# draws a 100%-stacked horizontal bar chart (position="fill"), with the
# dashed divider after the 3rd axis row.
# - GENDER OF HEAD OF HOUSEHOLD
Flotim.gender.statusplot <-
melt(Flotim.PropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("HHH.female","HHH.male")) %>%
ggplot(aes(x=SettlementName,
y=value)) +
geom_bar(aes(fill=variable),
stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["Gender"]],
labels=c("Female","Male")) +
coord_flip() + plot.theme + Statusplot.labs["Gender"] + plot.guides.techreport
Flotim.gender.statusplot
# - RELIGION
Flotim.religion.statusplot <-
melt(Flotim.PropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("Percent.Rel.Other","Percent.Rel.Muslim","Percent.Rel.Christian")) %>%
ggplot(aes(x=SettlementName,
y=value)) +
geom_bar(aes(fill=variable),
stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["Religion"]],
labels=c("Other","Muslim","Christian")) +
# custom legend (instead of the shared plot.guides.techreport) with
# reversed key order so the legend matches the stacking order
coord_flip() + plot.theme + Statusplot.labs["Religion"] +
guides(fill=guide_legend(label.vjust=0.5,
label.theme=element_text(size=rel(9),
angle=0,
colour="#505050",
lineheight=0.75),
direction="horizontal",
ncol=3,
title.position="left",
label.position="right",
keywidth=unit(0.75,"cm"),
keyheight=unit(0.5,"cm"),
reverse=T))
Flotim.religion.statusplot
# - PRIMARY OCCUPATION
Flotim.primaryocc.statusplot <-
melt(Flotim.PropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("Percent.PrimaryOcc.Other",
"Percent.PrimaryOcc.WageLabor",
"Percent.PrimaryOcc.Tourism",
"Percent.PrimaryOcc.Fish","Percent.PrimaryOcc.HarvestForest", "Percent.PrimaryOcc.Farm")) %>%
ggplot(aes(x=SettlementName,y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
# legend labels must stay aligned with measure.vars order above
scale_fill_manual(name="",
values=multianswer.fillcols.status[["PrimaryOcc"]],
labels=c("Other","Other Wage Labor","Tourism",
"Fishing","Harvest Forest Products", "Farming")) +
coord_flip() + plot.theme + Statusplot.labs["PrimaryOcc"] + plot.guides.techreport
Flotim.primaryocc.statusplot
# - FISHING FREQUENCY
# 100%-stacked bars: how often households fish, per settlement.
Flotim.freqfish.statusplot <-
melt(Flotim.PropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("Prop.Fish.MoreFewTimesWk","Prop.Fish.FewTimesPerWk",
"Prop.Fish.FewTimesPerMo","Prop.Fish.FewTimesPer6Mo",
"Prop.Fish.AlmostNever")) %>%
ggplot(aes(x=SettlementName,y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["FreqFish"]],
labels=c("More than a few times per week","A few times per week",
"A few times per month","A few times per six months",
"Once every six months")) +
coord_flip() + plot.theme + Statusplot.labs["FreqFish"] + plot.guides.techreport
Flotim.freqfish.statusplot
# - SELL FISH FREQUENCY
# 100%-stacked bars: how often households sell at least some catch.
Flotim.freqsellfish.statusplot <-
melt(Flotim.PropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("Prop.SellFish.MoreFewTimesWk","Prop.SellFish.FewTimesPerWk",
"Prop.SellFish.FewTimesPerMo","Prop.SellFish.FewTimesPer6Mo",
"Prop.SellFish.AlmostNever")) %>%
ggplot(aes(x=SettlementName,y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["FreqSellFish"]],
labels=c("More than a few times per week","A few times per week",
"A few times per month","A few times per six months",
"Once every six months")) +
coord_flip() + plot.theme + Statusplot.labs["FreqSellFish"] + plot.guides.techreport
Flotim.freqsellfish.statusplot
# - INCOME FROM FISHING
# 100%-stacked bars: share of household income from fishing.
Flotim.incfish.statusplot <-
melt(Flotim.PropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("Prop.IncFish.All","Prop.IncFish.Most",
"Prop.IncFish.Half","Prop.IncFish.Some",
"Prop.IncFish.None")) %>%
ggplot(aes(x=SettlementName,y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["IncFish"]],
labels=c("All","Most","About half","Some","None")) +
coord_flip() + plot.theme + Statusplot.labs["IncFish"] + plot.guides.techreport
Flotim.incfish.statusplot
# - FISHING TECHNIQUE
# 100%-stacked bars: fishing technique most often used in past 6 months.
Flotim.fishtech.statusplot <-
melt(Flotim.PropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("Prop.FishTech.MobileLine","Prop.FishTech.StatLine",
"Prop.FishTech.MobileNet","Prop.FishTech.StatNet",
"Prop.FishTech.ByHand")) %>%
ggplot(aes(x=SettlementName,y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["FishTech"]],
labels=c("Mobile line","Stationary line",
"Mobile net","Stationary net","Fishing by hand")) +
coord_flip() + plot.theme + Statusplot.labs["FishTech"] + plot.guides.techreport
Flotim.fishtech.statusplot
# - CHILDHOOD FOOD SECURITY
# 100%-stacked bars: households with vs. without evidence of child hunger.
Flotim.childfs.statusplot <-
melt(Flotim.PropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("Child.FS.yes","Child.FS.no")) %>%
ggplot(aes(x=SettlementName,
y=value)) +
geom_bar(aes(fill=variable),
stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["ChildFS"]],
labels=c("Evidence of child hunger","No evidence of child hunger")) +
coord_flip() + plot.theme + Statusplot.labs["ChildFS"] + plot.guides.techreport
Flotim.childfs.statusplot
# - PROTEIN FROM FISH
# 100%-stacked bars: share of dietary protein from fish in past 6 months.
Flotim.proteinfish.statusplot <-
melt(Flotim.PropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("ProteinFish.All","ProteinFish.Most",
"ProteinFish.Half","ProteinFish.Some",
"ProteinFish.None")) %>%
ggplot(aes(x=SettlementName,y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["Protein"]],
labels=c("All","Most","About half","Some","None")) +
coord_flip() + plot.theme + Statusplot.labs["FishProtein"] + plot.guides.techreport
Flotim.proteinfish.statusplot
# - CATEGORICAL FOOD SECURITY
# Stacked-proportion status plot of the three-level food-security category
# per settlement; labels are positional and match measure.vars order.
Flotim.FSCategorical.statusplot <-
melt(Flotim.PropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("Percent.FoodInsecure.YesHunger", "Percent.FoodInsecure.NoHunger", "Percent.FoodSecure")) %>%
ggplot(aes(x=SettlementName,y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["FSCategorical"]],
labels=c("Food insecure with hunger", "Food insecure without hunger","Food secure" )) +
coord_flip() + plot.theme + Statusplot.labs["FSCategorical"] + plot.guides.techreport
Flotim.FSCategorical.statusplot
# ADULT EDUCATION
# Stacked-proportion status plot of highest adult education level per
# settlement (uses the SBS proportion table, unlike the household plots above).
# Labels are positional and match measure.vars order (highest to none).
Flotim.AdultEduc.statusplot <-
melt(Flotim.SBSPropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("AdultEducHigher", "AdultEducSec", "AdultEducMid",
"AdultEducPrim", "AdultEducPre", "AdultEducNone")) %>%
ggplot(aes(x=SettlementName,y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["AdultEducation"]],
labels=c("Further or higher education","High school education","Middle school education","Primary school education","Pre-school education", "No formal education")) +
coord_flip() + plot.theme + Statusplot.labs["AdultEduc"] + plot.guides.techreport
# HOUSEHOLD HEAD EDUCATION
# Stacked-proportion status plot of household-head education level per
# settlement; mirrors the adult-education plot above with HHH* columns.
Flotim.HHHEduc.statusplot <-
melt(Flotim.SBSPropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("HHHEducHigher", "HHHEducSec", "HHHEducMid",
"HHHEducPrim", "HHHEducPre", "HHHEducNone")) %>%
ggplot(aes(x=SettlementName,y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["HHHEducation"]],
labels=c("Further or higher education","High school education","Middle school education","Primary school education","Pre-school education", "No formal education")) +
coord_flip() + plot.theme + Statusplot.labs["HHHEduc"] + plot.guides.techreport
# ECONOMIC STATUS
# Stacked-proportion status plot of perceived change in economic status
# (much better ... much worse) per settlement.
# NOTE(review): "Econ.Status.Slighly.Worse" looks misspelled ("Slighly"), but
# it must match the actual column name in the upstream data frame -- verify
# there before renaming it here.
Flotim.econ.statusplot <-
melt(Flotim.SBSPropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("Econ.Status.Much.Better","Econ.Status.Slightly.Better",
"Econ.Status.Neutral","Econ.Status.Slighly.Worse",
"Econ.Status.Much.Worse")) %>%
ggplot(aes(x=SettlementName,y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["EconStatus"]],
labels=c("Much better","Slightly better","Neither better or worse","Slightly worse","Much worse")) +
coord_flip() + plot.theme + Statusplot.labs["EconStatus"] + plot.guides.techreport
# RULES
# Dodged-bar status plot: % of households reporting rules for important
# habitats vs. important species, by settlement (values on a 0-100 scale).
# BUG FIX: scale_fill_manual labels are assigned positionally, in the order of
# the factor levels created by measure.vars (PropRuleHab first, PropRuleSpp
# second). The original listed "Important species" first, which swapped the
# two legend entries; the labels now follow the variable order.
Flotim.rules.statusplot <-
melt(Flotim.SBSPropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("PropRuleHab", "PropRuleSpp")) %>%
ggplot(aes(x=SettlementName,y=value,fill=variable)) +
geom_bar(stat="identity",
position="dodge",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand = c(0, 0), limits=c(0,100)) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["PropRules"]],
labels=c("Important habitats","Important species")) +
coord_flip() + plot.theme + Statusplot.labs["Rules"] + plot.guides.techreport
# PARTICIPATION IN DECISION-MAKING
# Dodged-bar status plot of participation in MPA decision-making, by
# settlement. Control settlements are dropped before plotting, and the
# reference vline moves to x=2 accordingly (other plots use x=3).
# NOTE(review): labels are positional against measure.vars order
# (ParticipateRules, ParticipateBnd, ParticipateOrg, ParticipateEstablish);
# confirm that "Design of MPA management body" really corresponds to
# ParticipateOrg and "Design of MPA-managing organization" to
# ParticipateEstablish -- the wording of the last two is easy to swap.
Flotim.participation.statusplot <-
melt(Flotim.SBSPropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("ParticipateRules","ParticipateBnd","ParticipateOrg", "ParticipateEstablish")) %>%
filter(., SettlementName!= "Control\nSettlements") %>%
ggplot(aes(x=SettlementName,y=value,fill=variable)) +
geom_bar(stat="identity",
position="dodge",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=2),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand = c(0, 0), limits=c(0,100)) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["Participate"]],
labels=c("Setting appropriation rules", "MPA boundary delineation", "Design of MPA management body", "Design of MPA-managing organization")) +
coord_flip() + plot.theme + Statusplot.labs["Participation"] + plot.guides.techreport
# - MEMBER OF MARINE RESOURCE ORGANIZATION
# Stacked-proportion status plot of marine-organization membership by
# settlement; label order follows measure.vars (Member.No, Member.Yes).
Flotim.member.statusplot <-
melt(Flotim.SBSPropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("Member.No","Member.Yes")) %>%
ggplot(aes(x=SettlementName,
y=value)) +
geom_bar(aes(fill=variable),
stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["Member"]],
labels=c("Non-member","Member")) +
coord_flip() + plot.theme + Statusplot.labs["Member"] + plot.guides.techreport
# - MEETING ATTENDANCE
# Stacked-proportion status plot of meeting attendance among organization
# members, by settlement; label order follows measure.vars (meeting no/yes).
Flotim.meeting.statusplot <-
melt(Flotim.SBSPropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("Prop.Member.Yes.Meeting.No", "Prop.Member.Yes.Meeting.Yes")) %>%
ggplot(aes(x=SettlementName,
y=value)) +
geom_bar(aes(fill=variable),
stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["Attendance"]],
labels=c("Have not attended a meeting","Attended a meeting")) +
coord_flip() + plot.theme + Statusplot.labs["Attendance"] + plot.guides.techreport
# - ILLNESS
# Stacked-proportion status plot of households reporting illness/injury, by
# settlement.
# BUG FIX: scale_fill_manual labels are assigned positionally, following the
# factor-level order created by measure.vars (Percent.Not.Ill first, then
# Percent.Ill). The original listed "Ill or injured " first (with a stray
# trailing space), which swapped the two legend entries. Labels now match the
# variable order.
Flotim.illness.statusplot <-
melt(Flotim.SBSPropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("Percent.Not.Ill", "Percent.Ill")) %>%
ggplot(aes(x=SettlementName,
y=value)) +
geom_bar(aes(fill=variable),
stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["Illness"]],
labels=c("Not Ill or injured","Ill or injured")) +
coord_flip() + plot.theme + Statusplot.labs["Ill"] + plot.guides.techreport
# MARINE RESOURCE CONFLICT
# Stacked-proportion status plot of perceived change in marine-resource social
# conflict (greatly decreased ... greatly increased) by settlement.
Flotim.conflict.statusplot <-
melt(Flotim.SBSPropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("Percent.GreatlyDecreased.SocConflict","Percent.Decreased.SocConflict",
"Percent.Same.SocConflict","Percent.Increased.SocConflict",
"Percent.GreatlyIncreased.SocConflict")) %>%
ggplot(aes(x=SettlementName,y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["SocialConflict"]],
labels=c("Greatly decreased","Decreased","Neither increased or decreased","Increased","Greatly Increased")) +
coord_flip() + plot.theme + Statusplot.labs["Conflict"] + plot.guides.techreport
# NUMBER OF LOCAL THREATS
# Stacked-proportion status plot of the number of locally reported threats
# (5+ down to none) by settlement; labels follow measure.vars order.
Flotim.NumThreat.statusplot <-
melt(Flotim.SBSPropData.Techreport.status.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("Threat.Minimum.Five","Threat.Four", "Threat.Three",
"Threat.Two","Threat.One","Threat.None")) %>%
ggplot(aes(x=SettlementName,y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["NumThreats"]],
labels=c("More than five threats","Four threats","Three threats","Two threats","One threat", "No threats")) +
coord_flip() + plot.theme + Statusplot.labs["NumLocalThreats"] + plot.guides.techreport
# - THREAT TYPES
# Stacked-proportion status plot of reported threat types by settlement.
# Uses its own source table (Flotim.Threat.Types.PLOTFORMAT), not the SBS
# proportion table used by the neighbouring plots.
Flotim.ThreatType.statusplot <-
melt(Flotim.Threat.Types.PLOTFORMAT,
id.vars="SettlementName",measure.vars=c("Other", "OtherMarineUses", "NaturalProcesses", "HabitatLoss",
"ClimateChange", "IllegalFishing", "DestructiveFishing", "Pollution")) %>%
ggplot(aes(x=SettlementName,y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.75,
size=0.15,
colour="#505050") +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["ThreatType"]],
labels=c("Other", "Other marine resource uses", "Natural processes", "Habitat loss",
"Climate change", "Illegal fishing", "Destructive fishing", "Pollution")) +
coord_flip() + plot.theme + Statusplot.labs["ThreatTypes"] + plot.guides.techreport
# - Number of Ethnicities
# Simple bar plot of the number of ethnic groups per settlement; the y axis is
# padded 3% above the maximum so the tallest bar does not touch the frame.
# "NotDummy" is a constant fill key resolved via fillcols.status.
Flotim.ethnicity.statusplot <- ggplot(data=Flotim.SBSPropData.Techreport.status.PLOTFORMAT,
aes(x=SettlementName)) +
geom_bar(aes(y=Num.EthnicGroups,
fill="NotDummy"),
stat="identity",
position="dodge",
width=0.75,
show.legend=F) +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
limits=c(0,max(Flotim.SBSPropData.Techreport.status.PLOTFORMAT$Num.EthnicGroups,na.rm=T) +
0.03*max(Flotim.SBSPropData.Techreport.status.PLOTFORMAT$Num.EthnicGroups,na.rm=T))) +
scale_fill_manual(values=fillcols.status) +
scale_colour_manual(values=errcols.status) +
coord_flip() + Statusplot.labs["Ethnicity"] + plot.theme
# - Contribution
# Bar plot of (monetary) contribution per settlement, with comma-formatted
# axis labels. The upper y limit is max + 1.5*max (i.e. 2.5x the maximum),
# which leaves generous headroom -- presumably intentional for annotation
# space; confirm before tightening.
Flotim.contribution.statusplot <- ggplot(data=Flotim.SBSPropData.Techreport.status.PLOTFORMAT,
aes(x=SettlementName)) +
geom_bar(aes(y=Contribution,
fill="NotDummy"),
stat="identity",
position="dodge",
width=0.75,
show.legend=F) +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
limits=c(0,max(Flotim.SBSPropData.Techreport.status.PLOTFORMAT$Contribution,na.rm=T) +
1.5* max(Flotim.SBSPropData.Techreport.status.PLOTFORMAT$Contribution,na.rm=T)), labels = scales::comma) +
scale_fill_manual(values=fillcols.status) +
scale_colour_manual(values=errcols.status) +
coord_flip() + Statusplot.labs["Contribution"] + plot.theme
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- SECTION 4: TREND PLOTS ----
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- 4.1 Continuous data plots ----
# - FOOD SECURITY
# Trend plot of mean food-security score by monitoring year, with reference
# lines at 1.56 and 4.02 splitting the axis into three bands and band names
# drawn as text annotations.
# NOTE(review): 1.56 / 4.02 / 6.06 look like fixed scale cutoffs for the
# food-security index used project-wide -- confirm against the index codebook.
# NOTE(review): the text x-position uses length(MonitoringYear), which equals
# the number of data ROWS; this only places the labels correctly if the
# plotted table has exactly one row per monitoring year -- verify.
Flotim.fs.trendplot <-
ggplot(Flotim.TrendContData.Techreport.PLOTFORMAT
[!is.na(Flotim.TrendContData.Techreport.PLOTFORMAT$MonitoringYear),]) +
geom_hline(aes(yintercept=1.56),size=0.25,colour="#505050") +
geom_hline(aes(yintercept=4.02),size=0.25,colour="#505050") +
geom_bar(aes(x=MonitoringYear,
y=FSMean),
fill=fillcols.trend,
stat="identity",
position="dodge",
width=0.65) +
geom_errorbar(aes(ymin=FSMean-FSErr,
ymax=FSMean+FSErr,
x=MonitoringYear),
colour=errcols.trend,
width=0.15,
size=0.5,
position=position_dodge(width=1)) +
geom_text(aes(x=length(MonitoringYear)+0.46,y=(0.5*(6.06-4.02))+4.02,label="Food secure"),
size=rel(2.5),lineheight=0.8,fontface="bold.italic",colour="#505050") +
geom_text(aes(x=length(MonitoringYear)+0.46,y=(0.5*(4.02-1.56))+1.56,label="Food insecure\nwithout hunger"),
size=rel(2.5),lineheight=0.8,fontface="bold.italic",colour="#505050") +
geom_text(aes(x=length(MonitoringYear)+0.46,y=0.5*1.56,label="Food insecure\nwith hunger"),
size=rel(2.5),lineheight=0.8,fontface="bold.italic",colour="#505050") +
scale_y_continuous(expand=c(0,0),
limits=c(0,6.06)) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
coord_flip() + Flotim.trendplot.labs["FS"] + theme(axis.ticks=element_blank(),
panel.background=element_rect(fill="white",
colour="#909090"),
panel.border=element_rect(fill=NA,
size=0.25,
colour="#C0C0C0"),
panel.grid.major.x=element_blank(),
panel.grid.major.y=element_blank(),
axis.title=element_text(size=10,
angle=0,
face="bold",
colour="#303030"),
axis.text=element_text(size=8,
angle=0,
colour="#303030"))
Flotim.fs.trendplot
# - MATERIAL ASSETS
# Trend in mean material assets across monitoring years: horizontal bars with
# standard-error bars. Rows without a MonitoringYear are dropped first, and
# the value axis is padded 3% above the largest mean plus error.
Flotim.ma.trendplot <-
Flotim.TrendContData.Techreport.PLOTFORMAT %>%
filter(!is.na(MonitoringYear)) %>%
ggplot(aes(x=MonitoringYear)) +
geom_bar(aes(y=MAMean),
stat="identity",
position="dodge",
width=0.65,
fill=fillcols.trend) +
geom_errorbar(aes(ymin=MAMean-MAErr,
ymax=MAMean+MAErr),
position=position_dodge(width=1),
width=0.15,
size=0.5,
colour=errcols.trend) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
scale_y_continuous(expand=c(0,0),
limits=c(0,max(Flotim.TrendContData.Techreport.PLOTFORMAT$MAMean,na.rm=T)+
max(Flotim.TrendContData.Techreport.PLOTFORMAT$MAErr,na.rm=T)+
0.03*max(Flotim.TrendContData.Techreport.PLOTFORMAT$MAMean,na.rm=T))) +
coord_flip() + Flotim.trendplot.labs["MA"] + plot.theme
Flotim.ma.trendplot
# - PLACE ATTACHMENT
# Trend in mean place attachment across monitoring years; value axis fixed to
# 0-5. Rows without a MonitoringYear are dropped before plotting.
Flotim.pa.trendplot <-
Flotim.TrendContData.Techreport.PLOTFORMAT %>%
filter(!is.na(MonitoringYear)) %>%
ggplot(aes(x=MonitoringYear)) +
geom_bar(aes(y=PAMean),
stat="identity",
position="dodge",
width=0.65,
fill=fillcols.trend) +
geom_errorbar(aes(ymin=PAMean-PAErr,
ymax=PAMean+PAErr),
position=position_dodge(width=1),
width=0.15,
size=0.5,
colour=errcols.trend) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
scale_y_continuous(expand=c(0,0),
limits=c(0,5)) +
coord_flip() + Flotim.trendplot.labs["PA"] + plot.theme
Flotim.pa.trendplot
# - MARINE TENURE
# Trend in mean marine tenure across monitoring years; value axis fixed to
# 0-5. Rows without a MonitoringYear are dropped before plotting.
Flotim.mt.trendplot <-
Flotim.TrendContData.Techreport.PLOTFORMAT %>%
filter(!is.na(MonitoringYear)) %>%
ggplot(aes(x=MonitoringYear)) +
geom_bar(aes(y=MTMean),
stat="identity",
position="dodge",
width=0.65,
fill=fillcols.trend) +
geom_errorbar(aes(ymin=MTMean-MTErr,
ymax=MTMean+MTErr),
position=position_dodge(width=1),
width=0.15,
size=0.5,
colour=errcols.trend) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
scale_y_continuous(expand=c(0,0),
limits=c(0,5)) +
coord_flip() + Flotim.trendplot.labs["MT"] + plot.theme
Flotim.mt.trendplot
# - SCHOOL ENROLLMENT
# Trend in mean school enrollment (a proportion, axis 0-100%) across
# monitoring years. Rows without a MonitoringYear are dropped first.
Flotim.se.trendplot <-
Flotim.TrendContData.Techreport.PLOTFORMAT %>%
filter(!is.na(MonitoringYear)) %>%
ggplot(aes(x=MonitoringYear)) +
geom_bar(aes(y=SEMean),
stat="identity",
position="dodge",
width=0.65,
fill=fillcols.trend) +
geom_errorbar(aes(ymin=SEMean-SEErr,
ymax=SEMean+SEErr),
position=position_dodge(width=1),
width=0.15,
size=0.5,
colour=errcols.trend) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format(),
limits=c(0,1)) +
coord_flip() + Flotim.trendplot.labs["SE"] + plot.theme
Flotim.se.trendplot
# - TIME TO MARKET
# Trend in mean travel time to market across monitoring years; the value axis
# is padded 3% above the largest mean plus error. Rows without a
# MonitoringYear are dropped before plotting.
Flotim.time.trendplot <-
Flotim.TrendContData.Techreport.PLOTFORMAT %>%
filter(!is.na(MonitoringYear)) %>%
ggplot(aes(x=MonitoringYear)) +
geom_bar(aes(y=TimeMarketMean),
stat="identity",
position="dodge",
width=0.65,
fill=fillcols.trend) +
geom_errorbar(aes(ymin=TimeMarketMean-TimeMarketErr,
ymax=TimeMarketMean+TimeMarketErr),
position=position_dodge(width=1),
width=0.15,
size=0.5,
colour=errcols.trend) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
scale_y_continuous(expand=c(0,0),
limits=c(0,max(Flotim.TrendContData.Techreport.PLOTFORMAT$TimeMarketMean,na.rm=T)+
max(Flotim.TrendContData.Techreport.PLOTFORMAT$TimeMarketErr,na.rm=T)+
0.03*max(Flotim.TrendContData.Techreport.PLOTFORMAT$TimeMarketMean,na.rm=T))) +
coord_flip() + Flotim.trendplot.labs["Market"] + plot.theme
Flotim.time.trendplot
# - DAYS UNWELL
# Trend in mean days unwell across monitoring years; the value axis is padded
# 3% above the largest mean plus error. Rows without a MonitoringYear are
# dropped before plotting.
Flotim.unwell.trendplot <-
Flotim.TrendContData.Techreport.PLOTFORMAT %>%
filter(!is.na(MonitoringYear)) %>%
ggplot(aes(x=MonitoringYear)) +
geom_bar(aes(y=UnwellMean),
stat="identity",
position="dodge",
width=0.65,
fill=fillcols.trend) +
geom_errorbar(aes(ymin=UnwellMean-UnwellErr,
ymax=UnwellMean+UnwellErr),
position=position_dodge(width=1),
width=0.15,
size=0.5,
colour=errcols.trend) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
scale_y_continuous(expand=c(0,0),
limits=c(0,max(Flotim.TrendContData.Techreport.PLOTFORMAT$UnwellMean,na.rm=T)+
max(Flotim.TrendContData.Techreport.PLOTFORMAT$UnwellErr,na.rm=T)+
0.03*max(Flotim.TrendContData.Techreport.PLOTFORMAT$UnwellMean,na.rm=T))) +
coord_flip() + Flotim.trendplot.labs["Unwell"] + plot.theme
Flotim.unwell.trendplot
# ---- 4.2 Proportional data plots ----
# - GENDER OF HEAD OF HOUSEHOLD
# Stacked-proportion trend of household-head gender by monitoring year.
# BUG FIX: the original mapped x=rev(MonitoringYear). rev() inside aes()
# reverses the data VECTOR element-wise, so each value/fill row was paired
# with the wrong year's x position -- it does not reverse the axis order.
# Sibling trend plots (e.g. primary occupation below) map MonitoringYear
# directly, so the same is done here. To actually reverse the axis ordering,
# use scale_x_discrete(limits=rev(levels(...))) instead.
Flotim.gender.trendplot <-
melt(Flotim.TrendPropData.Techreport.PLOTFORMAT,
id.vars="MonitoringYear",measure.vars=c("HHH.female","HHH.male")) %>%
ggplot(aes(x=MonitoringYear,y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.65,
size=0.15,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["Gender"]],
labels=c("Female","Male")) +
coord_flip() + Flotim.trendplot.labs["Gender"] + plot.theme + plot.guides.techreport
Flotim.gender.trendplot
# - RELIGION
# Stacked-proportion trend of religion by monitoring year, with a custom
# horizontal legend (reverse=T so the legend order mirrors the stack).
# BUG FIX: the original mapped x=rev(MonitoringYear). rev() inside aes()
# reverses the data vector element-wise, mis-pairing each value/fill row with
# the wrong year's position; it does not reverse axis order. Mapped directly,
# matching the other trend plots.
Flotim.religion.trendplot <-
melt(Flotim.TrendPropData.Techreport.PLOTFORMAT,
id.vars="MonitoringYear",measure.vars=c("Percent.Rel.Other","Percent.Rel.Muslim","Percent.Rel.Christian")) %>%
ggplot(aes(x=MonitoringYear,
y=value)) +
geom_bar(aes(fill=variable),
stat="identity",
position="fill",
width=0.65,
size=0.15,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["Religion"]],
labels=c("Other","Muslim","Christian")) +
coord_flip() + plot.theme + Flotim.trendplot.labs["Religion"] +
guides(fill=guide_legend(label.vjust=0.5,
label.theme=element_text(size=rel(9),
angle=0,
colour="#505050",
lineheight=0.75),
direction="horizontal",
ncol=3,
title.position="left",
label.position="right",
keywidth=unit(0.75,"cm"),
keyheight=unit(0.5,"cm"),
reverse=T))
Flotim.religion.trendplot
# - PRIMARY OCCUPATION
# Stacked-proportion trend of primary occupation by monitoring year; labels
# are positional and must stay in measure.vars order (Other ... Farm).
Flotim.primaryocc.trendplot <-
melt(Flotim.TrendPropData.Techreport.PLOTFORMAT,
id.vars="MonitoringYear",measure.vars=c("Percent.PrimaryOcc.Other","Percent.PrimaryOcc.WageLabor",
"Percent.PrimaryOcc.Tourism","Percent.PrimaryOcc.Fish",
"Percent.PrimaryOcc.HarvestForest","Percent.PrimaryOcc.Farm")) %>%
ggplot(aes(x=(MonitoringYear),y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.65,
size=0.15,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["PrimaryOcc"]],
labels=c("Other","Other Wage Labor","Tourism",
"Fishing","Harvest Forest Products","Farming")) +
coord_flip() + plot.theme + Flotim.trendplot.labs["PrimaryOcc"] + plot.guides.techreport
Flotim.primaryocc.trendplot
# USED TO CHECK DISTRIBUTION OF SECONDARY OCCUPATIONS
# Stacked-proportion trend of secondary occupation by monitoring year
# (diagnostic plot, hence the inline labs() rather than a shared labs entry).
# BUG FIX: the original mapped x=rev(MonitoringYear). rev() inside aes()
# reverses the data vector element-wise, mis-pairing each value/fill row with
# the wrong year's position; it does not reverse axis order. Mapped directly,
# matching the primary-occupation plot above.
Flotim.Secondaryocc.trendplot <-
melt(Flotim.TrendPropData.Techreport.PLOTFORMAT,
id.vars="MonitoringYear",measure.vars=c("Percent.SecondaryOcc.Other","Percent.SecondaryOcc.WageLabor",
"Percent.SecondaryOcc.Tourism","Percent.SecondaryOcc.Fish",
"Percent.SecondaryOcc.HarvestForest","Percent.SecondaryOcc.Farm")) %>%
ggplot(aes(x=MonitoringYear,y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.65,
size=0.15,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["SecondaryOcc"]],
labels=c("Other","Other Wage Labor","Tourism",
"Fishing","Harvest Forest Products","Farming")) +
coord_flip() + plot.theme + labs(y="Secondary occupation (% households)",x="Monitoring Year") + plot.guides.techreport
Flotim.Secondaryocc.trendplot
# - FISHING FREQUENCY
# Stacked-proportion trend of fishing frequency by monitoring year.
# NOTE(review): the last legend label ("Once every six months") is paired
# with Prop.Fish.AlmostNever -- confirm the codebook wording for that
# category before publication.
Flotim.freqfish.trendplot <-
melt(Flotim.TrendPropData.Techreport.PLOTFORMAT,
id.vars="MonitoringYear",measure.vars=c("Prop.Fish.MoreFewTimesWk","Prop.Fish.FewTimesPerWk",
"Prop.Fish.FewTimesPerMo","Prop.Fish.FewTimesPer6Mo",
"Prop.Fish.AlmostNever")) %>%
ggplot(aes(x=(MonitoringYear),y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.65,
size=0.15,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["FreqFish"]],
labels=c("More than a few times per week","A few times per week",
"A few times per month","A few times per six months",
"Once every six months")) +
coord_flip() + plot.theme + Flotim.trendplot.labs["FreqFish"] + plot.guides.techreport
Flotim.freqfish.trendplot
# - SELL FISH FREQUENCY
# Stacked-proportion trend of how often households sell fish, by monitoring
# year; mirrors the fishing-frequency plot above with Prop.SellFish.* columns.
Flotim.freqsellfish.trendplot <-
melt(Flotim.TrendPropData.Techreport.PLOTFORMAT,
id.vars="MonitoringYear",measure.vars=c("Prop.SellFish.MoreFewTimesWk","Prop.SellFish.FewTimesPerWk",
"Prop.SellFish.FewTimesPerMo","Prop.SellFish.FewTimesPer6Mo",
"Prop.SellFish.AlmostNever")) %>%
ggplot(aes(x=(MonitoringYear),y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.65,
size=0.15,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["FreqSellFish"]],
labels=c("More than a few times per week","A few times per week",
"A few times per month","A few times per six months",
"Once every six months")) +
coord_flip() + plot.theme + Flotim.trendplot.labs["FreqSellFish"] + plot.guides.techreport
Flotim.freqsellfish.trendplot
# - INCOME FROM FISHING
# Stacked-proportion trend of the share of household income from fishing
# (All/Most/Half/Some/None) by monitoring year.
Flotim.incfish.trendplot <-
melt(Flotim.TrendPropData.Techreport.PLOTFORMAT,
id.vars="MonitoringYear",measure.vars=c("Prop.IncFish.All","Prop.IncFish.Most",
"Prop.IncFish.Half","Prop.IncFish.Some",
"Prop.IncFish.None")) %>%
ggplot(aes(x=(MonitoringYear),y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.65,
size=0.15,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["IncFish"]],
labels=c("All","Most","About half","Some","None")) +
coord_flip() + plot.theme + Flotim.trendplot.labs["IncFish"] + plot.guides.techreport
Flotim.incfish.trendplot
# - FISHING TECHNIQUE
# Stacked-proportion trend of fishing techniques by monitoring year; labels
# are positional and must stay in measure.vars order.
Flotim.fishtech.trendplot <-
melt(Flotim.TrendPropData.Techreport.PLOTFORMAT,
id.vars="MonitoringYear",measure.vars=c("Prop.FishTech.MobileLine","Prop.FishTech.StatLine",
"Prop.FishTech.MobileNet","Prop.FishTech.StatNet",
"Prop.FishTech.ByHand")) %>%
ggplot(aes(x=(MonitoringYear),y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.65,
size=0.15,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["FishTech"]],
labels=c("Mobile line","Stationary line",
"Mobile net","Stationary net","Fishing by hand")) +
coord_flip() + plot.theme + Flotim.trendplot.labs["FishTech"] + plot.guides.techreport
Flotim.fishtech.trendplot
# - CHILDHOOD FOOD SECURITY
# Stacked-proportion trend of child food security by monitoring year; label
# order follows measure.vars (Child.FS.yes -> evidence, Child.FS.no -> none).
Flotim.childfs.trendplot <-
melt(Flotim.TrendPropData.Techreport.PLOTFORMAT,
id.vars="MonitoringYear",measure.vars=c("Child.FS.yes","Child.FS.no")) %>%
ggplot(aes(x=(MonitoringYear),
y=value)) +
geom_bar(aes(fill=variable),
stat="identity",
position="fill",
width=0.65,
size=0.15,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["ChildFS"]],
labels=c("Evidence of child hunger","No evidence of child hunger")) +
coord_flip() + plot.theme + Flotim.trendplot.labs["ChildFS"] + plot.guides.techreport
Flotim.childfs.trendplot
# - PROTEIN FROM FISH
# Stacked-proportion trend of dietary protein from fish by monitoring year.
# NOTE(review): this uses Flotim.trendplot.labs["Protein"], while the status
# counterpart uses Statusplot.labs["FishProtein"] -- confirm the "Protein"
# key exists in the trend labs list.
Flotim.proteinfish.trendplot <-
melt(Flotim.TrendPropData.Techreport.PLOTFORMAT,
id.vars="MonitoringYear",measure.vars=c("ProteinFish.All","ProteinFish.Most",
"ProteinFish.Half","ProteinFish.Some",
"ProteinFish.None")) %>%
ggplot(aes(x=(MonitoringYear),y=value,fill=variable)) +
geom_bar(stat="identity",
position="fill",
width=0.65,
size=0.15,
colour="#505050") +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
scale_x_discrete(labels=Flotim.trendplot.monitoryear.labs) +
scale_fill_manual(name="",
values=multianswer.fillcols.status[["Protein"]],
labels=c("All","Most","About half","Some","None")) +
coord_flip() + plot.theme + Flotim.trendplot.labs["Protein"] + plot.guides.techreport
Flotim.proteinfish.trendplot
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- SECTION 5: ANNEX PLOTS ----
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- 5.1 Food security -----
# Annex plot: food-security means per settlement and monitoring year.
# A dummy all-NA row (SettlementName " ", SettLevel "Dummy") is appended so
# the flipped x axis gains one empty slot at the top, which is where the
# band-name text annotations are drawn. Monitoring years are distinguished by
# alpha; reference hlines at 1.56 / 4.02 split the index into three bands
# (presumably the project's food-security cutoffs -- confirm in the codebook).
Flotim.fs.annexplot <-
rbind.data.frame(Flotim.AnnexContData.Techreport.PLOTFORMAT,
cbind.data.frame(MonitoringYear=NA,SettlementID=NA,SettlementName=" ",
matrix(rep(NA,14),ncol=14,
dimnames=list(NULL,
colnames(Flotim.AnnexContData.Techreport.PLOTFORMAT)[4:17])),
SettLevel="Dummy")) %>%
ggplot() +
geom_hline(aes(yintercept=1.56),size=0.25,colour="#505050") +
geom_hline(aes(yintercept=4.02),size=0.25,colour="#505050") +
geom_bar(aes(x=SettlementName,
y=FSMean,
alpha=MonitoringYear),
stat="identity",
position="dodge",
fill=fillcols.trend,
width=0.75,
size=0.15,
colour="#505050") +
geom_errorbar(aes(x=SettlementName,
ymin=FSMean-FSErr,
ymax=FSMean+FSErr,
colour=SettLevel,
alpha=MonitoringYear),
width=0.25,
size=0.5,
position=position_dodge(width=0.75),
show.legend=F) +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
geom_text(aes(x=length(unique(SettlementName)),y=(0.5*(6.06-4.02))+4.02,label="Food secure"),
size=rel(2.5),lineheight=0.8,fontface="bold.italic",colour="#505050") +
geom_text(aes(x=length(unique(SettlementName)),y=(0.5*(4.02-1.56))+1.56,label="Food insecure\nwithout hunger"),
size=rel(2.5),lineheight=0.8,fontface="bold.italic",colour="#505050") +
geom_text(aes(x=length(unique(SettlementName)),y=0.5*1.56,label="Food insecure\nwith hunger"),
size=rel(2.5),lineheight=0.8,fontface="bold.italic",colour="#505050") +
scale_alpha_manual(name="",
values=c(0.3,0.6,1),
labels=(Flotim.annexplot.monitoryear.labs),
na.translate=FALSE) +
scale_fill_manual(values=fillcols.status) +
scale_colour_manual(values=errcols.status) +
scale_x_discrete(labels=c(Flotim.annexplot.settnames[,"FS"]," "),
na.value=" ") +
scale_y_continuous(expand=c(0,0),
limits=c(0,6.06)) +
coord_flip() + Statusplot.labs["FS"] + plot.guides.techreport + theme(axis.ticks=element_blank(),
panel.background=element_rect(fill="white",
colour="#909090"),
panel.border=element_rect(fill=NA,
size=0.25,
colour="#C0C0C0"),
panel.grid.major.x=element_blank(),
panel.grid.major.y=element_blank(),
axis.title=element_text(size=10,
angle=0,
face="bold",
colour="#303030"),
axis.text=element_text(size=8,
angle=0,
colour="#303030"),
legend.position="top",
legend.justification="right",
legend.box.spacing=unit(0.1,"cm"))
Flotim.fs.annexplot
# ---- 5.2 Material assets -----
# Annex plot: material-asset means per settlement, dodged by monitoring year
# (alpha encodes year); error-bar colour comes from SettLevel. Value axis is
# padded 3% above the largest mean plus error.
Flotim.ma.annexplot <-
ggplot(data=Flotim.AnnexContData.Techreport.PLOTFORMAT,
aes(x=SettlementName,
y=MAMean)) +
geom_bar(aes(alpha=MonitoringYear),
stat="identity",
position="dodge",
fill=fillcols.trend,
width=0.75,
size=0.15,
colour="#505050") +
geom_errorbar(aes(ymin=MAMean-MAErr,
ymax=MAMean+MAErr,
colour=SettLevel,
alpha=MonitoringYear),
width=0.25,
size=0.5,
position=position_dodge(width=0.75),
show.legend=F) +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_alpha_manual(name="",
values=c(0.3,0.6,1),
labels=Flotim.annexplot.monitoryear.labs,
na.translate=FALSE) +
scale_fill_manual(values=fillcols.status) +
scale_colour_manual(values=errcols.status) +
scale_x_discrete(labels=Flotim.annexplot.settnames[,"MA"]) +
scale_y_continuous(expand=c(0,0),
limits=c(0,max(Flotim.AnnexContData.Techreport.PLOTFORMAT$MAMean,na.rm=T)+
max(Flotim.AnnexContData.Techreport.PLOTFORMAT$MAErr,na.rm=T)+
0.03*max(Flotim.AnnexContData.Techreport.PLOTFORMAT$MAMean,na.rm=T))) +
coord_flip() + Statusplot.labs["MA"] + plot.guides.techreport + plot.theme
Flotim.ma.annexplot
# ---- 5.3 Place attachment -----
# Annex plot: place-attachment means per settlement, dodged by monitoring
# year (alpha encodes year); value axis fixed to 0-5.
Flotim.pa.annexplot <-
ggplot(data=Flotim.AnnexContData.Techreport.PLOTFORMAT,
aes(x=SettlementName,
y=PAMean)) +
geom_bar(aes(alpha=MonitoringYear),
stat="identity",
position="dodge",
fill=fillcols.trend,
width=0.75,
size=0.15,
colour="#505050") +
geom_errorbar(aes(ymin=PAMean-PAErr,
ymax=PAMean+PAErr,
colour=SettLevel,
alpha=MonitoringYear),
width=0.25,
size=0.5,
position=position_dodge(width=0.75),
show.legend=F) +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_alpha_manual(name="",
values=c(0.3,0.6,1),
labels=Flotim.annexplot.monitoryear.labs,
na.translate=FALSE) +
scale_fill_manual(values=fillcols.status) +
scale_colour_manual(values=errcols.status) +
scale_x_discrete(labels=Flotim.annexplot.settnames[,"PA"]) +
scale_y_continuous(expand=c(0,0),
limits=c(0,5)) +
coord_flip() + Statusplot.labs["PA"] + plot.guides.techreport + plot.theme
Flotim.pa.annexplot
# ---- 5.4 Marine tenure -----
# Annex plot: marine-tenure means per settlement, dodged by monitoring year
# (alpha encodes year); value axis fixed to 0-5.
Flotim.mt.annexplot <-
ggplot(data=Flotim.AnnexContData.Techreport.PLOTFORMAT,
aes(x=SettlementName,
y=MTMean)) +
geom_bar(aes(alpha=MonitoringYear),
stat="identity",
position="dodge",
fill=fillcols.trend,
width=0.75,
size=0.15,
colour="#505050") +
geom_errorbar(aes(ymin=MTMean-MTErr,
ymax=MTMean+MTErr,
colour=SettLevel,
alpha=MonitoringYear),
width=0.25,
size=0.5,
position=position_dodge(width=0.75),
show.legend=F) +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_alpha_manual(name="",
values=c(0.3,0.6,1),
labels=Flotim.annexplot.monitoryear.labs,
na.translate=FALSE) +
scale_fill_manual(values=fillcols.status) +
scale_colour_manual(values=errcols.status) +
scale_x_discrete(labels=Flotim.annexplot.settnames[,"MT"]) +
scale_y_continuous(expand=c(0,0),
limits=c(0,5)) +
coord_flip() + Statusplot.labs["MT"] + plot.guides.techreport + plot.theme
Flotim.mt.annexplot
# ---- 5.5 School enrollment -----
# Annex plot: school-enrollment means (proportions, percent-formatted axis)
# per settlement, dodged by monitoring year (alpha encodes year).
# NOTE(review): unlike the other annex plots, no explicit y limits are set
# here -- confirm that is intentional.
Flotim.se.annexplot <-
ggplot(data=Flotim.AnnexContData.Techreport.PLOTFORMAT,
aes(x=SettlementName,
y=SEMean)) +
geom_bar(aes(alpha=MonitoringYear),
stat="identity",
position="dodge",
fill=fillcols.trend,
width=0.75,
size=0.15,
colour="#505050") +
geom_errorbar(aes(ymin=SEMean-SEErr,
ymax=SEMean+SEErr,
colour=SettLevel,
alpha=MonitoringYear),
width=0.25,
size=0.5,
position=position_dodge(width=0.75),
show.legend=F) +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_alpha_manual(name="",
values=c(0.3,0.6,1),
labels=Flotim.annexplot.monitoryear.labs,
na.translate=FALSE) +
scale_fill_manual(values=fillcols.status) +
scale_colour_manual(values=errcols.status) +
scale_x_discrete(labels=Flotim.annexplot.settnames[,"SE"]) +
scale_y_continuous(expand=c(0,0),
labels=scales::percent_format()) +
coord_flip() + Statusplot.labs["SE"] + plot.guides.techreport + plot.theme
Flotim.se.annexplot
# ---- 5.6 Time to market -----
# Annex figure: mean travel time to market (TimeMarketMean) per settlement and
# monitoring year, with +/- 1 SE error bars. Unlike the fixed-scale plots, the
# y-axis upper limit is computed from the data: max mean + max error + 3%
# headroom so the tallest bar and its error bar always fit.
Flotim.time.annexplot <-
ggplot(data=Flotim.AnnexContData.Techreport.PLOTFORMAT,
aes(x=SettlementName,
y=TimeMarketMean)) +
geom_bar(aes(alpha=MonitoringYear),
stat="identity",
position="dodge",
# NOTE(review): static fill (fillcols.trend); scale_fill_manual below is
# a no-op -- confirm intended palette.
fill=fillcols.trend,
width=0.75,
size=0.15,
colour="#505050") +
geom_errorbar(aes(ymin=TimeMarketMean-TimeMarketErr,
ymax=TimeMarketMean+TimeMarketErr,
colour=SettLevel,
alpha=MonitoringYear),
width=0.25,
size=0.5,
position=position_dodge(width=0.75),
show.legend=F) +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_alpha_manual(name="",
values=c(0.3,0.6,1),
labels=Flotim.annexplot.monitoryear.labs,
na.translate=FALSE) +
scale_fill_manual(values=fillcols.status) +
scale_colour_manual(values=errcols.status) +
scale_x_discrete(labels=Flotim.annexplot.settnames[,"TimeMarket"]) +
# Data-driven upper limit: max mean + max SE + 3% of max mean.
scale_y_continuous(expand=c(0,0),
limits=c(0,max(Flotim.AnnexContData.Techreport.PLOTFORMAT$TimeMarketMean,na.rm=T)+
max(Flotim.AnnexContData.Techreport.PLOTFORMAT$TimeMarketErr,na.rm=T)+
0.03*max(Flotim.AnnexContData.Techreport.PLOTFORMAT$TimeMarketMean,na.rm=T))) +
coord_flip() + Statusplot.labs["Time"] + plot.guides.techreport + plot.theme
Flotim.time.annexplot
# ---- 5.7 Days unwell -----
# Annex figure: mean number of days unwell (UnwellMean) per settlement and
# monitoring year, with +/- 1 SE error bars. Same structure as the time-to-
# market plot, including the data-driven y-axis upper limit.
Flotim.unwell.annexplot <-
ggplot(data=Flotim.AnnexContData.Techreport.PLOTFORMAT,
aes(x=SettlementName,
y=UnwellMean)) +
geom_bar(aes(alpha=MonitoringYear),
stat="identity",
position="dodge",
# NOTE(review): static fill (fillcols.trend); scale_fill_manual below is
# a no-op -- confirm intended palette.
fill=fillcols.trend,
width=0.75,
size=0.15,
colour="#505050") +
geom_errorbar(aes(ymin=UnwellMean-UnwellErr,
ymax=UnwellMean+UnwellErr,
colour=SettLevel,
alpha=MonitoringYear),
width=0.25,
size=0.5,
position=position_dodge(width=0.75),
show.legend=F) +
geom_vline(aes(xintercept=3),
linetype=2,
size=0.35,
colour="#505050") +
scale_alpha_manual(name="",
values=c(0.3,0.6,1),
labels=Flotim.annexplot.monitoryear.labs,
na.translate=FALSE) +
scale_fill_manual(values=fillcols.status) +
scale_colour_manual(values=errcols.status) +
scale_x_discrete(labels=Flotim.annexplot.settnames[,"Unwell"]) +
# Data-driven upper limit: max mean + max SE + 3% of max mean.
scale_y_continuous(expand=c(0,0),
limits=c(0,max(Flotim.AnnexContData.Techreport.PLOTFORMAT$UnwellMean,na.rm=T)+
max(Flotim.AnnexContData.Techreport.PLOTFORMAT$UnwellErr,na.rm=T)+
0.03*max(Flotim.AnnexContData.Techreport.PLOTFORMAT$UnwellMean,na.rm=T))) +
coord_flip() + Statusplot.labs["Unwell"] + plot.guides.techreport + plot.theme
Flotim.unwell.annexplot
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ---- SECTION 6: WRITE TO .PNG ----
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# Output directory: one folder per run, named by today's date.
# NOTE(review): sep="_" joins the trailing "/" and the date with an
# underscore, so the folder is ".../Products/_YYYY_MM_DD". If the intent
# was ".../Products/YYYY_MM_DD", use paste0() instead; kept as-is so the
# output location matches previous runs.
FigureFileName <- paste("C:/Users/HP/Dropbox/Products/",
                        format(Sys.Date(),format="%Y_%m_%d"),sep="_")
dir.create(FigureFileName)
# Render one ggplot object to a PNG inside FigureFileName.
#   plot_obj : the ggplot object to draw
#   filename : output file name (no path), e.g. "FS.trend.png"
#   height/width : device size in inches; status/trend plots are 4x6,
#                  annex plots are 7.5x7.5
# Replaces ~60 copy-pasted png()/plot()/dev.off() triples.
save_flotim_png <- function(plot_obj, filename, height=4, width=6) {
  png(paste(FigureFileName, filename, sep="/"),
      units="in", height=height, width=width, res=400)
  plot(plot_obj)
  dev.off()
}
# ---- 6.1 Food security ----
save_flotim_png(Flotim.fs.trendplot, "FS.trend.png")
save_flotim_png(Flotim.fs.annexplot, "FS.annex.png", height=7.5, width=7.5)
save_flotim_png(Flotim.fs.statusplot, "FS.status.png")
# ---- 6.2 Material assets ----
save_flotim_png(Flotim.ma.statusplot, "MA.status.png")
save_flotim_png(Flotim.ma.trendplot, "MA.trend.png")
save_flotim_png(Flotim.ma.annexplot, "MA.annex.png", height=7.5, width=7.5)
# ---- 6.3 Place attachment ----
save_flotim_png(Flotim.pa.statusplot, "PA.status.png")
save_flotim_png(Flotim.pa.trendplot, "PA.trend.png")
save_flotim_png(Flotim.pa.annexplot, "PA.annex.png", height=7.5, width=7.5)
# ---- 6.4 Marine tenure ----
save_flotim_png(Flotim.mt.statusplot, "MT.status.png")
save_flotim_png(Flotim.mt.trendplot, "MT.trend.png")
save_flotim_png(Flotim.mt.annexplot, "MT.annex.png", height=7.5, width=7.5)
# ---- 6.5 School enrollment ----
save_flotim_png(Flotim.se.statusplot, "SE.status.png")
save_flotim_png(Flotim.se.trendplot, "SE.trend.png")
save_flotim_png(Flotim.se.annexplot, "SE.annex.png", height=7.5, width=7.5)
# ---- 6.6 Time to market ----
save_flotim_png(Flotim.time.statusplot, "TimeMarket.status.png")
save_flotim_png(Flotim.time.trendplot, "TimeMarket.trend.png")
save_flotim_png(Flotim.time.annexplot, "TimeMarket.annex.png", height=7.5, width=7.5)
# ---- 6.7 Days unwell ----
save_flotim_png(Flotim.unwell.statusplot, "DaysUnwell.status.png")
save_flotim_png(Flotim.unwell.trendplot, "DaysUnwell.trend.png")
save_flotim_png(Flotim.unwell.annexplot, "DaysUnwell.annex.png", height=7.5, width=7.5)
# ---- 6.8 Gender of head of household ----
save_flotim_png(Flotim.gender.statusplot, "Gender.status.png")
save_flotim_png(Flotim.gender.trendplot, "Gender.trend.png")
# ---- 6.9 Religion ----
save_flotim_png(Flotim.religion.statusplot, "Religion.status.png")
save_flotim_png(Flotim.religion.trendplot, "Religion.trend.png")
# ---- 6.10 Primary occupation ----
save_flotim_png(Flotim.primaryocc.statusplot, "PrimaryOcc.status.png")
save_flotim_png(Flotim.primaryocc.trendplot, "PrimaryOcc.trend.png")
# ---- 6.11 Secondary occupation ---- (was mis-numbered "6.15" in original)
save_flotim_png(Flotim.Secondaryocc.statusplot, "SecondaryOcc.status.png")
save_flotim_png(Flotim.Secondaryocc.trendplot, "SecondaryOcc.trend.png")
# ---- 6.12 Fishing frequency ----
save_flotim_png(Flotim.freqfish.statusplot, "FreqFish.status.png")
save_flotim_png(Flotim.freqfish.trendplot, "FreqFish.trend.png")
# ---- 6.13 Fish sale frequency ----
save_flotim_png(Flotim.freqsellfish.statusplot, "FreqSellFish.status.png")
save_flotim_png(Flotim.freqsellfish.trendplot, "FreqSellFish.trend.png")
# ---- 6.14 Income from fishing ----
save_flotim_png(Flotim.incfish.statusplot, "IncFish.status.png")
save_flotim_png(Flotim.incfish.trendplot, "IncFish.trend.png")
# ---- 6.15 Fishing technique ----
save_flotim_png(Flotim.fishtech.statusplot, "FishTech.status.png")
save_flotim_png(Flotim.fishtech.trendplot, "FishTech.trend.png")
# ---- 6.16 Childhood food security ----
save_flotim_png(Flotim.childfs.statusplot, "ChildFS.status.png")
save_flotim_png(Flotim.childfs.trendplot, "ChildFS.trend.png")
# ---- 6.17 Protein from fish ----
save_flotim_png(Flotim.proteinfish.statusplot, "FishProtein.status.png")
save_flotim_png(Flotim.proteinfish.trendplot, "FishProtein.trend.png")
# ---- 6.18 Age/Gender ----
# This one is a grid graphics object (gtable), not a plain ggplot, so it
# needs grid.newpage()/grid.draw() instead of plot().
library(grid)
png(paste(FigureFileName,"Age.gender.png",sep="/"),
    units="in",height=10,width=4,res=400)
grid.newpage()
grid.draw(Flotim.age.gender.plot)
dev.off()
# ---- 6.19 Number ethnic groups ----
save_flotim_png(Flotim.ethnic.statusplot, "Num.Ethnic.png")
|
# Panel Data
# Repeated observations for the same units over time.
# - Robust against certain types of omitted-variables bias
# - Allows us to learn about dynamics
# Assumptions used throughout: random sampling in the cross-section,
# balanced panel (Ti = T), large N and small T.
library(tidyverse)
# Example: fatality rate and beertax --------------------------------------
# US state-level panel: traffic fatality rates and beer taxes.
df_beer_accid <- haven::read_dta("data/beertax.dta")
# Fatality rate over time, one facet per state (first 9 states only).
# NOTE(review): the comment said "per 1000 population" but the regression
# comments below say "per 10000" -- confirm the units in beertax.dta.
df_beer_accid %>%
filter( state < 10) %>%
ggplot( aes( y = fatalityrate, x = (year) ) ) +
geom_line( ) +
facet_wrap(~state, scales = "free_y")
# Will an increased beer tax reduce the fatality rate from car accidents?
# Pooled OLS, then single-year cross-sections for 1982 and 1988.
df_beer_accid %>% lm( fatalityrate ~beertax, data = .) %>% summary()
df_beer_accid %>% filter(year == 1982) %>% lm( fatalityrate ~beertax, data = .) %>% summary()
df_beer_accid %>% filter(year == 1988) %>% lm( fatalityrate ~beertax, data = .) %>% summary()
# Beer tax appears *positively* correlated with fatalities -- contrary to
# expectation (a higher tax should reduce drinking and hence deaths).
# Likely explanation: omitted-variables bias (OVB); multivariate OLS
# cannot account for unobserved state-level factors.
# First difference
# Difference each state's 1988 value against its 1982 value. Differencing
# removes any time-invariant state effect, so the slope is identified from
# *changes* only. A tax increase is now associated with a *decrease* in the
# fatality rate, as expected.
df_beer_accid_diff <-
df_beer_accid %>%
filter( year %in% c(1982, 1988) ) %>%
select( state, year, fatalityrate, beertax ) %>%
group_by(state) %>%
mutate( d.fatalityrate = fatalityrate- lag(fatalityrate),
d.beertax = beertax - lag(beertax)
) %>%
filter( year == 1988)
# A one-unit increase in the beer tax lowers the rate (per 10000) by ~1.04.
fatal_diff_mod <- df_beer_accid_diff %>% lm( d.fatalityrate ~d.beertax, data = .)
summary(fatal_diff_mod)
# That is a rather big impact: the mean rate is about 2 per 10000.
mean(df_beer_accid$fatalityrate)
# Heteroskedasticity-robust (HC1) test: d.beertax remains significant.
lmtest::coeftest(fatal_diff_mod, vcov = sandwich::vcovHC, type = "HC1")
## The within model:
# Between and within variation --------------------------------------------
# Raw panel, one row per state-year.
df_beer_accid %>%
  select(state, year, fatalityrate, beertax)
# Cross-sectional scatter per year: a higher beer tax appears associated
# with a *higher* fatality rate -- the surprising positive correlation.
df_beer_accid %>%
  select(state, year, frate = fatalityrate, beertax) %>%
  filter( year %in% c(1982, 1988)) %>%
  ggplot( aes(y = frate, x = beertax ) ) +
  geom_point( ) +
  geom_smooth( method = "lm" , se = FALSE, color = "blue") +
  facet_wrap( ~year, scales ="free_y")
# Classify states by their average beer tax over the sample period, so we
# can compare within-group slopes against the pooled slope.
df <-
  df_beer_accid %>%
  select(state, year, frate = fatalityrate, beertax) %>%
  group_by( state) %>%
  mutate( mean_beertax = mean(beertax)) %>%
  ungroup() %>%
  mutate( cat = case_when( mean_beertax > 0.75 ~ "Large",
                           between(mean_beertax, 0.5, 0.75) ~ "M",
                           # FIX: catch-all uses TRUE, not T -- T is an
                           # ordinary variable and can be reassigned.
                           TRUE ~ "small"
                           ) )
# Pooled fit (blue) vs separate fits for high-tax and low-tax states:
# the between-group pattern drives the misleading pooled slope.
df %>%
  ggplot( aes( x = beertax, y = frate )) + geom_point() + geom_smooth( method = "lm", se = FALSE) +
  geom_smooth( data = df %>% filter( cat == "Large"), aes(y = frate, x = beertax), method = "lm", se = FALSE, inherit.aes = FALSE ) +
  geom_smooth( data = df %>% filter( cat == "small"), aes(y = frate, x = beertax), method = "lm", se = FALSE, inherit.aes = FALSE , color = "red")
# Fixed effects model ----------------------------------------------------
# 1) First difference
# Simulation illustrating the cost of first-differencing: 1000 units, each
# observed twice (t = 1, 2); the true slope is 2. Differencing discards one
# observation per unit, which inflates standard errors.
# NOTE(review): no set.seed() -- results change on every run; add one if
# reproducibility matters.
df_sim <- tibble( id = rep(seq( from = 1, to = 1000), each =2) , t = rep( x = c(1,2), times = 1000) ) %>%
mutate(
x = runif(n = 2000),
# For t == 1, overwrite x with 0.8 + noise (0.8*t where t is 1).
x = ifelse( t == 1, 0.8*t + rnorm(n =2000, mean = 0, sd = 1), x),
y = 2*x + rnorm(n = 2000, 0 ,1)
) %>%
group_by( id) %>%
mutate( d.y = y - lag(y),
d.x = x - lag(x)
)
# Compare: levels regression vs first-difference regression (fewer usable
# rows after differencing -- the "cost" in effective sample size).
df_sim %>% lm( y ~ x, data = .) %>% summary()
df_sim %>% na.omit() %>% lm( d.y ~ d.x , data = .) %>% summary()
## example 2: Accidents and Beers
# OLS on the two cross-sections shows a *positive* relation between beertax
# and fatality rate, although we expect it to be negative.
df_beer_accid %>%
filter( year %in% c(1982, 1988) ) %>%
lm( fatalityrate ~ beertax, data = .) %>%
summary()
# First difference between 1988 and 1982, per state. (This recomputes
# df_beer_accid_diff exactly as earlier in the script.) A tax increase is
# associated with a *decrease* in the fatality rate, as expected.
df_beer_accid_diff <-
df_beer_accid %>%
filter( year %in% c(1982, 1988) ) %>%
select( state, year, fatalityrate, beertax ) %>%
group_by(state) %>%
mutate( d.fatalityrate = fatalityrate- lag(fatalityrate),
d.beertax = beertax - lag(beertax)
) %>%
filter( year == 1988)
# Regression through the origin (no intercept: `~ 0 + d.beertax`).
df_beer_accid_diff %>% lm( d.fatalityrate ~ 0 + d.beertax , data = . ) %>% summary( )
# The result: about -0.87 per 10000 people.
# Graphical presentation
df_beer_accid_diff %>% ggplot( aes(y = d.fatalityrate, x = d.beertax) ) + geom_point() +
geom_smooth( method = "lm", se = F)
# The between Estimator ---------------------------------------------------
# Between estimator: regress state means on state means. Consistent only if
# the RE assumption holds: E[a|x] = 0 => cov(a, x) = 0.
model_between01 <- df_beer_accid %>%
  select( state, year, fatalityrate, beertax) %>%
  group_by( state) %>%
  mutate( m.fatalityrate = mean( fatalityrate),
          m.beertax = mean(beertax)
  ) %>%
  filter( year == 1988) %>%    # means are constant within state: keep one row each
  lm( m.fatalityrate ~ m.beertax, data = .)
# Overall R-sq
model_between01 %>% summary()
# Same estimator via plm.
model_between02 <- plm::plm( fatalityrate ~beertax, data = df_beer_accid, model = "between" )
model_between02 %>% summary()
# Error-components decomposition.
# BUG FIX: ercomp() was called unqualified, but plm is not attached at this
# point in the script (library(plm) only appears further down), so the call
# failed with "could not find function" -- qualify with plm::.
plm::ercomp( model_between02)
# NOTE(review): passing an unnamed data frame into tibble() relies on
# tibble's splicing/packing behaviour -- confirm m_fatal is reachable in
# the mutate() on your tibble version.
tibble( predicted = fitted(model_between01), df_beer_accid %>% group_by(state) %>% summarise( m_fatal = mean(fatalityrate))) %>%
  mutate( a = (predicted - m_fatal)^2/48 ) %>% summarise( a = sum(a)^2)
# Between variation R2: in Within
summary(model_between01)$r.squared %>% format( digits = 4)
# Within-state variance
plm::plm( fatalityrate ~beertax, data = df_beer_accid, model = "within" ) %>% summary()
# The fixed effects model -------------------------------------------------
# Within (fixed-effects) estimator computed by hand: demean y and x within
# each state, then regress the demeaned series without an intercept
# (`~ 0 + xhat`; the intercept is removed by the demeaning).
fe_model <- df_beer_accid %>%
select( state, year, fatalityrate, beertax) %>%
group_by(state) %>%
mutate( m.fatalityrate = mean(fatalityrate),
m.beertax = mean(beertax)
) %>%
ungroup( ) %>%
mutate( yhat = fatalityrate - m.fatalityrate,
xhat = beertax - m.beertax
) %>%
lm( yhat ~0 +xhat, data = .)
summary(fe_model)
# The lm() above uses the wrong degrees of freedom (it doesn't know about
# the N absorbed state means); get correct inference from plm instead.
lmtest::coeftest( fe_model, vcov = sandwich::vcovHC, type = "HC1")
lmtest::coeftest( plm::plm(fatalityrate ~beertax, model = "within",data = df_beer_accid), vcov = sandwich::vcovHC, type = "HC1")
# Sanity check: residuals are uncorrelated with the regressor.
tibble(a = residuals(fe_model), x = df_beer_accid$beertax ) %>%
summarise( cor = cor(a,x) )
# Least Squares Dummy Variables (LSDV): the within estimator expressed as
# OLS with a full set of state dummies; the beertax slope is identical.
df_beer_accid %>%
  lm( fatalityrate ~ beertax + factor(state), data = .) %>%
  summary() %>%
  broom::tidy() %>%
  filter( ! str_detect(term, "fact") )   # hide the state-dummy rows
# Same slope from plm's within (fixed-effects) estimator.
model_within <- plm::plm( data = df_beer_accid, formula = fatalityrate ~ beertax ,model = "within" )
summary(model_within )
#
model_fe <- df_beer_accid %>% lm( fatalityrate ~ beertax + factor(state), data = .)
summary(model_fe)
#
library(plm)
# Random-effects model.
# FIX: spell the argument out as `data =` (was `dat =`, which only worked
# through partial argument matching).
model_random <- plm( data = df_beer_accid, formula = fatalityrate ~ beertax, model = "random")
model_random %>% summary( ) #
# Hausman test ------------------------------------------------------------
# FE (always consistent) vs RE (efficient if the RE assumption holds).
# BUG FIX: the original referenced `model_within_plm`, which is never
# defined anywhere in this script; the plm within model is `model_within`.
plm::phtest( model_within, model_random )
# Cluster robust standard errors ------------------------------------------
rob_se <- list( sqrt(diag(sandwich::vcovHC( model_within, type = "HC2"))),
                sqrt(diag(sandwich::vcovHC( model_random, type = "HC2")))
)
stargazer::stargazer( model_fe,
                      model_random,
                      digits = 3,
                      se = rob_se,
                      type = "text",
                      keep = c("beertax")
)
# Pooled OLS for comparison.
m_ols <- df_beer_accid %>%
  lm( fatalityrate ~ beertax, data = .)
N <- unique(df_beer_accid$state) %>% length()    # number of states
time <- unique(df_beer_accid$year) %>% length()  # number of years
# Residual standard error computed by hand: sqrt(SSR / (N*T - 2)).
tibble( yhat = predict(m_ols), y = df_beer_accid$fatalityrate ) %>%
  mutate( e_2 = (y-yhat)^2 ) %>%
  summarise( sigma_a_simgan_e = (sum(e_2)/(N*time-2))^.5 )
0.03605+0.26604
model_re <- df_beer_accid %>% plm::plm( data = ., formula = fatalityrate ~ beertax , model = "random")
summary(model_re)
| /PanelData/2021-04-14 panel_data.R | no_license | eal024/econ | R | false | false | 8,460 | r |
# Panel Data
library(tidyverse)
# repeated observation for the same units
# - Robust for certain types of omitted variables bias
# - Learn about dynamics
## Topics random sampling (in the cross-section)
## balanced panel (Ti = T)
## large N, small T
# Example: fatality rate and beertax --------------------------------------
df_beer_accid <- haven::read_dta("data/beertax.dta")
# Fatalities per 1000 population
df_beer_accid %>%
filter( state < 10) %>%
ggplot( aes( y = fatalityrate, x = (year) ) ) +
geom_line( ) +
facet_wrap(~state, scales = "free_y")
# Will increase beer tax reduce fatality rate from car accidents?
df_beer_accid %>% lm( fatalityrate ~beertax, data = .) %>% summary()
df_beer_accid %>% filter(year == 1982) %>% lm( fatalityrate ~beertax, data = .) %>% summary()
df_beer_accid %>% filter(year == 1988) %>% lm( fatalityrate ~beertax, data = .) %>% summary()
# Beertax seems to be correlated with fatalities. Seems to be wrong.
# Contrary of what we expected. Beertax should decrease deaths
# This may be explained by OVB
# Use of Multivar OLS -> can not account for unobservables
# First difference
# First diff: Difference between year 1988 and 1982
# The difference for each state, between 1988 and 1982
# Change in the taxrate (+), decrease the fatality-rate. As expeceded.
df_beer_accid_diff <-
df_beer_accid %>%
filter( year %in% c(1982, 1988) ) %>%
select( state, year, fatalityrate, beertax ) %>%
group_by(state) %>%
mutate( d.fatalityrate = fatalityrate- lag(fatalityrate),
d.beertax = beertax - lag(beertax)
) %>%
filter( year == 1988)
# Increase beertax with 1 decrease rate (per 10000) by 1.04 unit
fatal_diff_mod <- df_beer_accid_diff %>% lm( d.fatalityrate ~d.beertax, data = .)
summary(fatal_diff_mod)
# This is a rater big impact: mean is 2 per 10000
mean(df_beer_accid$fatalityrate)
# Coefftest: d.beertax robust st. significant-
lmtest::coeftest(fatal_diff_mod, vcov = sandwich::vcovHC, type = "HC1")
## The within model:
# Between and within variation --------------------------------------------
# Data per state
df_beer_accid %>%
select(state, year, fatalityrate, beertax)
# Higher tax-rate, higher beertax: positive correlation?
df_beer_accid %>%
select(state, year, frate = fatalityrate, beertax) %>%
filter( year %in% c(1982, 1988)) %>%
ggplot( aes(y = frate, x = beertax ) ) +
geom_point( ) +
geom_smooth( method = "lm" , se = F, color = "blue") +
facet_wrap( ~year, scales ="free_y")
df <-
df_beer_accid %>%
select(state, year, frate = fatalityrate, beertax) %>%
group_by( state) %>%
mutate( mean_beertax = mean(beertax)) %>%
ungroup() %>%
mutate( cat = case_when( mean_beertax > 0.75 ~ "Large",
between(mean_beertax, 0.5, 0.75) ~ "M",
T~ "small"
) )
df %>%
ggplot( aes( x = beertax, y = frate )) + geom_point() + geom_smooth( method = "lm", se = F) +
geom_smooth( data = df %>% filter( cat == "Large"), aes(y = frate, x = beertax), method = "lm", se = F, inherit.aes = F ) +
geom_smooth( data = df %>% filter( cat == "small"), aes(y = frate, x = beertax), method = "lm", se = F, inherit.aes = F , color = "red")
# Fixed effects model ----------------------------------------------------
# 1) First difference
# The cost from 1.st diff
df_sim <- tibble( id = rep(seq( from = 1, to = 1000), each =2) , t = rep( x = c(1,2), times = 1000) ) %>%
mutate(
x = runif(n = 2000),
x = ifelse( t == 1, 0.8*t + rnorm(n =2000, mean = 0, sd = 1), x),
y = 2*x + rnorm(n = 2000, 0 ,1)
) %>%
group_by( id) %>%
mutate( d.y = y - lag(y),
d.x = x - lag(x)
)
# Cost in form of decreased t
df_sim %>% lm( y ~ x, data = .) %>% summary()
df_sim %>% na.omit() %>% lm( d.y ~ d.x , data = .) %>% summary()
## example 2: Accidents and Beers
# OLS: Positive relation between beertax and fatality-rate. We expect the relation to be neg.
df_beer_accid %>%
filter( year %in% c(1982, 1988) ) %>%
lm( fatalityrate ~ beertax, data = .) %>%
summary()
# First diff: Difference between year 1988 and 1982
# The difference for each state, between 1988 and 1982
# Change in the taxrate (+), decrease the fatality-rate. As expeceded.
df_beer_accid_diff <-
df_beer_accid %>%
filter( year %in% c(1982, 1988) ) %>%
select( state, year, fatalityrate, beertax ) %>%
group_by(state) %>%
mutate( d.fatalityrate = fatalityrate- lag(fatalityrate),
d.beertax = beertax - lag(beertax)
) %>%
filter( year == 1988)
df_beer_accid_diff %>% lm( d.fatalityrate ~ 0 + d.beertax , data = . ) %>% summary( )
# The result -0.87 per 10000 people.
# Graphical presentation
df_beer_accid_diff %>% ggplot( aes(y = d.fatalityrate, x = d.beertax) ) + geom_point() +
geom_smooth( method = "lm", se = F)
# The between Estimator ---------------------------------------------------
# Between estimator: regress state means on state means. Consistent only if
# the RE assumption holds: E[a|x] = 0 => cov(a, x) = 0.
model_between01 <- df_beer_accid %>%
  select( state, year, fatalityrate, beertax) %>%
  group_by( state) %>%
  mutate( m.fatalityrate = mean( fatalityrate),
          m.beertax = mean(beertax)
  ) %>%
  filter( year == 1988) %>%    # means are constant within state: keep one row each
  lm( m.fatalityrate ~ m.beertax, data = .)
# Overall R-sq
model_between01 %>% summary()
# Same estimator via plm.
model_between02 <- plm::plm( fatalityrate ~beertax, data = df_beer_accid, model = "between" )
model_between02 %>% summary()
# Error-components decomposition.
# BUG FIX: ercomp() was called unqualified, but plm is not attached at this
# point in the script (library(plm) only appears further down), so the call
# failed with "could not find function" -- qualify with plm::.
plm::ercomp( model_between02)
# NOTE(review): passing an unnamed data frame into tibble() relies on
# tibble's splicing/packing behaviour -- confirm m_fatal is reachable in
# the mutate() on your tibble version.
tibble( predicted = fitted(model_between01), df_beer_accid %>% group_by(state) %>% summarise( m_fatal = mean(fatalityrate))) %>%
  mutate( a = (predicted - m_fatal)^2/48 ) %>% summarise( a = sum(a)^2)
# Between variation R2: in Within
summary(model_between01)$r.squared %>% format( digits = 4)
# Within-state variance
plm::plm( fatalityrate ~beertax, data = df_beer_accid, model = "within" ) %>% summary()
# The fixed effects model -------------------------------------------------
#
fe_model <- df_beer_accid %>%
select( state, year, fatalityrate, beertax) %>%
group_by(state) %>%
mutate( m.fatalityrate = mean(fatalityrate),
m.beertax = mean(beertax)
) %>%
ungroup( ) %>%
mutate( yhat = fatalityrate - m.fatalityrate,
xhat = beertax - m.beertax
) %>%
lm( yhat ~0 +xhat, data = .)
summary(fe_model)
# Get correct t-value from the plm-function.
lmtest::coeftest( fe_model, vcov = sandwich::vcovHC, type = "HC1")
lmtest::coeftest( plm::plm(fatalityrate ~beertax, model = "within",data = df_beer_accid), vcov = sandwich::vcovHC, type = "HC1")
tibble(a = residuals(fe_model), x = df_beer_accid$beertax ) %>%
summarise( cor = cor(a,x) )
# Least Squares Dummy Variables (LSDV): the within estimator expressed as
# OLS with a full set of state dummies; the beertax slope is identical.
df_beer_accid %>%
  lm( fatalityrate ~ beertax + factor(state), data = .) %>%
  summary() %>%
  broom::tidy() %>%
  filter( ! str_detect(term, "fact") )   # hide the state-dummy rows
# Same slope from plm's within (fixed-effects) estimator.
model_within <- plm::plm( data = df_beer_accid, formula = fatalityrate ~ beertax ,model = "within" )
summary(model_within )
#
model_fe <- df_beer_accid %>% lm( fatalityrate ~ beertax + factor(state), data = .)
summary(model_fe)
#
library(plm)
# Random-effects model.
# FIX: spell the argument out as `data =` (was `dat =`, which only worked
# through partial argument matching).
model_random <- plm( data = df_beer_accid, formula = fatalityrate ~ beertax, model = "random")
model_random %>% summary( ) #
# Hausman test ------------------------------------------------------------
# FE (always consistent) vs RE (efficient if the RE assumption holds).
# BUG FIX: the original referenced `model_within_plm`, which is never
# defined anywhere in this script; the plm within model is `model_within`.
plm::phtest( model_within, model_random )
# Cluster robust standard errors ------------------------------------------
rob_se <- list( sqrt(diag(sandwich::vcovHC( model_within, type = "HC2"))),
                sqrt(diag(sandwich::vcovHC( model_random, type = "HC2")))
)
stargazer::stargazer( model_fe,
                      model_random,
                      digits = 3,
                      se = rob_se,
                      type = "text",
                      keep = c("beertax")
)
# Pooled OLS for comparison.
m_ols <- df_beer_accid %>%
  lm( fatalityrate ~ beertax, data = .)
N <- unique(df_beer_accid$state) %>% length()    # number of states
time <- unique(df_beer_accid$year) %>% length()  # number of years
# Residual standard error computed by hand: sqrt(SSR / (N*T - 2)).
tibble( yhat = predict(m_ols), y = df_beer_accid$fatalityrate ) %>%
  mutate( e_2 = (y-yhat)^2 ) %>%
  summarise( sigma_a_simgan_e = (sum(e_2)/(N*time-2))^.5 )
0.03605+0.26604
model_re <- df_beer_accid %>% plm::plm( data = ., formula = fatalityrate ~ beertax , model = "random")
summary(model_re)
|
## iplotMScanone
## Karl W Broman
#' Interactive LOD curve
#'
#' Creates an interactive graph of a set of single-QTL genome scans, as
#' calculated by \code{\link[qtl]{scanone}}. If \code{cross} or
#' \code{effects} are provided, LOD curves will be linked to a panel
#' with estimated QTL effects.
#'
#' @param scanoneOutput Object of class \code{"scanone"}, as output
#' from \code{\link[qtl]{scanone}}.
#' @param cross (Optional) Object of class \code{"cross"}, see
#' \code{\link[qtl]{read.cross}}.
#' @param lodcolumn Numeric value indicating LOD score column to plot.
#' @param pheno.col (Optional) Phenotype column in cross object.
#' @param effects (Optional) List of estimated QTL effects, one element per
#'   position (row) of \code{scanoneOutput}, as produced by
#'   \code{estQTLeffects}. If omitted and \code{cross} is given, effects
#'   are estimated internally.
#' @param chr (Optional) Optional vector indicating the chromosomes
#' for which LOD scores should be calculated. This should be a vector
#' of character strings referring to chromosomes by name; numeric
#' values are converted to strings. Refer to chromosomes with a
#' preceding - to have all chromosomes but those considered. A logical
#' (TRUE/FALSE) vector may also be used.
#' @param file Optional character vector with file to contain the
#' output
#' @param onefile If TRUE, have output file contain all necessary
#' javascript/css code
#' @param openfile If TRUE, open the plot in the default web browser
#' @param title Character string with title for plot
#' @param caption Character vector with text for a caption (to be
#' combined to one string with \code{\link[base]{paste}}, with
#' \code{collapse=''})
#' @param chartOpts A list of options for configuring the chart (see
#' the coffeescript code). Each element must be named using the
#' corresponding option.
#' @param ... Additional arguments passed to the
#' \code{\link[RJSONIO]{toJSON}} function
#'
#' @return Character string with the name of the file created.
#'
#' @details If \code{cross} is provided, Haley-Knott regression is
#' used to estimate QTL effects at each pseudomarker.
#'
#' @importFrom utils browseURL
#'
#' @keywords hplot
#' @seealso \code{\link{iplotScanone}}
#'
#' @examples
#' data(grav)
#' grav <- calc.genoprob(grav, step=1)
#' grav <- reduce2grid(grav)
#' out <- scanone(grav, phe=seq(1, nphe(grav), by=5), method="hk")
#' iplotMScanone(out, title="iplotMScanone example, no effects")
#'
#' eff <- estQTLeffects(grav, phe=seq(1, nphe(grav), by=5), what="effects")
#' iplotMScanone(out, effects=eff, title="iplotMScanone example, with effects",
#' chartOpts=list(eff_ylab="QTL effect"))
#'
#' @export
iplotMScanone <-
function(scanoneOutput, cross, lodcolumn, pheno.col,
         effects, chr,
         file, onefile=FALSE, openfile=TRUE, title="", caption,
         chartOpts=NULL, ...)
{
    # Resolve the output file; refuse to overwrite an existing one.
    if(missing(file) || is.null(file))
        file <- tempfile(tmpdir=tempdir(), fileext='.html')
    else file <- path.expand(file)
    if(file.exists(file))
        stop('The file already exists; please remove it first: ', file)

    # FIX: inherits() is the robust S3 class test
    # (was: !any(class(scanoneOutput) == "scanone")).
    if(!inherits(scanoneOutput, "scanone"))
        stop('"scanoneOutput" should have class "scanone".')

    # Optionally restrict to selected chromosomes, keeping effects and
    # cross in sync with the subsetted scanone rows.
    if(!missing(chr) && !is.null(chr)) {
        rn <- rownames(scanoneOutput)
        scanoneOutput <- subset(scanoneOutput, chr=chr)
        if(!missing(effects) && !is.null(effects)) effects <- effects[match(rownames(scanoneOutput), rn)]
        if(!missing(cross) && !is.null(cross)) cross <- subset(cross, chr=chr)
    }

    if(missing(caption) || is.null(caption)) caption <- NULL

    # Default: use every LOD column (columns 3..ncol hold LOD scores;
    # columns 1-2 are chr and position).
    if(missing(lodcolumn) || is.null(lodcolumn)) lodcolumn <- 1:(ncol(scanoneOutput)-2)
    stopifnot(all(lodcolumn >= 1 & lodcolumn <= ncol(scanoneOutput)-2))
    scanoneOutput <- scanoneOutput[,c(1,2,lodcolumn+2),drop=FALSE]

    if(missing(pheno.col) || is.null(pheno.col)) pheno.col <- seq_along(lodcolumn)

    # Neither cross nor effects given: plot LOD curves only.
    if((missing(cross) || is.null(cross)) && (missing(effects) || is.null(effects)))
        return(iplotMScanone_noeff(scanoneOutput,
                                   file=file, onefile=onefile, openfile=openfile, title=title,
                                   caption=caption, chartOpts=chartOpts, ...))

    # Effects not supplied: estimate them from the cross object via
    # Haley-Knott regression (see estQTLeffects).
    if(missing(effects) || is.null(effects)) {
        stopifnot(length(pheno.col) == length(lodcolumn))
        # FIX: was class(cross)[2] == "cross", a fragile positional check;
        # inherits() works regardless of where "cross" sits in the class vector.
        stopifnot(inherits(cross, "cross"))
        crosstype <- class(cross)[1]
        handled_crosses <- c("bc", "bcsft", "dh", "riself", "risib", "f2", "haploid") # handled for add/dom effects
        what <- ifelse(crosstype %in% handled_crosses, "effects", "means")
        effects <- estQTLeffects(cross, pheno.col, what=what)
    }

    # One effects element per scanone row; one effect row per LOD column.
    stopifnot(length(effects) == nrow(scanoneOutput))
    stopifnot(all(vapply(effects, nrow, 1) == ncol(scanoneOutput)-2))

    # Attach the sign of the effect to the LOD scores for plotting.
    scanoneOutput <- calcSignedLOD(scanoneOutput, effects)

    iplotMScanone_eff(scanoneOutput, effects,
                      file=file, onefile=onefile, openfile=openfile, title=title,
                      caption=caption, chartOpts=chartOpts, ...)
}
# iplotMScanone_noeff: multiple LOD curves; no QTL effects
# Writes a self-contained HTML page: the scanone results encoded as JSON
# plus the d3 panels (LOD heat map on top, individual LOD curves below).
# The link_*/append_* helpers each append to `file` in order, so the call
# sequence here is load-bearing. Returns the file name invisibly and
# optionally opens it in the default browser.
iplotMScanone_noeff <-
function(scanoneOutput,
file, onefile=FALSE, openfile=TRUE,
title="", caption, chartOpts=NULL, ...)
{
# Serialize the scanone object for the javascript charts.
scanone_json <- scanone2json(scanoneOutput, ...)
# Page skeleton + javascript/css dependencies (inlined if onefile=TRUE).
write_html_top(file, title=title)
link_d3(file, onefile=onefile)
link_d3tip(file, onefile=onefile)
link_panelutil(file, onefile=onefile)
link_panel('lodheatmap', file, onefile=onefile)
link_panel('lodchart', file, onefile=onefile)
link_panel('curvechart', file, onefile=onefile)
link_chart('iplotMScanone_noeff', file, onefile=onefile)
append_html_middle(file, title, 'chart')
# Default caption describing the hover interaction.
if(missing(caption) || is.null(caption))
caption <- c('Hover over rows in the LOD image at top to see the individual curves below and, ',
'to the right, a plot of LOD score for each column at that genomic position.')
append_caption(caption, file)
# Embed the data and kick off the chart.
append_html_jscode(file, 'scanoneData = ', scanone_json, ';')
append_html_chartopts(file, chartOpts)
append_html_jscode(file, 'iplotMScanone_noeff(scanoneData, chartOpts);')
append_html_bottom(file)
if(openfile) browseURL(file)
invisible(file)
}
# iplotMScanone_eff: multiple LOD curves + QTL effects
# Like iplotMScanone_noeff, but also serializes the QTL effects and links
# them to the LOD curves (an extra effects panel on the right; colorbrewer
# is needed for its palette). The link_*/append_* helpers append to `file`
# in order, so the call sequence is load-bearing. Returns the file name
# invisibly; optionally opens it in the default browser.
iplotMScanone_eff <-
function(scanoneOutput, effects,
file, onefile=FALSE, openfile=TRUE,
title="", caption, chartOpts=NULL, ...)
{
# Serialize both inputs for the javascript charts.
scanone_json <- scanone2json(scanoneOutput, ...)
effects_json <- effects2json(effects, ...)
# Page skeleton + javascript/css dependencies (inlined if onefile=TRUE).
write_html_top(file, title=title)
link_d3(file, onefile=onefile)
link_d3tip(file, onefile=onefile)
link_colorbrewer(file, onefile=onefile)
link_panelutil(file, onefile=onefile)
link_panel('lodheatmap', file, onefile=onefile)
link_panel('lodchart', file, onefile=onefile)
link_panel('curvechart', file, onefile=onefile)
link_chart('iplotMScanone_eff', file, onefile=onefile)
append_html_middle(file, title, 'chart')
# Default caption describing the hover interaction.
if(missing(caption) || is.null(caption))
caption <- c('Hover over LOD heat map to view individual curves below and ',
'estimated QTL effects to the right.')
append_caption(caption, file)
# Embed the data and kick off the chart.
append_html_jscode(file, 'scanoneData = ', scanone_json, ';')
append_html_jscode(file, 'effectsData = ', effects_json, ';')
append_html_chartopts(file, chartOpts)
append_html_jscode(file, 'iplotMScanone_eff(scanoneData, effectsData, chartOpts);')
append_html_bottom(file)
if(openfile) browseURL(file)
invisible(file)
}
| /R/iplotMScanone.R | permissive | TAwosanya/qtlcharts | R | false | false | 7,264 | r | ## iplotMScanone
## Karl W Broman
#' Interactive LOD curve
#'
#' Creates an interactive graph of a set of single-QTL genome scans, as
#' calculated by \code{\link[qtl]{scanone}}. If \code{cross} or
#' \code{effects} are provide, LOD curves will be linked to a panel
#' with estimated QTL effects.
#'
#' @param scanoneOutput Object of class \code{"scanone"}, as output
#' from \code{\link[qtl]{scanone}}.
#' @param cross (Optional) Object of class \code{"cross"}, see
#' \code{\link[qtl]{read.cross}}.
#' @param lodcolumn Numeric value indicating LOD score column to plot.
#' @param pheno.col (Optional) Phenotype column in cross object.
#' @param effects (Optional) List of estimated QTL effects, one matrix per
#' position (as produced by \code{estQTLeffects}).
#' @param chr (Optional) Vector indicating the chromosomes
#' for which LOD scores should be calculated. This should be a vector
#' of character strings referring to chromosomes by name; numeric
#' values are converted to strings. Refer to chromosomes with a
#' preceding - to have all chromosomes but those considered. A logical
#' (TRUE/FALSE) vector may also be used.
#' @param file Optional character vector with file to contain the
#' output
#' @param onefile If TRUE, have output file contain all necessary
#' javascript/css code
#' @param openfile If TRUE, open the plot in the default web browser
#' @param title Character string with title for plot
#' @param caption Character vector with text for a caption (to be
#' combined to one string with \code{\link[base]{paste}}, with
#' \code{collapse=''})
#' @param chartOpts A list of options for configuring the chart (see
#' the coffeescript code). Each element must be named using the
#' corresponding option.
#' @param ... Additional arguments passed to the
#' \code{\link[RJSONIO]{toJSON}} function
#'
#' @return Character string with the name of the file created.
#'
#' @details If \code{cross} is provided, Haley-Knott regression is
#' used to estimate QTL effects at each pseudomarker.
#'
#' @importFrom utils browseURL
#'
#' @keywords hplot
#' @seealso \code{\link{iplotScanone}}
#'
#' @examples
#' data(grav)
#' grav <- calc.genoprob(grav, step=1)
#' grav <- reduce2grid(grav)
#' out <- scanone(grav, phe=seq(1, nphe(grav), by=5), method="hk")
#' iplotMScanone(out, title="iplotMScanone example, no effects")
#'
#' eff <- estQTLeffects(grav, phe=seq(1, nphe(grav), by=5), what="effects")
#' iplotMScanone(out, effects=eff, title="iplotMScanone example, with effects",
#' chartOpts=list(eff_ylab="QTL effect"))
#'
#' @export
iplotMScanone <-
function(scanoneOutput, cross, lodcolumn, pheno.col,
effects, chr,
file, onefile=FALSE, openfile=TRUE, title="", caption,
chartOpts=NULL, ...)
{
# Default to a temporary HTML file; refuse to overwrite an existing file.
if(missing(file) || is.null(file))
file <- tempfile(tmpdir=tempdir(), fileext='.html')
else file <- path.expand(file)
if(file.exists(file))
stop('The file already exists; please remove it first: ', file)
if(!any(class(scanoneOutput) == "scanone"))
stop('"scanoneOutput" should have class "scanone".')
# Optional chromosome subset; keep `effects` and `cross` aligned with the
# rows retained in scanoneOutput (match by original rownames).
if(!missing(chr) && !is.null(chr)) {
rn <- rownames(scanoneOutput)
scanoneOutput <- subset(scanoneOutput, chr=chr)
if(!missing(effects) && !is.null(effects)) effects <- effects[match(rownames(scanoneOutput), rn)]
if(!missing(cross) && !is.null(cross)) cross <- subset(cross, chr=chr)
}
if(missing(caption) || is.null(caption)) caption <- NULL
# Default to all LOD columns; the first two columns of a scanone object
# are chr and pos, so LOD columns start at column 3.
if(missing(lodcolumn) || is.null(lodcolumn)) lodcolumn <- 1:(ncol(scanoneOutput)-2)
stopifnot(all(lodcolumn >= 1 & lodcolumn <= ncol(scanoneOutput)-2))
scanoneOutput <- scanoneOutput[,c(1,2,lodcolumn+2),drop=FALSE]
if(missing(pheno.col) || is.null(pheno.col)) pheno.col <- seq(along=lodcolumn)
# Neither cross nor effects: draw the simpler chart with no effect panel.
if((missing(cross) || is.null(cross)) && (missing(effects) || is.null(effects)))
return(iplotMScanone_noeff(scanoneOutput,
file=file, onefile=onefile, openfile=openfile, title=title,
caption=caption, chartOpts=chartOpts, ...))
# Effects not supplied: estimate them from the cross. Cross types in
# handled_crosses get additive/dominance "effects"; all others fall back
# to genotype "means".
if(missing(effects) || is.null(effects)) {
stopifnot(length(pheno.col) == length(lodcolumn))
stopifnot(class(cross)[2] == "cross")
crosstype <- class(cross)[1]
handled_crosses <- c("bc", "bcsft", "dh", "riself", "risib", "f2", "haploid") # handled for add/dom effects
what <- ifelse(crosstype %in% handled_crosses, "effects", "means")
effects <- estQTLeffects(cross, pheno.col, what=what)
}
# Sanity checks: one effects matrix per position, one row per LOD column.
stopifnot(length(effects) == nrow(scanoneOutput))
stopifnot(all(vapply(effects, nrow, 1) == ncol(scanoneOutput)-2))
# Presumably attaches signs to the LOD scores from the effect directions
# (see calcSignedLOD) -- confirm in that function's definition.
scanoneOutput <- calcSignedLOD(scanoneOutput, effects)
iplotMScanone_eff(scanoneOutput, effects,
file=file, onefile=onefile, openfile=openfile, title=title,
caption=caption, chartOpts=chartOpts, ...)
}
# iplotMScanone_noeff: multiple LOD curves; no QTL effects
#
# Internal helper behind iplotMScanone() when neither `cross` nor `effects`
# is supplied: writes an HTML page with a LOD heat map linked to individual
# LOD curves (no effect panel). Returns the output file path, invisibly.
iplotMScanone_noeff <-
function(scanoneOutput,
file, onefile=FALSE, openfile=TRUE,
title="", caption, chartOpts=NULL, ...)
{
# Serialize the scan results to JSON for the D3-based chart.
scanone_json <- scanone2json(scanoneOutput, ...)
# Assemble the HTML document: header, JS/CSS links, then chart panels.
write_html_top(file, title=title)
link_d3(file, onefile=onefile)
link_d3tip(file, onefile=onefile)
link_panelutil(file, onefile=onefile)
link_panel('lodheatmap', file, onefile=onefile)
link_panel('lodchart', file, onefile=onefile)
link_panel('curvechart', file, onefile=onefile)
link_chart('iplotMScanone_noeff', file, onefile=onefile)
append_html_middle(file, title, 'chart')
# Default caption when the caller supplied none.
if(missing(caption) || is.null(caption))
caption <- c('Hover over rows in the LOD image at top to see the individual curves below and, ',
'to the right, a plot of LOD score for each column at that genomic position.')
append_caption(caption, file)
# Embed the data, chart options, and the JS call that draws the chart.
append_html_jscode(file, 'scanoneData = ', scanone_json, ';')
append_html_chartopts(file, chartOpts)
append_html_jscode(file, 'iplotMScanone_noeff(scanoneData, chartOpts);')
append_html_bottom(file)
if(openfile) browseURL(file)
invisible(file)
}
# iplotMScanone_eff: multiple LOD curves + QTL effects
#
# Internal helper behind iplotMScanone() when QTL effects are available:
# writes a self-contained HTML page with a LOD heat map linked to
# per-phenotype LOD curves and the estimated QTL effects.
#
# Args mirror iplotMScanone(); `file` must already be a valid, non-existing
# path (validated by the caller). Returns the file path, invisibly.
iplotMScanone_eff <-
function(scanoneOutput, effects,
file, onefile=FALSE, openfile=TRUE,
title="", caption, chartOpts=NULL, ...)
{
# Serialize inputs to JSON for consumption by the D3-based chart.
scanone_json <- scanone2json(scanoneOutput, ...)
effects_json <- effects2json(effects, ...)
# Assemble the HTML document: header, JS/CSS links, then chart panels.
write_html_top(file, title=title)
link_d3(file, onefile=onefile)
link_d3tip(file, onefile=onefile)
link_colorbrewer(file, onefile=onefile)
link_panelutil(file, onefile=onefile)
link_panel('lodheatmap', file, onefile=onefile)
link_panel('lodchart', file, onefile=onefile)
link_panel('curvechart', file, onefile=onefile)
link_chart('iplotMScanone_eff', file, onefile=onefile)
append_html_middle(file, title, 'chart')
# Default caption when the caller supplied none.
if(missing(caption) || is.null(caption))
caption <- c('Hover over LOD heat map to view individual curves below and ',
'estimated QTL effects to the right.')
append_caption(caption, file)
# Embed the data, chart options, and the JS call that draws the chart.
append_html_jscode(file, 'scanoneData = ', scanone_json, ';')
append_html_jscode(file, 'effectsData = ', effects_json, ';')
append_html_chartopts(file, chartOpts)
append_html_jscode(file, 'iplotMScanone_eff(scanoneData, effectsData, chartOpts);')
append_html_bottom(file)
if(openfile) browseURL(file)
invisible(file)
}
|
###Loter acc
# Script: assess accuracy of Loter local-ancestry calls against simulated
# truth. Reads Loter output and the simulation "result" file, decomposes
# each haplotype's ancestry calls into per-ancestry indicators, collapses
# them to diploid dosages, and writes per-individual correlations.
library(data.table)
library(argparse)
library(dplyr)
# Infix string-concatenation helper (paste with no separator).
"%&%" = function(a,b) paste(a,b,sep="")
# Command-line interface.
# NOTE(review): the help strings for --poslist and --ref.map look
# copy-pasted/swapped (--poslist is read below as a list of SNP positions);
# confirm against the pipeline documentation.
parser <- ArgumentParser()
parser$add_argument("--loter", help="Loter results file")
parser$add_argument("--poslist", help="haplotype genome file")
parser$add_argument("--ref.map", help="admixed sample list")
parser$add_argument("--classes", help="classes file made for lotermix input")
parser$add_argument("--nanc", help="number of ancestries estimated")
parser$add_argument("--result", help="results file output by adsim")
parser$add_argument("--out", help="file you would like to output as")
args <- parser$parse_args()
print("processing snp ids")
# SNP positions for the Loter output; chromosome is hard-coded to 22.
snps<-fread(args$poslist, header = F)
snps$chm<-22
colnames(snps)<-c("pos","chm")
# Loter writes haplotypes as rows; transpose so rows are SNPs.
loterout<-fread(args$loter, header = F) %>% t() %>% as.data.frame()
loterout<-as.data.frame(cbind.data.frame(snps,loterout))
true_ancestry<-fread(args$result, header = T)
# str(true_ancestry)
# Keep only (chm, pos) sites present in both truth and Loter output.
intersection<-select(true_ancestry,chm,pos) %>% inner_join(snps,by=c("chm","pos"))
true_ancestry_subset<-inner_join(true_ancestry,intersection,by=c("chm","pos"))
loterout<-inner_join(loterout,intersection,by=c("chm","pos"))
dim(true_ancestry_subset)
dim(loterout)
#separating true ancesty into ancestral groups
snp_count_true<-nrow(true_ancestry_subset)
nanc<-as.numeric(args$nanc)
# First two columns are chm/pos; the rest are haplotypes (2 per individual).
n_haps<-(ncol(true_ancestry_subset) - 2)
nindv<-n_haps/2
# One block of `nanc` indicator columns per haplotype, filled in below.
true_ancestry_decomposed_haploid<-matrix(NA,nrow=snp_count_true,ncol=nanc*n_haps)
loter_ancestry_decomposed_haploid<-matrix(NA,nrow=snp_count_true,ncol=nanc*n_haps)
# Decompose one haplotype's ancestry calls into per-ancestry 0/1 indicator
# columns, for the simulated-truth ("result") coding where ancestries are
# labelled 1..nanc.
#
# Args:
#   haplotype: one-column data frame of ancestry codes (one per SNP), as
#              produced by select(true_ancestry_subset, c(j)).
#   nanc:      number of ancestries.
#
# Returns: matrix with one row per SNP and `nanc` columns; column a is 1
#   where the SNP is assigned ancestry a, otherwise 0.
#
# Change vs. original: the hard-coded anc1/anc2/anc3 branches (with a
# redundant duplicated return) are replaced by a loop, so any number of
# ancestries is supported; results for nanc == 2 or 3 are unchanged.
decompose_hap_to_ancestries_res<-function(haplotype, nanc){
  decomposed_anc <- matrix(NA, nrow = NROW(haplotype), ncol = nanc)
  for (a in seq_len(nanc)) {
    decomposed_anc[, a] <- ifelse(haplotype == a, 1, 0)
  }
  decomposed_anc
}
# Decompose one haplotype's ancestry calls into per-ancestry 0/1 indicator
# columns, for Loter's coding where ancestries are labelled 0..nanc-1.
#
# Args:
#   haplotype: one-column data frame of Loter ancestry codes (one per SNP).
#   nanc:      number of ancestries.
#
# Returns: matrix with one row per SNP and `nanc` columns; column a is 1
#   where the SNP is assigned ancestry a-1 (Loter code), otherwise 0.
#
# Change vs. original: the hard-coded anc1/anc2/anc3 branches (with a
# redundant duplicated return) are replaced by a loop, so any number of
# ancestries is supported; results for nanc == 2 or 3 are unchanged.
decompose_hap_to_ancestries_loter<-function(haplotype, nanc){
  decomposed_anc <- matrix(NA, nrow = NROW(haplotype), ncol = nanc)
  for (a in seq_len(nanc)) {
    # Loter codes are 0-based, hence the a - 1 offset.
    decomposed_anc[, a] <- ifelse(haplotype == a - 1, 1, 0)
  }
  decomposed_anc
}
print("separating haplotypes into composite ancestries")
# dim(loterout)
# dim(true_ancestry_subset)
# For each haplotype i: column j = i + 2 skips the pos/chm columns of the
# joined tables; columns [k-nanc+1 .. k] of the decomposed matrices hold
# that haplotype's per-ancestry 0/1 indicators.
for (i in c(1:n_haps)){
j<-i+2
k<-i*nanc
if(nanc==3){
storage_indices<-c(k-2,k-1,k)
} else {
storage_indices<-c(k-1,k)
}
# Truth codes ancestries 1..nanc; Loter codes them 0..nanc-1, hence the
# two separate decompose helpers.
true_ancestry_decomposed_haploid[,storage_indices]<-decompose_hap_to_ancestries_res(select(true_ancestry_subset, c(j)),nanc)
# print(dim( loter_ancestry_decomposed_haploid))
# print(dim(select(loterout, c(j))))
loter_ancestry_decomposed_haploid[,storage_indices]<-decompose_hap_to_ancestries_loter(select(loterout, c(j)),nanc)
}
# Diploid dosage matrices: one block of `nanc` columns per individual.
true_ancestry_decomposed_diploid<-matrix(NA,nrow=snp_count_true,ncol=nanc*nindv)
loter_ancestry_decomposed_diploid<-matrix(NA,nrow=snp_count_true,ncol=nanc*nindv)
print("converting haploid to diploid")
# Sum the two haplotype indicator blocks of each individual, giving a
# per-SNP ancestry dosage in {0, 1, 2} for each ancestry. hap1/hap2
# indices address consecutive nanc-wide blocks in the haploid matrices.
for (i in c(1:nindv)){
k<-i*nanc*2
j<-i*nanc
if(nanc==3){
hap1_indices<-c(k-5,k-4,k-3)
hap2_indices<-c(k-2,k-1,k)
storage_indices<-c(j-2,j-1,j)
} else {
hap1_indices<-c(k-3,k-2)
hap2_indices<-c(k-1,k)
storage_indices<-c(j-1,j)
}
hap1<-true_ancestry_decomposed_haploid[,hap1_indices]
hap2<-true_ancestry_decomposed_haploid[,hap2_indices]
dip<-(hap1 + hap2)
true_ancestry_decomposed_diploid[,storage_indices]<-dip
loterhap1<-loter_ancestry_decomposed_haploid[,hap1_indices]
loterhap2<-loter_ancestry_decomposed_haploid[,hap2_indices]
loterdip<-(loterhap1 + loterhap2)
loter_ancestry_decomposed_diploid[,storage_indices]<-loterdip
}
#ls hap_corr<-c(rep(NA,n_haps))
# Per-individual correlation of Loter vs. true diploid ancestry dosages.
dip_corr<-c(rep(NA,nindv))
print("correlating diploid")
for (i in c(1:nindv)){
j<-i*nanc
# `threshold` is computed but never used below.
threshold<-(1/nanc)
if(nanc==3){
storage_indices<-c(j-2,j-1,j)
flip<-c(2,1,3)
} else {
storage_indices<-c(j-1,j)
flip<-c(2,1)
}
loter_indiv_i<-loter_ancestry_decomposed_diploid[,storage_indices]
true_indiv_i<-true_ancestry_decomposed_diploid[,storage_indices]
# NOTE(review): cor.test() receives matrices here; they are effectively
# treated as flattened vectors, so this is one correlation across all
# ancestry columns at once -- confirm that is the intended metric.
corr<-cor.test(loter_indiv_i,true_indiv_i)
# str(corr)
# str(corr$estimate)
# For 2 ancestries, a negative or NA correlation suggests Loter's labels
# are swapped relative to the truth; retry with columns flipped.
# (No label-permutation recovery is attempted when nanc == 3.)
if ((((nanc == 2) & ((corr$estimate < 0) | is.na(corr$estimate))))){
loter_indiv_i<-loter_indiv_i[,flip]
#str(loter_indiv_i)
corr<-cor.test(loter_indiv_i,true_indiv_i)
}
dip_corr[i]<-corr$estimate
# j<-i*nanc
# if(nanc==3){
# storage_indices<-c(j-2,j-1,j)
# corr1<-cor.test(loter_ancestry_decomposed_diploid[,j-2],true_ancestry_decomposed_diploid[,j-2], method="pearson")
# corr2<-cor.test(loter_ancestry_decomposed_diploid[,j-1],true_ancestry_decomposed_diploid[,j-1], method="pearson")
# corr3<-cor.test(loter_ancestry_decomposed_diploid[,j],true_ancestry_decomposed_diploid[,j], method="pearson")
# # if (corr1$estimate < 0){
# # corr1<-cor.test(loter_ancestry_decomposed_diploid[,j-2],true_ancestry_decomposed_diploid[,j-2], method="pearson")
# # corr2<-cor.test(loter_ancestry_decomposed_diploid[,j-1],true_ancestry_decomposed_diploid[,j-1], method="pearson")
# # }
# dip_corr[storage_indices]<-c(corr1$estimate,corr2$estimate,corr3$estimate)
# } else {
# storage_indices<-c(j-1,j)
# corr1<-cor.test(loter_ancestry_decomposed_diploid[,j],true_ancestry_decomposed_diploid[,j], method="pearson")
# corr2<-cor.test(loter_ancestry_decomposed_diploid[,j-1],true_ancestry_decomposed_diploid[,j-1], method="pearson")
# # if (corr1$estimate < 0){
# # corr1<-cor.test(loter_ancestry_decomposed_diploid[,j-1],true_ancestry_decomposed_diploid[,j-1], method="pearson")
# # corr2<-cor.test(loter_ancestry_decomposed_diploid[,j],true_ancestry_decomposed_diploid[,j], method="pearson")
# # }
# dip_corr[storage_indices]<-c(corr1$estimate,corr2$estimate)
# }
}
dip_corr
# quit()
# Write the per-individual correlations as one tab-separated row.
fwrite(as.list(dip_corr),args$out,sep ="\t")
| /03estimate_accuracy/LOTER_accuracy.R | no_license | WheelerLab/LAI_benchmarking | R | false | false | 6,183 | r | ###Loter acc
# Script: assess accuracy of Loter local-ancestry calls against simulated
# truth. Reads Loter output and the simulation "result" file, decomposes
# each haplotype's ancestry calls into per-ancestry indicators, collapses
# them to diploid dosages, and writes per-individual correlations.
library(data.table)
library(argparse)
library(dplyr)
# Infix string-concatenation helper (paste with no separator).
"%&%" = function(a,b) paste(a,b,sep="")
# Command-line interface.
# NOTE(review): the help strings for --poslist and --ref.map look
# copy-pasted/swapped (--poslist is read below as a list of SNP positions);
# confirm against the pipeline documentation.
parser <- ArgumentParser()
parser$add_argument("--loter", help="Loter results file")
parser$add_argument("--poslist", help="haplotype genome file")
parser$add_argument("--ref.map", help="admixed sample list")
parser$add_argument("--classes", help="classes file made for lotermix input")
parser$add_argument("--nanc", help="number of ancestries estimated")
parser$add_argument("--result", help="results file output by adsim")
parser$add_argument("--out", help="file you would like to output as")
args <- parser$parse_args()
print("processing snp ids")
# SNP positions for the Loter output; chromosome is hard-coded to 22.
snps<-fread(args$poslist, header = F)
snps$chm<-22
colnames(snps)<-c("pos","chm")
# Loter writes haplotypes as rows; transpose so rows are SNPs.
loterout<-fread(args$loter, header = F) %>% t() %>% as.data.frame()
loterout<-as.data.frame(cbind.data.frame(snps,loterout))
true_ancestry<-fread(args$result, header = T)
# str(true_ancestry)
# Keep only (chm, pos) sites present in both truth and Loter output.
intersection<-select(true_ancestry,chm,pos) %>% inner_join(snps,by=c("chm","pos"))
true_ancestry_subset<-inner_join(true_ancestry,intersection,by=c("chm","pos"))
loterout<-inner_join(loterout,intersection,by=c("chm","pos"))
dim(true_ancestry_subset)
dim(loterout)
#separating true ancesty into ancestral groups
snp_count_true<-nrow(true_ancestry_subset)
nanc<-as.numeric(args$nanc)
# First two columns are chm/pos; the rest are haplotypes (2 per individual).
n_haps<-(ncol(true_ancestry_subset) - 2)
nindv<-n_haps/2
# One block of `nanc` indicator columns per haplotype, filled in below.
true_ancestry_decomposed_haploid<-matrix(NA,nrow=snp_count_true,ncol=nanc*n_haps)
loter_ancestry_decomposed_haploid<-matrix(NA,nrow=snp_count_true,ncol=nanc*n_haps)
# Decompose one haplotype's ancestry calls into per-ancestry 0/1 indicator
# columns, for the simulated-truth ("result") coding where ancestries are
# labelled 1..nanc.
#
# Args:
#   haplotype: one-column data frame of ancestry codes (one per SNP), as
#              produced by select(true_ancestry_subset, c(j)).
#   nanc:      number of ancestries.
#
# Returns: matrix with one row per SNP and `nanc` columns; column a is 1
#   where the SNP is assigned ancestry a, otherwise 0.
#
# Change vs. original: the hard-coded anc1/anc2/anc3 branches (with a
# redundant duplicated return) are replaced by a loop, so any number of
# ancestries is supported; results for nanc == 2 or 3 are unchanged.
decompose_hap_to_ancestries_res<-function(haplotype, nanc){
  decomposed_anc <- matrix(NA, nrow = NROW(haplotype), ncol = nanc)
  for (a in seq_len(nanc)) {
    decomposed_anc[, a] <- ifelse(haplotype == a, 1, 0)
  }
  decomposed_anc
}
# Decompose one haplotype's ancestry calls into per-ancestry 0/1 indicator
# columns, for Loter's coding where ancestries are labelled 0..nanc-1.
#
# Args:
#   haplotype: one-column data frame of Loter ancestry codes (one per SNP).
#   nanc:      number of ancestries.
#
# Returns: matrix with one row per SNP and `nanc` columns; column a is 1
#   where the SNP is assigned ancestry a-1 (Loter code), otherwise 0.
#
# Change vs. original: the hard-coded anc1/anc2/anc3 branches (with a
# redundant duplicated return) are replaced by a loop, so any number of
# ancestries is supported; results for nanc == 2 or 3 are unchanged.
decompose_hap_to_ancestries_loter<-function(haplotype, nanc){
  decomposed_anc <- matrix(NA, nrow = NROW(haplotype), ncol = nanc)
  for (a in seq_len(nanc)) {
    # Loter codes are 0-based, hence the a - 1 offset.
    decomposed_anc[, a] <- ifelse(haplotype == a - 1, 1, 0)
  }
  decomposed_anc
}
print("separating haplotypes into composite ancestries")
# dim(loterout)
# dim(true_ancestry_subset)
# For each haplotype i: column j = i + 2 skips the pos/chm columns of the
# joined tables; columns [k-nanc+1 .. k] of the decomposed matrices hold
# that haplotype's per-ancestry 0/1 indicators.
for (i in c(1:n_haps)){
j<-i+2
k<-i*nanc
if(nanc==3){
storage_indices<-c(k-2,k-1,k)
} else {
storage_indices<-c(k-1,k)
}
# Truth codes ancestries 1..nanc; Loter codes them 0..nanc-1, hence the
# two separate decompose helpers.
true_ancestry_decomposed_haploid[,storage_indices]<-decompose_hap_to_ancestries_res(select(true_ancestry_subset, c(j)),nanc)
# print(dim( loter_ancestry_decomposed_haploid))
# print(dim(select(loterout, c(j))))
loter_ancestry_decomposed_haploid[,storage_indices]<-decompose_hap_to_ancestries_loter(select(loterout, c(j)),nanc)
}
# Diploid dosage matrices: one block of `nanc` columns per individual.
true_ancestry_decomposed_diploid<-matrix(NA,nrow=snp_count_true,ncol=nanc*nindv)
loter_ancestry_decomposed_diploid<-matrix(NA,nrow=snp_count_true,ncol=nanc*nindv)
print("converting haploid to diploid")
# Sum the two haplotype indicator blocks of each individual, giving a
# per-SNP ancestry dosage in {0, 1, 2} for each ancestry. hap1/hap2
# indices address consecutive nanc-wide blocks in the haploid matrices.
for (i in c(1:nindv)){
k<-i*nanc*2
j<-i*nanc
if(nanc==3){
hap1_indices<-c(k-5,k-4,k-3)
hap2_indices<-c(k-2,k-1,k)
storage_indices<-c(j-2,j-1,j)
} else {
hap1_indices<-c(k-3,k-2)
hap2_indices<-c(k-1,k)
storage_indices<-c(j-1,j)
}
hap1<-true_ancestry_decomposed_haploid[,hap1_indices]
hap2<-true_ancestry_decomposed_haploid[,hap2_indices]
dip<-(hap1 + hap2)
true_ancestry_decomposed_diploid[,storage_indices]<-dip
loterhap1<-loter_ancestry_decomposed_haploid[,hap1_indices]
loterhap2<-loter_ancestry_decomposed_haploid[,hap2_indices]
loterdip<-(loterhap1 + loterhap2)
loter_ancestry_decomposed_diploid[,storage_indices]<-loterdip
}
#ls hap_corr<-c(rep(NA,n_haps))
# Per-individual correlation of Loter vs. true diploid ancestry dosages.
dip_corr<-c(rep(NA,nindv))
print("correlating diploid")
for (i in c(1:nindv)){
j<-i*nanc
# `threshold` is computed but never used below.
threshold<-(1/nanc)
if(nanc==3){
storage_indices<-c(j-2,j-1,j)
flip<-c(2,1,3)
} else {
storage_indices<-c(j-1,j)
flip<-c(2,1)
}
loter_indiv_i<-loter_ancestry_decomposed_diploid[,storage_indices]
true_indiv_i<-true_ancestry_decomposed_diploid[,storage_indices]
# NOTE(review): cor.test() receives matrices here; they are effectively
# treated as flattened vectors, so this is one correlation across all
# ancestry columns at once -- confirm that is the intended metric.
corr<-cor.test(loter_indiv_i,true_indiv_i)
# str(corr)
# str(corr$estimate)
# For 2 ancestries, a negative or NA correlation suggests Loter's labels
# are swapped relative to the truth; retry with columns flipped.
# (No label-permutation recovery is attempted when nanc == 3.)
if ((((nanc == 2) & ((corr$estimate < 0) | is.na(corr$estimate))))){
loter_indiv_i<-loter_indiv_i[,flip]
#str(loter_indiv_i)
corr<-cor.test(loter_indiv_i,true_indiv_i)
}
dip_corr[i]<-corr$estimate
# j<-i*nanc
# if(nanc==3){
# storage_indices<-c(j-2,j-1,j)
# corr1<-cor.test(loter_ancestry_decomposed_diploid[,j-2],true_ancestry_decomposed_diploid[,j-2], method="pearson")
# corr2<-cor.test(loter_ancestry_decomposed_diploid[,j-1],true_ancestry_decomposed_diploid[,j-1], method="pearson")
# corr3<-cor.test(loter_ancestry_decomposed_diploid[,j],true_ancestry_decomposed_diploid[,j], method="pearson")
# # if (corr1$estimate < 0){
# # corr1<-cor.test(loter_ancestry_decomposed_diploid[,j-2],true_ancestry_decomposed_diploid[,j-2], method="pearson")
# # corr2<-cor.test(loter_ancestry_decomposed_diploid[,j-1],true_ancestry_decomposed_diploid[,j-1], method="pearson")
# # }
# dip_corr[storage_indices]<-c(corr1$estimate,corr2$estimate,corr3$estimate)
# } else {
# storage_indices<-c(j-1,j)
# corr1<-cor.test(loter_ancestry_decomposed_diploid[,j],true_ancestry_decomposed_diploid[,j], method="pearson")
# corr2<-cor.test(loter_ancestry_decomposed_diploid[,j-1],true_ancestry_decomposed_diploid[,j-1], method="pearson")
# # if (corr1$estimate < 0){
# # corr1<-cor.test(loter_ancestry_decomposed_diploid[,j-1],true_ancestry_decomposed_diploid[,j-1], method="pearson")
# # corr2<-cor.test(loter_ancestry_decomposed_diploid[,j],true_ancestry_decomposed_diploid[,j], method="pearson")
# # }
# dip_corr[storage_indices]<-c(corr1$estimate,corr2$estimate)
# }
}
dip_corr
# quit()
# Write the per-individual correlations as one tab-separated row.
fwrite(as.list(dip_corr),args$out,sep ="\t")
|
#
# R in action (2 ed.)
#
# Indexing
# Demonstration script: vector and list indexing, plus a worked example
# plotting k-means centroids for the iris data.
# 1. atomic vector without named elements
x <- c(20, 30, 40)
x[3]
x[c(2,3)]
# 2. atomic vector with named elements
x <- c(A=20, B=30, C=40)
x[c(2,3)]
x[c("B", "C")]
# lists
# kmeans() returns a list; `[` keeps the list type, `[[` extracts the
# element itself, and `$` is shorthand for `[[` with a literal name.
set.seed(1234)
fit <- kmeans(iris[1:4], 3)
fit[c(2,7)]
fit[2] # return "list"
fit[[2]] # return "matrix"
fit$centers
# Notations can be combined to obtain the elements within components
fit[[2]][1,]
# ------------------------------------------------------
# Plotting the centroids from a k-means cluster analysis
# ------------------------------------------------------
library(reshape2)
set.seed(1234)
fit <- kmeans(iris[1:4], 3)
means <- fit$centers
# melt() reshapes the centroid matrix to long format: one row per
# (cluster, measurement) pair.
dfm <- melt(means)
names(dfm) <- c("Cluster", "Measurement", "Centimeters")
dfm$Cluster <- factor(dfm$Cluster)
head(dfm)
library(ggplot2)
# One profile line per cluster across the four iris measurements.
ggplot(data=dfm,
aes(x=Measurement, y=Centimeters, group=Cluster)) +
geom_point(size=3, aes(shape=Cluster, color=Cluster)) +
geom_line(size=1, aes(color=Cluster)) +
ggtitle("Profiles for Iris Clusters")
| /book/r_in_action/25_advanced_programming/indexing.R | no_license | dataikido/tech | R | false | false | 1,074 | r | #
#
# R in action (2 ed.)
#
# Indexing
# Demonstration script: vector and list indexing, plus a worked example
# plotting k-means centroids for the iris data.
# 1. atomic vector without named elements
x <- c(20, 30, 40)
x[3]
x[c(2,3)]
# 2. atomic vector with named elements
x <- c(A=20, B=30, C=40)
x[c(2,3)]
x[c("B", "C")]
# lists
# kmeans() returns a list; `[` keeps the list type, `[[` extracts the
# element itself, and `$` is shorthand for `[[` with a literal name.
set.seed(1234)
fit <- kmeans(iris[1:4], 3)
fit[c(2,7)]
fit[2] # return "list"
fit[[2]] # return "matrix"
fit$centers
# Notations can be combined to obtain the elements within components
fit[[2]][1,]
# ------------------------------------------------------
# Plotting the centroids from a k-means cluster analysis
# ------------------------------------------------------
library(reshape2)
set.seed(1234)
fit <- kmeans(iris[1:4], 3)
means <- fit$centers
# melt() reshapes the centroid matrix to long format: one row per
# (cluster, measurement) pair.
dfm <- melt(means)
names(dfm) <- c("Cluster", "Measurement", "Centimeters")
dfm$Cluster <- factor(dfm$Cluster)
head(dfm)
library(ggplot2)
# One profile line per cluster across the four iris measurements.
ggplot(data=dfm,
aes(x=Measurement, y=Centimeters, group=Cluster)) +
geom_point(size=3, aes(shape=Cluster, color=Cluster)) +
geom_line(size=1, aes(color=Cluster)) +
ggtitle("Profiles for Iris Clusters")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fn_updateParentAphiaIDs.R
\name{updateParentAphiaIDs}
\alias{updateParentAphiaIDs}
\title{Update or Add Parent Aphia IDs}
\usage{
updateParentAphiaIDs(dataset)
}
\arguments{
\item{dataset}{data frame that has the column "AphiaID".}
}
\description{
This function allows you to add parent records to your data frame. If there is no column called "Parent.AphiaID" it will generate one and populate it.
}
\examples{
data(marineKingdoms)
#run 'updateParentAphiaIDs' function. It adds the column 'Parent.AphiaID' and populates it using the 'getParentID' function.
x <- updateParentAphiaIDs(marineKingdoms)
#view the outputs
View(x)
}
\keyword{AphiaID,}
\keyword{WoRMS}
| /marineRecorder/man/updateParentAphiaIDs.Rd | no_license | uk-gov-mirror/jncc.marine-recorder-tools | R | false | true | 752 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fn_updateParentAphiaIDs.R
\name{updateParentAphiaIDs}
\alias{updateParentAphiaIDs}
\title{Update or Add Parent Aphia IDs}
\usage{
updateParentAphiaIDs(dataset)
}
\arguments{
\item{dataset}{data frame that has the column "AphiaID".}
}
\description{
This function allows you to add parent records to your data frame. If there is no column called "Parent.AphiaID" it will generate one and populate it.
}
\examples{
data(marineKingdoms)
#run 'updateParentAphiaIDs' function. It adds the column 'Parent.AphiaID' and populates it using the 'getParentID' function.
x <- updateParentAphiaIDs(marineKingdoms)
#view the outputs
View(x)
}
\keyword{AphiaID,}
\keyword{WoRMS}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_constraint_MigClim.R
\name{add_constraint_MigClim}
\alias{add_constraint_MigClim}
\title{Add constrains to the modelled distribution projection using the MigClim
approach}
\usage{
\S4method{add_constraint_MigClim}{BiodiversityScenario,character,numeric,numeric,character,numeric,numeric,numeric,numeric,numeric,character}(mod,rcThresholdMode,dispSteps,dispKernel,barrierType,lddFreq,lddRange,iniMatAge,propaguleProdProb,replicateNb,dtmp)
}
\arguments{
\item{mod}{A \code{\link{BiodiversityScenario}} object with specified predictors.}
\item{rcThresholdMode}{A \code{\link{character}} of either \strong{binary} or \strong{continuous}
value (Default: \strong{continuous}).}
\item{dispSteps}{\code{\link{numeric}} parameters on the number of dispersal steps.
Dispersal steps are executed for each timestep (prediction layer). and
ideally should be aligned with the number of steps for projection. Minimum
is \code{1} (Default) and maximum is \code{99}.}
\item{dispKernel}{A \code{\link{vector}} with the number of the dispersal Kernel to be
applied. Can be set either to a uniform numeric \link{vector}, e.g.
\code{c(1,1,1,1)} or to a proportional decline
\code{(1,0.4,0.16,0.06,0.03)} (Default).
\strong{Depending on the resolution of the raster, this parameter needs to be adapted}}
\item{barrierType}{A \link{character} indicating whether any set barrier should be
set as \code{'strong'} or \code{'weak'} barriers. Strong barriers prevent
any dispersal across the barrier and weak barriers only do so if the whole
\code{"dispKernel"} length is covered by the barrier (Default:
\code{'strong'}).}
\item{lddFreq}{\code{\link{numeric}} parameter indicating the frequency of
long-distance dispersal (LDD) events. Default is \code{0}, so no
long-distance dispersal.}
\item{lddRange}{A \code{\link{numeric}} value highlighting the minimum and maximum
distance of LDD events.
\strong{Note: The units for those distance are in cells, thus the projection units in the raster.}}
\item{iniMatAge}{Initial maturity age. Used together with \code{propaguleProd} as
a proxy of population growth. Must be set to the cell age in time units
which are dispersal steps (Default: \code{1}).}
\item{replicateNb}{Number of replicates to be used for the analysis (Default:
\code{10}).}
\item{dtmp}{A \code{\link{character}} to a folder where temporary files are to be
created.}
\item{propaguleProd}{Probability of a source cell to produce propagules as a
function of time since colonization. Set as probability vector that defines
the probability of a cell producing propagules.}
}
\value{
Adds a MigClim constraint to a \code{\link{BiodiversityScenario}} object.
}
\description{
This function adds a constraint as defined by the MigClim approach
(Engler et al. 2013) to a \code{\linkS4class{BiodiversityScenario}} object to
constrain future projections. For a detailed description of MigClim, please
see the respective reference and the User Guide. \strong{The default parameters chosen
here are suggestions.}
}
\details{
The barrier parameter is defined through \code{"add_barrier"}.
}
\examples{
\dontrun{
# Assumes that a trained 'model' object exists
mod <- scenario(model) |>
add_predictors(env = predictors, transform = 'scale',
derivates = "none") |>
add_constraint_MigClim() |>
project()
}
}
\references{
\itemize{
\item Engler R., Hordijk W. and Guisan A. The MIGCLIM R package – seamless integration of
dispersal constraints into projections of species distribution models.
Ecography,
\item Robin Engler, Wim Hordijk and Loic Pellissier (2013). MigClim: Implementing dispersal
into species distribution models. R package version 1.6.
}
}
\seealso{
\code{"MigClim::MigClim.userGuide()"}
}
\concept{constrain}
\keyword{scenario}
| /man/add_constraint_MigClim.Rd | permissive | iiasa/ibis.iSDM | R | false | true | 3,823 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_constraint_MigClim.R
\name{add_constraint_MigClim}
\alias{add_constraint_MigClim}
\title{Add constrains to the modelled distribution projection using the MigClim
approach}
\usage{
\S4method{add_constraint_MigClim}{BiodiversityScenario,character,numeric,numeric,character,numeric,numeric,numeric,numeric,numeric,character}(mod,rcThresholdMode,dispSteps,dispKernel,barrierType,lddFreq,lddRange,iniMatAge,propaguleProdProb,replicateNb,dtmp)
}
\arguments{
\item{mod}{A \code{\link{BiodiversityScenario}} object with specified predictors.}
\item{rcThresholdMode}{A \code{\link{character}} of either \strong{binary} or \strong{continuous}
value (Default: \strong{continuous}).}
\item{dispSteps}{\code{\link{numeric}} parameters on the number of dispersal steps.
Dispersal steps are executed for each timestep (prediction layer). and
ideally should be aligned with the number of steps for projection. Minimum
is \code{1} (Default) and maximum is \code{99}.}
\item{dispKernel}{A \code{\link{vector}} with the number of the dispersal Kernel to be
applied. Can be set either to a uniform numeric \link{vector}, e.g.
\code{c(1,1,1,1)} or to a proportional decline
\code{(1,0.4,0.16,0.06,0.03)} (Default).
\strong{Depending on the resolution of the raster, this parameter needs to be adapted}}
\item{barrierType}{A \link{character} indicating whether any set barrier should be
set as \code{'strong'} or \code{'weak'} barriers. Strong barriers prevent
any dispersal across the barrier and weak barriers only do so if the whole
\code{"dispKernel"} length is covered by the barrier (Default:
\code{'strong'}).}
\item{lddFreq}{\code{\link{numeric}} parameter indicating the frequency of
long-distance dispersal (LDD) events. Default is \code{0}, so no
long-distance dispersal.}
\item{lddRange}{A \code{\link{numeric}} value highlighting the minimum and maximum
distance of LDD events.
\strong{Note: The units for those distance are in cells, thus the projection units in the raster.}}
\item{iniMatAge}{Initial maturity age. Used together with \code{propaguleProd} as
a proxy of population growth. Must be set to the cell age in time units
which are dispersal steps (Default: \code{1}).}
\item{replicateNb}{Number of replicates to be used for the analysis (Default:
\code{10}).}
\item{dtmp}{A \code{\link{character}} to a folder where temporary files are to be
created.}
\item{propaguleProd}{Probability of a source cell to produce propagules as a
function of time since colonization. Set as probability vector that defines
the probability of a cell producing propagules.}
}
\value{
Adds a MigClim constraint to a \code{\link{BiodiversityScenario}} object.
}
\description{
This function adds a constraint as defined by the MigClim approach
(Engler et al. 2013) to a \code{\linkS4class{BiodiversityScenario}} object to
constrain future projections. For a detailed description of MigClim, please
see the respective reference and the User Guide. \strong{The default parameters chosen
here are suggestions.}
}
\details{
The barrier parameter is defined through \code{"add_barrier"}.
}
\examples{
\dontrun{
# Assumes that a trained 'model' object exists
mod <- scenario(model) |>
add_predictors(env = predictors, transform = 'scale',
derivates = "none") |>
add_constraint_MigClim() |>
project()
}
}
\references{
\itemize{
\item Engler R., Hordijk W. and Guisan A. The MIGCLIM R package – seamless integration of
dispersal constraints into projections of species distribution models.
Ecography,
\item Robin Engler, Wim Hordijk and Loic Pellissier (2013). MigClim: Implementing dispersal
into species distribution models. R package version 1.6.
}
}
\seealso{
\code{"MigClim::MigClim.userGuide()"}
}
\concept{constrain}
\keyword{scenario}
|
# dump tidy data produced using run_analysis.R into txt file
# `row.names` spelled out in full: the original relied on partial argument
# matching ("row.name"), which is fragile and warns under
# options(warnPartialMatchArgs = TRUE).
write.table(tidy_data, "tidy_data.txt", row.names = FALSE)
write.table(tidy_data, "tidy_data.txt", row.name=FALSE) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/value.R
\name{value}
\alias{value}
\alias{value.integrate}
\alias{value.default}
\title{Extract value from an object}
\usage{
value(object, ...)
\method{value}{integrate}(object, ...)
\method{value}{default}(object, ...)
}
\arguments{
\item{object}{an object from which a "value" is to be extracted.}
\item{...}{additional arguments (currently ignored).}
}
\description{
Functions like \code{\link[=integrate]{integrate()}} and \code{\link[=nlm]{nlm()}} return objects that contain more
information that simply the value of the integration or optimization. \code{value()} extracts
the primary value from such objects. Currently implemented situations include the output from
\code{\link[=integrate]{integrate()}},
\code{\link[=nlm]{nlm()}},
\code{\link[cubature:hcubature]{cubature::adaptIntegrate()}}, and
\code{\link[=uniroot]{uniroot()}}.
}
\examples{
integrate(sin, 0, 1) \%>\% value()
nlm(cos, p = 0) \%>\% value()
uniroot(cos, c(0, 2)) \%>\% value()
}
| /man/value.Rd | no_license | ProjectMOSAIC/mosaic | R | false | true | 1,041 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/value.R
\name{value}
\alias{value}
\alias{value.integrate}
\alias{value.default}
\title{Extract value from an object}
\usage{
value(object, ...)
\method{value}{integrate}(object, ...)
\method{value}{default}(object, ...)
}
\arguments{
\item{object}{an object from which a "value" is to be extracted.}
\item{...}{additional arguments (currently ignored).}
}
\description{
Functions like \code{\link[=integrate]{integrate()}} and \code{\link[=nlm]{nlm()}} return objects that contain more
information that simply the value of the integration or optimization. \code{value()} extracts
the primary value from such objects. Currently implemented situations include the output from
\code{\link[=integrate]{integrate()}},
\code{\link[=nlm]{nlm()}},
\code{\link[cubature:hcubature]{cubature::adaptIntegrate()}}, and
\code{\link[=uniroot]{uniroot()}}.
}
\examples{
integrate(sin, 0, 1) \%>\% value()
nlm(cos, p = 0) \%>\% value()
uniroot(cos, c(0, 2)) \%>\% value()
}
|
library("shiny")
suppressPackageStartupMessages(library(googleVis))
#loading dataset
# bcities.rda is expected to provide the `bcities` data frame used below.
load('bcities.rda')
SP <- list() # # Hit counter, Courtesy: Francis Smart: http://www.econometricsbysimulation.com/2013/06/more-explorations-of-shiny.html
# Hit count starts at zero; incremented inside shinyServer for each session.
SP$npers <- 0
shinyServer(function(input, output) {
# An increment to the hit counter saved in global server environment.
SP$npers <<- SP$npers+1
# Convenience interface to gvisMotionChart that allows to set default columns: Courtesy: Sebastian Kranz: http://stackoverflow.com/questions/10258970/default-variables-for-a-googlevis-motionchart
myMotionChart = function(df,idvar=colnames(df)[1],timevar=colnames(df)[2],xvar=colnames(df)[3],yvar=colnames(df)[4], colorvar=colnames(df)[5], sizevar = colnames(df)[6],...) {
# Generate a constant variable as column for time if not provided
# Unfortunately the motion plot still shows 1900...
if (is.null(timevar)) {
.TIME.VAR = rep(0,NROW(df))
df = cbind(df,.TIME.VAR)
timevar=".TIME.VAR"
}
# Transform booleans into 0 and 1 since otherwise an error will be thrown
for (i in 1:NCOL(df)) {
if (is.logical(df [,i])[1])
df[,i] = df[,i]*1
}
# Rearrange columns in order to have the desired default values for
# xvar, yvar, colorvar and sizevar
firstcols = c(idvar,timevar,xvar,yvar,colorvar,sizevar)
colorder = c(firstcols, setdiff(colnames(df),firstcols))
df = df[,colorder]
gvisMotionChart(df,idvar=idvar,timevar=timevar,...)
}
# creating temp dataset with two new variables
Bcities<-bcities
#Adding a column for the year: Why? see tab 2 discussion below
Bcities$Year<-c("2012")
Bcities$Year<-as.numeric(Bcities$Year)
#New variable which converts ranks from 1 through 50 to 50 through 1....why? see tab 2 discussion below
Bcities$RankReordered<-(51-Bcities$Rank)
#Output for hits
output$hits <- renderText({
paste0("App Hits:" , SP$npers)
})
#Output for tab 1 - geo chart
output$gvisgeoplot <- renderGvis({
gvisGeoChart(Bcities,locationvar="City",colorvar="Rank",sizevar=input$var1,
hovervar="City",
options=list(region="US",displayMode="markers",resolution="provinces",
colorAxis="{colors:['blue', 'green', 'yellow','orange','red']}",
width=640,height=480)
)
})
#output for tab 2 - okay, using a motion chart and modifying code to show scatter plots instead.AND YES, THIS COULD'VE BEEN DONE WITHOUT THE SHINY SERVER
# Here the column with value of 2012 is used as the time variable, and because its constant, there's no motion.
# The size of bubbles could've been the same, but figured sizing them based on rank might be better.
# Since ranking of 1 is better than 50, the size var would've placed smaller dots for better ranks.
#So, have it give larger bubbles for better cities by creating the new "Rankreordered" variable.
output$scatterplot <- renderGvis({
myMotionChart(Bcities, idvar="City", timevar="Year",xvar="Percent.unemployed",yvar="Percent.with.Graduate.Degree",sizevar="RankReordered",colorvar="City",
options=list(showSidePanel=FALSE,showSelectListComponent=FALSE,showXScalePicker=FALSE,
showYScalePicker=FALSE
))})
#Output table for tab 3 # going back to the original dataset, without the two temp vars created
output$bestcitiesdata <- renderGvis({
gvisTable(bcities)})
})
| /server.r | no_license | patilv/bb50citiesrank | R | false | false | 3,686 | r | library("shiny")
# Shiny server for the "best 50 cities" app: builds a geo chart (tab 1), a
# motion-chart scatter plot (tab 2) and a data table (tab 3) from bcities,
# and tracks an app hit count.
suppressPackageStartupMessages(library(googleVis))
#loading dataset (provides the bcities data frame)
load('bcities.rda')
SP <- list() # # Hit counter, Courtesy: Francis Smart: http://www.econometricsbysimulation.com/2013/06/more-explorations-of-shiny.html
SP$npers <- 0
shinyServer(function(input, output) {
# An increment to the hit counter saved in global server environment.
# (<<- writes to SP outside this function so the count survives sessions.)
SP$npers <<- SP$npers+1
# Convenience interface to gvisMotionChart that allows to set default columns: Courtesy: Sebastian Kranz: http://stackoverflow.com/questions/10258970/default-variables-for-a-googlevis-motionchart
# Defaults pick columns 1-6 of df for id/time/x/y/color/size respectively;
# extra args in ... are forwarded to gvisMotionChart().
myMotionChart = function(df,idvar=colnames(df)[1],timevar=colnames(df)[2],xvar=colnames(df)[3],yvar=colnames(df)[4], colorvar=colnames(df)[5], sizevar = colnames(df)[6],...) {
# Generate a constant variable as column for time if not provided
# Unfortunately the motion plot still shows 1900...
if (is.null(timevar)) {
.TIME.VAR = rep(0,NROW(df))
df = cbind(df,.TIME.VAR)
timevar=".TIME.VAR"
}
# Transform booleans into 0 and 1 since otherwise an error will be thrown
# NOTE(review): 1:NCOL(df) yields c(1, 0) for a zero-column df; seq_len(NCOL(df))
# would be the safe form.
for (i in 1:NCOL(df)) {
if (is.logical(df [,i])[1])
df[,i] = df[,i]*1
}
# Rearrange columns in order to have the desired default values for
# xvar, yvar, colorvar and sizevar
# (gvisMotionChart picks its defaults by column position, so ordering the
# columns is what makes the named defaults take effect).
firstcols = c(idvar,timevar,xvar,yvar,colorvar,sizevar)
colorder = c(firstcols, setdiff(colnames(df),firstcols))
df = df[,colorder]
gvisMotionChart(df,idvar=idvar,timevar=timevar,...)
}
# creating temp dataset with two new variables
Bcities<-bcities
#Adding a column for the year: Why? see tab 2 discussion below
Bcities$Year<-c("2012")
# Converted to numeric so the motion chart accepts it as a time variable.
Bcities$Year<-as.numeric(Bcities$Year)
#New variable which converts ranks from 1 through 50 to 50 through 1....why? see tab 2 discussion below
Bcities$RankReordered<-(51-Bcities$Rank)
#Output for hits
output$hits <- renderText({
paste0("App Hits:" , SP$npers)
})
#Output for tab 1 - geo chart
# US map with markers sized by the user-selected variable (input$var1)
# and colored by rank.
output$gvisgeoplot <- renderGvis({
gvisGeoChart(Bcities,locationvar="City",colorvar="Rank",sizevar=input$var1,
hovervar="City",
options=list(region="US",displayMode="markers",resolution="provinces",
colorAxis="{colors:['blue', 'green', 'yellow','orange','red']}",
width=640,height=480)
)
})
#output for tab 2 - okay, using a motion chart and modifying code to show scatter plots instead.AND YES, THIS COULD'VE BEEN DONE WITHOUT THE SHINY SERVER
# Here the column with value of 2012 is used as the time variable, and because its constant, there's no motion.
# The size of bubbles could've been the same, but figured sizing them based on rank might be better.
# Since ranking of 1 is better than 50, the size var would've placed smaller dots for better ranks.
#So, have it give larger bubbles for better cities by creating the new "Rankreordered" variable.
output$scatterplot <- renderGvis({
myMotionChart(Bcities, idvar="City", timevar="Year",xvar="Percent.unemployed",yvar="Percent.with.Graduate.Degree",sizevar="RankReordered",colorvar="City",
options=list(showSidePanel=FALSE,showSelectListComponent=FALSE,showXScalePicker=FALSE,
showYScalePicker=FALSE
))})
#Output table for tab 3 # going back to the original dataset, without the two temp vars created
output$bestcitiesdata <- renderGvis({
gvisTable(bcities)})
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AnnoBroadGseaResItem.R
\name{AnnoBroadGseaResItem}
\alias{AnnoBroadGseaResItem}
\title{Convert a BroadGseaResItem object to an AnnoBroadGseaResItem object}
\usage{
AnnoBroadGseaResItem(object, genes, geneValues)
}
\arguments{
\item{object}{A BroadGseaResItem object}
\item{genes}{A character string vector}
\item{geneValues}{A numeric vector}
}
\value{
An AnnoBroadGseaResItem object
}
\description{
Convert a BroadGseaResItem object to an AnnoBroadGseaResItem object
}
| /man/AnnoBroadGseaResItem.Rd | no_license | bedapub/ribiosGSEA | R | false | true | 550 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AnnoBroadGseaResItem.R
\name{AnnoBroadGseaResItem}
\alias{AnnoBroadGseaResItem}
\title{Convert a BroadGseaResItem object to an AnnoBroadGseaResItem object}
\usage{
AnnoBroadGseaResItem(object, genes, geneValues)
}
\arguments{
\item{object}{A BroadGseaResItem object}
\item{genes}{A character string vector}
\item{geneValues}{A numeric vector}
}
\value{
An AnnoBroadGseaResItem object
}
\description{
Convert a BroadGseaResItem object to an AnnoBroadGseaResItem object
}
|
\name{CAS Actions}
\alias{cas.aStore.describe}
\alias{cas.aStore.download}
\alias{cas.aStore.score}
\alias{cas.aStore.upload}
\alias{cas.accessControl.assumeRole}
\alias{cas.accessControl.checkInAllObjects}
\alias{cas.accessControl.checkOutObject}
\alias{cas.accessControl.commitTransaction}
\alias{cas.accessControl.completeBackup}
\alias{cas.accessControl.createBackup}
\alias{cas.accessControl.deleteBWList}
\alias{cas.accessControl.dropRole}
\alias{cas.accessControl.isAuthorized}
\alias{cas.accessControl.isAuthorizedActions}
\alias{cas.accessControl.isAuthorizedColumns}
\alias{cas.accessControl.isAuthorizedTables}
\alias{cas.accessControl.isInRole}
\alias{cas.accessControl.listAcsActionSet}
\alias{cas.accessControl.listAcsData}
\alias{cas.accessControl.listAllPrincipals}
\alias{cas.accessControl.listMetadata}
\alias{cas.accessControl.operActionMd}
\alias{cas.accessControl.operActionSetMd}
\alias{cas.accessControl.operAdminMd}
\alias{cas.accessControl.operBWPaths}
\alias{cas.accessControl.operColumnMd}
\alias{cas.accessControl.operTableMd}
\alias{cas.accessControl.remAllAcsActionSet}
\alias{cas.accessControl.remAllAcsData}
\alias{cas.accessControl.repAllAcsAction}
\alias{cas.accessControl.repAllAcsActionSet}
\alias{cas.accessControl.repAllAcsCaslib}
\alias{cas.accessControl.repAllAcsColumn}
\alias{cas.accessControl.repAllAcsTable}
\alias{cas.accessControl.rollbackTransaction}
\alias{cas.accessControl.showRolesAllowed}
\alias{cas.accessControl.showRolesIn}
\alias{cas.accessControl.startTransaction}
\alias{cas.accessControl.statusTransaction}
\alias{cas.accessControl.updSomeAcsAction}
\alias{cas.accessControl.updSomeAcsActionSet}
\alias{cas.accessControl.updSomeAcsCaslib}
\alias{cas.accessControl.updSomeAcsColumn}
\alias{cas.accessControl.updSomeAcsTable}
\alias{cas.accessControl.whatCheckoutsExist}
\alias{cas.accessControl.whatIsEffective}
\alias{cas.aggregation.aggregate}
\alias{cas.bayesianNetClassifier.bnet}
\alias{cas.bglimmix.bglimmix}
\alias{cas.bioMedImage.buildSurface}
\alias{cas.boolRule.brScore}
\alias{cas.boolRule.brTrain}
\alias{cas.builtins.about}
\alias{cas.builtins.actionSetInfo}
\alias{cas.builtins.addNode}
\alias{cas.builtins.casCommon}
\alias{cas.builtins.echo}
\alias{cas.builtins.getLicenseInfo}
\alias{cas.builtins.getLicensedProductInfo}
\alias{cas.builtins.help}
\alias{cas.builtins.history}
\alias{cas.builtins.httpAddress}
\alias{cas.builtins.installActionSet}
\alias{cas.builtins.listNodes}
\alias{cas.builtins.loadActionSet}
\alias{cas.builtins.log}
\alias{cas.builtins.modifyQueue}
\alias{cas.builtins.ping}
\alias{cas.builtins.queryActionSet}
\alias{cas.builtins.queryName}
\alias{cas.builtins.reflect}
\alias{cas.builtins.refreshLicense}
\alias{cas.builtins.removeNode}
\alias{cas.builtins.serverStatus}
\alias{cas.builtins.shutdown}
\alias{cas.builtins.userInfo}
\alias{cas.cardinality.summarize}
\alias{cas.casclp.solveCsp}
\alias{cas.clustering.kClus}
\alias{cas.configuration.getServOpt}
\alias{cas.configuration.listServOpts}
\alias{cas.copula.copulaFit}
\alias{cas.copula.copulaSimulate}
\alias{cas.countreg.countregFitModel}
\alias{cas.dataDiscovery.profile}
\alias{cas.dataPreprocess.binning}
\alias{cas.dataPreprocess.catTrans}
\alias{cas.dataPreprocess.discretize}
\alias{cas.dataPreprocess.highCardinality}
\alias{cas.dataPreprocess.histogram}
\alias{cas.dataPreprocess.impute}
\alias{cas.dataPreprocess.kde}
\alias{cas.dataPreprocess.outlier}
\alias{cas.dataPreprocess.rustats}
\alias{cas.dataPreprocess.transform}
\alias{cas.dataStep.runCode}
\alias{cas.decisionTree.dtreeCode}
\alias{cas.decisionTree.dtreeMerge}
\alias{cas.decisionTree.dtreePrune}
\alias{cas.decisionTree.dtreeScore}
\alias{cas.decisionTree.dtreeSplit}
\alias{cas.decisionTree.dtreeTrain}
\alias{cas.decisionTree.forestCode}
\alias{cas.decisionTree.forestScore}
\alias{cas.decisionTree.forestTrain}
\alias{cas.decisionTree.gbtreeCode}
\alias{cas.decisionTree.gbtreeScore}
\alias{cas.decisionTree.gbtreeTrain}
\alias{cas.ds2.runDS2}
\alias{cas.elasticsearch.index}
\alias{cas.elasticsearch.sandIndex}
\alias{cas.espCluster.listservers}
\alias{cas.espCluster.startservers}
\alias{cas.factmac.factmac}
\alias{cas.fastknn.fastknn}
\alias{cas.fedSql.execDirect}
\alias{cas.freqTab.freqTab}
\alias{cas.gam.gampl}
\alias{cas.gam.gamplScore}
\alias{cas.glrm.hdpca}
\alias{cas.glrm.nnmf}
\alias{cas.gvarclus.gvarclus}
\alias{cas.hiddenMarkovModel.hmm}
\alias{cas.hyperGroup.hypergroup}
\alias{cas.image.augmentImages}
\alias{cas.image.compareImages}
\alias{cas.image.fetchImages}
\alias{cas.image.flattenImageTable}
\alias{cas.image.loadImages}
\alias{cas.image.matchImages}
\alias{cas.image.processImages}
\alias{cas.image.saveImages}
\alias{cas.image.summarizeImages}
\alias{cas.loadStreams.appendSnapshot}
\alias{cas.loadStreams.loadSnapshot}
\alias{cas.loadStreams.loadStream}
\alias{cas.loadStreams.mMetaData}
\alias{cas.localSearch.solveLso}
\alias{cas.mixed.blup}
\alias{cas.mixed.mixed}
\alias{cas.network.biconnectedComponents}
\alias{cas.network.centrality}
\alias{cas.network.clique}
\alias{cas.network.community}
\alias{cas.network.connectedComponents}
\alias{cas.network.core}
\alias{cas.network.cycle}
\alias{cas.network.path}
\alias{cas.network.reach}
\alias{cas.network.readGraph}
\alias{cas.network.shortestPath}
\alias{cas.network.summary}
\alias{cas.network.transitiveClosure}
\alias{cas.networkOptimization.LAP}
\alias{cas.networkOptimization.MCF}
\alias{cas.networkOptimization.MST}
\alias{cas.networkOptimization.linearAssignment}
\alias{cas.networkOptimization.minCostFlow}
\alias{cas.networkOptimization.minCut}
\alias{cas.networkOptimization.minSpanTree}
\alias{cas.networkOptimization.tsp}
\alias{cas.networkSocial.centrality}
\alias{cas.networkSocial.community}
\alias{cas.networkSocial.core}
\alias{cas.networkSocial.reach}
\alias{cas.neuralNet.annCode}
\alias{cas.neuralNet.annScore}
\alias{cas.neuralNet.annTrain}
\alias{cas.optML.basis}
\alias{cas.optML.lasso}
\alias{cas.optML.lsqr}
\alias{cas.optML.randmat}
\alias{cas.optML.svm}
\alias{cas.optMiner.tuneDecisionTree}
\alias{cas.optMiner.tuneFactMac}
\alias{cas.optMiner.tuneForest}
\alias{cas.optMiner.tuneGradientBoostTree}
\alias{cas.optMiner.tuneNeuralNet}
\alias{cas.optMiner.tuneSvm}
\alias{cas.optNetwork.LAP}
\alias{cas.optNetwork.MCF}
\alias{cas.optNetwork.MST}
\alias{cas.optNetwork.biconnectedComponents}
\alias{cas.optNetwork.clique}
\alias{cas.optNetwork.connectedComponents}
\alias{cas.optNetwork.cycle}
\alias{cas.optNetwork.linearAssignment}
\alias{cas.optNetwork.minCostFlow}
\alias{cas.optNetwork.minCut}
\alias{cas.optNetwork.minSpanTree}
\alias{cas.optNetwork.path}
\alias{cas.optNetwork.readGraph}
\alias{cas.optNetwork.shortestPath}
\alias{cas.optNetwork.summary}
\alias{cas.optNetwork.transitiveClosure}
\alias{cas.optNetwork.tsp}
\alias{cas.optimization.solveLp}
\alias{cas.optimization.solveMilp}
\alias{cas.optimization.solveQp}
\alias{cas.optimization.tuner}
\alias{cas.pca.eig}
\alias{cas.pca.itergs}
\alias{cas.pca.nipals}
\alias{cas.pca.randompca}
\alias{cas.percentile.assess}
\alias{cas.percentile.boxPlot}
\alias{cas.percentile.percentile}
\alias{cas.pls.pls}
\alias{cas.qlim.qlim}
\alias{cas.quantreg.quantreg}
\alias{cas.recommend.recomAls}
\alias{cas.recommend.recomAppend}
\alias{cas.recommend.recomCreate}
\alias{cas.recommend.recomDocDist}
\alias{cas.recommend.recomKnnScore}
\alias{cas.recommend.recomKnnTrain}
\alias{cas.recommend.recomMfScore}
\alias{cas.recommend.recomRateinfo}
\alias{cas.recommend.recomSample}
\alias{cas.recommend.recomSearchIndex}
\alias{cas.recommend.recomSearchQuery}
\alias{cas.recommend.recomSim}
\alias{cas.regression.genmod}
\alias{cas.regression.glm}
\alias{cas.regression.logistic}
\alias{cas.ruleMining.fpgrowth}
\alias{cas.ruleMining.taxonomy}
\alias{cas.sampling.oversample}
\alias{cas.sampling.srs}
\alias{cas.sampling.stratified}
\alias{cas.search.appendIndex}
\alias{cas.search.buildAutoComplete}
\alias{cas.search.buildIndex}
\alias{cas.search.deleteDocuments}
\alias{cas.search.getSchema}
\alias{cas.search.searchAggregate}
\alias{cas.search.searchAutocomplete}
\alias{cas.search.searchIndex}
\alias{cas.search.valueCount}
\alias{cas.sentimentAnalysis.applySent}
\alias{cas.sequence.pathing}
\alias{cas.session.addNodeStatus}
\alias{cas.session.batchresults}
\alias{cas.session.endSession}
\alias{cas.session.fetchresult}
\alias{cas.session.flushresult}
\alias{cas.session.listSessions}
\alias{cas.session.listresults}
\alias{cas.session.metrics}
\alias{cas.session.sessionId}
\alias{cas.session.sessionName}
\alias{cas.session.sessionStatus}
\alias{cas.session.setLocale}
\alias{cas.session.timeout}
\alias{cas.sessionProp.addFmtLib}
\alias{cas.sessionProp.addFormat}
\alias{cas.sessionProp.deleteFormat}
\alias{cas.sessionProp.dropFmtLib}
\alias{cas.sessionProp.getSessOpt}
\alias{cas.sessionProp.listFmtLibs}
\alias{cas.sessionProp.listFmtRanges}
\alias{cas.sessionProp.listFmtSearch}
\alias{cas.sessionProp.listFmtValues}
\alias{cas.sessionProp.listSessOpts}
\alias{cas.sessionProp.promoteFmtLib}
\alias{cas.sessionProp.saveFmtLib}
\alias{cas.sessionProp.setFmtSearch}
\alias{cas.sessionProp.setSessOpt}
\alias{cas.severity.severity}
\alias{cas.severity.severityValidate}
\alias{cas.simple.correlation}
\alias{cas.simple.crossTab}
\alias{cas.simple.distinct}
\alias{cas.simple.freq}
\alias{cas.simple.groupBy}
\alias{cas.simple.mdSummary}
\alias{cas.simple.numRows}
\alias{cas.simple.paraCoord}
\alias{cas.simple.regression}
\alias{cas.simple.summary}
\alias{cas.simple.topK}
\alias{cas.simpleForecast.forecast}
\alias{cas.svDataDescription.svddTrain}
\alias{cas.svm.svmTrain}
\alias{cas.table.addCaslib}
\alias{cas.table.addTable}
\alias{cas.table.attribute}
\alias{cas.table.caslibInfo}
\alias{cas.table.columnInfo}
\alias{cas.table.deleteSource}
\alias{cas.table.dropCaslib}
\alias{cas.table.dropTable}
\alias{cas.table.fetch}
\alias{cas.table.fileInfo}
\alias{cas.table.loadDataSource}
\alias{cas.table.loadTable}
\alias{cas.table.partition}
\alias{cas.table.promote}
\alias{cas.table.queryCaslib}
\alias{cas.table.recordCount}
\alias{cas.table.save}
\alias{cas.table.shuffle}
\alias{cas.table.tableDetails}
\alias{cas.table.tableExists}
\alias{cas.table.tableInfo}
\alias{cas.table.update}
\alias{cas.table.upload}
\alias{cas.table.view}
\alias{cas.textMining.createtopic}
\alias{cas.textMining.tmMine}
\alias{cas.textMining.tmScore}
\alias{cas.textMining.tmSvd}
\alias{cas.textParse.tpAccumulate}
\alias{cas.textParse.tpParse}
\alias{cas.textParse.validateCategory}
\alias{cas.textRuleDevelop.compileCategory}
\alias{cas.textRuleDevelop.compileConcept}
\alias{cas.textRuleDevelop.saveTableToDisk}
\alias{cas.textRuleDevelop.validateCategory}
\alias{cas.textRuleDevelop.validateConcept}
\alias{cas.textRuleDiscover.termMap}
\alias{cas.textRuleScore.applyCategory}
\alias{cas.textRuleScore.applyConcept}
\alias{cas.textRuleScore.loadTableFromDisk}
\alias{cas.textSummarization.textSummarize}
\alias{cas.textTopic.tmCreateTopic}
\alias{cas.textUtil.scoreAstore}
\alias{cas.textUtil.tmAstore}
\alias{cas.textUtil.tmCooccur}
\alias{cas.textUtil.tmfindSimilar}
\alias{cas.timeData.forecast}
\alias{cas.timeData.runTimeCode}
\alias{cas.timeData.timeSeries}
\alias{cas.timeFrequency.stft}
\alias{cas.timeFrequency.window}
\alias{cas.transpose.transpose}
\alias{cas.tsReconcile.reconcileTwoLevels}
\alias{cas.tsinfo.getinfo}
\alias{cas.varReduce.super}
\alias{cas.varReduce.unsuper}
\title{Common Page for CAS Actions}
\usage{
cas.actionSet.action(CASorCASTab, parameters...)
}
\arguments{
\item{CASorCASTab}{An instance of a \code{\link{CAS}} object that
represents a connection and CAS session, or an instance of a
\code{\link{CASTable}}.}
\item{parameters}{Actions accept a series of parameters in
key=value pair format. The parameters are action-specific. See
the product documentation.}
}
\description{
When you connect to SAS Cloud Analytic Services (CAS), the SWAT
package software generates an \R function for each CAS
action that is available on the server.
}
\section{Examples}{
The following two functions are generated and correspond to the
table.tableInfo and simple.summary actions:
\code{
cas.table.tableInfo(irisct)
}
\code{
cas.simple.summary(irisct)
}
}
\section{Product Documentation}{
For a list of all the CAS actions that are available with
SAS Visual Analytics, SAS Visual Statistics, and SAS Visual
Data Mining and Machine Learning, see the following URL:
\href{http://documentation.sas.com/?cdcId=vdmmlcdc&cdcVersion=8.11&docsetId=allprodsactions&docsetTarget=actionsByName.htm}{SAS Viya 3.2 Programming: Actions and Action Sets by Name and Product}
The preceding URL applies to the SAS Viya 3.2 release. For
the latest product documentation for SAS Viya, see
\href{http://support.sas.com/documentation/onlinedoc/viya/index.html}{
Documentation for SAS Viya}.
}
% Copyright SAS Institute
| /man/generatedFunctions.Rd | permissive | samkart/R-swat | R | false | false | 12,827 | rd | \name{CAS Actions}
\alias{cas.aStore.describe}
\alias{cas.aStore.download}
\alias{cas.aStore.score}
\alias{cas.aStore.upload}
\alias{cas.accessControl.assumeRole}
\alias{cas.accessControl.checkInAllObjects}
\alias{cas.accessControl.checkOutObject}
\alias{cas.accessControl.commitTransaction}
\alias{cas.accessControl.completeBackup}
\alias{cas.accessControl.createBackup}
\alias{cas.accessControl.deleteBWList}
\alias{cas.accessControl.dropRole}
\alias{cas.accessControl.isAuthorized}
\alias{cas.accessControl.isAuthorizedActions}
\alias{cas.accessControl.isAuthorizedColumns}
\alias{cas.accessControl.isAuthorizedTables}
\alias{cas.accessControl.isInRole}
\alias{cas.accessControl.listAcsActionSet}
\alias{cas.accessControl.listAcsData}
\alias{cas.accessControl.listAllPrincipals}
\alias{cas.accessControl.listMetadata}
\alias{cas.accessControl.operActionMd}
\alias{cas.accessControl.operActionSetMd}
\alias{cas.accessControl.operAdminMd}
\alias{cas.accessControl.operBWPaths}
\alias{cas.accessControl.operColumnMd}
\alias{cas.accessControl.operTableMd}
\alias{cas.accessControl.remAllAcsActionSet}
\alias{cas.accessControl.remAllAcsData}
\alias{cas.accessControl.repAllAcsAction}
\alias{cas.accessControl.repAllAcsActionSet}
\alias{cas.accessControl.repAllAcsCaslib}
\alias{cas.accessControl.repAllAcsColumn}
\alias{cas.accessControl.repAllAcsTable}
\alias{cas.accessControl.rollbackTransaction}
\alias{cas.accessControl.showRolesAllowed}
\alias{cas.accessControl.showRolesIn}
\alias{cas.accessControl.startTransaction}
\alias{cas.accessControl.statusTransaction}
\alias{cas.accessControl.updSomeAcsAction}
\alias{cas.accessControl.updSomeAcsActionSet}
\alias{cas.accessControl.updSomeAcsCaslib}
\alias{cas.accessControl.updSomeAcsColumn}
\alias{cas.accessControl.updSomeAcsTable}
\alias{cas.accessControl.whatCheckoutsExist}
\alias{cas.accessControl.whatIsEffective}
\alias{cas.aggregation.aggregate}
\alias{cas.bayesianNetClassifier.bnet}
\alias{cas.bglimmix.bglimmix}
\alias{cas.bioMedImage.buildSurface}
\alias{cas.boolRule.brScore}
\alias{cas.boolRule.brTrain}
\alias{cas.builtins.about}
\alias{cas.builtins.actionSetInfo}
\alias{cas.builtins.addNode}
\alias{cas.builtins.casCommon}
\alias{cas.builtins.echo}
\alias{cas.builtins.getLicenseInfo}
\alias{cas.builtins.getLicensedProductInfo}
\alias{cas.builtins.help}
\alias{cas.builtins.history}
\alias{cas.builtins.httpAddress}
\alias{cas.builtins.installActionSet}
\alias{cas.builtins.listNodes}
\alias{cas.builtins.loadActionSet}
\alias{cas.builtins.log}
\alias{cas.builtins.modifyQueue}
\alias{cas.builtins.ping}
\alias{cas.builtins.queryActionSet}
\alias{cas.builtins.queryName}
\alias{cas.builtins.reflect}
\alias{cas.builtins.refreshLicense}
\alias{cas.builtins.removeNode}
\alias{cas.builtins.serverStatus}
\alias{cas.builtins.shutdown}
\alias{cas.builtins.userInfo}
\alias{cas.cardinality.summarize}
\alias{cas.casclp.solveCsp}
\alias{cas.clustering.kClus}
\alias{cas.configuration.getServOpt}
\alias{cas.configuration.listServOpts}
\alias{cas.copula.copulaFit}
\alias{cas.copula.copulaSimulate}
\alias{cas.countreg.countregFitModel}
\alias{cas.dataDiscovery.profile}
\alias{cas.dataPreprocess.binning}
\alias{cas.dataPreprocess.catTrans}
\alias{cas.dataPreprocess.discretize}
\alias{cas.dataPreprocess.highCardinality}
\alias{cas.dataPreprocess.histogram}
\alias{cas.dataPreprocess.impute}
\alias{cas.dataPreprocess.kde}
\alias{cas.dataPreprocess.outlier}
\alias{cas.dataPreprocess.rustats}
\alias{cas.dataPreprocess.transform}
\alias{cas.dataStep.runCode}
\alias{cas.decisionTree.dtreeCode}
\alias{cas.decisionTree.dtreeMerge}
\alias{cas.decisionTree.dtreePrune}
\alias{cas.decisionTree.dtreeScore}
\alias{cas.decisionTree.dtreeSplit}
\alias{cas.decisionTree.dtreeTrain}
\alias{cas.decisionTree.forestCode}
\alias{cas.decisionTree.forestScore}
\alias{cas.decisionTree.forestTrain}
\alias{cas.decisionTree.gbtreeCode}
\alias{cas.decisionTree.gbtreeScore}
\alias{cas.decisionTree.gbtreeTrain}
\alias{cas.ds2.runDS2}
\alias{cas.elasticsearch.index}
\alias{cas.elasticsearch.sandIndex}
\alias{cas.espCluster.listservers}
\alias{cas.espCluster.startservers}
\alias{cas.factmac.factmac}
\alias{cas.fastknn.fastknn}
\alias{cas.fedSql.execDirect}
\alias{cas.freqTab.freqTab}
\alias{cas.gam.gampl}
\alias{cas.gam.gamplScore}
\alias{cas.glrm.hdpca}
\alias{cas.glrm.nnmf}
\alias{cas.gvarclus.gvarclus}
\alias{cas.hiddenMarkovModel.hmm}
\alias{cas.hyperGroup.hypergroup}
\alias{cas.image.augmentImages}
\alias{cas.image.compareImages}
\alias{cas.image.fetchImages}
\alias{cas.image.flattenImageTable}
\alias{cas.image.loadImages}
\alias{cas.image.matchImages}
\alias{cas.image.processImages}
\alias{cas.image.saveImages}
\alias{cas.image.summarizeImages}
\alias{cas.loadStreams.appendSnapshot}
\alias{cas.loadStreams.loadSnapshot}
\alias{cas.loadStreams.loadStream}
\alias{cas.loadStreams.mMetaData}
\alias{cas.localSearch.solveLso}
\alias{cas.mixed.blup}
\alias{cas.mixed.mixed}
\alias{cas.network.biconnectedComponents}
\alias{cas.network.centrality}
\alias{cas.network.clique}
\alias{cas.network.community}
\alias{cas.network.connectedComponents}
\alias{cas.network.core}
\alias{cas.network.cycle}
\alias{cas.network.path}
\alias{cas.network.reach}
\alias{cas.network.readGraph}
\alias{cas.network.shortestPath}
\alias{cas.network.summary}
\alias{cas.network.transitiveClosure}
\alias{cas.networkOptimization.LAP}
\alias{cas.networkOptimization.MCF}
\alias{cas.networkOptimization.MST}
\alias{cas.networkOptimization.linearAssignment}
\alias{cas.networkOptimization.minCostFlow}
\alias{cas.networkOptimization.minCut}
\alias{cas.networkOptimization.minSpanTree}
\alias{cas.networkOptimization.tsp}
\alias{cas.networkSocial.centrality}
\alias{cas.networkSocial.community}
\alias{cas.networkSocial.core}
\alias{cas.networkSocial.reach}
\alias{cas.neuralNet.annCode}
\alias{cas.neuralNet.annScore}
\alias{cas.neuralNet.annTrain}
\alias{cas.optML.basis}
\alias{cas.optML.lasso}
\alias{cas.optML.lsqr}
\alias{cas.optML.randmat}
\alias{cas.optML.svm}
\alias{cas.optMiner.tuneDecisionTree}
\alias{cas.optMiner.tuneFactMac}
\alias{cas.optMiner.tuneForest}
\alias{cas.optMiner.tuneGradientBoostTree}
\alias{cas.optMiner.tuneNeuralNet}
\alias{cas.optMiner.tuneSvm}
\alias{cas.optNetwork.LAP}
\alias{cas.optNetwork.MCF}
\alias{cas.optNetwork.MST}
\alias{cas.optNetwork.biconnectedComponents}
\alias{cas.optNetwork.clique}
\alias{cas.optNetwork.connectedComponents}
\alias{cas.optNetwork.cycle}
\alias{cas.optNetwork.linearAssignment}
\alias{cas.optNetwork.minCostFlow}
\alias{cas.optNetwork.minCut}
\alias{cas.optNetwork.minSpanTree}
\alias{cas.optNetwork.path}
\alias{cas.optNetwork.readGraph}
\alias{cas.optNetwork.shortestPath}
\alias{cas.optNetwork.summary}
\alias{cas.optNetwork.transitiveClosure}
\alias{cas.optNetwork.tsp}
\alias{cas.optimization.solveLp}
\alias{cas.optimization.solveMilp}
\alias{cas.optimization.solveQp}
\alias{cas.optimization.tuner}
\alias{cas.pca.eig}
\alias{cas.pca.itergs}
\alias{cas.pca.nipals}
\alias{cas.pca.randompca}
\alias{cas.percentile.assess}
\alias{cas.percentile.boxPlot}
\alias{cas.percentile.percentile}
\alias{cas.pls.pls}
\alias{cas.qlim.qlim}
\alias{cas.quantreg.quantreg}
\alias{cas.recommend.recomAls}
\alias{cas.recommend.recomAppend}
\alias{cas.recommend.recomCreate}
\alias{cas.recommend.recomDocDist}
\alias{cas.recommend.recomKnnScore}
\alias{cas.recommend.recomKnnTrain}
\alias{cas.recommend.recomMfScore}
\alias{cas.recommend.recomRateinfo}
\alias{cas.recommend.recomSample}
\alias{cas.recommend.recomSearchIndex}
\alias{cas.recommend.recomSearchQuery}
\alias{cas.recommend.recomSim}
\alias{cas.regression.genmod}
\alias{cas.regression.glm}
\alias{cas.regression.logistic}
\alias{cas.ruleMining.fpgrowth}
\alias{cas.ruleMining.taxonomy}
\alias{cas.sampling.oversample}
\alias{cas.sampling.srs}
\alias{cas.sampling.stratified}
\alias{cas.search.appendIndex}
\alias{cas.search.buildAutoComplete}
\alias{cas.search.buildIndex}
\alias{cas.search.deleteDocuments}
\alias{cas.search.getSchema}
\alias{cas.search.searchAggregate}
\alias{cas.search.searchAutocomplete}
\alias{cas.search.searchIndex}
\alias{cas.search.valueCount}
\alias{cas.sentimentAnalysis.applySent}
\alias{cas.sequence.pathing}
\alias{cas.session.addNodeStatus}
\alias{cas.session.batchresults}
\alias{cas.session.endSession}
\alias{cas.session.fetchresult}
\alias{cas.session.flushresult}
\alias{cas.session.listSessions}
\alias{cas.session.listresults}
\alias{cas.session.metrics}
\alias{cas.session.sessionId}
\alias{cas.session.sessionName}
\alias{cas.session.sessionStatus}
\alias{cas.session.setLocale}
\alias{cas.session.timeout}
\alias{cas.sessionProp.addFmtLib}
\alias{cas.sessionProp.addFormat}
\alias{cas.sessionProp.deleteFormat}
\alias{cas.sessionProp.dropFmtLib}
\alias{cas.sessionProp.getSessOpt}
\alias{cas.sessionProp.listFmtLibs}
\alias{cas.sessionProp.listFmtRanges}
\alias{cas.sessionProp.listFmtSearch}
\alias{cas.sessionProp.listFmtValues}
\alias{cas.sessionProp.listSessOpts}
\alias{cas.sessionProp.promoteFmtLib}
\alias{cas.sessionProp.saveFmtLib}
\alias{cas.sessionProp.setFmtSearch}
\alias{cas.sessionProp.setSessOpt}
\alias{cas.severity.severity}
\alias{cas.severity.severityValidate}
\alias{cas.simple.correlation}
\alias{cas.simple.crossTab}
\alias{cas.simple.distinct}
\alias{cas.simple.freq}
\alias{cas.simple.groupBy}
\alias{cas.simple.mdSummary}
\alias{cas.simple.numRows}
\alias{cas.simple.paraCoord}
\alias{cas.simple.regression}
\alias{cas.simple.summary}
\alias{cas.simple.topK}
\alias{cas.simpleForecast.forecast}
\alias{cas.svDataDescription.svddTrain}
\alias{cas.svm.svmTrain}
\alias{cas.table.addCaslib}
\alias{cas.table.addTable}
\alias{cas.table.attribute}
\alias{cas.table.caslibInfo}
\alias{cas.table.columnInfo}
\alias{cas.table.deleteSource}
\alias{cas.table.dropCaslib}
\alias{cas.table.dropTable}
\alias{cas.table.fetch}
\alias{cas.table.fileInfo}
\alias{cas.table.loadDataSource}
\alias{cas.table.loadTable}
\alias{cas.table.partition}
\alias{cas.table.promote}
\alias{cas.table.queryCaslib}
\alias{cas.table.recordCount}
\alias{cas.table.save}
\alias{cas.table.shuffle}
\alias{cas.table.tableDetails}
\alias{cas.table.tableExists}
\alias{cas.table.tableInfo}
\alias{cas.table.update}
\alias{cas.table.upload}
\alias{cas.table.view}
\alias{cas.textMining.createtopic}
\alias{cas.textMining.tmMine}
\alias{cas.textMining.tmScore}
\alias{cas.textMining.tmSvd}
\alias{cas.textParse.tpAccumulate}
\alias{cas.textParse.tpParse}
\alias{cas.textParse.validateCategory}
\alias{cas.textRuleDevelop.compileCategory}
\alias{cas.textRuleDevelop.compileConcept}
\alias{cas.textRuleDevelop.saveTableToDisk}
\alias{cas.textRuleDevelop.validateCategory}
\alias{cas.textRuleDevelop.validateConcept}
\alias{cas.textRuleDiscover.termMap}
\alias{cas.textRuleScore.applyCategory}
\alias{cas.textRuleScore.applyConcept}
\alias{cas.textRuleScore.loadTableFromDisk}
\alias{cas.textSummarization.textSummarize}
\alias{cas.textTopic.tmCreateTopic}
\alias{cas.textUtil.scoreAstore}
\alias{cas.textUtil.tmAstore}
\alias{cas.textUtil.tmCooccur}
\alias{cas.textUtil.tmfindSimilar}
\alias{cas.timeData.forecast}
\alias{cas.timeData.runTimeCode}
\alias{cas.timeData.timeSeries}
\alias{cas.timeFrequency.stft}
\alias{cas.timeFrequency.window}
\alias{cas.transpose.transpose}
\alias{cas.tsReconcile.reconcileTwoLevels}
\alias{cas.tsinfo.getinfo}
\alias{cas.varReduce.super}
\alias{cas.varReduce.unsuper}
\title{Common Page for CAS Actions}
\usage{
cas.actionSet.action(CASorCASTab, parameters...)
}
\arguments{
\item{CASorCASTab}{An instance of a \code{\link{CAS}} object that
represents a connection and CAS session, or an instance of a
\code{\link{CASTable}}.}
\item{parameters}{Actions accept a series of parameters in
key=value pair format. The parameters are action-specific. See
the product documentation.}
}
\description{
When you connect to SAS Cloud Analytic Services (CAS), the SWAT
package software generates an \R function for each CAS
action that is available on the server.
}
\section{Examples}{
The following two functions are generated and correspond to the
table.tableInfo and simple.summary actions:
\code{
cas.table.tableInfo(irisct)
}
\code{
cas.simple.summary(irisct)
}
}
\section{Product Documentation}{
For a list of all the CAS actions that are available with
SAS Visual Analytics, SAS Visual Statistics, and SAS Visual
Data Mining and Machine Learning, see the following URL:
\href{http://documentation.sas.com/?cdcId=vdmmlcdc&cdcVersion=8.11&docsetId=allprodsactions&docsetTarget=actionsByName.htm}{SAS Viya 3.2 Programming: Actions and Action Sets by Name and Product}
The preceding URL applies to the SAS Viya 3.2 release. For
the latest product documentation for SAS Viya, see
\href{http://support.sas.com/documentation/onlinedoc/viya/index.html}{
Documentation for SAS Viya}.
}
% Copyright SAS Institute
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.