content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Bootstrap replicate 443 (file path in the dump: /analysis/boot/boot443.R):
# summary values dumped by a single bootstrap fit.
seed <- 443
# Log importance weight of this replicate.
log.wt <- -13.039547689358397
# Penalty value used in the fit -- presumably a ridge/smoothing penalty; confirm
# against the estimation code.
penalty <- 2.8115950178536287e-8
# Send-side interval breakpoints (none used in this model).
intervals.send <- c()
# Receive-side interval breakpoints -- an approximately doubling grid;
# units are not stated here (likely seconds -- TODO confirm).
intervals.recv <- c(56, 112, 225, 450, 900, 1800, 3600, 7200, 14400, 28800, 57600, 115200, 230400, 460800, 921600, 1843200, 3686400, 7372800, 14745600, 29491200, 58982400)
# Null-model deviance and its degrees of freedom.
dev.null <- 358759.0022669336
df.null <- 35567
# Residual deviance and degrees of freedom of the fitted model.
dev.resid <- 225576.7931840716
df.resid <- 35402
# Number of fitted coefficients (matches df.null - df.resid = 165).
df <- 165
coefs <- c(6.273865912444194, 5.9427492664344985, 5.833468416660316, 5.307638068838451, 5.0566345814133555, 4.847600968896227, 4.756618163818065, 4.616410036686033, 4.403738162079844, 4.33125194962408, 4.251319014882228, 4.196929307957316, 4.048673016190725, 3.9770919514335863, 3.745674779946277, 3.5504012736220956, 3.240086565602179, 2.9335700596881518, 2.476950261129381, 2.066300103839319, 1.5686869653562296, 0.8683309401055308, 1.0549770158355412, 0.23874943502242923, 0.7669882854121908, -0.9472003540264197, -0.2758188148286955, 0.9289539098886616, 1.1036146853562012, -1.5734330089988597, -2.424641277192171, -2.5888171720129485, -0.35302950935294364, 0.7539523890570301, 1.1812117885427909, -0.6627738513427436, 0.1020374047952669, -0.6975596375875955, -0.22512488419527887, -1.2284244153451884, 0.940351823646318, 0.8588511559907446, -0.8817620799176219, -2.158096120554302, -1.2958966890184296, -0.7198737139156405, -0.823711661164602, 0.15804389400729615, 8.060119596340458e-2, -0.5166958458728207, 0.2042402645045054, 1.2494151968592986, -2.2423098159808132, 1.779214401276957, 0.7744505548001541, 0.9457731696502433, -1.9034271627607156, -1.0742789154748815e-2, -0.3991776037965962, 1.1101391454094667, 1.3438996417155735, 0.8202886904822653, -1.5901826679765723, -1.256070617777711, -0.3479308221964437, 0.21087946784328257, 0.6481516702379349, -0.30980484178719614, -0.8921116577484328, -0.6524487657202764, -1.5967438494961426, -0.2879602126832531, 0.503115087385363, 0.9088970679437003, 0.6778421177282337, -1.196537615559549, -1.4419830987253803, -0.9149292357021979, -0.11473458280499486, 0.7007574065657147, 1.1048234759973523, 0.164406353216957, 0.3643005560021671, -1.6362810142046966, -0.874937791540391, 0.35381310483859413, 1.1961747011886992, 0.4535919390192175, 0.8714874003589732, -2.6074864146486245, 0.44813275613229675, 0.668301437914234, 0.7338076048134309, 0.32631300211013625, -0.15220639780675485, 1.2887590954954513, -0.583852568477676, 0.18974579803514777, 
-0.11018067811128726, 0.30412040127422263, 0.28963731618196153, -0.3438620257993571, 0.8585548608935085, 0.4478708701649417, 0.6435667918878608, 0.8054106406170713, 1.123910468524296, -5.422653761569927e-2, -0.642244645926774, -1.222989300281235, 0.3222921462186022, 0.6661925212786556, 1.6059777357790546, -0.4225196981467416, -7.852966443339363e-2, -0.7126854010030856, 0.7126276077252486, -0.287021992213713, 0.48658340796686517, 0.3422199224809993, -0.514477842222183, -0.5264569263351204, -1.2111931597376169, -0.751249715131331, 0.28393273930766383, 0.8584895571751736, -3.7125748117117316e-2, 0.9491796547119291, -0.4565793563105563, -0.4475430228307524, 0.23641650599981046, 0.8478940524435055, 0.976739260969989, 0.3060659509396908, 8.620901859733192e-2, 1.2084200510835084, -0.25681415268526725, 1.1124099229179303, 0.7652811566059695, 0.9350942600522487, 0.7537256473284444, -0.6639320231118521, -1.3842323825969962, 0.5877963187900378, 0.4395781867693936, 0.5167840006610358, -0.2540553509609325, -0.33301662766861667, -2.392001637319492, 1.340137991652436, 0.20687111585457232, 1.1997651172463362, -0.20612045985450836, -0.13685101815967537, -9.132784801037801e-2, -1.9323602478027866, -1.5974462148441482, 0.8153074512027432, 1.179797307572893, -4.5395027921182246e-2, 1.5860378262770862, -0.29775211356093545, -0.16464876205912798, -7.733838094812018e-2, 1.1586137072932088)
| /analysis/boot/boot443.R | no_license | patperry/interaction-proc | R | false | false | 3,762 | r | seed <- 443
log.wt <- -13.039547689358397
penalty <- 2.8115950178536287e-8
intervals.send <- c()
intervals.recv <- c(56, 112, 225, 450, 900, 1800, 3600, 7200, 14400, 28800, 57600, 115200, 230400, 460800, 921600, 1843200, 3686400, 7372800, 14745600, 29491200, 58982400)
dev.null <- 358759.0022669336
df.null <- 35567
dev.resid <- 225576.7931840716
df.resid <- 35402
df <- 165
coefs <- c(6.273865912444194, 5.9427492664344985, 5.833468416660316, 5.307638068838451, 5.0566345814133555, 4.847600968896227, 4.756618163818065, 4.616410036686033, 4.403738162079844, 4.33125194962408, 4.251319014882228, 4.196929307957316, 4.048673016190725, 3.9770919514335863, 3.745674779946277, 3.5504012736220956, 3.240086565602179, 2.9335700596881518, 2.476950261129381, 2.066300103839319, 1.5686869653562296, 0.8683309401055308, 1.0549770158355412, 0.23874943502242923, 0.7669882854121908, -0.9472003540264197, -0.2758188148286955, 0.9289539098886616, 1.1036146853562012, -1.5734330089988597, -2.424641277192171, -2.5888171720129485, -0.35302950935294364, 0.7539523890570301, 1.1812117885427909, -0.6627738513427436, 0.1020374047952669, -0.6975596375875955, -0.22512488419527887, -1.2284244153451884, 0.940351823646318, 0.8588511559907446, -0.8817620799176219, -2.158096120554302, -1.2958966890184296, -0.7198737139156405, -0.823711661164602, 0.15804389400729615, 8.060119596340458e-2, -0.5166958458728207, 0.2042402645045054, 1.2494151968592986, -2.2423098159808132, 1.779214401276957, 0.7744505548001541, 0.9457731696502433, -1.9034271627607156, -1.0742789154748815e-2, -0.3991776037965962, 1.1101391454094667, 1.3438996417155735, 0.8202886904822653, -1.5901826679765723, -1.256070617777711, -0.3479308221964437, 0.21087946784328257, 0.6481516702379349, -0.30980484178719614, -0.8921116577484328, -0.6524487657202764, -1.5967438494961426, -0.2879602126832531, 0.503115087385363, 0.9088970679437003, 0.6778421177282337, -1.196537615559549, -1.4419830987253803, -0.9149292357021979, -0.11473458280499486, 0.7007574065657147, 1.1048234759973523, 0.164406353216957, 0.3643005560021671, -1.6362810142046966, -0.874937791540391, 0.35381310483859413, 1.1961747011886992, 0.4535919390192175, 0.8714874003589732, -2.6074864146486245, 0.44813275613229675, 0.668301437914234, 0.7338076048134309, 0.32631300211013625, -0.15220639780675485, 1.2887590954954513, -0.583852568477676, 0.18974579803514777, 
-0.11018067811128726, 0.30412040127422263, 0.28963731618196153, -0.3438620257993571, 0.8585548608935085, 0.4478708701649417, 0.6435667918878608, 0.8054106406170713, 1.123910468524296, -5.422653761569927e-2, -0.642244645926774, -1.222989300281235, 0.3222921462186022, 0.6661925212786556, 1.6059777357790546, -0.4225196981467416, -7.852966443339363e-2, -0.7126854010030856, 0.7126276077252486, -0.287021992213713, 0.48658340796686517, 0.3422199224809993, -0.514477842222183, -0.5264569263351204, -1.2111931597376169, -0.751249715131331, 0.28393273930766383, 0.8584895571751736, -3.7125748117117316e-2, 0.9491796547119291, -0.4565793563105563, -0.4475430228307524, 0.23641650599981046, 0.8478940524435055, 0.976739260969989, 0.3060659509396908, 8.620901859733192e-2, 1.2084200510835084, -0.25681415268526725, 1.1124099229179303, 0.7652811566059695, 0.9350942600522487, 0.7537256473284444, -0.6639320231118521, -1.3842323825969962, 0.5877963187900378, 0.4395781867693936, 0.5167840006610358, -0.2540553509609325, -0.33301662766861667, -2.392001637319492, 1.340137991652436, 0.20687111585457232, 1.1997651172463362, -0.20612045985450836, -0.13685101815967537, -9.132784801037801e-2, -1.9323602478027866, -1.5974462148441482, 0.8153074512027432, 1.179797307572893, -4.5395027921182246e-2, 1.5860378262770862, -0.29775211356093545, -0.16464876205912798, -7.733838094812018e-2, 1.1586137072932088)
|
library(testthat)
library(CNAIM)
# Financial consequence-of-failure (COF) tests for transformer asset categories.
# Expected values are reference outputs of CNAIM::financial_cof_transformers --
# presumably derived from the CNAIM standard tables; confirm against package docs.
context("COF:Financial Transformers: EHV")
test_that("33kV Transformer (GM)", {
res <- financial_cof_transformers(tf_asset_category = "33kV Transformer (GM)",
type_financial_factor_size = "33/20kV, CMR equivalent",
type_financial_factor_kva_mva = 20,
access_factor_criteria = "Type A")
expected_value <- 91250
expect_equal(res, expected_value)
})
context("COF:Financial Transformers: 132kV")
test_that("132kV Transformer (GM)", {
res <- financial_cof_transformers(tf_asset_category = "132kV Transformer (GM)",
type_financial_factor_size = "132/33kV",
type_financial_factor_kva_mva = 20,
access_factor_criteria = "Type A")
expected_value <- 197038.8
expect_equal(res, expected_value)
})
# The 20kV category omits type_financial_factor_size, so the function's default
# for that argument is exercised here.
context("COF:Financial Transformers: 20kV")
test_that("20kV Transformer (GM)", {
res <- financial_cof_transformers(tf_asset_category = "20kV Transformer (GM)",
type_financial_factor_kva_mva = 20,
access_factor_criteria = "Type A")
expected_value <- 7489.35
expect_equal(res, expected_value)
})
#-----------------------------------------
# Safety consequence-of-failure tests: all three voltage categories are checked
# with default location/type risk, pinning the package's reference outputs.
context("COF:Safety Transformers: EHV")
test_that("33kV Transformer (GM)", {
res <- safety_cof_transformers(tf_asset_category = "33kV Transformer (GM)",
location_risk = "Default",
type_risk = "Default")
expected_value <- 20771
expect_equal(res, expected_value)
})
context("COF:Safety Transformers: 132kV")
test_that("132kV Transformer (GM)", {
res <- safety_cof_transformers(tf_asset_category = "132kV Transformer (GM)",
location_risk = "Default",
type_risk = "Default")
expected_value <- 31968
expect_equal(res, expected_value)
})
context("COF:Safety Transformers: HV")
test_that("20kV Transformer (GM)", {
res <- safety_cof_transformers(tf_asset_category = "20kV Transformer (GM)",
location_risk = "Default",
type_risk = "Default")
expected_value <- 4262
expect_equal(res, expected_value)
})
#-----------------------------------------
# Environmental consequence-of-failure tests; prox_water/bunded/size arguments
# are held constant across categories so only the asset category (and size
# conversion, where applicable) varies.
context("COF:Env Transformers: EHV")
test_that("33kV Transformer (GM)", {
res <- environmental_cof_transformers(tf_asset_category = "33kV Transformer (GM)",
prox_water = 95, bunded = "Yes", size_kva_mva = 20, size_conversion = "33/20kV")
expected_value <- 11352
expect_equal(res, expected_value)
})
context("COF:Env Transformers: 132kV")
test_that("132kV Transformer (GM)", {
res <- environmental_cof_transformers(tf_asset_category = "132kV Transformer (GM)",
prox_water = 95, bunded = "Yes", size_kva_mva = 20,
size_conversion = "132/33kV")
expected_value <- 11684.8
expect_equal(res, expected_value)
})
# The 20kV (HV) category is called without size_conversion, exercising the
# function's default for that argument.
context("COF:Env Transformers: HV")
test_that("20kV Transformer (GM)", {
res <- environmental_cof_transformers(tf_asset_category = "20kV Transformer (GM)",
prox_water = 95, bunded = "Yes", size_kva_mva = 20)
expected_value <- 951.3
expect_equal(res, expected_value)
})
#-----------------------------------------
# Network consequence-of-failure tests.  Results are rounded to one decimal
# before comparison.
# NOTE(review): `%>%` is used here but magrittr is never attached in this file;
# this relies on the pipe being exported by a loaded dependency -- confirm.
context("COF:Network Transformers: EHV")
test_that("33kV Transformer", {
res <- network_cof_transformers(tf_asset_category = "33kV Transformer (GM)",
actual_load_mva = 15) %>% round(1)
expected_value <- 24098.5
expect_equal(res, expected_value)
})
context("COF:Network Transformers: 132kV")
test_that("132kV transformer", {
res <- network_cof_transformers(tf_asset_category = "132kV Transformer (GM)",
actual_load_mva = 75) %>% round(1)
expected_value <- 239862.2
expect_equal(res, expected_value)
})
| /tests/testthat/test-cof_transformers.R | permissive | scoultersdcoe/CNAIM | R | false | false | 4,042 | r | library(testthat)
library(CNAIM)
context("COF:Financial Transformers: EHV")
test_that("33kV Transformer (GM)", {
res <- financial_cof_transformers(tf_asset_category = "33kV Transformer (GM)",
type_financial_factor_size = "33/20kV, CMR equivalent",
type_financial_factor_kva_mva = 20,
access_factor_criteria = "Type A")
expected_value <- 91250
expect_equal(res, expected_value)
})
context("COF:Financial Transformers: 132kV")
test_that("132kV Transformer (GM)", {
res <- financial_cof_transformers(tf_asset_category = "132kV Transformer (GM)",
type_financial_factor_size = "132/33kV",
type_financial_factor_kva_mva = 20,
access_factor_criteria = "Type A")
expected_value <- 197038.8
expect_equal(res, expected_value)
})
context("COF:Financial Transformers: 20kV")
test_that("20kV Transformer (GM)", {
res <- financial_cof_transformers(tf_asset_category = "20kV Transformer (GM)",
type_financial_factor_kva_mva = 20,
access_factor_criteria = "Type A")
expected_value <- 7489.35
expect_equal(res, expected_value)
})
#-----------------------------------------
context("COF:Safety Transformers: EHV")
test_that("33kV Transformer (GM)", {
res <- safety_cof_transformers(tf_asset_category = "33kV Transformer (GM)",
location_risk = "Default",
type_risk = "Default")
expected_value <- 20771
expect_equal(res, expected_value)
})
context("COF:Safety Transformers: 132kV")
test_that("132kV Transformer (GM)", {
res <- safety_cof_transformers(tf_asset_category = "132kV Transformer (GM)",
location_risk = "Default",
type_risk = "Default")
expected_value <- 31968
expect_equal(res, expected_value)
})
context("COF:Safety Transformers: HV")
test_that("20kV Transformer (GM)", {
res <- safety_cof_transformers(tf_asset_category = "20kV Transformer (GM)",
location_risk = "Default",
type_risk = "Default")
expected_value <- 4262
expect_equal(res, expected_value)
})
#-----------------------------------------
context("COF:Env Transformers: EHV")
test_that("33kV Transformer (GM)", {
res <- environmental_cof_transformers(tf_asset_category = "33kV Transformer (GM)",
prox_water = 95, bunded = "Yes", size_kva_mva = 20, size_conversion = "33/20kV")
expected_value <- 11352
expect_equal(res, expected_value)
})
context("COF:Env Transformers: 132kV")
test_that("132kV Transformer (GM)", {
res <- environmental_cof_transformers(tf_asset_category = "132kV Transformer (GM)",
prox_water = 95, bunded = "Yes", size_kva_mva = 20,
size_conversion = "132/33kV")
expected_value <- 11684.8
expect_equal(res, expected_value)
})
context("COF:Env Transformers: HV")
test_that("20kV Transformer (GM)", {
res <- environmental_cof_transformers(tf_asset_category = "20kV Transformer (GM)",
prox_water = 95, bunded = "Yes", size_kva_mva = 20)
expected_value <- 951.3
expect_equal(res, expected_value)
})
#-----------------------------------------
context("COF:Network Transformers: EHV")
test_that("33kV Transformer", {
res <- network_cof_transformers(tf_asset_category = "33kV Transformer (GM)",
actual_load_mva = 15) %>% round(1)
expected_value <- 24098.5
expect_equal(res, expected_value)
})
context("COF:Network Transformers: 132kV")
test_that("132kV transformer", {
res <- network_cof_transformers(tf_asset_category = "132kV Transformer (GM)",
actual_load_mva = 75) %>% round(1)
expected_value <- 239862.2
expect_equal(res, expected_value)
})
|
# Pass functions as arguments to other functions, and evaluate data with
# Examples of how it should act:
# 1. evaluate(sum, c(2, 4, 6)) should evaluate to 12
# 2. evaluate(median, c(7, 40, 9)) should evaluate to 9
# 3. evaluate(floor, 11.1) should evaluate to 11
evaluate <- function(func, dat){
  # Apply the supplied function `func` to the data `dat` and return its result.
  # Examples: evaluate(sum, c(2, 4, 6)) == 12; evaluate(floor, 11.1) == 11.
  do.call(func, list(dat))
}
| /evaluate.R | no_license | dassiorleando/cousera-Rprogramming-week2 | R | false | false | 412 | r | # Pass functions as arguments to other functions, and evaluate data with
# Examples of how it should act:
# 1. evaluate(sum, c(2, 4, 6)) should evaluate to 12
# 2. evaluate(median, c(7, 40, 9)) should evaluate to 9
# 3. evaluate(floor, 11.1) should evaluate to 11
evaluate <- function(func, dat){
# Write your code here!
# Remember: the last expression evaluated will be returned!
func(dat)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tracelines.R
\name{tsc}
\alias{tsc}
\title{True scores with standard errors}
\usage{
tsc(ip, theta)
}
\arguments{
\item{ip}{Item parameters: the output of \code{est}, or a 3-column matrix
corresponding to its first element, \code{est}.}
\item{theta}{An object containing ability estimates, as output by function
\code{mlebme} or \code{eap}}
}
\value{
A matrix with the true scores in column 1, and their standard errors
of measurement (SEM) in column 2
}
\description{
Computes the IRT true scores (test response function at the estimated
ability) and an estimate of their standard error via the delta theorem,
treating item parameters as known).
}
\examples{
th <- mlebme(resp=Scored, ip=Scored2pl)
tsc(Scored2pl, th)
}
\seealso{
\code{\link{mlebme}}, \code{\link{eap}}, \code{\link{trf}}
}
\author{
Ivailo Partchev
}
\keyword{models}
| /man/tsc.Rd | no_license | cran/irtoys | R | false | true | 918 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tracelines.R
\name{tsc}
\alias{tsc}
\title{True scores with standard errors}
\usage{
tsc(ip, theta)
}
\arguments{
\item{ip}{Item parameters: the output of \code{est}, or a 3-column matrix
corresponding to its first element, \code{est}.}
\item{theta}{An object containing ability estimates, as output by function
\code{mlebme} or \code{eap}}
}
\value{
A matrix with the true scores in column 1, and their standard errors
of measurement (SEM) in column 2
}
\description{
Computes the IRT true scores (test response function at the estimated
ability) and an estimate of their standard error via the delta theorem,
treating item parameters as known).
}
\examples{
th <- mlebme(resp=Scored, ip=Scored2pl)
tsc(Scored2pl, th)
}
\seealso{
\code{\link{mlebme}}, \code{\link{eap}}, \code{\link{trf}}
}
\author{
Ivailo Partchev
}
\keyword{models}
|
#Load in all the required libraries
library (caret)
library(rpart)
library(rpart.plot)
set.seed(1001)
#Read in the data
data <- read.csv('LMO.csv',stringsAsFactors = F)
######
#NOTE: IC in this code stands for the initial discharge capacities
#EC stands for the end discharge capacities
######################### Section 1. Data splitting ##################################
data_splitting <- function(dat, split_ratio) {
  # Randomly partition the rows of `dat` into a train/test pair.
  # `split_ratio` is the fraction of rows assigned to the *test* element.
  total_rows <- nrow(dat)
  test_size <- round(total_rows * split_ratio)
  test_rows <- sample(total_rows, test_size, replace = FALSE)
  list(
    train = dat[-test_rows, ],
    test = dat[test_rows, ]
  )
}
#Split into train, validate and test
# 20% of all rows are held out as the final test set.
split_root <- data_splitting(data,0.2)
train = split_root$train
test = split_root$test
# A further 10% of the training rows is held out as a validation set
# (re-drawn inside each training loop below).
split_10_fold <- data_splitting(train,0.1)
training = split_10_fold$train
validate =split_10_fold$test
#Build the decision_tree_model
############################## Section 2. Optimise the complexity factor parameter (cp) in each case ######################
####### IC
# 10-fold cross-validation via caret to choose the best cp for each target;
# the winning cp values are hard-coded into the rpart() calls in Sections 3-4.
caret.control <- trainControl(method = "cv", number = 10, savePredictions = TRUE)
DT_IC <- train(IC ~ M + Mn + M_EN + Mr + LC_a + CD, data = train, method ="rpart",
trControl= caret.control)
####### EC
DT_EC <- train(EC ~ M + Mn + M_EN + Mr + LC_a + CD, data = train, method ="rpart",
trControl= caret.control)
# Print the optimal cp found for each target.
DT_IC$bestTune
DT_EC$bestTune
################################ Section 3. IC model training ####################################
# Ten resampled train/validate fits of the IC (initial discharge capacity)
# decision tree at the cp chosen in Section 2; per-fold predictions and RMSEs
# are accumulated and written out below.
IC_train_data <- data.frame(0,0,0)
names(IC_train_data)<- c('Fold','train_predict_IC','train_experimental_IC')
results_IC_train_error <- data.frame (0,0)
names(results_IC_train_error) <- c('fold','RMSE_train')
for (fold in 1:10){
split_10_fold <- data_splitting(train,0.1)
training = split_10_fold$train
validate =split_10_fold$test
decision_tree_ic <-rpart(IC ~ M + Mn + M_EN + Mr + LC_a + CD,data = training, cp=0.03598739)
# BUG FIX: the original predicted with `decision_tree` (the EC model, which is
# undefined at this point on a fresh run); the IC model is `decision_tree_ic`.
predict_IC_train<- predict(decision_tree_ic,validate)
RMSE_train <- sqrt(mean((validate$IC-predict_IC_train)^2))
new_results_IC_train <- data.frame(fold,RMSE_train)
names(new_results_IC_train) <- c('fold','RMSE_train')
new_IC_train_data <- data.frame(fold,predict_IC_train,validate$IC)
names(new_IC_train_data) <- c('Fold','train_predict_IC','train_experimental_IC')
results_IC_train_error <- rbind(results_IC_train_error, new_results_IC_train)
IC_train_data<-rbind(IC_train_data,new_IC_train_data)
}
# NOTE(review): new_results_IC_train holds only the last fold's row, so
# dropping row 1 leaves it empty; results_IC_train_error was probably intended
# for both lines -- kept as in the original.
new_results_IC_train[-c(1),];
results_IC_train_error[-c(1),]
#Make prediction on the test set (BUG FIX: was `decision_tree`)
predict <- predict(decision_tree_ic,test)
predict
#Calculate the RMSE on the prediction
RMSE <- sqrt(mean((predict-test$IC)^2))
RMSE
mean(new_results_IC_train$RMSE_train)
#Check the variable importance of the IC model (BUG FIX: was `decision_tree`)
varImp(decision_tree_ic)
### Combine the observed and predicted values
# BUG FIX: the original bound test$EC here; this is the IC section.
IC_test_data <- cbind(test$IC,predict)
IC_test_data
##### Save the results-file
saveRDS(decision_tree_ic,'DT-IC.RDS')
write.csv(IC_train_data,'DT-IC_TRAIN.csv')
write.csv(results_IC_train_error,'DT-IC-TRAIN-FOLD.csv')
write.csv(IC_test_data,'DT-IC-TEST.csv')
write.csv(varImp(decision_tree_ic),'DT-IC-VAR-IMPO.csv')
################################ Section 4. EC model training ####################################
# Ten resampled train/validate fits of the EC (end discharge capacity)
# decision tree at the cp chosen in Section 2, mirroring Section 3.
EC_train_data <- data.frame(0,0,0)
names(EC_train_data)<- c('Fold','train_predict_EC','train_experimental_EC')
results_EC_train_error <- data.frame (0,0)
names(results_EC_train_error) <- c('fold','RMSE_train')
for (fold in 1:10){
split_10_fold <- data_splitting(train,0.1)
training = split_10_fold$train
validate =split_10_fold$test
# `decision_tree` is the EC model throughout this section.
decision_tree <-rpart(EC ~ M + Mn + M_EN + Mr + LC_a + CD,data = training, cp=0.06079231)
predict_EC_train<- predict(decision_tree,validate)
RMSE_train <- sqrt(mean((validate$EC-predict_EC_train)^2))
new_results_EC_train <- data.frame(fold,RMSE_train)
names(new_results_EC_train) <- c('fold','RMSE_train')
new_EC_train_data <- data.frame(fold,predict_EC_train,validate$EC)
names(new_EC_train_data) <- c('Fold','train_predict_EC','train_experimental_EC')
results_EC_train_error <- rbind(results_EC_train_error, new_results_EC_train)
EC_train_data<-rbind(EC_train_data,new_EC_train_data)
}
new_results_EC_train[-c(1),];
results_EC_train_error[-c(1),]
#View the cross-validation results
# BUG FIX: `decision_tree_cv` is never defined anywhere in this script; the
# caret-trained EC model from Section 2 is `DT_EC`.  Also use the actual
# element name `results` instead of relying on partial matching of `$result`.
pre<-DT_EC$results
#Summarise the model complexity
summary(DT_EC)
#Make prediction on the test set
predict <- predict(decision_tree,test)
predict
#Calculate the RMSE on the prediction
RMSE <- sqrt(mean((predict-test$EC)^2))
RMSE
mean(new_results_EC_train$RMSE_train)
#Check for the variable importance
varImp(decision_tree)
### Combine the observed and predicted values
EC_test_data <- cbind(test$EC,predict)
EC_test_data
##### Save the results-file
saveRDS(decision_tree,'DT-EC.RDS')
write.csv(EC_train_data,'DT-EC_TRAIN.csv')
write.csv(results_EC_train_error,'DT-EC-TRAIN-FOLD.csv')
write.csv(EC_test_data,'DT-EC-TEST.csv')
write.csv(varImp(decision_tree),'DT-EC-VAR-IMPO.csv')
| /Decision_tree.R | no_license | thepowerligand/LMO-ML | R | false | false | 5,090 | r |
#Load in all the required libraries
library (caret)
library(rpart)
library(rpart.plot)
set.seed(1001)
#Read in the data
data <- read.csv('LMO.csv',stringsAsFactors = F)
######
#NOTE: IC in this code stands for the initial discharge capacities
#EC stands for the end discharge capacities
######################### Section 1. Data splitting ##################################
data_splitting <- function (dat, split_ratio)
{
n <- nrow (dat)
n_split <- round (n*split_ratio)
ind <- sample(n,n_split, replace = F)
train <- dat[-ind,]
test <- dat[ind,]
return (list( train = train, test = test))
}
#Split into train, validate and test
split_root <- data_splitting(data,0.2)
train = split_root$train
test = split_root$test
split_10_fold <- data_splitting(train,0.1)
training = split_10_fold$train
validate =split_10_fold$test
#Build the decision_tree_model
############################## Section 2. Optimise the complexity factor parameter (cp) in each case ######################
####### IC
caret.control <- trainControl(method = "cv", number = 10, savePredictions = TRUE)
DT_IC <- train(IC ~ M + Mn + M_EN + Mr + LC_a + CD, data = train, method ="rpart",
trControl= caret.control)
####### EC
DT_EC <- train(EC ~ M + Mn + M_EN + Mr + LC_a + CD, data = train, method ="rpart",
trControl= caret.control)
DT_IC$bestTune
DT_EC$bestTune
################################ Section 3. IC model training ####################################
IC_train_data <- data.frame(0,0,0)
names(IC_train_data)<- c('Fold','train_predict_IC','train_experimental_IC')
results_IC_train_error <- data.frame (0,0)
names(results_IC_train_error) <- c('fold','RMSE_train')
for (fold in 1:10){
split_10_fold <- data_splitting(train,0.1)
training = split_10_fold$train
validate =split_10_fold$test
decision_tree_ic <-rpart(IC ~ M + Mn + M_EN + Mr + LC_a + CD,data = training, cp=0.03598739)
predict_IC_train<- predict(decision_tree,validate)
RMSE_train <- sqrt(mean((validate$IC-predict_IC_train)^2))
new_results_IC_train <- data.frame(fold,RMSE_train)
names(new_results_IC_train) <- c('fold','RMSE_train')
new_IC_train_data <- data.frame(fold,predict_IC_train,validate$IC)
names(new_IC_train_data) <- c('Fold','train_predict_IC','train_experimental_IC')
results_IC_train_error <- rbind(results_IC_train_error, new_results_IC_train)
IC_train_data<-rbind(IC_train_data,new_IC_train_data)
}
new_results_IC_train[-c(1),];
results_IC_train_error[-c(1),]
#Make prediction on the test side with respect to test set
predict <- predict(decision_tree,test)
predict
#Calculate the RMSQ on the prediction
RMSE <- sqrt(mean((predict-test$IC)^2))
RMSE
mean(new_results_IC_train$RMSE_train)
#Check for the variable importance
varImp(decision_tree)
### Combine the obv and predict variables
IC_test_data <- cbind(test$EC,predict)
IC_test_data
##### Save the results-file
saveRDS(decision_tree_ic,'DT-IC.RDS')
write.csv(IC_train_data,'DT-IC_TRAIN.csv')
write.csv(results_IC_train_error,'DT-IC-TRAIN-FOLD.csv')
write.csv(IC_test_data,'DT-IC-TEST.csv')
write.csv(varImp(decision_tree_ic),'DT-IC-VAR-IMPO.csv')
################################ Section 4. EC model training ####################################
EC_train_data <- data.frame(0,0,0)
names(EC_train_data)<- c('Fold','train_predict_EC','train_experimental_EC')
results_EC_train_error <- data.frame (0,0)
names(results_EC_train_error) <- c('fold','RMSE_train')
for (fold in 1:10){
split_10_fold <- data_splitting(train,0.1)
training = split_10_fold$train
validate =split_10_fold$test
decision_tree <-rpart(EC ~ M + Mn + M_EN + Mr + LC_a + CD,data = training, cp=0.06079231)
predict_EC_train<- predict(decision_tree,validate)
RMSE_train <- sqrt(mean((validate$EC-predict_EC_train)^2))
new_results_EC_train <- data.frame(fold,RMSE_train)
names(new_results_EC_train) <- c('fold','RMSE_train')
new_EC_train_data <- data.frame(fold,predict_EC_train,validate$EC)
names(new_EC_train_data) <- c('Fold','train_predict_EC','train_experimental_EC')
results_EC_train_error <- rbind(results_EC_train_error, new_results_EC_train)
EC_train_data<-rbind(EC_train_data,new_EC_train_data)
}
new_results_EC_train[-c(1),];
results_EC_train_error[-c(1),]
#View the training results
pre<-decision_tree_cv$result
#Summary the model complexity
summary(decision_tree_cv)
#Make prediction on the test side with respect to test set
predict <- predict(decision_tree,test)
predict
#Calculate the RMSQ on the prediction
RMSE <- sqrt(mean((predict-test$EC)^2))
RMSE
mean(new_results_EC_train$RMSE_train)
#Check for the variable importance
varImp(decision_tree)
### Combine the obv and predict variables
EC_test_data <- cbind(test$EC,predict)
EC_test_data
##### Save the results-file
saveRDS(decision_tree,'DT-EC.RDS')
write.csv(EC_train_data,'DT-EC_TRAIN.csv')
write.csv(results_EC_train_error,'DT-EC-TRAIN-FOLD.csv')
write.csv(EC_test_data,'DT-EC-TEST.csv')
write.csv(varImp(decision_tree),'DT-EC-VAR-IMPO.csv')
|
# ------------------------------------------------------------------------------
# Plot distribution of signals
# ------------------------------------------------------------------------------
# NOTE(review): rm(list = ls()) in scripts is discouraged; prefer running the
# script in a fresh session.
rm(list = ls())
library(tidyverse)
#----- File names --------------------------------------------------------------
source("options_master_trans_serology.R")
# Input/output paths; `data_file_path` is defined by the sourced options script.
fn <- list(
i = list( # input
c19 = data_file_path
# c19 = '../data/dbs_covid19.v2.RData'
),
o = list( # output
ut = "../results/distribution of MFIs of each binder.pdf"
)
)
# Check all exists
# ('.' is prepended so file.exists() never receives an empty vector)
stopifnot(all(file.exists(c('.', unlist(fn$i)))),
all(file.exists(dirname(c('.', unlist(fn$o))))))
#----- MAIN --------------------------------------------------------------------
# Loads the objects used below -- `mfi` (sample x binder signal matrix),
# `sinfo` (sample info) and `binder` (binder annotation); names inferred from
# usage, confirm against the RData file.
load(fn$i$c19)
stopifnot(identical(rownames(mfi), sinfo$id))
# dummy binder IDs to minimize any errors
colnames(mfi) <- binder$id <- paste0("b", seq(1, ncol(mfi)))
# Study samples: ids matching "CBC+01" with at least one OK spot.
si_mfi <- mfi %>%
as_tibble() %>%
bind_cols(sinfo, .) %>%
filter(grepl("CBC\\+01", id)) %>%
filter(Nr_OK_spots > 0)
# Positive/negative control samples.
ctrl_mfi <- mfi %>%
as_tibble() %>%
bind_cols(sinfo, .) %>%
filter(grepl("Pos|Neg", Control))
## Plot
# One PDF page per binder: control histograms (density scaled by 1/10) overlaid
# with per-plate density curves of the study samples, x axis on a log10 scale.
# NOTE(review): aes_string() is deprecated in current ggplot2; consider
# aes(.data[[ii]]) when the package version allows.
pdf(fn$o$ut)
for(ii in binder$id) {
is_IgA <- binder$detect_Ab[binder$id == ii] == "IgA"
p <- ggplot() +
aes_string(x = ii) +
scale_x_log10() +
# Axis label: "<detected antibody> - <binder name>".
xlab(
paste(
binder$detect_Ab[binder$id == ii],
"-",
binder$Binder[binder$id == ii]
)
) +
ylab("Density") +
geom_histogram(
aes(y= stat(density)/10, fill = Control),
data = ctrl_mfi,
bins = 30,
alpha = 0.4
) +
scale_fill_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
# IgA binders are coloured by the IgA-specific plate column.
geom_density(
aes(color= if(is_IgA) plate_iga else plate),
data = si_mfi,
na.rm= T,
lwd = 2
) +
guides(
color = guide_legend(order = 1),
fill = guide_legend(
order = 2,
override.aes = list(
linetype = "blank"
)
)
) +
labs(
color= "Plate"
)
print(p)
}
dev.off()
| /plot-distribution_of_MFI.R | no_license | Schwenk-Lab/covid19-serology | R | false | false | 2,119 | r | # ------------------------------------------------------------------------------
# Plot distribution of signals
# ------------------------------------------------------------------------------
rm(list = ls())
library(tidyverse)
#----- File names --------------------------------------------------------------
source("options_master_trans_serology.R")
fn <- list(
i = list( # input
c19 = data_file_path
# c19 = '../data/dbs_covid19.v2.RData'
),
o = list( # output
ut = "../results/distribution of MFIs of each binder.pdf"
)
)
# Check all exists
stopifnot(all(file.exists(c('.', unlist(fn$i)))),
all(file.exists(dirname(c('.', unlist(fn$o))))))
#----- MAIN --------------------------------------------------------------------
load(fn$i$c19)
stopifnot(identical(rownames(mfi), sinfo$id))
# dummy binder IDs to minimize any errors
colnames(mfi) <- binder$id <- paste0("b", seq(1, ncol(mfi)))
si_mfi <- mfi %>%
as_tibble() %>%
bind_cols(sinfo, .) %>%
filter(grepl("CBC\\+01", id)) %>%
filter(Nr_OK_spots > 0)
ctrl_mfi <- mfi %>%
as_tibble() %>%
bind_cols(sinfo, .) %>%
filter(grepl("Pos|Neg", Control))
## Plot
pdf(fn$o$ut)
for(ii in binder$id) {
is_IgA <- binder$detect_Ab[binder$id == ii] == "IgA"
p <- ggplot() +
aes_string(x = ii) +
scale_x_log10() +
xlab(
paste(
binder$detect_Ab[binder$id == ii],
"-",
binder$Binder[binder$id == ii]
)
) +
ylab("Density") +
geom_histogram(
aes(y= stat(density)/10, fill = Control),
data = ctrl_mfi,
bins = 30,
alpha = 0.4
) +
scale_fill_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
geom_density(
aes(color= if(is_IgA) plate_iga else plate),
data = si_mfi,
na.rm= T,
lwd = 2
) +
guides(
color = guide_legend(order = 1),
fill = guide_legend(
order = 2,
override.aes = list(
linetype = "blank"
)
)
) +
labs(
color= "Plate"
)
print(p)
}
dev.off()
|
library(rerf)
library(data.table)
# Benchmark configuration: a single repetition with a single tree.
nTimes <- 1
num_trees <- 1
#ML <- c(32,48)
# ML holds the core counts to benchmark (here just one core).
ML <- c(1)
# Placeholder values used only to initialise the results data.frame below;
# its first row is dropped before writing out.
dataset <- "temp"
algorithm <- "temp"
numCores <- 0
time <- 0
maxPercent <- .95
# Bin sizes as fractions of the row count.  The seq() grid is immediately
# overwritten with a single tiny value, so only 0.00005 is actually run.
binSizePercent <- c(seq(.05,maxPercent,.05),1)
binSizePercent <- .00005
resultData <- data.frame(as.character(dataset), algorithm, numCores, time,time, stringsAsFactors=FALSE)
# Benchmark loop: for each algorithm / core count / bin size, draw a fresh
# 80/20 train/test split of HIGGS, fit an fpRerF forest, and record the wall
# time and test-set accuracy.
for(algName in c("rfBase")){
####################################################
########## HIGGS1
####################################################
# Column 1 of HIGGS.csv is the class label; the rest are features.
x <- as.matrix(fread(file="../../res/HIGGS.csv", header=FALSE, sep=","))
y <- as.integer(x[,1])
x <- as.matrix(x[, -1])
smp_size <- floor(0.80*nrow(x))
gc()
for (p in ML){
for(j in binSizePercent){
binSize <- j*nrow(x)
for (i in 1:nTimes){
train_ind <- sample(seq_len(nrow(x)),size=smp_size)
X <- x[train_ind,]
Y <- y[train_ind]
Xt <- x[-train_ind,]
Yt <- y[-train_ind]
gc()
# Time only the model fit + prediction.
ptm <- proc.time()
forest <- fpRerF(X =X, Y = Y, forestType=algName,minParent=1,numCores=p,numTreesInForest=num_trees,nodeSizeToBin=binSize, nodeSizeBin=binSize)
# forest <- RerF(X,Y, trees=num_trees, bagging=.3, min.parent=1, max.depth=0, store.oob=TRUE, stratify=TRUE, num.cores=p, seed=sample(1:100000,1))
predictions <- fpPredict(forest, Xt)
# NOTE(review): this is the fraction of *correct* predictions (accuracy),
# despite being named `error`.
error <- sum(predictions==Yt)/length(Yt)
# Elapsed (wall-clock) time, element 3 of proc.time().
ptm_hold <- (proc.time() - ptm)[3]
resultData <- rbind(resultData, c("higgs 10M", "fastRF(Bin)",j, ptm_hold,error))
rm(forest)
}
}
}
}
# Drop the placeholder initialisation row, coerce column types, and append the
# results to bench.csv (no header, comma-separated).
resultData <- resultData[2:nrow(resultData),]
resultData[,1] <- as.factor(resultData[,1])
resultData[,2] <- as.factor(resultData[,2])
resultData[,3] <- as.numeric(resultData[,3])
resultData[,4] <- as.numeric(resultData[,4])
write.table(resultData, file="bench.csv", col.names=FALSE, row.names=FALSE, append=TRUE, sep=",", quote=FALSE)
| /IO_Chapter/test6b/testBigRF.R | no_license | jbrowne6/fpExperiments | R | false | false | 1,934 | r | library(rerf)
# Benchmark: time fpRerF ("fastRF") on HIGGS for a sweep of node-binning
# sizes, appending results to bench.csv. (library(rerf) is loaded earlier.)
library(data.table)
# Number of repeated train/test splits per configuration.
nTimes <- 1
num_trees <- 1
#ML <- c(32,48)
# Core counts to sweep over (single-core run here).
ML <- c(1)
dataset <- "temp"
algorithm <- "temp"
numCores <- 0
time <- 0
maxPercent <- .95
binSizePercent <- c(seq(.05,maxPercent,.05),1)
# NOTE(review): this overwrites the sweep above; only 0.00005 is tested.
binSizePercent <- .00005
# Placeholder seed row so rbind() has a frame to append to; dropped later.
resultData <- data.frame(as.character(dataset), algorithm, numCores, time,time, stringsAsFactors=FALSE)
##############################
######### Now test with binning
#############################
for(algName in c("rfBase")){
####################################################
########## HIGGS1
####################################################
# First column of HIGGS.csv is the class label; the remainder are features.
x <- as.matrix(fread(file="../../res/HIGGS.csv", header=FALSE, sep=","))
y <- as.integer(x[,1])
x <- as.matrix(x[, -1])
# 80/20 train/test split.
smp_size <- floor(0.80*nrow(x))
gc()
for (p in ML){
for(j in binSizePercent){
binSize <- j*nrow(x)
for (i in 1:nTimes){
train_ind <- sample(seq_len(nrow(x)),size=smp_size)
X <- x[train_ind,]
Y <- y[train_ind]
Xt <- x[-train_ind,]
Yt <- y[-train_ind]
gc()
ptm <- proc.time()
forest <- fpRerF(X =X, Y = Y, forestType=algName,minParent=1,numCores=p,numTreesInForest=num_trees,nodeSizeToBin=binSize, nodeSizeBin=binSize)
# forest <- RerF(X,Y, trees=num_trees, bagging=.3, min.parent=1, max.depth=0, store.oob=TRUE, stratify=TRUE, num.cores=p, seed=sample(1:100000,1))
predictions <- fpPredict(forest, Xt)
# NOTE(review): despite the name, this stores the ACCURACY, not the error.
error <- sum(predictions==Yt)/length(Yt)
# Elapsed wall-clock seconds for train + predict.
ptm_hold <- (proc.time() - ptm)[3]
resultData <- rbind(resultData, c("higgs 10M", "fastRF(Bin)",j, ptm_hold,error))
rm(forest)
}
}
}
}
# Drop the placeholder seed row, then coerce column types (column 5, the
# accuracy, is left as character -- TODO confirm intended).
resultData <- resultData[2:nrow(resultData),]
resultData[,1] <- as.factor(resultData[,1])
resultData[,2] <- as.factor(resultData[,2])
resultData[,3] <- as.numeric(resultData[,3])
resultData[,4] <- as.numeric(resultData[,4])
write.table(resultData, file="bench.csv", col.names=FALSE, row.names=FALSE, append=TRUE, sep=",", quote=FALSE)
|
library(openintro)
data(COL)
#===> load in the data set from fdicHistograms <===#
# Provides `br` (fine histogram breaks) and `counts` (per-bin counts) --
# presumably also `dens`, used by the plotting code below; confirm vs the .rda.
load("../fdicHistograms/fdicHistograms.rda")
# Rebin the loaded histogram onto two coarser grids:
#   BR[[1]]: 10-unit bins, BR[[2]]: 2.5-unit bins.
# MIDS are the midpoints of the original bins (upper break minus 0.25,
# i.e. assuming 0.5-wide source bins -- TODO confirm against the .rda).
# NOTE(review): this block originally appeared twice verbatim; the second,
# identical pass recomputed the same BR/MIDS/COUNTS from unchanged inputs,
# so the duplicate has been removed.
BR <- list()
MIDS <- br[-1]-0.25
BR[[1]] <- seq(110, 210, 10)
BR[[2]] <- seq(115, 210, 2.5)
COUNTS <- list()
for(i in 1:2){
COUNTS[[i]] <- rep(0, length(BR[[i]])-1)
for(j in 1:(length(BR[[i]])-1)){
# Original-bin midpoints falling inside [BR[[i]][j], BR[[i]][j+1]).
these <- apply(cbind(MIDS < BR[[i]][j+1], MIDS >= BR[[i]][j]), 1, all)
if(any(these)){
COUNTS[[i]][j] <- sum(counts[these])
}
}
}
# Draw a filled step-histogram from precomputed breaks (BR) and bin counts
# (COUNTS), optionally rescaled to a probability density.
#
# Args:
#   BR:          bin breaks (length = number of bins + 1)
#   COUNTS:      bin counts (length = number of bins)
#   col, border: fill and outline colours
#   probability: if TRUE, rescale counts so the total area is 1
#   xlab/ylab/xlim/ylim: usual plot controls; NULL means "derive a default"
#   ...:         passed through to plot()
# Returns: NULL invisibly; called for its plotting side effect.
histTemp <- function(BR, COUNTS, col=fadeColor(COL[1], "10"), border=COL[1,4], probability=TRUE, xlab='', ylab=NULL, xlim=NULL, ylim=NULL, ...){
br <- BR
h <- COUNTS
if(probability){
# Convert counts to a density: divide by total count and by bin width.
h <- h/sum(h)/diff(br)
}
if(is.null(ylab)){
ylab <- 'frequency'
if(probability){
ylab <- 'probability'
}
}
if(is.null(xlim)[1]){
xR <- range(br)
# Pad the x range by 5% on each side.
xlim <- xR + c(-0.05, 0.05)*diff(xR)
}
if(is.null(ylim)[1]){
ylim <- range(c(0,h))
}
# cat()
# Empty canvas; bars are drawn manually below.
plot(-1, -1, xlab=xlab, ylab=ylab, xlim=xlim, ylim=ylim, type='n', ...)
abline(h=0)
lines(c(br[1],br[1]), c(0,h[1]), col=border)
# For each bar: draw the left riser only where the step goes up, then the
# top and right edges, then fill with rect(); draw order shapes the outline.
for(i in 1:length(h)){
if(i > 1){
if(h[i] > h[i-1]){
lines(rep(br[i],2), h[c(i-1,i)], col=border)
}
}
lines(br[i+0:1], rep(h[i],2), col=border)
lines(rep(br[i+1],2), c(0,h[i]), col=border)
rect(br[i], 0, br[i+1], h[i], col=col, border=border)
}
}
# Render the figure: coarse histogram plus a smoothed density curve, with the
# 180-185 slice shaded and its approximate probability computed.
pdf('fdicHeightContDistFilled.pdf', 5.7, 2.75)
par(mfrow=c(1,1), mar=c(3, 1, 0.1, 1), mgp=c(1.8, 0.7, 0))
histTemp(BR[[2]], COUNTS[[2]], col=fadeColor(COL[1], "10"), border=COL[1,4], xlim=c(125, 210), axes=FALSE, xlab='height (cm)', ylab='', probability=TRUE)
axis(1)
# NOTE(review): `dens` is not created in this script; presumably it comes
# from the fdicHistograms.rda loaded above -- confirm.
lines(dens$x, dens$y, col=COL[1], lwd=2)
these <- dens$x > 180 & dens$x < 185
polygon(c(dens$x[these][1], dens$x[these], rev(dens$x[these])[1]), c(0, dens$y[these], 0), col=COL[1], border=COL[1])
# Riemann-sum approximation of the shaded area (assumes a uniform x grid).
sum(dens$y[these]*diff(dens$x[1:2]))
dev.off() | /Introduction to Probability and Statistics/02/figures/fdicHeightContDistFilled/fdicHeightContDistFilled.R | no_license | nishanmudalige/ebooks | R | false | false | 2,175 | r | library(openintro)
data(COL)
#===> load in the data set from fdicHistograms <===#
# Provides `br` (fine histogram breaks) and `counts` (per-bin counts).
load("../fdicHistograms/fdicHistograms.rda")
# Rebin onto two coarser grids: BR[[1]] = 10-unit bins, BR[[2]] = 2.5-unit
# bins. MIDS are the original bin midpoints (upper break minus 0.25).
BR <- list()
MIDS <- br[-1]-0.25
BR[[1]] <- seq(110, 210, 10)
BR[[2]] <- seq(115, 210, 2.5)
COUNTS <- list()
for(i in 1:2){
COUNTS[[i]] <- rep(0, length(BR[[i]])-1)
for(j in 1:(length(BR[[i]])-1)){
these <- apply(cbind(MIDS < BR[[i]][j+1], MIDS >= BR[[i]][j]), 1, all)
if(any(these)){
COUNTS[[i]][j] <- sum(counts[these])
}
}
}
# NOTE(review): the block below is an exact repeat of the block above and
# recomputes identical values from unchanged inputs; it looks like
# accidental duplication and could be deleted.
BR <- list()
MIDS <- br[-1]-0.25
BR[[1]] <- seq(110, 210, 10)
BR[[2]] <- seq(115, 210, 2.5)
COUNTS <- list()
for(i in 1:2){
COUNTS[[i]] <- rep(0, length(BR[[i]])-1)
for(j in 1:(length(BR[[i]])-1)){
these <- apply(cbind(MIDS < BR[[i]][j+1], MIDS >= BR[[i]][j]), 1, all)
if(any(these)){
COUNTS[[i]][j] <- sum(counts[these])
}
}
}
# Draw a filled step-histogram from precomputed breaks (BR) and counts
# (COUNTS), optionally rescaled to a probability density.
# Returns NULL invisibly; called for its plotting side effect.
histTemp <- function(BR, COUNTS, col=fadeColor(COL[1], "10"), border=COL[1,4], probability=TRUE, xlab='', ylab=NULL, xlim=NULL, ylim=NULL, ...){
br <- BR
h <- COUNTS
if(probability){
# Convert counts to a density: divide by total count and by bin width.
h <- h/sum(h)/diff(br)
}
if(is.null(ylab)){
ylab <- 'frequency'
if(probability){
ylab <- 'probability'
}
}
if(is.null(xlim)[1]){
xR <- range(br)
# Pad the x range by 5% on each side.
xlim <- xR + c(-0.05, 0.05)*diff(xR)
}
if(is.null(ylim)[1]){
ylim <- range(c(0,h))
}
# cat()
# Empty canvas; bars are drawn manually below.
plot(-1, -1, xlab=xlab, ylab=ylab, xlim=xlim, ylim=ylim, type='n', ...)
abline(h=0)
lines(c(br[1],br[1]), c(0,h[1]), col=border)
# Outline each bar (left riser only where the step rises, then top and
# right edges), then fill with rect().
for(i in 1:length(h)){
if(i > 1){
if(h[i] > h[i-1]){
lines(rep(br[i],2), h[c(i-1,i)], col=border)
}
}
lines(br[i+0:1], rep(h[i],2), col=border)
lines(rep(br[i+1],2), c(0,h[i]), col=border)
rect(br[i], 0, br[i+1], h[i], col=col, border=border)
}
}
# Render the figure: coarse histogram + density curve, shade 180-185 and
# compute the slice's approximate probability.
pdf('fdicHeightContDistFilled.pdf', 5.7, 2.75)
par(mfrow=c(1,1), mar=c(3, 1, 0.1, 1), mgp=c(1.8, 0.7, 0))
histTemp(BR[[2]], COUNTS[[2]], col=fadeColor(COL[1], "10"), border=COL[1,4], xlim=c(125, 210), axes=FALSE, xlab='height (cm)', ylab='', probability=TRUE)
axis(1)
# NOTE(review): `dens` is not defined in this script; presumably loaded
# from fdicHistograms.rda -- confirm.
lines(dens$x, dens$y, col=COL[1], lwd=2)
these <- dens$x > 180 & dens$x < 185
polygon(c(dens$x[these][1], dens$x[these], rev(dens$x[these])[1]), c(0, dens$y[these], 0), col=COL[1], border=COL[1])
# Riemann-sum approximation of the shaded area (assumes a uniform x grid).
sum(dens$y[these]*diff(dens$x[1:2]))
dev.off() |
#remove zero columns
# removeColsAllZeros: drop every column of `ddt` whose entries are all zero.
# All-NA columns are also dropped, since colSums(..., na.rm = TRUE) of an
# all-NA column is 0.
# Args:    ddt -- a data.frame/data.table or matrix of numeric values.
# Returns: a matrix containing only the columns with at least one nonzero
#          entry (note: a single surviving column is returned as a vector).
removeColsAllZeros = function(ddt) {
m <- as.matrix(ddt)
# isNumericColList <- lapply(1:ncol(m), function(ii,mm){is.numeric(mm[,ii])}, mm=m)
# indexNonNumericCols <- which(!unlist(isNumericColList))
# Keep columns whose absolute-value sum is nonzero.
mnz <- m[, colSums(abs(m),na.rm = TRUE) != 0]
return(mnz)
} | /common_funcs.R | no_license | zhangxz1123/Kaggle | R | false | false | 317 | r | #remove zero columns
# removeColsAllZeros: drop every column of `ddt` whose entries are all zero
# (all-NA columns are also dropped, since their na.rm colSums is 0).
# Args:    ddt -- a data.frame/data.table or matrix of numeric values.
# Returns: a matrix of the columns with at least one nonzero entry.
removeColsAllZeros = function(ddt) {
m <- as.matrix(ddt)
# isNumericColList <- lapply(1:ncol(m), function(ii,mm){is.numeric(mm[,ii])}, mm=m)
# indexNonNumericCols <- which(!unlist(isNumericColList))
# Keep columns whose absolute-value sum is nonzero.
mnz <- m[, colSums(abs(m),na.rm = TRUE) != 0]
return(mnz)
} |
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.70676154919481e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
# Replay the AFL-generated argument list against CNull's internal C++ routine
# (fuzz/regression harness; `:::` reaches a non-exported symbol).
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615772056-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 362 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.70676154919481e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
# Replay the AFL-generated argument list against CNull's internal C++ routine
# (fuzz/regression harness; `:::` reaches a non-exported symbol).
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
# Kaggle Titanic: engineer features, fit a seeded XGBoost ensemble and a
# random forest, and write one submission file per model.
# NOTE(review): rm(list = ls()) in a script is an anti-pattern; consider
# running in a fresh session instead.
rm(list = ls())
library(data.table)
library(Matrix)
library(xgboost)
library(caret)
library(dplyr)
cat("Read data")
df_train <- fread('/home/mustafa/Desktop/titanic/train.csv', sep=",", na.strings = "NA")
df_test <- fread("/home/mustafa/Desktop/titanic/test.csv" , sep=",", na.strings = "NA")
# Print per-column NA counts (inspection only; results are not stored).
df_test %>% summarise_each(funs(sum(is.na(.))))
df_train %>% summarise_each(funs(sum(is.na(.))))
# Mean-impute Age, separately within train and test.
df_test[is.na(df_test$Age),"Age"] <- mean(df_test$Age, na.rm = TRUE)
df_train[is.na(df_train$Age),"Age"] <- mean(df_train$Age, na.rm = TRUE)
# Stack train + test so engineered features are consistent across both.
data = rbind(df_train,df_test,fill=T)
# Title: the token between the comma and the period in the Name field.
data$Title <- gsub('(.*, )|(\\..*)', '', data$Name)
rare_title <- c('Dona', 'Lady', 'the Countess','Capt', 'Col', 'Don',
                'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer')
# Also reassign mlle, ms, and mme accordingly
data$Title[data$Title == 'Mlle'] <- 'Miss'
data$Title[data$Title == 'Ms'] <- 'Miss'
data$Title[data$Title == 'Mme'] <- 'Mrs'
data$Title[data$Title %in% rare_title] <- 'Rare Title'
data$Surname <- sapply(data$Name,
                       function(x) strsplit(x, split = '[,.]')[[1]][1])
# Create a family size variable including the passenger themselves
data$Fsize <- data$SibSp + data$Parch + 1
# Discretized family size.
data$FsizeD[data$Fsize == 1] <- 'singleton'
data$FsizeD[data$Fsize < 5 & data$Fsize > 1] <- 'small'
data$FsizeD[data$Fsize > 4] <- 'large'
data$isAlone <- 0
data[data$Fsize == 1,"isAlone"] <- 1
data$Child[data$Age < 18] <- 1
data$Child[data$Age >= 18] <- 0
data$Mother <- 0
data$Mother[data$Sex == 'female' & data$Parch > 0 & data$Age > 18 & data$Title != 'Miss'] <- 1
# Deck = first letter of the cabin code.
data$Deck <- factor(sapply(data$Cabin, function(x) unlist(strsplit(x, NULL)[[1]][1])))
# Drop identifier-like columns (Deck is created above and dropped here too).
data <- data[,-c("Ticket","Name","Surname", "Cabin", "Deck")]
ohe_feats = c('Sex', 'Embarked', 'Title', 'FsizeD')
# Convert categorical features to factors.
# NOTE(review): `level =` relies on partial matching of factor()'s `levels`
# argument -- spell it out as `levels =` to be safe.
for (f in ohe_feats){
levels = unique(data[[f]])
data[[f]] = factor(data[[f]], level = levels)
}
# one-hot-encoding features (dummyVars approach below is disabled)
#data = as.data.frame(data)
#dummies = dummyVars(~ Survived, Pclass, Sex, SibSp ,Parch, Embarked, Title, FsizeD, isAlone, Fsize, data = data)
#df_all_ohe <- as.data.frame(predict(dummies, newdata = data))
#df_all_combined <- cbind(data[,-c(which(colnames(data) %in% ohe_feats))],df_all_ohe)
#data = as.data.table(df_all_combined)
# Split the stacked table back into train/test and build xgboost matrices.
train = data[data$PassengerId %in% df_train$PassengerId,]
y_train <- train[!is.na(Survived),Survived]
train = train[,Survived:=NULL]
train = train[,PassengerId:=NULL]
train_sparse <- data.matrix(train)
test = data[data$PassengerId %in% df_test$PassengerId,]
test_ids <- test[,PassengerId]
test[,Survived:=NULL]
test[,PassengerId:=NULL]
test_sparse <- data.matrix(test)
dtrain <- xgb.DMatrix(data=train_sparse, label=y_train)
dtest <- xgb.DMatrix(data=test_sparse);
gc()
# Params for xgboost
param <- list(booster = "gbtree",
              eval_metric = "auc",
              objective = "binary:logistic",
              eta = .11,
              gamma = 1,
              max_depth = 6,
              min_child_weight = 1,
              subsample = .7,
              colsample_bytree = .7)
rounds = 72
# One prediction column per seed; averaged into an ensemble below.
mpreds = data.table(id=test_ids)
for(random.seed.num in 1:10) {
print(paste("[", random.seed.num , "] training xgboost begin ",sep=""," : ",Sys.time()))
set.seed(random.seed.num)
xgb_model <- xgb.train(data = dtrain,
                       params = param,
                       watchlist = list(train = dtrain),
                       nrounds = rounds,
                       verbose = 1,
                       print_every_n = 5)
vpreds = predict(xgb_model,dtest)
mpreds = cbind(mpreds, vpreds)
colnames(mpreds)[random.seed.num+1] = paste("pred_seed_", random.seed.num, sep="")
}
# Average the per-seed probabilities and threshold at 0.5 for class labels.
mpreds_2 = mpreds[, id:= NULL]
mpreds_2 = mpreds_2[, y := rowMeans(.SD)]
mpreds_2[mpreds_2$y <= 0.5,"x"] <- 0
mpreds_2[mpreds_2$y > 0.5,"x"] <- 1
submission = data.table(PassengerId=test_ids, Survived=mpreds_2$x)
write.table(submission, "titanic_xgboost.csv", sep=",", dec=".", quote=FALSE, row.names=FALSE)
# Baseline comparison: a single random forest on the same features.
library(randomForest)
fit <- randomForest(as.factor(y_train) ~ .,
                    data=train,
                    importance=TRUE,
                    do.trace=50,
                    ntree=200)
pred_test <- predict(fit, test)
submission_rf = data.table(PassengerId=test_ids, Survived=pred_test)
write.table(submission_rf, "titanic_rf.csv", sep=",", dec=".", quote=FALSE, row.names=FALSE)
| /titanic.R | no_license | mustfkeskin/Titanic-Machine-Learning-from-Disaster | R | false | false | 4,409 | r | rm(list = ls())
# Kaggle Titanic: engineer features, fit a seeded XGBoost ensemble and a
# random forest, and write one submission file per model.
library(data.table)
library(Matrix)
library(xgboost)
library(caret)
library(dplyr)
cat("Read data")
df_train <- fread('/home/mustafa/Desktop/titanic/train.csv', sep=",", na.strings = "NA")
df_test <- fread("/home/mustafa/Desktop/titanic/test.csv" , sep=",", na.strings = "NA")
# Print per-column NA counts (inspection only; results are not stored).
df_test %>% summarise_each(funs(sum(is.na(.))))
df_train %>% summarise_each(funs(sum(is.na(.))))
# Mean-impute Age, separately within train and test.
df_test[is.na(df_test$Age),"Age"] <- mean(df_test$Age, na.rm = TRUE)
df_train[is.na(df_train$Age),"Age"] <- mean(df_train$Age, na.rm = TRUE)
# Stack train + test so engineered features are consistent across both.
data = rbind(df_train,df_test,fill=T)
# Title: the token between the comma and the period in the Name field.
data$Title <- gsub('(.*, )|(\\..*)', '', data$Name)
rare_title <- c('Dona', 'Lady', 'the Countess','Capt', 'Col', 'Don',
                'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer')
# Also reassign mlle, ms, and mme accordingly
data$Title[data$Title == 'Mlle'] <- 'Miss'
data$Title[data$Title == 'Ms'] <- 'Miss'
data$Title[data$Title == 'Mme'] <- 'Mrs'
data$Title[data$Title %in% rare_title] <- 'Rare Title'
data$Surname <- sapply(data$Name,
                       function(x) strsplit(x, split = '[,.]')[[1]][1])
# Create a family size variable including the passenger themselves
data$Fsize <- data$SibSp + data$Parch + 1
# Discretized family size.
data$FsizeD[data$Fsize == 1] <- 'singleton'
data$FsizeD[data$Fsize < 5 & data$Fsize > 1] <- 'small'
data$FsizeD[data$Fsize > 4] <- 'large'
data$isAlone <- 0
data[data$Fsize == 1,"isAlone"] <- 1
data$Child[data$Age < 18] <- 1
data$Child[data$Age >= 18] <- 0
data$Mother <- 0
data$Mother[data$Sex == 'female' & data$Parch > 0 & data$Age > 18 & data$Title != 'Miss'] <- 1
# Deck = first letter of the cabin code.
data$Deck <- factor(sapply(data$Cabin, function(x) unlist(strsplit(x, NULL)[[1]][1])))
# Drop identifier-like columns (Deck is created above and dropped here too).
data <- data[,-c("Ticket","Name","Surname", "Cabin", "Deck")]
ohe_feats = c('Sex', 'Embarked', 'Title', 'FsizeD')
# Convert categorical features to factors.
# NOTE(review): `level =` relies on partial matching of factor()'s `levels`.
for (f in ohe_feats){
levels = unique(data[[f]])
data[[f]] = factor(data[[f]], level = levels)
}
# one-hot-encoding features (dummyVars approach below is disabled)
#data = as.data.frame(data)
#dummies = dummyVars(~ Survived, Pclass, Sex, SibSp ,Parch, Embarked, Title, FsizeD, isAlone, Fsize, data = data)
#df_all_ohe <- as.data.frame(predict(dummies, newdata = data))
#df_all_combined <- cbind(data[,-c(which(colnames(data) %in% ohe_feats))],df_all_ohe)
#data = as.data.table(df_all_combined)
# Split the stacked table back into train/test and build xgboost matrices.
train = data[data$PassengerId %in% df_train$PassengerId,]
y_train <- train[!is.na(Survived),Survived]
train = train[,Survived:=NULL]
train = train[,PassengerId:=NULL]
train_sparse <- data.matrix(train)
test = data[data$PassengerId %in% df_test$PassengerId,]
test_ids <- test[,PassengerId]
test[,Survived:=NULL]
test[,PassengerId:=NULL]
test_sparse <- data.matrix(test)
dtrain <- xgb.DMatrix(data=train_sparse, label=y_train)
dtest <- xgb.DMatrix(data=test_sparse);
gc()
# Params for xgboost
param <- list(booster = "gbtree",
              eval_metric = "auc",
              objective = "binary:logistic",
              eta = .11,
              gamma = 1,
              max_depth = 6,
              min_child_weight = 1,
              subsample = .7,
              colsample_bytree = .7)
rounds = 72
# One prediction column per seed; averaged into an ensemble below.
mpreds = data.table(id=test_ids)
for(random.seed.num in 1:10) {
print(paste("[", random.seed.num , "] training xgboost begin ",sep=""," : ",Sys.time()))
set.seed(random.seed.num)
xgb_model <- xgb.train(data = dtrain,
                       params = param,
                       watchlist = list(train = dtrain),
                       nrounds = rounds,
                       verbose = 1,
                       print_every_n = 5)
vpreds = predict(xgb_model,dtest)
mpreds = cbind(mpreds, vpreds)
colnames(mpreds)[random.seed.num+1] = paste("pred_seed_", random.seed.num, sep="")
}
# Average the per-seed probabilities and threshold at 0.5 for class labels.
mpreds_2 = mpreds[, id:= NULL]
mpreds_2 = mpreds_2[, y := rowMeans(.SD)]
mpreds_2[mpreds_2$y <= 0.5,"x"] <- 0
mpreds_2[mpreds_2$y > 0.5,"x"] <- 1
submission = data.table(PassengerId=test_ids, Survived=mpreds_2$x)
write.table(submission, "titanic_xgboost.csv", sep=",", dec=".", quote=FALSE, row.names=FALSE)
# Baseline comparison: a single random forest on the same features.
library(randomForest)
fit <- randomForest(as.factor(y_train) ~ .,
                    data=train,
                    importance=TRUE,
                    do.trace=50,
                    ntree=200)
pred_test <- predict(fit, test)
submission_rf = data.table(PassengerId=test_ids, Survived=pred_test)
write.table(submission_rf, "titanic_rf.csv", sep=",", dec=".", quote=FALSE, row.names=FALSE)
|
# Load TPH soil-sample data for the Greybull site, build spatial points, and
# inspect the high-TPH (likely LNAPL) footprint.
library(readxl)
library(tidyverse)
library(sp)
library(rgdal)
library(rmarkdown)
# Sheet 2 holds the soil TPH results.
tph_data<-read_excel("M:/OtoR/Phillips66/Projects/Greybull/ProjectDocuments/SiteCharterztn/Reports/OldVersions/DRAFT_2013_SiteChar/Refs-TechMemos/SmearZone/NewLNAPLBodyEval_GPworking.xlsx", sheet=2)
tph_data$`X Coordinate`<-as.numeric(tph_data$`X Coordinate`)
tph_data$`Y Coordinate`<-as.numeric(tph_data$`Y Coordinate`)
# File geodatabase with the smear-zone evaluation layer.
fgdb<-"M:/OtoR/Phillips66/Projects/Greybull/GIS/Projects/SiteCharacterization/Report_2014/Data/Greybull_SCR_Data.gdb"
subset(ogrDrivers(), grepl("GDB", name))
fc_list <- ogrListLayers(fgdb)
print(fc_list)
fc <- readOGR(dsn=fgdb,layer="Smear_Zone_Eval")
dirty<-fc$Location[fc$SZ_Presence_Analytical=="Dirty"]
# NOTE(review): `tph_shp` is referenced here but not created until two lines
# below -- this errors unless tph_shp already exists in the session; move
# this after the SpatialPointsDataFrame call.
tbl1<-as_tibble(tph_shp[tph_shp$`DRO+GRO`>250,c(1,4,17)])
tbl1$Date.Sampled<-as.Date(tbl1$Date.Sampled)
# Points for rows with valid coordinates.
tph_shp<- SpatialPointsDataFrame(tph_data[!is.na(tph_data$`X Coordinate`),c("X Coordinate","Y Coordinate")],
                                 data= tph_data[!is.na(tph_data$`X Coordinate`),],
                                 proj4string = CRS("+init=EPSG:3737")) # wyoming state plane east central NAD83
plot(tph_shp)
writeOGR(tph_shp, dsn = getwd(), layer = "tph", driver = "ESRI Shapefile", overwrite_layer=TRUE)
# Locations with total TPH (DRO+GRO) above 1000, i.e. likely LNAPL.
tph_data$`Location ID`[tph_data$`DRO+GRO`>1000]
points(tph_shp[tph_shp$`DRO+GRO`>1000,], pch=19)
# NOTE(review): %Like% is not base R (data.table/DescTools provide similar
# operators); confirm the intended package is attached.
which(tph_shp$`Location ID` %Like% 'AOC5-BH6%')
tph_shp$`Location ID`[which(grepl('^AOC5-BH6',tph_shp$`Location ID`))]
points(tph_shp[c(28,29),],pch="X")
# NOTE(review): `==` does not do wildcard matching; comparing to 'AOC5-BH9%'
# is almost certainly not what was intended (use grepl).
tph_shp$`Location ID`=='AOC5-BH9%'
# Extent of the high-TPH footprint in X and Y.
max(tph_shp[tph_shp$`DRO+GRO`>1000,]$`X Coordinate`)-min(tph_shp[tph_shp$`DRO+GRO`>1000,]$`X Coordinate`)
max(tph_shp[tph_shp$`DRO+GRO`>1000,]$`Y Coordinate`)-min(tph_shp[tph_shp$`DRO+GRO`>1000,]$`Y Coordinate`)
# CONVERSION of TPH to LNAPL Saturation
## see http://naplansr.com/conversion-of-tph-to-napl-saturation-volume-2-issue-1-january-2012/
# Convert a soil TPH concentration to an estimated LNAPL saturation.
#
# Args:
#   TPH:  total petroleum hydrocarbons in soil (mass ratio, mg/kg scale
#         implied by the 10^-6 factor)
#   phi:  soil porosity (fraction)
#   rho:  LNAPL density
#   rhop: soil particle density
# Returns: estimated LNAPL saturation (fraction of pore space).
sn <- function(TPH, phi, rho, rhop) {
  TPH * (1 - phi) * rhop * 10^-6 / (phi * rho)
}
# NOTE(review): the trailing comment says "above 1000" but the filter uses
# 250 -- confirm which threshold is intended.
TPH<-mean(tph_shp[tph_shp$`DRO+GRO`>250,]$`DRO+GRO`) # mean TPH in soil samples with total TPH above 1000, i.e., indicative of LNAPL
# NOTE(review): this immediately overwrites the mean above with the 90th
# percentile of the >1000 samples; only the quantile is actually used.
TPH<-quantile(tph_shp[tph_shp$`DRO+GRO`>1000,]$`DRO+GRO`,.9)
rhop<-2.65 # particle density
rho<-0.8 # LNAPL density
phi<-0.348 # porosity
sn(TPH,phi,rho,rhop) # average LNAPL saturation
# Interquartile extent of the high-TPH footprint in X and Y.
quantile(tph_shp[tph_shp$`DRO+GRO`>1000,]$`X Coordinate`,.75)-quantile(tph_shp[tph_shp$`DRO+GRO`>1000,]$`X Coordinate`,0.25)
quantile(tph_shp[tph_shp$`DRO+GRO`>1000,]$`Y Coordinate`,.75)-quantile(tph_shp[tph_shp$`DRO+GRO`>1000,]$`Y Coordinate`,0.25)
library(knitr)
# Render the memo, save the sample table for reuse, and read appendix data.
rmarkdown::render('Markdown.Rmd', output_file = "output.html")
save(tbl1,file="tbl1.Rda")
import<-read_csv("SECOR_AppendixE_data.csv")
| /TPH_LNAPL_Sat.R | no_license | tmolone1/Greybull | R | false | false | 2,696 | r | library(readxl)
# Load TPH soil-sample data, build spatial points, and inspect the high-TPH
# (likely LNAPL) footprint. (library(readxl) is loaded earlier.)
library(tidyverse)
library(sp)
library(rgdal)
library(rmarkdown)
# Sheet 2 holds the soil TPH results.
tph_data<-read_excel("M:/OtoR/Phillips66/Projects/Greybull/ProjectDocuments/SiteCharterztn/Reports/OldVersions/DRAFT_2013_SiteChar/Refs-TechMemos/SmearZone/NewLNAPLBodyEval_GPworking.xlsx", sheet=2)
tph_data$`X Coordinate`<-as.numeric(tph_data$`X Coordinate`)
tph_data$`Y Coordinate`<-as.numeric(tph_data$`Y Coordinate`)
# File geodatabase with the smear-zone evaluation layer.
fgdb<-"M:/OtoR/Phillips66/Projects/Greybull/GIS/Projects/SiteCharacterization/Report_2014/Data/Greybull_SCR_Data.gdb"
subset(ogrDrivers(), grepl("GDB", name))
fc_list <- ogrListLayers(fgdb)
print(fc_list)
fc <- readOGR(dsn=fgdb,layer="Smear_Zone_Eval")
dirty<-fc$Location[fc$SZ_Presence_Analytical=="Dirty"]
# NOTE(review): `tph_shp` is used here before it is created two lines below.
tbl1<-as_tibble(tph_shp[tph_shp$`DRO+GRO`>250,c(1,4,17)])
tbl1$Date.Sampled<-as.Date(tbl1$Date.Sampled)
# Points for rows with valid coordinates.
tph_shp<- SpatialPointsDataFrame(tph_data[!is.na(tph_data$`X Coordinate`),c("X Coordinate","Y Coordinate")],
                                 data= tph_data[!is.na(tph_data$`X Coordinate`),],
                                 proj4string = CRS("+init=EPSG:3737")) # wyoming state plane east central NAD83
plot(tph_shp)
writeOGR(tph_shp, dsn = getwd(), layer = "tph", driver = "ESRI Shapefile", overwrite_layer=TRUE)
# Locations with total TPH (DRO+GRO) above 1000, i.e. likely LNAPL.
tph_data$`Location ID`[tph_data$`DRO+GRO`>1000]
points(tph_shp[tph_shp$`DRO+GRO`>1000,], pch=19)
# NOTE(review): %Like% is not base R; confirm the providing package.
which(tph_shp$`Location ID` %Like% 'AOC5-BH6%')
tph_shp$`Location ID`[which(grepl('^AOC5-BH6',tph_shp$`Location ID`))]
points(tph_shp[c(28,29),],pch="X")
# NOTE(review): `==` does no wildcard matching; 'AOC5-BH9%' looks unintended.
tph_shp$`Location ID`=='AOC5-BH9%'
# Extent of the high-TPH footprint in X and Y.
max(tph_shp[tph_shp$`DRO+GRO`>1000,]$`X Coordinate`)-min(tph_shp[tph_shp$`DRO+GRO`>1000,]$`X Coordinate`)
max(tph_shp[tph_shp$`DRO+GRO`>1000,]$`Y Coordinate`)-min(tph_shp[tph_shp$`DRO+GRO`>1000,]$`Y Coordinate`)
# CONVERSION of TPH to LNAPL Saturation
## see http://naplansr.com/conversion-of-tph-to-napl-saturation-volume-2-issue-1-january-2012/
# Convert soil TPH to an estimated LNAPL saturation fraction, given porosity
# phi, LNAPL density rho, and particle density rhop (10^-6 converts the
# mg/kg-scale mass ratio). Returns the saturation as a fraction of pore space.
sn<-function (TPH, phi, rho, rhop) {
sn=TPH*(1-phi)*rhop*10^-6/(phi*rho)
return(sn)
}
# NOTE(review): the trailing comment says "above 1000" but the filter uses 250.
TPH<-mean(tph_shp[tph_shp$`DRO+GRO`>250,]$`DRO+GRO`) # mean TPH in soil samples with total TPH above 1000, i.e., indicative of LNAPL
# NOTE(review): the mean is immediately overwritten by the 90th percentile.
TPH<-quantile(tph_shp[tph_shp$`DRO+GRO`>1000,]$`DRO+GRO`,.9)
rhop<-2.65 # particle density
rho<-0.8 # LNAPL density
phi<-0.348 # porosity
sn(TPH,phi,rho,rhop) # average LNAPL saturation
# Interquartile extent of the high-TPH footprint in X and Y.
quantile(tph_shp[tph_shp$`DRO+GRO`>1000,]$`X Coordinate`,.75)-quantile(tph_shp[tph_shp$`DRO+GRO`>1000,]$`X Coordinate`,0.25)
quantile(tph_shp[tph_shp$`DRO+GRO`>1000,]$`Y Coordinate`,.75)-quantile(tph_shp[tph_shp$`DRO+GRO`>1000,]$`Y Coordinate`,0.25)
library(knitr)
# Render the memo, save the sample table for reuse, and read appendix data.
rmarkdown::render('Markdown.Rmd', output_file = "output.html")
save(tbl1,file="tbl1.Rda")
import<-read_csv("SECOR_AppendixE_data.csv")
|
#' Ensure an object is a data frame, with rownames moved into a column
#'
#' @param x a data.frame or matrix
#' @param newnames new column names, not including the rownames
#' @param newcol the name of the new rownames column
#'
#' @return a data.frame, with rownames moved into a column and new column
#'   names assigned
#'
#' @export
fix_data_frame <- function(x, newnames = NULL, newcol = "term") {
  if (!(is.null(newnames) || length(newnames) == ncol(x))) {
    stop("newnames must be NULL or have length equal to number of columns")
  }
  # Rownames equal to 1..n carry no information. This is also TRUE when
  # rownames are absent, since all() of a zero-length comparison is TRUE.
  trivial_rownames <- all(rownames(x) == seq_len(nrow(x)))
  if (trivial_rownames) {
    out <- data.frame(x, stringsAsFactors = FALSE)
    if (!is.null(newnames)) {
      colnames(out) <- newnames
    }
  } else {
    # Temporary column name for the rownames; replaced by `newcol` below.
    out <- data.frame(
      ...new.col... = rownames(x),
      unrowname(x),
      stringsAsFactors = FALSE
    )
    colnames(out)[1] <- newcol
    if (!is.null(newnames)) {
      colnames(out)[-1] <- newnames
    }
  }
  unrowname(out)
}
# Strip rownames from a data frame (or matrix), returning the modified copy.
unrowname <- function(x) {
  `rownames<-`(x, NULL)
}
# Drop NULL elements from a list/vector, preserving names and order.
compact <- function(x) x[!vapply(x, is.null, logical(1))]
#' Insert a row of NAs into a data frame wherever another data frame has NAs
#'
#' @param x data frame with one row for each complete (non-NA) row of
#'   `original`
#' @param original data frame that may contain NAs
insert_NAs <- function(x, original) {
  # Map each row of `original` to its row in `x`, leaving NA for rows with
  # missing values; subsetting with an NA index yields an all-NA row.
  row_map <- rep(NA, nrow(original))
  row_map[stats::complete.cases(original)] <- seq_len(nrow(x))
  x[row_map, ]
}
#' add fitted values, residuals, and other common outputs to
#' an augment call
#'
#' Add fitted values, residuals, and other common outputs to
#' the value returned from `augment`.
#'
#' In the case that a residuals or influence generic is not implemented for the
#' model, fail quietly.
#'
#' @param x a model
#' @param data original data onto which columns should be added
#' @param newdata new data to predict on, optional
#' @param type Type of prediction and residuals to compute
#' @param type.predict Type of prediction to compute; by default
#'   same as `type`
#' @param type.residuals Type of residuals to compute; by default
#'   same as `type`
#' @param se.fit Value to pass to predict's `se.fit`, or NULL for
#'   no value
#' @param ... extra arguments (not used)
#'
#' @export
augment_columns <- function(x, data, newdata, type, type.predict = type,
                            type.residuals = type, se.fit = TRUE, ...) {
  # Collapse NULL or all-NA results to NULL so the column is not added.
  notNAs <- function(o) if (is.null(o) || all(is.na(o))) {
    NULL
  } else {
    o
  }
  # "possibly" wrappers: each generic returns NULL instead of erroring when
  # it is not implemented for this model class.
  residuals0 <- purrr::possibly(stats::residuals, NULL)
  influence0 <- purrr::possibly(stats::influence, NULL)
  cooks.distance0 <- purrr::possibly(stats::cooks.distance, NULL)
  rstandard0 <- purrr::possibly(stats::rstandard, NULL)
  predict0 <- purrr::possibly(stats::predict, NULL)
  # call predict with arguments
  args <- list(x)
  if (!missing(newdata)) {
    args$newdata <- newdata
  }
  if (!missing(type.predict)) {
    args$type <- type.predict
  }
  args$se.fit <- se.fit
  args <- c(args, list(...))
  if ("panelmodel" %in% class(x)) {
    # work around for panel models (plm)
    # stat::predict() returns wrong fitted values when applied to random or fixed effect panel models [plm(..., model="random"), plm(, ..., model="pooling")]
    # It works only for pooled OLS models (plm( ..., model="pooling"))
    pred <- model.frame(x)[, 1] - residuals(x)
  } else {
    # suppress warning: geeglm objects complain about predict being used
    pred <- suppressWarnings(do.call(predict0, args))
  }
  if (is.null(pred)) {
    # try "fitted" instead- some objects don't have "predict" method
    pred <- do.call(stats::fitted, args)
  }
  # predict(..., se.fit = TRUE) returns a list with $fit and $se.fit.
  if (is.list(pred)) {
    ret <- data.frame(.fitted = pred$fit)
    ret$.se.fit <- pred$se.fit
  } else {
    ret <- data.frame(.fitted = as.numeric(pred))
  }
  # S4 models do not support stats::na.action directly; read the attribute
  # off the model frame instead.
  na_action <- if (isS4(x)) {
    attr(stats::model.frame(x), "na.action")
  } else {
    stats::na.action(x)
  }
  # Residual/influence columns only make sense on the training data, i.e.
  # when no newdata was supplied.
  if (missing(newdata) || is.null(newdata)) {
    if (!missing(type.residuals)) {
      ret$.resid <- residuals0(x, type = type.residuals)
    } else {
      ret$.resid <- residuals0(x)
    }
    infl <- influence0(x, do.coef = FALSE)
    if (!is.null(infl)) {
      # NOTE(review): is_mgcv() is defined elsewhere in the package;
      # mgcv's influence() apparently returns the hat values directly
      # rather than a list -- confirm against is_mgcv's definition.
      if (is_mgcv(x)) {
        ret$.hat <- infl
        ret$.sigma <- NA
      } else {
        ret$.hat <- infl$hat
        ret$.sigma <- infl$sigma
      }
    }
    # if cooksd and rstandard can be computed and aren't all NA
    # (as they are in rlm), do so
    ret$.cooksd <- notNAs(cooks.distance0(x))
    ret$.std.resid <- notNAs(rstandard0(x))
    original <- data
    # NOTE(review): class(.) == "exclude" would warn/fail on multi-class
    # objects; inherits() would be safer, though na.action classes are
    # single here.
    if (class(na_action) == "exclude") {
      # check if values are missing
      if (length(stats::residuals(x)) > nrow(data)) {
        warning(
          "When fitting with na.exclude, rows with NA in ",
          "original data will be dropped unless those rows are provided ",
          "in 'data' argument"
        )
      }
    }
  } else {
    original <- newdata
  }
  if (is.null(na_action) || nrow(original) == nrow(ret)) {
    # no NAs were left out; we can simply recombine
    original <- fix_data_frame(original, newcol = ".rownames")
    return(unrowname(cbind(original, ret)))
  } else if (class(na_action) == "omit") {
    # if the option is "omit", drop those rows from the data
    original <- fix_data_frame(original, newcol = ".rownames")
    original <- original[-na_action, ]
    return(unrowname(cbind(original, ret)))
  }
  # add .rownames column to merge the results with the original; resilent to NAs
  ret$.rownames <- rownames(ret)
  original$.rownames <- rownames(original)
  ret <- merge(original, ret, by = ".rownames")
  # reorder to line up with original
  ret <- ret[order(match(ret$.rownames, rownames(original))), ]
  rownames(ret) <- NULL
  # if rownames are just the original 1...n, they can be removed
  if (all(ret$.rownames == seq_along(ret$.rownames))) {
    ret$.rownames <- NULL
  }
  ret
}
#' Add logLik, AIC, BIC, and other common measurements to a glance of
#' a prediction
#'
#' Helper used by several `glance` methods. Generics such as `logLik`,
#' `AIC`, and `BIC` are defined for many fitted-model classes (lm, glm,
#' nls, ...); this function appends their values to a one-row glance data
#' frame. Any measure that cannot be computed for `x` is skipped quietly.
#'
#' @details In one special case, deviance for objects of the
#'   `lmerMod` class from lme4 is computed with
#'   `deviance(x, REML=FALSE)`.
#'
#' @param ret a one-row data frame (a partially complete glance)
#' @param x the prediction model
#'
#' @return `ret` with columns added where available:
#'   \item{logLik}{log likelihoods}
#'   \item{AIC}{Akaike Information Criterion}
#'   \item{BIC}{Bayesian Information Criterion}
#'   \item{deviance}{deviance}
#'   \item{df.residual}{residual degrees of freedom}
#'
#'   Each of these is produced by the corresponding generic.
#'
#' @export
finish_glance <- function(ret, x) {
  # Evaluate `expr`, yielding NULL (so the column is silently omitted)
  # when the corresponding generic errors for this model class.
  or_null <- function(expr) tryCatch(expr, error = function(e) NULL)
  ret$logLik <- or_null(as.numeric(stats::logLik(x)))
  ret$AIC <- or_null(stats::AIC(x))
  ret$BIC <- or_null(stats::BIC(x))
  if (inherits(x, "lmerMod")) {
    # special case for REML objects (better way?)
    ret$deviance <- or_null(stats::deviance(x, REML = FALSE))
  } else {
    ret$deviance <- or_null(stats::deviance(x))
  }
  ret$df.residual <- or_null(df.residual(x))
  return(unrowname(ret))
}
#' Calculate confidence interval as a tidy data frame
#'
#' Return a confidence interval as a tidy data frame. This directly wraps the
#' [confint()] function, but ensures it follows broom conventions:
#' column names of `conf.low` and `conf.high`, and no row names
#'
#' @param x a model object for which [confint()] can be calculated
#' @param conf.level confidence level
#' @param func Function to use for computing confint
#' @param ... extra arguments passed on to `confint`
#'
#' @return A data frame with two columns: `conf.low` and `conf.high`.
#'
#' @seealso \link{confint}
#'
#' @export
confint_tidy <- function(x, conf.level = .95, func = stats::confint, ...) {
  # avoid "Waiting for profiling to be done..." message for some models
  ci <- suppressMessages(func(x, level = conf.level, ...))
  # Single-parameter models return a bare vector; promote it to one row.
  if (is.null(dim(ci))) {
    ci <- matrix(ci, nrow = 1)
  }
  # remove rows that are all NA. *not the same* as na.omit which checks
  # for any NA.
  all_na <- apply(ci, 1, function(x) all(is.na(x)))
  ci <- ci[!all_na,, drop = FALSE]
  colnames(ci) <- c("conf.low", "conf.high")
  as_tibble(ci)
}
#' Expand a dataset to include all factorial combinations of one or more
#' variables
#'
#' This function is deprecated: use `tidyr::crossing` instead
#'
#' @param df a tbl
#' @param ... arguments
#' @param stringsAsFactors logical specifying if character vectors are
#'   converted to factors.
#'
#' @return A tbl
#'
#' @import dplyr
#' @import tidyr
#'
#' @export
inflate <- function(df, ..., stringsAsFactors = FALSE) {
  .Deprecated("tidyr::crossing")
  # One row per combination of the supplied variables...
  ret <- expand.grid(..., stringsAsFactors = stringsAsFactors)
  # ...then attach a full copy of `df` to each combination and flatten.
  ret <- ret %>%
    group_by_all() %>%
    do(data = df) %>%
    ungroup() %>%
    tidyr::unnest(data)
  # Preserve grouping if the input was grouped.
  if (!is.null(groups(df))) {
    ret <- ret %>%
      group_by_all()
  }
  ret
}
# Utility adapted from tidyr::col_name: normalize a column specification
# (a string, a symbol, a missing argument, or NULL) into a column name.
col_name <- function(x, default = stop("Please supply column name", call. = FALSE)) {
  if (is.character(x)) {
    x
  } else if (identical(x, quote(expr = ))) {
    # Missing argument: fall back to `default` (which may itself error).
    default
  } else if (is.name(x)) {
    as.character(x)
  } else if (is.null(x)) {
    x
  } else {
    stop("Invalid column specification", call. = FALSE)
  }
}
| /R/utilities.R | no_license | josue-rodriguez/broom | R | false | false | 9,814 | r | #' Ensure an object is a data frame, with rownames moved into a column
#'
#' @param x a data.frame or matrix
#' @param newnames new column names, not including the rownames
#' @param newcol the name of the new rownames column
#'
#' @return a data.frame, with rownames moved into a column and new column
#' names assigned
#'
#' @export
fix_data_frame <- function(x, newnames = NULL, newcol = "term") {
  if (!is.null(newnames) && length(newnames) != ncol(x)) {
    stop("newnames must be NULL or have length equal to number of columns")
  }
  # Rownames equal to 1..n (or absent: all() of a zero-length comparison is
  # TRUE) carry no information and are not copied into a column.
  if (all(rownames(x) == seq_len(nrow(x)))) {
    # don't need to move rownames into a new column
    ret <- data.frame(x, stringsAsFactors = FALSE)
    if (!is.null(newnames)) {
      colnames(ret) <- newnames
    }
  }
  else {
    # Temporary column name for the rownames; replaced by `newcol` below.
    ret <- data.frame(
      ...new.col... = rownames(x),
      unrowname(x),
      stringsAsFactors = FALSE
    )
    colnames(ret)[1] <- newcol
    if (!is.null(newnames)) {
      colnames(ret)[-1] <- newnames
    }
  }
  unrowname(ret)
}
# strip rownames from a data frame (or matrix); returns the modified copy
unrowname <- function(x) {
  rownames(x) <- NULL
  x
}
# remove NULL items in a vector or list, preserving names and order
compact <- function(x) Filter(Negate(is.null), x)
#' insert a row of NAs into a data frame wherever another data frame has NAs
#'
#' @param x data frame that has one row for each non-NA row in original
#' @param original data frame with NAs
insert_NAs <- function(x, original) {
  # NA positions in the index vector yield all-NA rows when subsetting.
  indices <- rep(NA, nrow(original))
  indices[which(stats::complete.cases(original))] <- seq_len(nrow(x))
  x[indices, ]
}
#' add fitted values, residuals, and other common outputs to
#' an augment call
#'
#' Add fitted values, residuals, and other common outputs to
#' the value returned from `augment`.
#'
#' In the case that a residuals or influence generic is not implemented for the
#' model, fail quietly.
#'
#' @param x a model
#' @param data original data onto which columns should be added
#' @param newdata new data to predict on, optional
#' @param type Type of prediction and residuals to compute
#' @param type.predict Type of prediction to compute; by default
#' same as `type`
#' @param type.residuals Type of residuals to compute; by default
#' same as `type`
#' @param se.fit Value to pass to predict's `se.fit`, or NULL for
#' no value
#' @param ... extra arguments (not used)
#'
#' @export
augment_columns <- function(x, data, newdata, type, type.predict = type,
                            type.residuals = type, se.fit = TRUE, ...) {
  # helper: treat an all-NA column as "not available" so it is never added
  notNAs <- function(o) if (is.null(o) || all(is.na(o))) {
    NULL
  } else {
    o
  }
  # wrap the stats generics so that a missing method yields NULL instead of
  # an error (fail quietly, as documented above)
  residuals0 <- purrr::possibly(stats::residuals, NULL)
  influence0 <- purrr::possibly(stats::influence, NULL)
  cooks.distance0 <- purrr::possibly(stats::cooks.distance, NULL)
  rstandard0 <- purrr::possibly(stats::rstandard, NULL)
  predict0 <- purrr::possibly(stats::predict, NULL)
  # build the predict() argument list
  args <- list(x)
  if (!missing(newdata)) {
    args$newdata <- newdata
  }
  if (!missing(type.predict)) {
    args$type <- type.predict
  }
  args$se.fit <- se.fit
  args <- c(args, list(...))
  if ("panelmodel" %in% class(x)) {
    # work around for panel models (plm)
    # stats::predict() returns wrong fitted values when applied to random or
    # fixed effect panel models [plm(..., model="random"), plm(..., model="pooling")]
    # It works only for pooled OLS models (plm(..., model="pooling"))
    pred <- model.frame(x)[, 1] - residuals(x)
  } else {
    # suppress warning: geeglm objects complain about predict being used
    pred <- suppressWarnings(do.call(predict0, args))
  }
  if (is.null(pred)) {
    # try "fitted" instead - some objects don't have a "predict" method
    pred <- do.call(stats::fitted, args)
  }
  if (is.list(pred)) {
    # predict(..., se.fit = TRUE) returns a list of fit and se.fit vectors
    ret <- data.frame(.fitted = pred$fit)
    ret$.se.fit <- pred$se.fit
  } else {
    ret <- data.frame(.fitted = as.numeric(pred))
  }
  # recover the na.action used at fit time (S4 models keep it on the frame)
  na_action <- if (isS4(x)) {
    attr(stats::model.frame(x), "na.action")
  } else {
    stats::na.action(x)
  }
  if (missing(newdata) || is.null(newdata)) {
    if (!missing(type.residuals)) {
      ret$.resid <- residuals0(x, type = type.residuals)
    } else {
      ret$.resid <- residuals0(x)
    }
    infl <- influence0(x, do.coef = FALSE)
    if (!is.null(infl)) {
      if (is_mgcv(x)) {
        # for mgcv models the influence helper yields the hat values directly
        ret$.hat <- infl
        ret$.sigma <- NA
      } else {
        ret$.hat <- infl$hat
        ret$.sigma <- infl$sigma
      }
    }
    # if cooksd and rstandard can be computed and aren't all NA
    # (as they are in rlm), do so
    ret$.cooksd <- notNAs(cooks.distance0(x))
    ret$.std.resid <- notNAs(rstandard0(x))
    original <- data
    # use inherits() rather than `class(...) == ...`: class() may return a
    # vector of length > 1, which makes the `==` comparison unreliable
    if (inherits(na_action, "exclude")) {
      # check if values are missing
      if (length(stats::residuals(x)) > nrow(data)) {
        warning(
          "When fitting with na.exclude, rows with NA in ",
          "original data will be dropped unless those rows are provided ",
          "in 'data' argument"
        )
      }
    }
  } else {
    original <- newdata
  }
  if (is.null(na_action) || nrow(original) == nrow(ret)) {
    # no NAs were left out; we can simply recombine
    original <- fix_data_frame(original, newcol = ".rownames")
    return(unrowname(cbind(original, ret)))
  } else if (inherits(na_action, "omit")) {
    # if the option is "omit", drop those rows from the data
    original <- fix_data_frame(original, newcol = ".rownames")
    original <- original[-na_action, ]
    return(unrowname(cbind(original, ret)))
  }
  # add .rownames column to merge the results with the original; resilient to NAs
  ret$.rownames <- rownames(ret)
  original$.rownames <- rownames(original)
  ret <- merge(original, ret, by = ".rownames")
  # reorder to line up with original
  ret <- ret[order(match(ret$.rownames, rownames(original))), ]
  rownames(ret) <- NULL
  # if rownames are just the original 1...n, they can be removed
  if (all(ret$.rownames == seq_along(ret$.rownames))) {
    ret$.rownames <- NULL
  }
  ret
}
#' Add logLik, AIC, BIC, and other common measurements to a glance of
#' a prediction
#'
#' A helper function for several functions in the glance generic. Methods
#' such as logLik, AIC, and BIC are defined for many prediction
#' objects, such as lm, glm, and nls. This is a helper function that adds
#' them to a glance data.frame can be performed. If any of them cannot be
#' computed, it fails quietly.
#'
#' @details In one special case, deviance for objects of the
#' `lmerMod` class from lme4 is computed with
#' `deviance(x, REML=FALSE)`.
#'
#' @param ret a one-row data frame (a partially complete glance)
#' @param x the prediction model
#'
#' @return a one-row data frame with additional columns added, such as
#' \item{logLik}{log likelihoods}
#' \item{AIC}{Akaike Information Criterion}
#' \item{BIC}{Bayesian Information Criterion}
#' \item{deviance}{deviance}
#' \item{df.residual}{residual degrees of freedom}
#'
#' Each of these are produced by the corresponding generics
#'
#' @export
finish_glance <- function(ret, x) {
  # each measure fails quietly (column simply stays absent) when the
  # corresponding generic is not defined for x
  ret$logLik <- tryCatch(as.numeric(stats::logLik(x)), error = function(e) NULL)
  ret$AIC <- tryCatch(stats::AIC(x), error = function(e) NULL)
  ret$BIC <- tryCatch(stats::BIC(x), error = function(e) NULL)
  # special case for REML objects (better way?)
  if (inherits(x, "lmerMod")) {
    ret$deviance <- tryCatch(stats::deviance(x, REML = FALSE),
      error = function(e) NULL
    )
  } else {
    ret$deviance <- tryCatch(stats::deviance(x), error = function(e) NULL)
  }
  # namespaced like the calls above so we don't rely on stats being attached
  ret$df.residual <- tryCatch(stats::df.residual(x), error = function(e) NULL)
  return(unrowname(ret))
}
#' Calculate confidence interval as a tidy data frame
#'
#' Return a confidence interval as a tidy data frame. This directly wraps the
#' [confint()] function, but ensures it follows broom conventions:
#' column names of `conf.low` and `conf.high`, and no row names
#'
#' @param x a model object for which [confint()] can be calculated
#' @param conf.level confidence level
#' @param func Function to use for computing confint
#' @param ... extra arguments passed on to `confint`
#'
#' @return A data frame with two columns: `conf.low` and `conf.high`.
#'
#' @seealso \link{confint}
#'
#' @export
confint_tidy <- function(x, conf.level = .95, func = stats::confint, ...) {
  # some models print "Waiting for profiling to be done..."; silence it
  ci <- suppressMessages(func(x, level = conf.level, ...))
  # a single-parameter model yields a plain vector; promote to one-row matrix
  if (is.null(dim(ci))) {
    ci <- matrix(ci, nrow = 1)
  }
  # keep a row unless *every* entry is NA -- deliberately not na.omit(),
  # which would drop rows containing any NA at all
  keep <- !apply(ci, 1, function(row) all(is.na(row)))
  ci <- ci[keep, , drop = FALSE]
  colnames(ci) <- c("conf.low", "conf.high")
  as_tibble(ci)
}
#' Expand a dataset to include all factorial combinations of one or more
#' variables
#'
#' This function is deprecated: use `tidyr::crossing` instead
#'
#' @param df a tbl
#' @param ... arguments
#' @param stringsAsFactors logical specifying if character vectors are
#' converted to factors.
#'
#' @return A tbl
#'
#' @import dplyr
#' @import tidyr
#'
#' @export
inflate <- function(df, ..., stringsAsFactors = FALSE) {
  .Deprecated("tidyr::crossing")
  # build every factorial combination of the supplied variables
  ret <- expand.grid(..., stringsAsFactors = stringsAsFactors)
  # attach a full copy of df to each combination, then flatten the
  # list-column back into ordinary rows
  ret <- ret %>%
    group_by_all() %>%
    do(data = df) %>%
    ungroup() %>%
    tidyr::unnest(data)
  # if the input was a grouped tbl, return a grouped result as well
  # NOTE(review): groups are re-derived with group_by_all(), so they cover
  # all columns rather than only df's original grouping vars -- confirm
  # this matches the deprecated contract before changing
  if (!is.null(groups(df))) {
    ret <- ret %>%
      group_by_all()
  }
  ret
}
# Utility function borrowed from tidyr::col_name: resolve a column
# specification (string, bare name, missing arg, or NULL) to a column name.
col_name <- function(x, default = stop("Please supply column name", call. = FALSE)) {
  if (is.character(x)) {
    x
  } else if (identical(x, quote(expr = ))) {
    # a missing argument falls back to `default` (which may itself error)
    default
  } else if (is.name(x)) {
    as.character(x)
  } else if (is.null(x)) {
    x
  } else {
    stop("Invalid column specification", call. = FALSE)
  }
}
|
################################
# Project Data Cleaning 3rd week
################################
## run_analysis.R does the following:
## 1. Merges the training and the test sets to create one data set.
## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
## 3. Uses descriptive activity names to name the activities in the data set
## 4. Appropriately labels the data set with descriptive activity names.
## 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.

### Load the required libraries, installing them first when absent
if (!require("data.table")) {
  install.packages("data.table")
}
if (!require("reshape2")) {
  install.packages("reshape2")
}
library(data.table)
require(reshape2)

## 1. Merge the training and the test sets to create one data set.
# Activity labels (column 2 holds the descriptive name)
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")[, 2]
# Measurement (feature) names, used below as data column names
features <- read.table("./UCI HAR Dataset/features.txt")[, 2]

## 2.1 Extract only the measurements on the mean and standard deviation.
extract_features <- grepl("mean|std", features)

# --- Test set ---
x_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
names(x_test) <- features
# Keep only the mean/std measurement columns
x_test <- x_test[, extract_features]
# Attach the descriptive activity labels
y_test[, 2] <- activity_labels[y_test[, 1]]
names(y_test) <- c("Activity_ID", "Activity_Label")
names(subject_test) <- "subject"
# Combine subject, activity and measurement columns
test_data <- cbind(as.data.table(subject_test), y_test, x_test)

# --- Training set ---
x_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
names(x_train) <- features
## 2.2 Extract only the measurements on the mean and standard deviation.
x_train <- x_train[, extract_features]
# Attach the descriptive activity labels
y_train[, 2] <- activity_labels[y_train[, 1]]
names(y_train) <- c("Activity_ID", "Activity_Label")
names(subject_train) <- "subject"
# Combine subject, activity and measurement columns
train_data <- cbind(as.data.table(subject_train), y_train, x_train)

# Merge the test and train data
data <- rbind(test_data, train_data)

## 3. Use descriptive activity names to name the activities in the data set.
## 4. Appropriately label the data set with descriptive activity names.
id_labels <- c("subject", "Activity_ID", "Activity_Label")
data_labels <- setdiff(colnames(data), id_labels)
melt_data <- melt(data, id = id_labels, measure.vars = data_labels)

## 5. Second, independent tidy data set: the average of each variable for
## each activity and each subject (dcast applies mean per group).
tidy_data <- dcast(melt_data, subject + Activity_Label ~ variable, mean)
write.table(tidy_data, file = "./tidy_data.txt")
| /run_analysis.R | no_license | michelem999/GettingCleaningData | R | false | false | 3,242 | r |
################################
# Project Data Cleaning 3rd week
################################
## run_analysis.R does the following:
## 1. Merges the training and the test sets to create one data set.
## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
## 3. Uses descriptive activity names to name the activities in the data set
## 4. Appropriately labels the data set with descriptive activity names.
## 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.

### Load the required libraries, installing them first when absent
if (!require("data.table")) {
  install.packages("data.table")
}
if (!require("reshape2")) {
  install.packages("reshape2")
}
library(data.table)
require(reshape2)

## 1. Merge the training and the test sets to create one data set.
# Activity labels (column 2 holds the descriptive name)
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")[, 2]
# Measurement (feature) names, used below as data column names
features <- read.table("./UCI HAR Dataset/features.txt")[, 2]

## 2.1 Extract only the measurements on the mean and standard deviation.
extract_features <- grepl("mean|std", features)

# --- Test set ---
x_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
names(x_test) <- features
# Keep only the mean/std measurement columns
x_test <- x_test[, extract_features]
# Attach the descriptive activity labels
y_test[, 2] <- activity_labels[y_test[, 1]]
names(y_test) <- c("Activity_ID", "Activity_Label")
names(subject_test) <- "subject"
# Combine subject, activity and measurement columns
test_data <- cbind(as.data.table(subject_test), y_test, x_test)

# --- Training set ---
x_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
names(x_train) <- features
## 2.2 Extract only the measurements on the mean and standard deviation.
x_train <- x_train[, extract_features]
# Attach the descriptive activity labels
y_train[, 2] <- activity_labels[y_train[, 1]]
names(y_train) <- c("Activity_ID", "Activity_Label")
names(subject_train) <- "subject"
# Combine subject, activity and measurement columns
train_data <- cbind(as.data.table(subject_train), y_train, x_train)

# Merge the test and train data
data <- rbind(test_data, train_data)

## 3. Use descriptive activity names to name the activities in the data set.
## 4. Appropriately label the data set with descriptive activity names.
id_labels <- c("subject", "Activity_ID", "Activity_Label")
data_labels <- setdiff(colnames(data), id_labels)
melt_data <- melt(data, id = id_labels, measure.vars = data_labels)

## 5. Second, independent tidy data set: the average of each variable for
## each activity and each subject (dcast applies mean per group).
tidy_data <- dcast(melt_data, subject + Activity_Label ~ variable, mean)
write.table(tidy_data, file = "./tidy_data.txt")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rrepast-engine.R
\name{createOutputDir}
\alias{createOutputDir}
\title{Create output directory}
\usage{
createOutputDir()
}
\description{
A simple function to make a directory to save the
model's data.
}
\details{
Create, if required, the directory to save the
output data generated by the model. It is intended for internal
use.
}
| /man/createOutputDir.Rd | permissive | antonio-pgarcia/RRepast | R | false | true | 413 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rrepast-engine.R
\name{createOutputDir}
\alias{createOutputDir}
\title{Create output directory}
\usage{
createOutputDir()
}
\description{
A simple function to make a directory to save the
model's data.
}
\details{
Create, if required, the directory to save the
output data generated by the model. It is intended for internal
use.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{as.num}
\alias{as.num}
\title{Convert safely to numeric}
\usage{
as.num(x)
}
\arguments{
\item{x}{value}
}
\description{
Convert safely to numeric
}
| /man/as.num.Rd | no_license | ronkeizer/PKPDplot | R | false | true | 240 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{as.num}
\alias{as.num}
\title{Convert safely to numeric}
\usage{
as.num(x)
}
\arguments{
\item{x}{value}
}
\description{
Convert safely to numeric
}
|
#' @title Get Tiles from Open Map Servers
#' @name getTiles
#' @description Get map tiles based on a spatial object extent. Maps can be
#' fetched from various open map servers.
#' @param x an sf object, a simple feature collection or a Spatial*DataFrame.
#' @param spdf deprecated, a Spatial*DataFrame with a valid projection attribute.
#' @param type the tile server from which to get the map. See Details for providers.
#' For other sources use a list: type = list(src = "name of the source" ,
#' q = "tiles address", sub = "subdomains", cit = "how to cite the tiles"). See Examples.
#' @param zoom the zoom level. If null, it is determined automatically
#' (see Details).
#' @param crop TRUE if results should be cropped to the specified x extent,
#' FALSE otherwise. If x is an sf object with one POINT, crop is set to FALSE.
#' @param verbose if TRUE, tiles filepaths, zoom level and citation are displayed.
#' @param apikey Needed for Thunderforest maps.
#' @param cachedir name of a directory used to cache tiles. If TRUE, places a
#' 'tile.cache' folder in the working directory. If FALSE, tiles are not cached.
#' @param forceDownload if TRUE, cached tiles are downloaded again.
#' @details
#' Zoom levels are described on the OpenStreetMap wiki:
#' \url{https://wiki.openstreetmap.org/wiki/Zoom_levels}. \cr\cr
#' Full list of providers:
#' \tabular{lll}{
#' 'OpenStreetMap' (or 'osm') \tab 'Stamen' (or 'stamenbw') \tab 'Esri' \cr
#' 'OpenStreetMap.DE' \tab 'Stamen.Toner' \tab 'Esri.WorldStreetMap'\cr
#' 'OpenStreetMap.France' \tab 'Stamen.TonerBackground' \tab 'Esri.DeLorme'\cr
#' 'OpenStreetMap.HOT' (or 'hotstyle') \tab 'Stamen.TonerHybrid' \tab 'Esri.WorldTopoMap'\cr
#' \tab 'Stamen.TonerLines' \tab 'Esri.WorldImagery'\cr
#' 'OpenMapSurfer' \tab 'Stamen.TonerLabels' \tab 'Esri.WorldTerrain'\cr
#' 'OpenMapSurfer.Roads' \tab 'Stamen.TonerLite' \tab 'Esri.WorldShadedRelief'\cr
#' 'OpenMapSurfer.Hybrid' \tab 'Stamen.Watercolor' (or 'stamenwatercolor') \tab 'Esri.OceanBasemap'\cr
#' 'OpenMapSurfer.AdminBounds' \tab 'Stamen.Terrain' \tab 'Esri.NatGeoWorldMap'\cr
#' 'OpenMapSurfer.ElementsAtRisk' \tab 'Stamen.TerrainBackground' \tab 'Esri.WorldGrayCanvas'\cr
#' \tab 'Stamen.TerrainLabels' \tab \cr
#' 'CartoDB' \tab \tab 'Hydda'\cr
#' 'CartoDB.Positron' (or 'cartolight') \tab 'Thunderforest' \tab 'Hydda.Full'\cr
#' 'CartoDB.PositronNoLabels' \tab 'Thunderforest.OpenCycleMap' \tab 'Hydda.Base'\cr
#' 'CartoDB.PositronOnlyLabels' \tab 'Thunderforest.Transport' \tab 'Hydda.RoadsAndLabels'\cr
#' 'CartoDB.DarkMatter' (or 'cartodark') \tab 'Thunderforest.TransportDark' \tab \cr
#' 'CartoDB.DarkMatterNoLabels' \tab 'Thunderforest.SpinalMap' \tab 'HikeBike' (or 'hikebike')\cr
#' 'CartoDB.DarkMatterOnlyLabels' \tab 'Thunderforest.Landscape' \tab 'HikeBike.HikeBike'\cr
#' 'CartoDB.Voyager' \tab 'Thunderforest.Outdoors' \tab \cr
#' 'CartoDB.VoyagerNoLabels' \tab 'Thunderforest.Pioneer' \tab 'OpenTopoMap' (or 'opentopomap') \cr
#' 'CartoDB.VoyagerOnlyLabels' \tab 'Thunderforest.MobileAtlas' \tab 'Wikimedia'\cr
#' 'CartoDB.VoyagerLabelsUnder' \tab 'Thunderforest.Neighbourhood' \tab 'OpenStreetMap.MapnikBW' (or 'osmgrayscale')\cr
#' }
#' @references \url{https://leaflet-extras.github.io/leaflet-providers/preview/}
#' @export
#' @return A RasterBrick is returned.
#' @seealso \link{tilesLayer}
#' @examples
#' \dontrun{
#' library(sf)
#' mtq <- st_read(system.file("gpkg/mtq.gpkg", package="cartography"))
#' # Download the tiles, extent = Martinique
#' mtqOSM <- getTiles(x = mtq, type = "osm", crop = TRUE)
#' # Plot the tiles
#' tilesLayer(mtqOSM)
#' # Plot countries
#' plot(st_geometry(mtq), add=TRUE)
#' txt <- paste0("\u00A9 OpenStreetMap contributors.",
#' " Tiles style under CC BY-SA, www.openstreetmap.org/copyright")
#' mtext(text = txt, side = 1, adj = 0, cex = 0.7, font = 3)
#'
#' # Download esri tiles
#' fullserver = paste("https://server.arcgisonline.com/ArcGIS/rest/services",
#' "Specialty/DeLorme_World_Base_Map/MapServer",
#' "tile/{z}/{y}/{x}.jpg",
#' sep = "/"
#' )
#' typeosm <- list(
#' src = 'esri',
#' q = fullserver,
#' sub = NA,
#' cit = 'Tiles; Esri; Copyright: 2012 DeLorme'
#' )
#' mtqESRI <- getTiles(x = mtq, type = typeosm, crop = TRUE, verbose = T, zoom = 10)
#' # Plot the tiles
#' tilesLayer(mtqESRI)
#' txt <- typeosm$cit
#' mtext(text = txt, side = 1, adj = 0, cex = 0.6, font = 3)
#' }
getTiles <- function(x, spdf, type = "OpenStreetMap", zoom = NULL, crop = FALSE,
                     verbose = FALSE, apikey=NA, cachedir=FALSE, forceDownload=FALSE){
  # deprecated check: `spdf` was the old argument name; it overrides x
  if(!missing(spdf)){
    warning("spdf is deprecated; use x instead.", call. = FALSE)
    x <- spdf
  }
  # test for sp: legacy Spatial* objects are converted to sf
  if(methods::is(x,"Spatial") == TRUE){
    x <- convertToSf(spdf = x)
  }
  # test for single point (apply buffer to obtain a correct bbox):
  # a lone POINT has a degenerate bbox, so buffer it by 1000 units in
  # EPSG:3857 (presumably ~1 km in web mercator) and disable cropping
  if(nrow(x)==1 && sf::st_is(x, "POINT")){
    xt <- sf::st_transform(x, 3857)
    sf::st_geometry(xt) <- sf::st_buffer(sf::st_geometry(xt), 1000)
    crop <- FALSE
    # use x bbox to select the tiles to get (expressed in lon/lat, EPSG:4326)
    bbx <- sf::st_bbox(sf::st_transform(sf::st_as_sfc(sf::st_bbox(xt)), 4326))
  }else{
    # use x bbox to select the tiles to get
    bbx <- sf::st_bbox(sf::st_transform(sf::st_as_sfc(sf::st_bbox(x)), 4326))
  }
  # select a default zoom level: the smallest zoom whose tile count for
  # this bbox falls between 4 and 10 tiles
  if(is.null(zoom)){
    gz <- slippymath::bbox_tile_query(bbx)
    zoom <- min(gz[gz$total_tiles %in% 4:10,"zoom"])
  }
  # get tile list (x/y indices for every tile covering bbx at `zoom`)
  tile_grid <- slippymath::bbox_to_tile_grid(bbox = bbx, zoom = zoom)
  # get query parameters according to type (src, URL template, subdomains, citation)
  param <- get_param(type)
  # subdomains management: assign each tile a random server subdomain
  tile_grid$tiles$s <- sample(param$sub, nrow(tile_grid$tiles), replace = T)
  # src mgmnt
  tile_grid$src <- param$src
  # query mgmnt: splice the API key into the URL template (Thunderforest)
  tile_grid$q <- sub("XXXXXX",apikey,param$q)
  # citation
  tile_grid$cit <- param$cit
  # extension management: infer the image format from the URL template
  if (length(grep("jpg",param$q))>0){
    ext="jpg"
  } else if (length(grep("png",param$q))>0){
    ext="png"
  }
  tile_grid$ext<-ext
  #tile_grid$ext <- substr(param$q, nchar(param$q)-2, nchar(param$q))
  # download images (one file path per tile, cached if requested)
  images <- get_tiles(tile_grid, verbose, cachedir, forceDownload)
  # compose images into a single raster in web mercator
  rout <- compose_tile_grid(tile_grid, images)
  # reproject rout to the CRS of the input object
  rout <- raster::projectRaster(from = rout, crs = sf::st_crs(x)$proj4string)
  # resampling during reprojection can push band values slightly outside
  # the valid 0-255 range; clamp them back
  rout <- raster::clamp(rout,lower = 0, upper = 255, useValues = TRUE)
  # crop management: shrink to the input bbox plus a small margin
  # (0.052 of the bbox extent on each side)
  if(crop == TRUE){
    cb <- sf::st_bbox(x)
    k <- min(c(0.052 * (cb[4] - cb[2]), 0.052 * (cb[3] - cb[1])))
    cb <- cb + c(-k, -k, k, k)
    rout <- raster::crop(rout,cb[c(1,3,2,4)])
  }
  rout
}
# Download every tile listed in tile_grid and return the image file paths
# in the same order as tile_grid$tiles.
get_tiles <- function(tile_grid, verbose, cachedir, forceDownload) {
  # go through tile_grid tiles and download;
  # apply() over rows hands dl_t a character vector c(x, y, s) per tile,
  # which dl_t indexes positionally
  images <- apply(
    X = tile_grid$tiles,
    MARGIN = 1,
    FUN = dl_t,
    z = tile_grid$zoom,
    ext = tile_grid$ext,
    src = tile_grid$src,
    q = tile_grid$q,
    verbose = verbose,
    cachedir = cachedir,
    forceDownload = forceDownload
  )
  # report the zoom level used and the required attribution
  if (verbose) {
    message("Zoom:", tile_grid$zoom, "\nData and map tiles sources:\n",
            tile_grid$cit)
  }
  images
}
# Download one tile (or reuse the cached copy) and return its file path.
#
# x    character vector from apply() over the tiles data frame:
#      x[1] = tile x index, x[2] = tile y index, x[3] = server subdomain
# z    zoom level
# ext  image extension ("jpg" or "png")
# src  tile source name (used in file names and cache subdirectories)
# q    URL template containing {s}, {x}, {y}, {z} placeholders
dl_t <- function(x, z, ext, src, q, verbose, cachedir, forceDownload) {
  # forceDownload will overwrite any files existing in cache
  if (!is.logical(forceDownload)) stop("forceDownload must be TRUE or FALSE")
  # isFALSE()/isTRUE() instead of `==`: cachedir may be a character path,
  # and comparing a string to a logical with `==` coerces unpredictably
  if (isFALSE(cachedir)) {
    # no caching requested: use the session temporary directory
    cachedir <- tempdir()
  } else {
    # cachedir == TRUE means "cache under ./tile.cache"
    if (isTRUE(cachedir)) cachedir <- paste0(getwd(), "/tile.cache")
    # create the cachedir if it doesn't exist
    if (!dir.exists(cachedir)) dir.create(cachedir)
    # use subdirectories based on src to make the cache easier to navigate
    subdir <- paste0(cachedir, "/", src)
    if (!dir.exists(subdir)) dir.create(subdir)
    cachedir <- subdir
  }
  outfile <- paste0(cachedir, "/", src, "_", z, "_", x[1], "_", x[2], ".", ext)
  # download only when the tile is missing from the cache or forced;
  # `||` (scalar, short-circuiting) rather than vectorized `|` in an if()
  if (!file.exists(outfile) || isTRUE(forceDownload)) {
    q <- gsub(pattern = '{s}', replacement = x[3], x = q, fixed = TRUE)
    q <- gsub(pattern = '{x}', replacement = x[1], x = q, fixed = TRUE)
    q <- gsub(pattern = '{y}', replacement = x[2], x = q, fixed = TRUE)
    q <- gsub(pattern = '{z}', replacement = z, x = q, fixed = TRUE)
    if (verbose) {
      message(q, " => ", outfile)
    }
    curl::curl_download(url = q, destfile = outfile)
  }
  outfile
}
# Compose the downloaded tile images into a single raster in web mercator
# (EPSG:3857), one RasterBrick per tile merged together.
compose_tile_grid <- function (tile_grid, images){
  bricks = vector("list", nrow(tile_grid$tiles))
  for (i in seq_along(bricks)){
    # geographic extent (EPSG:3857) of tile i
    bbox <- slippymath::tile_bbox(tile_grid$tiles$x[i], tile_grid$tiles$y[i],
                                  tile_grid$zoom)
    img <- images[i]
    # special for png tiles: readPNG yields values in [0, 1], rescale to 0-255
    if (tile_grid$ext=="png"){
      img <- png::readPNG(img)*255
    }
    # compose brick raster and georeference it with the tile's extent
    r_img <- raster::brick(img, crs = sf::st_crs(3857)$proj4string)
    raster::extent(r_img) <- raster::extent(bbox[c("xmin", "xmax",
                                                   "ymin", "ymax")])
    bricks[[i]] <- r_img
  }
  # if only one tile is needed, no merging is necessary
  if(length(bricks)==1){
    return(bricks[[1]])
  }
  # all tiles together
  rout <- do.call(raster::merge, bricks)
  rout
}
# providers parameters
get_param <- function(type) {
if (length(type) == 4) {
param <- type
} else{
param <- switch(
type,
osm = get_param('OpenStreetMap'),
hotstyle = get_param('OpenStreetMap.HOT'),
hikebike = get_param('HikeBike'),
osmgrayscale = get_param('OpenStreetMap.MapnikBW'),
stamenbw = get_param('Stamen'),
stamenwatercolor = get_param('Stamen.Watercolor'),
cartodark = get_param('CartoDB.DarkMatter'),
cartolight = get_param('CartoDB.Positron'),
opentopomap = get_param('OpenTopoMap'),
OpenStreetMap.MapnikBW = list(
src = "osmgrayscale",
q = "https://tiles.wmflabs.org/bw-mapnik/{z}/{x}/{y}.png" ,
sub = NA,
cit = "\u00A9 OpenStreetMap contributors. Tiles style under CC BY-SA, www.openstreetmap.org/copyright."
),
OpenStreetMap = list(
src = "OpenStreetMap",
q = 'http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',
sub = c("a", "b", "c"),
cit = "\u00A9 OpenStreetMap contributors"
),
OpenStreetMap.DE = list(
src = "OpenStreetMap.DE",
q = "https://{s}.tile.openstreetmap.de/tiles/osmde/{z}/{x}/{y}.png",
sub = c("a", "b", "c"),
cit = "\u00A9 OpenStreetMap contributors"
),
OpenStreetMap.France = list(
src = "OpenStreetMap.France",
q = "https://{s}.tile.openstreetmap.fr/osmfr/{z}/{x}/{y}.png",
sub = c("a", "b", "c"),
cit = "\u00A9 Openstreetmap France | \u00A9 OpenStreetMap contributors"
),
OpenStreetMap.HOT = list(
src = "OpenStreetMap.HOT",
q = "https://{s}.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png",
sub = c("a", "b", "c"),
cit = "\u00A9 OpenStreetMap contributors, Tiles style by OpenStreetMap France"
),
OpenTopoMap = list(
src = "OpenTopoMap",
q = "https://{s}.tile.opentopomap.org/{z}/{x}/{y}.png",
sub = c("a", "b", "c"),
cit = "Map data: \u00A9 OpenStreetMap contributors, OpenTopoMap (CC-BY-SA)"
),
OpenMapSurfer = list(
src = "OpenMapSurfer",
q = "https://maps.heigit.org/openmapsurfer/tiles/roads/webmercator/{z}/{x}/{y}.png",
sub = NA,
cit = "Imagery from GIScience Research Group \uFE6B University of Heidelberg | Map data "
),
OpenMapSurfer.Roads = list(
src = "OpenMapSurfer.Roads",
q = "https://maps.heigit.org/openmapsurfer/tiles/roads/webmercator/{z}/{x}/{y}.png",
sub = NA,
cit = "Imagery from GIScience Research Group \uFE6B University of Heidelberg | Map data \u00A9 OpenStreetMap contributors"
),
OpenMapSurfer.Hybrid = list(
src = "OpenMapSurfer.Hybrid",
q = "https://maps.heigit.org/openmapsurfer/tiles/hybrid/webmercator/{z}/{x}/{y}.png",
sub = NA,
cit = "Imagery from GIScience Research Group \uFE6B University of Heidelberg | Map data \u00A9 OpenStreetMap contributors"
),
OpenMapSurfer.AdminBounds = list(
src = "OpenMapSurfer.AdminBounds",
q = "https://maps.heigit.org/openmapsurfer/tiles/adminb/webmercator/{z}/{x}/{y}.png",
sub = NA,
cit = "Imagery from GIScience Research Group \uFE6B University of Heidelberg | Map data \u00A9 OpenStreetMap contributors"
),
OpenMapSurfer.ElementsAtRisk = list(
src = "OpenMapSurfer.ElementsAtRisk",
q = "https://maps.heigit.org/openmapsurfer/tiles/elements_at_risk/webmercator/{z}/{x}/{y}.png",
sub = NA,
cit = "Imagery from GIScience Research Group \uFE6B University of Heidelberg | Map data \u00A9 OpenStreetMap contributors"
),
Hydda = list(
src = "Hydda",
q = "https://{s}.tile.openstreetmap.se/hydda/full/{z}/{x}/{y}.png",
sub = c("a", "b", "c"),
cit = "Tiles courtesy of OpenStreetMap Sweden \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Hydda.Full = list(
src = "Hydda.Full",
q = "https://{s}.tile.openstreetmap.se/hydda/full/{z}/{x}/{y}.png",
sub = c("a", "b", "c"),
cit = "Tiles courtesy of OpenStreetMap Sweden \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Hydda.Base = list(
src = "Hydda.Base",
q = "https://{s}.tile.openstreetmap.se/hydda/base/{z}/{x}/{y}.png",
sub = c("a", "b", "c"),
cit = "Tiles courtesy of OpenStreetMap Sweden \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Hydda.RoadsAndLabels = list(
src = "Hydda.RoadsAndLabels",
q = "https://{s}.tile.openstreetmap.se/hydda/roads_and_labels/{z}/{x}/{y}.png",
sub = c("a", "b", "c"),
cit = "Tiles courtesy of OpenStreetMap Sweden \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Stamen = list(
src = "Stamen",
q = "https://stamen-tiles-{s}.a.ssl.fastly.net/toner/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "Map tiles by CC BY 3.0 \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Stamen.Toner = list(
src = "Stamen.Toner",
q = "https://stamen-tiles-{s}.a.ssl.fastly.net/toner/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "Map tiles by CC BY 3.0 \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Stamen.TonerBackground = list(
src = "Stamen.TonerBackground",
q = "https://stamen-tiles-{s}.a.ssl.fastly.net/toner-background/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "Map tiles by CC BY 3.0 \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Stamen.TonerHybrid = list(
src = "Stamen.TonerHybrid",
q = "https://stamen-tiles-{s}.a.ssl.fastly.net/toner-hybrid/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "Map tiles by CC BY 3.0 \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Stamen.TonerLines = list(
src = "Stamen.TonerLines",
q = "https://stamen-tiles-{s}.a.ssl.fastly.net/toner-lines/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "Map tiles by CC BY 3.0 \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Stamen.TonerLabels = list(
src = "Stamen.TonerLabels",
q = "https://stamen-tiles-{s}.a.ssl.fastly.net/toner-labels/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "Map tiles by CC BY 3.0 \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Stamen.TonerLite = list(
src = "Stamen.TonerLite",
q = "https://stamen-tiles-{s}.a.ssl.fastly.net/toner-lite/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "Map tiles by CC BY 3.0 \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Stamen.Watercolor = list(
src = "Stamen.Watercolor",
q = "https://stamen-tiles-{s}.a.ssl.fastly.net/watercolor/{z}/{x}/{y}.jpg",
sub = c("a", "b", "c", "d"),
cit = "Map tiles by CC BY 3.0 \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Stamen.Terrain = list(
src = "Stamen.Terrain",
q = "https://stamen-tiles-{s}.a.ssl.fastly.net/terrain/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "Map tiles by CC BY 3.0 \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Stamen.TerrainBackground = list(
src = "Stamen.TerrainBK",
q = "https://stamen-tiles-{s}.a.ssl.fastly.net/terrain-background/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "Map tiles by CC BY 3.0 \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Stamen.TerrainLabels = list(
src = "Stamen.Terrainlabs",
q = "https://stamen-tiles-{s}.a.ssl.fastly.net/terrain-labels/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "Map tiles by CC BY 3.0 \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Esri = list(
src = "Esri",
q = "https://server.arcgisonline.com/ArcGIS/rest/services/World_Street_Map/MapServer/tile/{z}/{y}/{x}.jpg",
sub = NA,
cit = "Tiles \u00A9 Esri"
),
Esri.WorldStreetMap = list(
src = "EsriWSM",
q = "https://server.arcgisonline.com/ArcGIS/rest/services/World_Street_Map/MapServer/tile/{z}/{y}/{x}.jpg",
sub = NA,
cit = "Tiles \u00A9 Esri \u2014 Source: Esri, DeLorme, NAVTEQ, USGS, Intermap, iPC, NRCAN, Esri Japan, METI, Esri China (Hong Kong), Esri (Thailand), TomTom, 2012"
),
Esri.DeLorme = list(
src = "EsriDLor",
q = "https://server.arcgisonline.com/ArcGIS/rest/services/Specialty/DeLorme_World_Base_Map/MapServer/tile/{z}/{y}/{x}.jpg",
sub = NA,
cit = "Tiles \u00A9 Esri \u2014 Copyright: \u00A92012 DeLorme"
),
Esri.WorldTopoMap = list(
src = "EsriWTM",
q = "https://server.arcgisonline.com/ArcGIS/rest/services/World_Topo_Map/MapServer/tile/{z}/{y}/{x}.jpg",
sub = NA,
cit = "Tiles \u00A9 Esri \u2014 Esri, DeLorme, NAVTEQ, TomTom, Intermap, iPC, USGS, FAO, NPS, NRCAN, GeoBase, Kadaster NL, Ordnance Survey, Esri Japan, METI, Esri China (Hong Kong), and the GIS User Community"
),
Esri.WorldImagery = list(
src = "EsriWI",
q = "https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}.jpg",
sub = NA,
cit = "Tiles \u00A9 Esri \u2014 Source: Esri, i-cubed, USDA, USGS, AEX, GeoEye, Getmapping, Aerogrid, IGN, IGP, UPR-EGP, and the GIS User Community"
),
Esri.WorldTerrain = list(
src = "EsriWT",
q = "https://server.arcgisonline.com/ArcGIS/rest/services/World_Terrain_Base/MapServer/tile/{z}/{y}/{x}.jpg",
sub = NA,
cit = "Tiles \u00A9 Esri \u2014 Source: USGS, Esri, TANA, DeLorme, and NPS"
),
Esri.WorldShadedRelief = list(
src = "EsriWSR",
q = "https://server.arcgisonline.com/ArcGIS/rest/services/World_Shaded_Relief/MapServer/tile/{z}/{y}/{x}.jpg",
sub = NA,
cit = "Tiles \u00A9 Esri \u2014 Source: Esri"
),
Esri.OceanBasemap = list(
src = "EsriOBM",
q = "https://server.arcgisonline.com/ArcGIS/rest/services/Ocean_Basemap/MapServer/tile/{z}/{y}/{x}.jpg",
sub = NA,
cit = "Tiles \u00A9 Esri \u2014 Sources: GEBCO, NOAA, CHS, OSU, UNH, CSUMB, National Geographic, DeLorme, NAVTEQ, and Esri"
),
Esri.NatGeoWorldMap = list(
src = "EsriNGW",
q = "https://server.arcgisonline.com/ArcGIS/rest/services/NatGeo_World_Map/MapServer/tile/{z}/{y}/{x}.jpg",
sub = NA,
cit = "Tiles \u00A9 Esri \u2014 National Geographic, Esri, DeLorme, NAVTEQ, UNEP-WCMC, USGS, NASA, ESA, METI, NRCAN, GEBCO, NOAA, iPC"
),
Esri.WorldGrayCanvas = list(
src = "EsriWGC",
q = "https://server.arcgisonline.com/ArcGIS/rest/services/Canvas/World_Light_Gray_Base/MapServer/tile/{z}/{y}/{x}.jpg",
sub = NA,
cit = "Tiles \u00A9 Esri \u2014 Esri, DeLorme, NAVTEQ"
),
CartoDB = list(
src = "Carto",
q = "https://{s}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "\u00A9 OpenStreetMap contributors \u00A9 CARTO"
),
CartoDB.Positron = list(
src = "CartoP",
q = "https://{s}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "\u00A9 OpenStreetMap contributors \u00A9 CARTO"
),
CartoDB.PositronNoLabels = list(
src = "CartoPNL",
q = "https://{s}.basemaps.cartocdn.com/light_nolabels/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "\u00A9 OpenStreetMap contributors \u00A9 CARTO"
),
CartoDB.PositronOnlyLabels = list(
src = "CartoPOL",
q = "https://{s}.basemaps.cartocdn.com/light_only_labels/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "\u00A9 OpenStreetMap contributors \u00A9 CARTO"
),
CartoDB.DarkMatter = list(
src = "CartoDM",
q = "https://{s}.basemaps.cartocdn.com/dark_all/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "\u00A9 OpenStreetMap contributors \u00A9 CARTO"
),
CartoDB.DarkMatterNoLabels = list(
src = "CartoDMNL",
q = "https://{s}.basemaps.cartocdn.com/dark_nolabels/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "\u00A9 OpenStreetMap contributors \u00A9 CARTO"
),
CartoDB.DarkMatterOnlyLabels = list(
src = "CartoDMOL",
q = "https://{s}.basemaps.cartocdn.com/dark_only_labels/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "\u00A9 OpenStreetMap contributors \u00A9 CARTO"
),
CartoDB.Voyager = list(
src = "CartoV",
q = "https://{s}.basemaps.cartocdn.com/rastertiles/voyager/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "\u00A9 OpenStreetMap contributors \u00A9 CARTO"
),
CartoDB.VoyagerNoLabels = list(
src = "CartoVNL",
q = "https://{s}.basemaps.cartocdn.com/rastertiles/voyager_nolabels/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "\u00A9 OpenStreetMap contributors \u00A9 CARTO"
),
CartoDB.VoyagerOnlyLabels = list(
src = "CartoVOL",
q = "https://{s}.basemaps.cartocdn.com/rastertiles/voyager_only_labels/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "\u00A9 OpenStreetMap contributors \u00A9 CARTO"
),
CartoDB.VoyagerLabelsUnder = list(
src = "CartoVLU",
q = "https://{s}.basemaps.cartocdn.com/rastertiles/voyager_labels_under/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "\u00A9 OpenStreetMap contributors \u00A9 CARTO"
),
HikeBike = list(
src = "HikeBike",
q = "https://tiles.wmflabs.org/hikebike/{z}/{x}/{y}.png",
sub = NA,
cit = "\u00A9 OpenStreetMap contributors"
),
HikeBike.HikeBike = list(
src = "HikeBike2",
q = "https://tiles.wmflabs.org/hikebike/{z}/{x}/{y}.png",
sub = NA,
cit = "\u00A9 OpenStreetMap contributors"
),
Wikimedia = list(
src = "Wikimedia",
q = "https://maps.wikimedia.org/osm-intl/{z}/{x}/{y}.png",
sub = NA,
cit = "Wikimedia"
),
Thunderforest = list(
src = "Tf",
q = "https://tile.thunderforest.com/cycle/{z}/{x}/{y}.png?apikey=XXXXXX",
sub = NA,
cit = "Maps \u00A9 www.thunderforest.com, Data \u00A9 www.osm.org/copyright"
),
Thunderforest.OpenCycleMap = list(
src = "Tf",
q = "https://tile.thunderforest.com/cycle/{z}/{x}/{y}.png?apikey=XXXXXX",
sub = NA,
cit = "Maps \u00A9 www.thunderforest.com, Data \u00A9 www.osm.org/copyright"
),
Thunderforest.Transport = list(
src = "Tf.Tr",
q = "https://tile.thunderforest.com/transport/{z}/{x}/{y}.png?apikey=XXXXXX",
sub = NA,
cit = "Maps \u00A9 www.thunderforest.com, Data \u00A9 www.osm.org/copyright"
),
Thunderforest.TransportDark = list(
src = "Tf.TrDr",
q = "https://tile.thunderforest.com/transport-dark/{z}/{x}/{y}.png?apikey=XXXXXX",
sub = NA,
cit = "Maps \u00A9 www.thunderforest.com, Data \u00A9 www.osm.org/copyright"
),
Thunderforest.SpinalMap = list(
src = "Tf.SP",
q = "https://tile.thunderforest.com/spinal-map/{z}/{x}/{y}.png?apikey=XXXXXX",
sub = NA,
cit = "Maps \u00A9 www.thunderforest.com, Data \u00A9 www.osm.org/copyright"
),
Thunderforest.Landscape = list(
src = "Tf.Lc",
q = "https://tile.thunderforest.com/landscape/{z}/{x}/{y}.png?apikey=XXXXXX",
sub = NA,
cit = "Maps \u00A9 www.thunderforest.com, Data \u00A9 www.osm.org/copyright"
),
Thunderforest.Outdoors= list(
src = "Tf.Out",
q = "https://tile.thunderforest.com/outdoors/{z}/{x}/{y}.png?apikey=XXXXXX",
sub = NA,
cit = "Maps \u00A9 www.thunderforest.com, Data \u00A9 www.osm.org/copyright"
),
Thunderforest.Pioneer = list(
src = "Tf.Pion",
q = "https://tile.thunderforest.com/pioneer/{z}/{x}/{y}.png?apikey=XXXXXX",
sub = NA,
cit = "Maps \u00A9 www.thunderforest.com, Data \u00A9 www.osm.org/copyright"
),
Thunderforest.MobileAtlas= list(
src = "Tf.MB",
q = "https://tile.thunderforest.com/mobile-atlas/{z}/{x}/{y}.png?apikey=XXXXXX",
sub = NA,
cit = "Maps \u00A9 www.thunderforest.com, Data \u00A9 www.osm.org/copyright"
),
Thunderforest.Neighbourhood= list(
src = "Tf.Nbg",
q = "https://tile.thunderforest.com/neighbourhood/{z}/{x}/{y}.png?apikey=XXXXXX",
sub = NA,
cit = "Maps \u00A9 www.thunderforest.com, Data \u00A9 www.osm.org/copyright"
)
)
}
param
}
| /R/getTiles.R | no_license | vgs549/cartography | R | false | false | 27,421 | r | #' @title Get Tiles from Open Map Servers
#' @name getTiles
#' @description Get map tiles based on a spatial object extent. Maps can be
#' fetched from various open map servers.
#' @param x an sf object, a simple feature collection or a Spatial*DataFrame.
#' @param spdf deprecated, a Spatial*DataFrame with a valid projection attribute.
#' @param type the tile server from which to get the map. See Details for providers.
#' For other sources use a list: type = list(src = "name of the source" ,
#' q = "tiles address", sub = "subdomains", cit = "how to cite the tiles"). See Examples.
#' @param zoom the zoom level. If null, it is determined automatically
#' (see Details).
#' @param crop TRUE if results should be cropped to the specified x extent,
#' FALSE otherwise. If x is an sf object with one POINT, crop is set to FALSE.
#' @param verbose if TRUE, tiles filepaths, zoom level and citation are displayed.
#' @param apikey Needed for Thunderforest maps.
#' @param cachedir name of a directory used to cache tiles. If TRUE, places a
#' 'tile.cache' folder in the working directory. If FALSE, tiles are not cached.
#' @param forceDownload if TRUE, cached tiles are downloaded again.
#' @details
#' Zoom levels are described on the OpenStreetMap wiki:
#' \url{https://wiki.openstreetmap.org/wiki/Zoom_levels}. \cr\cr
#' Full list of providers:
#' \tabular{lll}{
#' 'OpenStreetMap' (or 'osm') \tab 'Stamen' (or 'stamenbw') \tab 'Esri' \cr
#' 'OpenStreetMap.DE' \tab 'Stamen.Toner' \tab 'Esri.WorldStreetMap'\cr
#' 'OpenStreetMap.France' \tab 'Stamen.TonerBackground' \tab 'Esri.DeLorme'\cr
#' 'OpenStreetMap.HOT' (or 'hotstyle') \tab 'Stamen.TonerHybrid' \tab 'Esri.WorldTopoMap'\cr
#' \tab 'Stamen.TonerLines' \tab 'Esri.WorldImagery'\cr
#' 'OpenMapSurfer' \tab 'Stamen.TonerLabels' \tab 'Esri.WorldTerrain'\cr
#' 'OpenMapSurfer.Roads' \tab 'Stamen.TonerLite' \tab 'Esri.WorldShadedRelief'\cr
#' 'OpenMapSurfer.Hybrid' \tab 'Stamen.Watercolor' (or 'stamenwatercolor') \tab 'Esri.OceanBasemap'\cr
#' 'OpenMapSurfer.AdminBounds' \tab 'Stamen.Terrain' \tab 'Esri.NatGeoWorldMap'\cr
#' 'OpenMapSurfer.ElementsAtRisk' \tab 'Stamen.TerrainBackground' \tab 'Esri.WorldGrayCanvas'\cr
#' \tab 'Stamen.TerrainLabels' \tab \cr
#' 'CartoDB' \tab \tab 'Hydda'\cr
#' 'CartoDB.Positron' (or 'cartolight') \tab 'Thunderforest' \tab 'Hydda.Full'\cr
#' 'CartoDB.PositronNoLabels' \tab 'Thunderforest.OpenCycleMap' \tab 'Hydda.Base'\cr
#' 'CartoDB.PositronOnlyLabels' \tab 'Thunderforest.Transport' \tab 'Hydda.RoadsAndLabels'\cr
#' 'CartoDB.DarkMatter' (or 'cartodark') \tab 'Thunderforest.TransportDark' \tab \cr
#' 'CartoDB.DarkMatterNoLabels' \tab 'Thunderforest.SpinalMap' \tab 'HikeBike' (or 'hikebike')\cr
#' 'CartoDB.DarkMatterOnlyLabels' \tab 'Thunderforest.Landscape' \tab 'HikeBike.HikeBike'\cr
#' 'CartoDB.Voyager' \tab 'Thunderforest.Outdoors' \tab \cr
#' 'CartoDB.VoyagerNoLabels' \tab 'Thunderforest.Pioneer' \tab 'OpenTopoMap' (or 'opentopomap') \cr
#' 'CartoDB.VoyagerOnlyLabels' \tab 'Thunderforest.MobileAtlas' \tab 'Wikimedia'\cr
#' 'CartoDB.VoyagerLabelsUnder' \tab 'Thunderforest.Neighbourhood' \tab 'OpenStreetMap.MapnikBW' (or 'osmgrayscale')\cr
#' }
#' @references \url{https://leaflet-extras.github.io/leaflet-providers/preview/}
#' @export
#' @return A RasterBrick is returned.
#' @seealso \link{tilesLayer}
#' @examples
#' \dontrun{
#' library(sf)
#' mtq <- st_read(system.file("gpkg/mtq.gpkg", package="cartography"))
#' # Download the tiles, extent = Martinique
#' mtqOSM <- getTiles(x = mtq, type = "osm", crop = TRUE)
#' # Plot the tiles
#' tilesLayer(mtqOSM)
#' # Plot countries
#' plot(st_geometry(mtq), add=TRUE)
#' txt <- paste0("\u00A9 OpenStreetMap contributors.",
#' " Tiles style under CC BY-SA, www.openstreetmap.org/copyright")
#' mtext(text = txt, side = 1, adj = 0, cex = 0.7, font = 3)
#'
#' # Download esri tiles
#' fullserver = paste("https://server.arcgisonline.com/ArcGIS/rest/services",
#' "Specialty/DeLorme_World_Base_Map/MapServer",
#' "tile/{z}/{y}/{x}.jpg",
#' sep = "/"
#' )
#' typeosm <- list(
#' src = 'esri',
#' q = fullserver,
#' sub = NA,
#' cit = 'Tiles; Esri; Copyright: 2012 DeLorme'
#' )
#' mtqESRI <- getTiles(x = mtq, type = typeosm, crop = TRUE, verbose = T, zoom = 10)
#' # Plot the tiles
#' tilesLayer(mtqESRI)
#' txt <- typeosm$cit
#' mtext(text = txt, side = 1, adj = 0, cex = 0.6, font = 3)
#' }
getTiles <- function(x, spdf, type = "OpenStreetMap", zoom = NULL, crop = FALSE,
verbose = FALSE, apikey=NA, cachedir=FALSE, forceDownload=FALSE){
# deprecated check
if(!missing(spdf)){
warning("spdf is deprecated; use x instead.", call. = FALSE)
x <- spdf
}
# test for sp
if(methods::is(x,"Spatial") == TRUE){
x <- convertToSf(spdf = x)
}
# test for single point (apply buffer to obtain a correct bbox)
if(nrow(x)==1 && sf::st_is(x, "POINT")){
xt <- sf::st_transform(x, 3857)
sf::st_geometry(xt) <- sf::st_buffer(sf::st_geometry(xt), 1000)
crop <- FALSE
# use x bbox to select the tiles to get
bbx <- sf::st_bbox(sf::st_transform(sf::st_as_sfc(sf::st_bbox(xt)), 4326))
}else{
# use x bbox to select the tiles to get
bbx <- sf::st_bbox(sf::st_transform(sf::st_as_sfc(sf::st_bbox(x)), 4326))
}
# select a default zoom level
if(is.null(zoom)){
gz <- slippymath::bbox_tile_query(bbx)
zoom <- min(gz[gz$total_tiles %in% 4:10,"zoom"])
}
# get tile list
tile_grid <- slippymath::bbox_to_tile_grid(bbox = bbx, zoom = zoom)
# get query parameters according to type
param <- get_param(type)
# subdomains management
tile_grid$tiles$s <- sample(param$sub, nrow(tile_grid$tiles), replace = T)
# src mgmnt
tile_grid$src <- param$src
# query mgmnt
tile_grid$q <- sub("XXXXXX",apikey,param$q)
# citation
tile_grid$cit <- param$cit
# extension management
if (length(grep("jpg",param$q))>0){
ext="jpg"
} else if (length(grep("png",param$q))>0){
ext="png"
}
tile_grid$ext<-ext
#tile_grid$ext <- substr(param$q, nchar(param$q)-2, nchar(param$q))
# download images
images <- get_tiles(tile_grid, verbose, cachedir, forceDownload)
# compose images
rout <- compose_tile_grid(tile_grid, images)
# reproject rout
rout <- raster::projectRaster(from = rout, crs = sf::st_crs(x)$proj4string)
rout <- raster::clamp(rout,lower = 0, upper = 255, useValues = TRUE)
# crop management
if(crop == TRUE){
cb <- sf::st_bbox(x)
k <- min(c(0.052 * (cb[4] - cb[2]), 0.052 * (cb[3] - cb[1])))
cb <- cb + c(-k, -k, k, k)
rout <- raster::crop(rout,cb[c(1,3,2,4)])
}
rout
}
# get the tiles according to the grid
get_tiles <- function(tile_grid, verbose, cachedir, forceDownload) {
# go through tile_grid tiles and download
images <- apply(
X = tile_grid$tiles,
MARGIN = 1,
FUN = dl_t,
z = tile_grid$zoom,
ext = tile_grid$ext,
src = tile_grid$src,
q = tile_grid$q,
verbose = verbose,
cachedir = cachedir,
forceDownload = forceDownload
)
if (verbose) {
message("Zoom:", tile_grid$zoom, "\nData and map tiles sources:\n",
tile_grid$cit)
}
images
}
# download tile according to parameters
dl_t <- function(x, z, ext, src, q, verbose, cachedir, forceDownload) {
# forceDownload will overwrite any files existing in cache
if(!is.logical(forceDownload)) stop("forceDownload must be TRUE or FALSE")
# if cachedir==F, save to temporary filepath
if(cachedir == FALSE) {
cachedir <- tempdir()
} else {
# if cachedir==T, place in working directory
if(cachedir == TRUE) cachedir <- paste0(getwd(),'/tile.cache')
#create the cachedir if it doesn't exist.
if(!dir.exists(cachedir)) dir.create(cachedir)
# uses subdirectories based on src to make the directory easier for users to navigate
subdir <- paste0(cachedir,"/",src)
if(!dir.exists(subdir)) dir.create(subdir)
cachedir <- subdir
}
outfile <- paste0(cachedir, "/", src, "_", z, "_", x[1], "_", x[2],".", ext)
if (!file.exists(outfile) | isTRUE(forceDownload)) {
q <- gsub(pattern = '{s}', replacement = x[3], x = q, fixed = TRUE)
q <- gsub(pattern = '{x}', replacement = x[1], x = q, fixed = TRUE)
q <- gsub(pattern = '{y}', replacement = x[2], x = q, fixed = TRUE)
q <- gsub(pattern = '{z}', replacement = z, x = q, fixed = TRUE)
if (verbose) {
message(q, " => ", outfile)
}
curl::curl_download(url = q, destfile = outfile)
}
outfile
}
# compose tiles
compose_tile_grid <- function (tile_grid, images){
bricks = vector("list", nrow(tile_grid$tiles))
for (i in seq_along(bricks)){
bbox <- slippymath::tile_bbox(tile_grid$tiles$x[i], tile_grid$tiles$y[i],
tile_grid$zoom)
img <- images[i]
# special for png tiles
if (tile_grid$ext=="png"){
img <- png::readPNG(img)*255
}
# compose brick raster
r_img <- raster::brick(img, crs = sf::st_crs(3857)$proj4string)
raster::extent(r_img) <- raster::extent(bbox[c("xmin", "xmax",
"ymin", "ymax")])
bricks[[i]] <- r_img
}
# if only one tile is needed
if(length(bricks)==1){
return(bricks[[1]])
}
# all tiles together
rout <- do.call(raster::merge, bricks)
rout
}
# providers parameters
get_param <- function(type) {
if (length(type) == 4) {
param <- type
} else{
param <- switch(
type,
osm = get_param('OpenStreetMap'),
hotstyle = get_param('OpenStreetMap.HOT'),
hikebike = get_param('HikeBike'),
osmgrayscale = get_param('OpenStreetMap.MapnikBW'),
stamenbw = get_param('Stamen'),
stamenwatercolor = get_param('Stamen.Watercolor'),
cartodark = get_param('CartoDB.DarkMatter'),
cartolight = get_param('CartoDB.Positron'),
opentopomap = get_param('OpenTopoMap'),
OpenStreetMap.MapnikBW = list(
src = "osmgrayscale",
q = "https://tiles.wmflabs.org/bw-mapnik/{z}/{x}/{y}.png" ,
sub = NA,
cit = "\u00A9 OpenStreetMap contributors. Tiles style under CC BY-SA, www.openstreetmap.org/copyright."
),
OpenStreetMap = list(
src = "OpenStreetMap",
q = 'http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',
sub = c("a", "b", "c"),
cit = "\u00A9 OpenStreetMap contributors"
),
OpenStreetMap.DE = list(
src = "OpenStreetMap.DE",
q = "https://{s}.tile.openstreetmap.de/tiles/osmde/{z}/{x}/{y}.png",
sub = c("a", "b", "c"),
cit = "\u00A9 OpenStreetMap contributors"
),
OpenStreetMap.France = list(
src = "OpenStreetMap.France",
q = "https://{s}.tile.openstreetmap.fr/osmfr/{z}/{x}/{y}.png",
sub = c("a", "b", "c"),
cit = "\u00A9 Openstreetmap France | \u00A9 OpenStreetMap contributors"
),
OpenStreetMap.HOT = list(
src = "OpenStreetMap.HOT",
q = "https://{s}.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png",
sub = c("a", "b", "c"),
cit = "\u00A9 OpenStreetMap contributors, Tiles style by OpenStreetMap France"
),
OpenTopoMap = list(
src = "OpenTopoMap",
q = "https://{s}.tile.opentopomap.org/{z}/{x}/{y}.png",
sub = c("a", "b", "c"),
cit = "Map data: \u00A9 OpenStreetMap contributors, OpenTopoMap (CC-BY-SA)"
),
OpenMapSurfer = list(
src = "OpenMapSurfer",
q = "https://maps.heigit.org/openmapsurfer/tiles/roads/webmercator/{z}/{x}/{y}.png",
sub = NA,
cit = "Imagery from GIScience Research Group \uFE6B University of Heidelberg | Map data "
),
OpenMapSurfer.Roads = list(
src = "OpenMapSurfer.Roads",
q = "https://maps.heigit.org/openmapsurfer/tiles/roads/webmercator/{z}/{x}/{y}.png",
sub = NA,
cit = "Imagery from GIScience Research Group \uFE6B University of Heidelberg | Map data \u00A9 OpenStreetMap contributors"
),
OpenMapSurfer.Hybrid = list(
src = "OpenMapSurfer.Hybrid",
q = "https://maps.heigit.org/openmapsurfer/tiles/hybrid/webmercator/{z}/{x}/{y}.png",
sub = NA,
cit = "Imagery from GIScience Research Group \uFE6B University of Heidelberg | Map data \u00A9 OpenStreetMap contributors"
),
OpenMapSurfer.AdminBounds = list(
src = "OpenMapSurfer.AdminBounds",
q = "https://maps.heigit.org/openmapsurfer/tiles/adminb/webmercator/{z}/{x}/{y}.png",
sub = NA,
cit = "Imagery from GIScience Research Group \uFE6B University of Heidelberg | Map data \u00A9 OpenStreetMap contributors"
),
OpenMapSurfer.ElementsAtRisk = list(
src = "OpenMapSurfer.ElementsAtRisk",
q = "https://maps.heigit.org/openmapsurfer/tiles/elements_at_risk/webmercator/{z}/{x}/{y}.png",
sub = NA,
cit = "Imagery from GIScience Research Group \uFE6B University of Heidelberg | Map data \u00A9 OpenStreetMap contributors"
),
Hydda = list(
src = "Hydda",
q = "https://{s}.tile.openstreetmap.se/hydda/full/{z}/{x}/{y}.png",
sub = c("a", "b", "c"),
cit = "Tiles courtesy of OpenStreetMap Sweden \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Hydda.Full = list(
src = "Hydda.Full",
q = "https://{s}.tile.openstreetmap.se/hydda/full/{z}/{x}/{y}.png",
sub = c("a", "b", "c"),
cit = "Tiles courtesy of OpenStreetMap Sweden \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Hydda.Base = list(
src = "Hydda.Base",
q = "https://{s}.tile.openstreetmap.se/hydda/base/{z}/{x}/{y}.png",
sub = c("a", "b", "c"),
cit = "Tiles courtesy of OpenStreetMap Sweden \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Hydda.RoadsAndLabels = list(
src = "Hydda.RoadsAndLabels",
q = "https://{s}.tile.openstreetmap.se/hydda/roads_and_labels/{z}/{x}/{y}.png",
sub = c("a", "b", "c"),
cit = "Tiles courtesy of OpenStreetMap Sweden \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Stamen = list(
src = "Stamen",
q = "https://stamen-tiles-{s}.a.ssl.fastly.net/toner/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "Map tiles by CC BY 3.0 \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Stamen.Toner = list(
src = "Stamen.Toner",
q = "https://stamen-tiles-{s}.a.ssl.fastly.net/toner/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "Map tiles by CC BY 3.0 \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Stamen.TonerBackground = list(
src = "Stamen.TonerBackground",
q = "https://stamen-tiles-{s}.a.ssl.fastly.net/toner-background/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "Map tiles by CC BY 3.0 \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Stamen.TonerHybrid = list(
src = "Stamen.TonerHybrid",
q = "https://stamen-tiles-{s}.a.ssl.fastly.net/toner-hybrid/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "Map tiles by CC BY 3.0 \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Stamen.TonerLines = list(
src = "Stamen.TonerLines",
q = "https://stamen-tiles-{s}.a.ssl.fastly.net/toner-lines/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "Map tiles by CC BY 3.0 \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Stamen.TonerLabels = list(
src = "Stamen.TonerLabels",
q = "https://stamen-tiles-{s}.a.ssl.fastly.net/toner-labels/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "Map tiles by CC BY 3.0 \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Stamen.TonerLite = list(
src = "Stamen.TonerLite",
q = "https://stamen-tiles-{s}.a.ssl.fastly.net/toner-lite/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "Map tiles by CC BY 3.0 \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Stamen.Watercolor = list(
src = "Stamen.Watercolor",
q = "https://stamen-tiles-{s}.a.ssl.fastly.net/watercolor/{z}/{x}/{y}.jpg",
sub = c("a", "b", "c", "d"),
cit = "Map tiles by CC BY 3.0 \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Stamen.Terrain = list(
src = "Stamen.Terrain",
q = "https://stamen-tiles-{s}.a.ssl.fastly.net/terrain/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "Map tiles by CC BY 3.0 \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Stamen.TerrainBackground = list(
src = "Stamen.TerrainBK",
q = "https://stamen-tiles-{s}.a.ssl.fastly.net/terrain-background/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "Map tiles by CC BY 3.0 \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Stamen.TerrainLabels = list(
src = "Stamen.Terrainlabs",
q = "https://stamen-tiles-{s}.a.ssl.fastly.net/terrain-labels/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "Map tiles by CC BY 3.0 \u2014 Map data \u00A9 OpenStreetMap contributors"
),
Esri = list(
src = "Esri",
q = "https://server.arcgisonline.com/ArcGIS/rest/services/World_Street_Map/MapServer/tile/{z}/{y}/{x}.jpg",
sub = NA,
cit = "Tiles \u00A9 Esri"
),
Esri.WorldStreetMap = list(
src = "EsriWSM",
q = "https://server.arcgisonline.com/ArcGIS/rest/services/World_Street_Map/MapServer/tile/{z}/{y}/{x}.jpg",
sub = NA,
cit = "Tiles \u00A9 Esri \u2014 Source: Esri, DeLorme, NAVTEQ, USGS, Intermap, iPC, NRCAN, Esri Japan, METI, Esri China (Hong Kong), Esri (Thailand), TomTom, 2012"
),
Esri.DeLorme = list(
src = "EsriDLor",
q = "https://server.arcgisonline.com/ArcGIS/rest/services/Specialty/DeLorme_World_Base_Map/MapServer/tile/{z}/{y}/{x}.jpg",
sub = NA,
cit = "Tiles \u00A9 Esri \u2014 Copyright: \u00A92012 DeLorme"
),
Esri.WorldTopoMap = list(
src = "EsriWTM",
q = "https://server.arcgisonline.com/ArcGIS/rest/services/World_Topo_Map/MapServer/tile/{z}/{y}/{x}.jpg",
sub = NA,
cit = "Tiles \u00A9 Esri \u2014 Esri, DeLorme, NAVTEQ, TomTom, Intermap, iPC, USGS, FAO, NPS, NRCAN, GeoBase, Kadaster NL, Ordnance Survey, Esri Japan, METI, Esri China (Hong Kong), and the GIS User Community"
),
Esri.WorldImagery = list(
src = "EsriWI",
q = "https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}.jpg",
sub = NA,
cit = "Tiles \u00A9 Esri \u2014 Source: Esri, i-cubed, USDA, USGS, AEX, GeoEye, Getmapping, Aerogrid, IGN, IGP, UPR-EGP, and the GIS User Community"
),
Esri.WorldTerrain = list(
src = "EsriWT",
q = "https://server.arcgisonline.com/ArcGIS/rest/services/World_Terrain_Base/MapServer/tile/{z}/{y}/{x}.jpg",
sub = NA,
cit = "Tiles \u00A9 Esri \u2014 Source: USGS, Esri, TANA, DeLorme, and NPS"
),
Esri.WorldShadedRelief = list(
src = "EsriWSR",
q = "https://server.arcgisonline.com/ArcGIS/rest/services/World_Shaded_Relief/MapServer/tile/{z}/{y}/{x}.jpg",
sub = NA,
cit = "Tiles \u00A9 Esri \u2014 Source: Esri"
),
Esri.OceanBasemap = list(
src = "EsriOBM",
q = "https://server.arcgisonline.com/ArcGIS/rest/services/Ocean_Basemap/MapServer/tile/{z}/{y}/{x}.jpg",
sub = NA,
cit = "Tiles \u00A9 Esri \u2014 Sources: GEBCO, NOAA, CHS, OSU, UNH, CSUMB, National Geographic, DeLorme, NAVTEQ, and Esri"
),
Esri.NatGeoWorldMap = list(
src = "EsriNGW",
q = "https://server.arcgisonline.com/ArcGIS/rest/services/NatGeo_World_Map/MapServer/tile/{z}/{y}/{x}.jpg",
sub = NA,
cit = "Tiles \u00A9 Esri \u2014 National Geographic, Esri, DeLorme, NAVTEQ, UNEP-WCMC, USGS, NASA, ESA, METI, NRCAN, GEBCO, NOAA, iPC"
),
Esri.WorldGrayCanvas = list(
src = "EsriWGC",
q = "https://server.arcgisonline.com/ArcGIS/rest/services/Canvas/World_Light_Gray_Base/MapServer/tile/{z}/{y}/{x}.jpg",
sub = NA,
cit = "Tiles \u00A9 Esri \u2014 Esri, DeLorme, NAVTEQ"
),
CartoDB = list(
src = "Carto",
q = "https://{s}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "\u00A9 OpenStreetMap contributors \u00A9 CARTO"
),
CartoDB.Positron = list(
src = "CartoP",
q = "https://{s}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "\u00A9 OpenStreetMap contributors \u00A9 CARTO"
),
CartoDB.PositronNoLabels = list(
src = "CartoPNL",
q = "https://{s}.basemaps.cartocdn.com/light_nolabels/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "\u00A9 OpenStreetMap contributors \u00A9 CARTO"
),
CartoDB.PositronOnlyLabels = list(
src = "CartoPOL",
q = "https://{s}.basemaps.cartocdn.com/light_only_labels/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "\u00A9 OpenStreetMap contributors \u00A9 CARTO"
),
CartoDB.DarkMatter = list(
src = "CartoDM",
q = "https://{s}.basemaps.cartocdn.com/dark_all/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "\u00A9 OpenStreetMap contributors \u00A9 CARTO"
),
CartoDB.DarkMatterNoLabels = list(
src = "CartoDMNL",
q = "https://{s}.basemaps.cartocdn.com/dark_nolabels/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "\u00A9 OpenStreetMap contributors \u00A9 CARTO"
),
CartoDB.DarkMatterOnlyLabels = list(
src = "CartoDMOL",
q = "https://{s}.basemaps.cartocdn.com/dark_only_labels/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "\u00A9 OpenStreetMap contributors \u00A9 CARTO"
),
CartoDB.Voyager = list(
src = "CartoV",
q = "https://{s}.basemaps.cartocdn.com/rastertiles/voyager/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "\u00A9 OpenStreetMap contributors \u00A9 CARTO"
),
CartoDB.VoyagerNoLabels = list(
src = "CartoVNL",
q = "https://{s}.basemaps.cartocdn.com/rastertiles/voyager_nolabels/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "\u00A9 OpenStreetMap contributors \u00A9 CARTO"
),
CartoDB.VoyagerOnlyLabels = list(
src = "CartoVOL",
q = "https://{s}.basemaps.cartocdn.com/rastertiles/voyager_only_labels/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "\u00A9 OpenStreetMap contributors \u00A9 CARTO"
),
CartoDB.VoyagerLabelsUnder = list(
src = "CartoVLU",
q = "https://{s}.basemaps.cartocdn.com/rastertiles/voyager_labels_under/{z}/{x}/{y}.png",
sub = c("a", "b", "c", "d"),
cit = "\u00A9 OpenStreetMap contributors \u00A9 CARTO"
),
HikeBike = list(
src = "HikeBike",
q = "https://tiles.wmflabs.org/hikebike/{z}/{x}/{y}.png",
sub = NA,
cit = "\u00A9 OpenStreetMap contributors"
),
HikeBike.HikeBike = list(
src = "HikeBike2",
q = "https://tiles.wmflabs.org/hikebike/{z}/{x}/{y}.png",
sub = NA,
cit = "\u00A9 OpenStreetMap contributors"
),
Wikimedia = list(
src = "Wikimedia",
q = "https://maps.wikimedia.org/osm-intl/{z}/{x}/{y}.png",
sub = NA,
cit = "Wikimedia"
),
Thunderforest = list(
src = "Tf",
q = "https://tile.thunderforest.com/cycle/{z}/{x}/{y}.png?apikey=XXXXXX",
sub = NA,
cit = "Maps \u00A9 www.thunderforest.com, Data \u00A9 www.osm.org/copyright"
),
Thunderforest.OpenCycleMap = list(
src = "Tf",
q = "https://tile.thunderforest.com/cycle/{z}/{x}/{y}.png?apikey=XXXXXX",
sub = NA,
cit = "Maps \u00A9 www.thunderforest.com, Data \u00A9 www.osm.org/copyright"
),
Thunderforest.Transport = list(
src = "Tf.Tr",
q = "https://tile.thunderforest.com/transport/{z}/{x}/{y}.png?apikey=XXXXXX",
sub = NA,
cit = "Maps \u00A9 www.thunderforest.com, Data \u00A9 www.osm.org/copyright"
),
Thunderforest.TransportDark = list(
src = "Tf.TrDr",
q = "https://tile.thunderforest.com/transport-dark/{z}/{x}/{y}.png?apikey=XXXXXX",
sub = NA,
cit = "Maps \u00A9 www.thunderforest.com, Data \u00A9 www.osm.org/copyright"
),
Thunderforest.SpinalMap = list(
src = "Tf.SP",
q = "https://tile.thunderforest.com/spinal-map/{z}/{x}/{y}.png?apikey=XXXXXX",
sub = NA,
cit = "Maps \u00A9 www.thunderforest.com, Data \u00A9 www.osm.org/copyright"
),
Thunderforest.Landscape = list(
src = "Tf.Lc",
q = "https://tile.thunderforest.com/landscape/{z}/{x}/{y}.png?apikey=XXXXXX",
sub = NA,
cit = "Maps \u00A9 www.thunderforest.com, Data \u00A9 www.osm.org/copyright"
),
Thunderforest.Outdoors= list(
src = "Tf.Out",
q = "https://tile.thunderforest.com/outdoors/{z}/{x}/{y}.png?apikey=XXXXXX",
sub = NA,
cit = "Maps \u00A9 www.thunderforest.com, Data \u00A9 www.osm.org/copyright"
),
Thunderforest.Pioneer = list(
src = "Tf.Pion",
q = "https://tile.thunderforest.com/pioneer/{z}/{x}/{y}.png?apikey=XXXXXX",
sub = NA,
cit = "Maps \u00A9 www.thunderforest.com, Data \u00A9 www.osm.org/copyright"
),
Thunderforest.MobileAtlas= list(
src = "Tf.MB",
q = "https://tile.thunderforest.com/mobile-atlas/{z}/{x}/{y}.png?apikey=XXXXXX",
sub = NA,
cit = "Maps \u00A9 www.thunderforest.com, Data \u00A9 www.osm.org/copyright"
),
Thunderforest.Neighbourhood= list(
src = "Tf.Nbg",
q = "https://tile.thunderforest.com/neighbourhood/{z}/{x}/{y}.png?apikey=XXXXXX",
sub = NA,
cit = "Maps \u00A9 www.thunderforest.com, Data \u00A9 www.osm.org/copyright"
)
)
}
param
}
|
#for loop
# NOTE(review): `x` is not initialised in the visible code - it presumably
# exists from an earlier part of the lesson; confirm before running.
# Appends 1..5 to x one element at a time.
for(i in 1:5) {
x <- c(x, i)
}
# Build a 3-row matrix column by column: column j is (j, j^2, 5*j),
# prepended with an initial column of ones.
A <- c(1,1,1)
for(j in 1:10){
A = cbind(A, c(j,j^2,5*j))
}
# Animate 100 histograms of uniform samples, pausing 0.5 s between frames,
# with a reference line at density 1 (the Uniform(0,1) density).
for(i in 1:100){
hist(runif(100), breaks=seq(from=0, to=1, by=0.05), probability =T, ylim=c(0,1.6), col="palegreen")
abline(h=1, lwd=3, col="purple")
Sys.sleep(0.5)
}
# Grow a Fibonacci-style sequence by repeated appending.
# (The duplicated initialisation line is preserved from the original.)
x0 <- c(1, 1)
x0 <- c(1, 1)
for (i in 1 : 10) {
x0 <- c(x0, x0[i+1]+x0[i])
}
# Iterate y0 <- -3*(y0 - 3) + y0 twenty times, recording every value.
y0 <- 5
y0_trace <- y0
for (i in 1 : 20) {
y0 <- -3 * (y0 -3) + y0
y0_trace <- c(y0_trace, y0)
}
# create a function
# fun_one(): square a matrix elementwise, or return 0 for any other input.
#
# Args:
#   x: any R object; only matrices are squared.
# Returns:
#   x^2 (elementwise) if `x` is a matrix, otherwise the scalar 0.
#   Fix: the original ended in an assignment, so the value was returned
#   invisibly; the result is now returned visibly. `**` (a deprecated
#   synonym) is replaced with the idiomatic `^` operator.
fun_one <- function(x) {
  if (is.matrix(x)) {
    x^2
  } else {
    0
  }
}
# fun_two(): for matrix input, return 2 * (y + 1)^2 elementwise;
# any non-matrix input is returned unchanged.
fun_two <- function(y){
  if (!is.matrix(y)) {
    return(y)
  }
  incremented <- y + 1
  squared <- incremented^2
  2 * squared
}
# fun_three(x, y): classify a pair of numbers by printing a letter.
#   "A" when both x and y exceed 5; "B" when either is negative
#   (and the pair did not already qualify for "A"); otherwise "C".
# NOTE(review): scalar `&&` / `||` would be more idiomatic than the
# vectorized `&` / `|` inside an if() condition - confirm inputs are
# always length-1 before changing.
fun_three <- function(x, y){
if (x >5 & y > 5){
print("A")
} else if (x < 0 | y < 0) {
print("B")
} else {
print("C")
}
} | /STAT 302 lesson4.R | no_license | NanTang1106/STAT-302-Sp17 | R | false | false | 845 | r | #for loop
# NOTE(review): `x` is not initialised in the visible code - it presumably
# exists from an earlier part of the lesson; confirm before running.
# Appends 1..5 to x one element at a time.
for(i in 1:5) {
x <- c(x, i)
}
# Build a 3-row matrix column by column: column j is (j, j^2, 5*j),
# prepended with an initial column of ones.
A <- c(1,1,1)
for(j in 1:10){
A = cbind(A, c(j,j^2,5*j))
}
# Animate 100 histograms of uniform samples, pausing 0.5 s between frames,
# with a reference line at density 1 (the Uniform(0,1) density).
for(i in 1:100){
hist(runif(100), breaks=seq(from=0, to=1, by=0.05), probability =T, ylim=c(0,1.6), col="palegreen")
abline(h=1, lwd=3, col="purple")
Sys.sleep(0.5)
}
# Grow a Fibonacci-style sequence by repeated appending.
# (The duplicated initialisation line is preserved from the original.)
x0 <- c(1, 1)
x0 <- c(1, 1)
for (i in 1 : 10) {
x0 <- c(x0, x0[i+1]+x0[i])
}
# Iterate y0 <- -3*(y0 - 3) + y0 twenty times, recording every value.
y0 <- 5
y0_trace <- y0
for (i in 1 : 20) {
y0 <- -3 * (y0 -3) + y0
y0_trace <- c(y0_trace, y0)
}
# create a function
# fun_one(): square a matrix elementwise, or return 0 for any other input.
#
# Args:
#   x: any R object; only matrices are squared.
# Returns:
#   x^2 (elementwise) if `x` is a matrix, otherwise the scalar 0.
#   Fix: the original ended in an assignment, so the value was returned
#   invisibly; the result is now returned visibly. `**` (a deprecated
#   synonym) is replaced with the idiomatic `^` operator.
fun_one <- function(x) {
  if (is.matrix(x)) {
    x^2
  } else {
    0
  }
}
# fun_two(): for matrix input, return 2 * (y + 1)^2 elementwise;
# any non-matrix input is returned unchanged.
fun_two <- function(y){
  if (!is.matrix(y)) {
    return(y)
  }
  incremented <- y + 1
  squared <- incremented^2
  2 * squared
}
# fun_three(x, y): classify a pair of numbers by printing a letter.
#   "A" when both x and y exceed 5; "B" when either is negative
#   (and the pair did not already qualify for "A"); otherwise "C".
# NOTE(review): scalar `&&` / `||` would be more idiomatic than the
# vectorized `&` / `|` inside an if() condition - confirm inputs are
# always length-1 before changing.
fun_three <- function(x, y){
if (x >5 & y > 5){
print("A")
} else if (x < 0 | y < 0) {
print("B")
} else {
print("C")
}
} |
## Two functions written to demonstrate Lexical Scoping in R
## Functions enable caching of computation intensive operation output
## makeCacheMatrix create a storage object for matrix inversions with supporting
## functions for setter and getter behaviours
# Create a caching wrapper around a matrix.
#
# Returns a list of four closures that share state through the enclosing
# environment:
#   set(y)          - replace the stored matrix and drop any cached inverse
#   get()           - return the stored matrix
#   setinv(inverse) - store a computed inverse
#   getinv()        - return the cached inverse (NULL until one is stored)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # invalidate any stale inverse
  }
  get <- function() {
    x
  }
  setinv <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinv <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setinv = setinv,
    getinv = getinv
  )
}
## cacheSolve solves for the inverse of a matrix object (defined as object
## makeCacheMatrix). Function checks if the inverse has already been solved
## and returns the cached version. If null, executes the solve function to
## calculate the inverse then caches the result.
# Return the inverse of the matrix wrapped by `x` (an object created by
# makeCacheMatrix), computing it at most once.
#
# Args:
#   x:   list created by makeCacheMatrix()
#   ...: additional arguments forwarded to solve()
# Returns:
#   The inverse matrix; cached inside `x` after the first computation.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  m <- x$getinv()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  # Bug fix: forward `...` to solve() - the signature accepted extra
  # arguments but the original call silently discarded them.
  m <- solve(data, ...)
  x$setinv(m)
  m
}
| /cachematrix.R | no_license | jplaxton/ProgrammingAssignment2 | R | false | false | 1,226 | r | ## Two functions written to demonstrate Lexical Scoping in R
## Functions enable caching of computation intensive operation output
## makeCacheMatrix create a storage object for matrix inversions with supporting
## functions for setter and getter behaviours
# Create a caching wrapper around a matrix.
#
# Returns a list of four closures that share state through the enclosing
# environment:
#   set(y)          - replace the stored matrix and drop any cached inverse
#   get()           - return the stored matrix
#   setinv(inverse) - store a computed inverse
#   getinv()        - return the cached inverse (NULL until one is stored)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # invalidate any stale inverse
  }
  get <- function() {
    x
  }
  setinv <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinv <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setinv = setinv,
    getinv = getinv
  )
}
## cacheSolve solves for the inverse of a matrix object (defined as object
## makeCacheMatrix). Function checks if the inverse has already been solved
## and returns the cached version. If null, executes the solve function to
## calculate the inverse then caches the result.
# Return the inverse of the matrix wrapped by `x` (an object created by
# makeCacheMatrix), computing it at most once.
#
# Args:
#   x:   list created by makeCacheMatrix()
#   ...: additional arguments forwarded to solve()
# Returns:
#   The inverse matrix; cached inside `x` after the first computation.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  m <- x$getinv()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  # Bug fix: forward `...` to solve() - the signature accepted extra
  # arguments but the original call silently discarded them.
  m <- solve(data, ...)
  x$setinv(m)
  m
}
|
library(wavethresh)
### Name: MaNoVe.wp
### Title: Make Node Vector (using Coifman-Wickerhauser best-basis type
### algorithm) on wavelet packet object
### Aliases: MaNoVe.wp
### Keywords: smooth
### ** Examples
#
# See example of use of this function in the examples section
# of the help of plot.wp
#
# A node vector vnv is created there that gets plotted.
#
| /data/genthat_extracted_code/wavethresh/examples/MaNoVe.wp.rd.R | no_license | surayaaramli/typeRrh | R | false | false | 370 | r | library(wavethresh)
### Name: MaNoVe.wp
### Title: Make Node Vector (using Coifman-Wickerhauser best-basis type
### algorithm) on wavelet packet object
### Aliases: MaNoVe.wp
### Keywords: smooth
### ** Examples
#
# See example of use of this function in the examples section
# of the help of plot.wp
#
# A node vector vnv is created there that gets plotted.
#
|
# (1) First time install
# R
# > install.packages(c("shiny", "shinydashboard", "pwr"))
# select CRAN mirrors 71
# > q()
# (2) Run app
# Rscript shinyApp.R
# --------------------------------------------------------------------------
# import library
library(shiny)
library(shinydashboard)
library(pwr)
# Two-sample t-test computed from summary statistics.
#
# Args:
#   m0, m1: sample means
#   s0, s1: sample standard deviations
#   n0, n1: sample sizes
#   dm:     difference in means under H0 (default 0)
#   equal.variance: assume equal variances (pooled SD)? Default FALSE,
#     which uses the Welch-Satterthwaite degrees of freedom.
# Returns (invisibly): a named numeric vector with the mean difference
#   (delta), standard error (se), t statistic (t) and two-sided p-value
#   (p.value). The same four values are also printed via cat(), exactly
#   as before.
t.test.func <- function(m0, m1, s0, s1, n0, n1, dm=0, equal.variance=FALSE)
{
  if (equal.variance == FALSE) { # welch-satterthwaite df
    se <- sqrt((s0^2/n0) + (s1^2/n1))
    df <- ((s0^2/n0 + s1^2/n1)^2) / ((s0^2/n0)^2/(n0-1) + (s1^2/n1)^2/(n1-1))
  } else { # pooled standard deviation, scaled by the sample sizes
    se <- sqrt((1/n0+1/n1) * ((n0-1)*s0^2 + (n1-1)*s1^2)/(n0+n1-2))
    df <- n0+n1-2
  }
  t <- (m0-m1-dm)/se
  dat <- c(delta = m0-m1, se = se, t = t, p.value = 2*pt(-abs(t), df))
  cat("Delta of means: ", dat[[1]], "\n")
  cat("Std error: ", dat[[2]], "\n")
  cat("t: ", dat[[3]], "\n")
  cat("p-value: ", dat[[4]])
  # Bug fix: the original returned the NULL result of the last cat();
  # return the computed statistics invisibly so callers can use them.
  invisible(dat)
}
# Shiny dashboard UI: three sidebar tabs, each pairing a settings box with
# a verbatim output box. Output ids ("pwr", "p.sig", "sig") and input ids
# (prefixed "sample.", "psig.", "sig.") are consumed by the server function
# defined below in this file.
ui <- dashboardPage(
dashboardHeader(title = "Experiment tools"),
dashboardSidebar(
sidebarMenu(
menuItem("Idea sample", tabName = "sample", icon = icon("users")),
menuItem("Test proportions", tabName = "psig", icon = icon("th")),
menuItem("Test means", tabName = "sig", icon = icon("th"))
)),
dashboardBody(
tabItems(
# First tab content
# Sample-size calculator tab: feeds output$pwr (pwr.t.test).
tabItem(tabName = "sample",
h2("Idea sample"),
fluidRow(
box(
title = "Idea sample size",
verbatimTextOutput("pwr")
),
box(
title = "Settings",
numericInput("sample.std", "Current standard deviation:", 10),
numericInput("sample.m0", "Current mean value:", 10),
numericInput("sample.m1", "Expect mean value:", 11),
numericInput("sample.power", "Power:", 0.8),
numericInput("sample.sig", "Significant level:", 0.05),
# Include clarifying text ----
helpText("Note: recommend Power and Significant level as default value.")
#actionButton("calculate", "Calculate")
))),
# Second tab content
# Proportion-test tab: feeds output$p.sig (fisher.test / prop.test).
tabItem(tabName = "psig",
h2("Significant test on proportions"),
fluidRow(
box(
title = "Significant test",
verbatimTextOutput("p.sig")
),
box(
title = "Settings",
numericInput("psig.n0", "Sample size of control:", 8700),
numericInput("psig.n1", "Sample size of variant:", 963),
numericInput("psig.e0", "Response size of control:", 375),
numericInput("psig.e1", "Response size of variant:", 34),
numericInput("psig.ci", "Confidence level", 0.95),
# Include clarifying text ----
helpText("Note: Test for equality of proportions between 2 groups")
#actionButton("calculate", "Calculate")
))),
# 3rd tab content
# Means-test tab: feeds output$sig (t.test.func defined above).
tabItem(tabName = "sig",
h2("Significant test on means"),
fluidRow(
box(
title = "Control group Settings",
numericInput("sig.n0", "Sample size of control:", 5000),
numericInput("sig.m0", "Mean value of control:", 0.42),
numericInput("sig.s0", "Standard deviation of control:", 0.43)
),
box(
title = "Variant group Settings",
numericInput("sig.n1", "Sample size of variant:", 5000),
numericInput("sig.m1", "Mean value of variant:", 0.42),
numericInput("sig.s1", "Standard deviation of variant:", 0.43),
numericInput("sig.dm", "H0 expected difference in means:", 0),
numericInput("sig.ci", "Confidence level", 0.95),
radioButtons("sig.equal.variance", label = "Is equal variance:",
choices = list("True" = TRUE, "False" = FALSE),
selected = FALSE),
# Include clarifying text ----
helpText("Note: Test for equality of means between 2 groups")
#actionButton("calculate", "Calculate")
),
box(
title = "Significant test",
verbatimTextOutput("sig")
)))
))
)
# Define server logic to summarize and view selected dataset ----
# Shiny server: wires the three UI tabs to their statistical outputs.
server <- function(input, output) {
# Tab 1: ideal sample size via a two-sample power analysis.
# Effect size d = |m1 - m0| / sd (Cohen's d from the tab's inputs).
output$pwr <- renderPrint({
# https://stats.idre.ucla.edu/r/dae/power-analysis-for-two-group-independent-sample-t-test/
d = abs(input$sample.m1 - input$sample.m0)/input$sample.std
pwr.t.test(d = d, power = input$sample.power, sig.level = input$sample.sig, type="two.sample")
})
# Tab 2: two tests for equality of proportions are printed; two more
# methods are kept as commented-out alternatives.
output$p.sig <- renderPrint({
##' method 1: fisher exact test
print(
fisher.test(rbind(c(input$psig.e0, input$psig.n0-input$psig.e0), c(input$psig.e1, input$psig.n1-input$psig.e1)), conf.level = input$psig.ci)
)
##' method 2: Normal Approximation to Binomial
#p0.hat <- input$e0/input$n0
#p1.hat <- input$e1/input$n1
#p.hat <- (input$n0*p0.hat + input$n1*p1.hat)/(input$n0 + input$n1)
#z <- (p0.hat - p1.hat)/sqrt(p.hat*(1-p.hat)*(1/input$n0 + 1/input$n1))
#pnorm(z, lower.tail = F)
##' method 3: 2-sample test for equality of proportions
# NOTE(review): prop.test() below hard-codes conf.level = 0.95 while
# fisher.test() above honours input$psig.ci - confirm this is intended.
print(
prop.test(c(input$psig.e0, input$psig.e1), c(input$psig.n0, input$psig.n1), correct=FALSE, conf.level = 0.95)
)
##' method 4: Exact binomial test
# print(binom.test(x=input$e1, n=input$n1, p=input$e0/input$e0, alternative = "greater", conf.level = 0.95))
})
# Tab 3: two-sample t-test from summary statistics (t.test.func above).
output$sig <- renderPrint({
# 2 sample t test
t.test.func(input$sig.m0, input$sig.m1, input$sig.s0, input$sig.s1, input$sig.n0, input$sig.n1, input$sig.dm, input$sig.equal.variance)
})
}
# Create Shiny app ----
shinyApp(ui, server)
| /shinyApp.R | no_license | lanyliu/experiment | R | false | false | 6,525 | r | # (1) First time install
# R
# > install.packages(c("shiny", "shinydashboard", "pwr"))
# select CRAN mirrors 71
# > q()
# (2) Run app
# Rscript shinyApp.R
# --------------------------------------------------------------------------
# import library
library(shiny)
library(shinydashboard)
library(pwr)
# Two-sample t-test computed from summary statistics.
#
# Args:
#   m0, m1: sample means
#   s0, s1: sample standard deviations
#   n0, n1: sample sizes
#   dm:     difference in means under H0 (default 0)
#   equal.variance: assume equal variances (pooled SD)? Default FALSE,
#     which uses the Welch-Satterthwaite degrees of freedom.
# Returns (invisibly): a named numeric vector with the mean difference
#   (delta), standard error (se), t statistic (t) and two-sided p-value
#   (p.value). The same four values are also printed via cat(), exactly
#   as before.
t.test.func <- function(m0, m1, s0, s1, n0, n1, dm=0, equal.variance=FALSE)
{
  if (equal.variance == FALSE) { # welch-satterthwaite df
    se <- sqrt((s0^2/n0) + (s1^2/n1))
    df <- ((s0^2/n0 + s1^2/n1)^2) / ((s0^2/n0)^2/(n0-1) + (s1^2/n1)^2/(n1-1))
  } else { # pooled standard deviation, scaled by the sample sizes
    se <- sqrt((1/n0+1/n1) * ((n0-1)*s0^2 + (n1-1)*s1^2)/(n0+n1-2))
    df <- n0+n1-2
  }
  t <- (m0-m1-dm)/se
  dat <- c(delta = m0-m1, se = se, t = t, p.value = 2*pt(-abs(t), df))
  cat("Delta of means: ", dat[[1]], "\n")
  cat("Std error: ", dat[[2]], "\n")
  cat("t: ", dat[[3]], "\n")
  cat("p-value: ", dat[[4]])
  # Bug fix: the original returned the NULL result of the last cat();
  # return the computed statistics invisibly so callers can use them.
  invisible(dat)
}
ui <- dashboardPage(
dashboardHeader(title = "Experiment tools"),
dashboardSidebar(
sidebarMenu(
menuItem("Idea sample", tabName = "sample", icon = icon("users")),
menuItem("Test proportions", tabName = "psig", icon = icon("th")),
menuItem("Test means", tabName = "sig", icon = icon("th"))
)),
dashboardBody(
tabItems(
# First tab content
tabItem(tabName = "sample",
h2("Idea sample"),
fluidRow(
box(
title = "Idea sample size",
verbatimTextOutput("pwr")
),
box(
title = "Settings",
numericInput("sample.std", "Current standard deviation:", 10),
numericInput("sample.m0", "Current mean value:", 10),
numericInput("sample.m1", "Expect mean value:", 11),
numericInput("sample.power", "Power:", 0.8),
numericInput("sample.sig", "Significant level:", 0.05),
# Include clarifying text ----
helpText("Note: recommend Power and Significant level as default value.")
#actionButton("calculate", "Calculate")
))),
# Second tab content
tabItem(tabName = "psig",
h2("Significant test on proportions"),
fluidRow(
box(
title = "Significant test",
verbatimTextOutput("p.sig")
),
box(
title = "Settings",
numericInput("psig.n0", "Sample size of control:", 8700),
numericInput("psig.n1", "Sample size of variant:", 963),
numericInput("psig.e0", "Response size of control:", 375),
numericInput("psig.e1", "Response size of variant:", 34),
numericInput("psig.ci", "Confidence level", 0.95),
# Include clarifying text ----
helpText("Note: Test for equality of proportions between 2 groups")
#actionButton("calculate", "Calculate")
))),
# 3rd tab content
tabItem(tabName = "sig",
h2("Significant test on means"),
fluidRow(
box(
title = "Control group Settings",
numericInput("sig.n0", "Sample size of control:", 5000),
numericInput("sig.m0", "Mean value of control:", 0.42),
numericInput("sig.s0", "Standard deviation of control:", 0.43)
),
box(
title = "Variant group Settings",
numericInput("sig.n1", "Sample size of variant:", 5000),
numericInput("sig.m1", "Mean value of variant:", 0.42),
numericInput("sig.s1", "Standard deviation of variant:", 0.43),
numericInput("sig.dm", "H0 expected difference in means:", 0),
numericInput("sig.ci", "Confidence level", 0.95),
radioButtons("sig.equal.variance", label = "Is equal variance:",
choices = list("True" = TRUE, "False" = FALSE),
selected = FALSE),
# Include clarifying text ----
helpText("Note: Test for equality of means between 2 groups")
#actionButton("calculate", "Calculate")
),
box(
title = "Significant test",
verbatimTextOutput("sig")
)))
))
)
# Define server logic to summarize and view selected dataset ----
server <- function(input, output) {
output$pwr <- renderPrint({
# https://stats.idre.ucla.edu/r/dae/power-analysis-for-two-group-independent-sample-t-test/
d = abs(input$sample.m1 - input$sample.m0)/input$sample.std
pwr.t.test(d = d, power = input$sample.power, sig.level = input$sample.sig, type="two.sample")
})
output$p.sig <- renderPrint({
##' method 1: fisher exact test
print(
fisher.test(rbind(c(input$psig.e0, input$psig.n0-input$psig.e0), c(input$psig.e1, input$psig.n1-input$psig.e1)), conf.level = input$psig.ci)
)
##' method 2: Normal Approximation to Binomial
#p0.hat <- input$e0/input$n0
#p1.hat <- input$e1/input$n1
#p.hat <- (input$n0*p0.hat + input$n1*p1.hat)/(input$n0 + input$n1)
#z <- (p0.hat - p1.hat)/sqrt(p.hat*(1-p.hat)*(1/input$n0 + 1/input$n1))
#pnorm(z, lower.tail = F)
##' method 3: 2-sample test for equality of proportions
print(
prop.test(c(input$psig.e0, input$psig.e1), c(input$psig.n0, input$psig.n1), correct=FALSE, conf.level = 0.95)
)
##' method 4: Exact binomial test
# print(binom.test(x=input$e1, n=input$n1, p=input$e0/input$e0, alternative = "greater", conf.level = 0.95))
})
output$sig <- renderPrint({
# 2 sample t test
t.test.func(input$sig.m0, input$sig.m1, input$sig.s0, input$sig.s1, input$sig.n0, input$sig.n1, input$sig.dm, input$sig.equal.variance)
})
}
# Create Shiny app ----
shinyApp(ui, server)
|
#app
# Assemble and launch the Shiny app from separate ui/server files.
# Bug fix: source() returns a list with components `value` and `visible`;
# shinyApp() needs the evaluated object itself, so extract $value from
# each sourced file.
ui <- source("ui.R")$value
server <- source("server.R")$value
shinyApp(ui, server)
| /dashboard/app.R | no_license | benilak/Senior_Project | R | false | false | 76 | r | #app
# Assemble and launch the Shiny app from separate ui/server files.
# Bug fix: source() returns a list with components `value` and `visible`;
# shinyApp() needs the evaluated object itself, so extract $value from
# each sourced file.
ui <- source("ui.R")$value
server <- source("server.R")$value
shinyApp(ui, server)
|
#read in full data set and extract data for only 1/2/2007 and 2/2/2007
# NOTE(review): hard-coded absolute Windows path - adjust for portability.
house<-read.table("C:/Users/davis450/Desktop/Coursera/household_power_consumption.txt", header = TRUE, sep=";", na.strings = "?")
house_dates<- house[house$Date %in% c("1/2/2007","2/2/2007"),]
#converting the date to R Date format
datetime <- strptime(paste(house_dates$Date, house_dates$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
#plot code for plot2
# Line plot of global active power over the two selected days, written to
# a 480x480 PNG.
global<- as.numeric(house_dates$Global_active_power)
png("plot2.png", width=480, height=480)
plot(datetime, global, type="l", xlab ="", ylab="Global Active power (kilowatts)")
dev.off()
| /plot2.R | no_license | NDavis72516/ExData_Plotting1 | R | false | false | 617 | r | #read in full data set and extract data for only 1/2/2007 and 2/2/2007
# Read the full power-consumption data set (missing values coded "?") and
# keep only 1/2/2007 and 2/2/2007.
# NOTE(review): hard-coded absolute Windows path - adjust for portability.
house<-read.table("C:/Users/davis450/Desktop/Coursera/household_power_consumption.txt", header = TRUE, sep=";", na.strings = "?")
house_dates<- house[house$Date %in% c("1/2/2007","2/2/2007"),]
#converting the date to R Date format
datetime <- strptime(paste(house_dates$Date, house_dates$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
#plot code for plot2
# Line plot of global active power over the two selected days, written to
# a 480x480 PNG.
global<- as.numeric(house_dates$Global_active_power)
png("plot2.png", width=480, height=480)
plot(datetime, global, type="l", xlab ="", ylab="Global Active power (kilowatts)")
dev.off()
|
# Hypothesis tests on the Faltoons data: compare two observed proportions
# (66/233 vs 47/167) and test independence of Weekdays vs Weekend.
# NOTE(review): setwd() with an absolute local path makes the script
# non-portable - prefer relative paths.
setwd("E:\\Excelr Data\\R Codes\\Hyothesis Testing")
##### Normality Test##################
library(readxl)
library(xlsx)
library(openxlsx)
library(WriteXLS)
library(readr)
Faltoons <- read_csv("E:/Data Science/Assignments/Hypothesis Testing/Faltoons.csv")
View(Faltoons)
# NOTE(review): attach() is discouraged; Faltoons$Weekdays etc. is safer.
attach(Faltoons)
# Cross-tabulate the two columns.
table1 <- table(Weekdays,Weekend)
table1
?prop.test
# Two-sample tests for equality of proportions, two-sided and one-sided,
# without continuity correction.
prop.test(x=c(66,47),n=c(233,167),conf.level = 0.95,correct = FALSE,alternative = "two.sided")
prop.test(x=c(66,47),n=c(233,167),conf.level = 0.95,correct = FALSE,alternative = "less")
# Chi-squared test of independence on the contingency table.
chisq.test(table(Weekdays,Weekend))
| /Hypothesis/Faltoons.R | no_license | meghagowda3/datascience-Rcode-projects | R | false | false | 574 | r | setwd("E:\\Excelr Data\\R Codes\\Hyothesis Testing")
##### Normality Test##################
# Hypothesis tests on the Faltoons data: compare two observed proportions
# (66/233 vs 47/167) and test independence of Weekdays vs Weekend.
library(readxl)
library(xlsx)
library(openxlsx)
library(WriteXLS)
library(readr)
# NOTE(review): absolute local path - non-portable.
Faltoons <- read_csv("E:/Data Science/Assignments/Hypothesis Testing/Faltoons.csv")
View(Faltoons)
# NOTE(review): attach() is discouraged; Faltoons$Weekdays etc. is safer.
attach(Faltoons)
# Cross-tabulate the two columns.
table1 <- table(Weekdays,Weekend)
table1
?prop.test
# Two-sample tests for equality of proportions, two-sided and one-sided,
# without continuity correction.
prop.test(x=c(66,47),n=c(233,167),conf.level = 0.95,correct = FALSE,alternative = "two.sided")
prop.test(x=c(66,47),n=c(233,167),conf.level = 0.95,correct = FALSE,alternative = "less")
# Chi-squared test of independence on the contingency table.
chisq.test(table(Weekdays,Weekend))
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/sample.R
\name{lawn_sample}
\alias{lawn_sample}
\title{Return features from FeatureCollection at random}
\usage{
lawn_sample(features = NULL, n = 100, lint = FALSE)
}
\arguments{
\item{features}{A FeatureCollection}
\item{n}{(integer) Number of features to generate}
\item{lint}{(logical) Lint or not. Uses geojsonhint. Takes up increasing time
as the object to get linted increases in size, so probably use by
default for small objects, but not for large if you know they are good geojson
objects. Default: \code{FALSE}}
}
\value{
A \code{\link{data-FeatureCollection}}
}
\description{
Takes a \code{\link{data-FeatureCollection}} and returns a
\code{\link{data-FeatureCollection}} containing the given number of features, selected at random
}
\examples{
lawn_sample(lawn_data$points_average, 1)
lawn_sample(lawn_data$points_average, 2)
lawn_sample(lawn_data$points_average, 3)
}
\seealso{
Other data functions: \code{\link{lawn_featurecollection}};
\code{\link{lawn_filter}}; \code{\link{lawn_linestring}};
\code{\link{lawn_point}}; \code{\link{lawn_polygon}};
\code{\link{lawn_random}}; \code{\link{lawn_remove}}
}
| /man/lawn_sample.Rd | permissive | carrillo/lawn | R | false | false | 1,193 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/sample.R
\name{lawn_sample}
\alias{lawn_sample}
\title{Return features from FeatureCollection at random}
\usage{
lawn_sample(features = NULL, n = 100, lint = FALSE)
}
\arguments{
\item{features}{A FeatureCollection}
\item{n}{(integer) Number of features to generate}
\item{lint}{(logical) Lint or not. Uses geojsonhint. Takes up increasing time
as the object to get linted increases in size, so probably use by
default for small objects, but not for large if you know they are good geojson
objects. Default: \code{FALSE}}
}
\value{
A \code{\link{data-FeatureCollection}}
}
\description{
Takes a \code{\link{data-FeatureCollection}} and returns a
\code{\link{data-FeatureCollection}} containing the given number of features, selected at random
}
\examples{
lawn_sample(lawn_data$points_average, 1)
lawn_sample(lawn_data$points_average, 2)
lawn_sample(lawn_data$points_average, 3)
}
\seealso{
Other data functions: \code{\link{lawn_featurecollection}};
\code{\link{lawn_filter}}; \code{\link{lawn_linestring}};
\code{\link{lawn_point}}; \code{\link{lawn_polygon}};
\code{\link{lawn_random}}; \code{\link{lawn_remove}}
}
|
## Getting and Cleaning Data: Week 4 Project ##
## The script uses data from https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
## And aims to do all of the following:
## 1. Merges the training and the test sets to create on data set.
## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
## 3. Uses descriptive activity names to name the activities in the data set.
## 4. Appropriately labels the data set with descriptive variables names.
## 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
library(reshape2)
library(tidyverse)
## Set working directory and download the data to local storage
setwd("H:/Documents/MDA/3_getting_and_cleaning_data/Project")
data_url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
data_file <- "UCI HAR Dataset.zip"
if(!file.exists(data_file)) {
# If the file does not exist already, then we will download the file
status <- download.file(data_url, data_file, method = "curl")
if (status != 0) {
stop("Could not download data")
}
}
## Unzip the files
unzip(data_file, setTimes = TRUE)
## Read the names of the features and activity labels.
## Bug fix: unzip() creates the directory "UCI HAR Dataset"; the original
## paths used "UCI Har Dataset", which only worked on case-insensitive
## file systems (Windows) and fails on Linux/macOS.
features <- read.table("UCI HAR Dataset/features.txt", sep = " ", col.names = c("column_number", "feature"))
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt", sep = " ", col.names = c("id", "activity"))
## Extract the features for mean and standard deviations only (Step 2.)
features_wanted <- grep(".*mean[(][)].*|.*std[(][)].*", features$feature)
# Stylistic preference to use "_" as a general separator, so converted here using tidyverse
features[, 2] <- features[, 2] %>% {gsub("-", "_", .)} %>% {gsub("[()]", "", .)} %>% tolower()
features_wanted_names <- features[features_wanted, 2]
## Load the dataset
train <- read.table("UCI HAR Dataset/train/X_train.txt")[features_wanted]
train_activities <- read.table("UCI HAR Dataset/train/Y_train.txt")
train_subjects <- read.table("UCI HAR Dataset/train/subject_train.txt")
train <- cbind(train_subjects, train_activities, train)
test <- read.table("UCI HAR Dataset/test/X_test.txt")[features_wanted]
test_activities <- read.table("UCI HAR Dataset/test/Y_test.txt")
test_subjects <- read.table("UCI HAR Dataset/test/subject_test.txt")
test <- cbind(test_subjects, test_activities, test)
## Merge the datasets together, turn first two columns into factors and add descriptive labels (Step 1., 3. and 4.)
data <- rbind(train, test)
colnames(data) <- c("subject", "activity", features_wanted_names)
data$activity <- factor(data$activity, levels = activity_labels[, 1], labels = activity_labels[, 2])
data$subject <- as.factor(data$subject)
## Melt and cast to obtain tidy data with the means and standard deviations (Step 5.)
melt_data <- melt(data, id = c("subject", "activity"))
recast_data <- dcast(melt_data, subject + activity ~ variable, mean)
## Write the tidy data
write.table(recast_data, "tidy.txt", row.names = FALSE)
## END ## | /run_analysis.R | no_license | oscarchan1996/getting_and_cleaning_data_project | R | false | false | 3,200 | r | ## Getting and Cleaning Data: Week 4 Project ##
## The script uses data from https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
## And aims to do all of the following:
## 1. Merges the training and the test sets to create on data set.
## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
## 3. Uses descriptive activity names to name the activities in the data set.
## 4. Appropriately labels the data set with descriptive variables names.
## 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
library(reshape2)
library(tidyverse)
## Set working directory and download the data to local storage
setwd("H:/Documents/MDA/3_getting_and_cleaning_data/Project")
data_url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
data_file <- "UCI HAR Dataset.zip"
if(!file.exists(data_file)) {
# If the file does not exist already, then we will download the file
status <- download.file(data_url, data_file, method = "curl")
if (status != 0) {
stop("Could not download data")
}
}
## Unzip the files
unzip(data_file, setTimes = TRUE)
## Read the names of the features and activity labels.
## Bug fix: unzip() creates the directory "UCI HAR Dataset"; the original
## paths used "UCI Har Dataset", which only worked on case-insensitive
## file systems (Windows) and fails on Linux/macOS.
features <- read.table("UCI HAR Dataset/features.txt", sep = " ", col.names = c("column_number", "feature"))
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt", sep = " ", col.names = c("id", "activity"))
## Extract the features for mean and standard deviations only (Step 2.)
features_wanted <- grep(".*mean[(][)].*|.*std[(][)].*", features$feature)
# Stylistic preference to use "_" as a general separator, so converted here using tidyverse
features[, 2] <- features[, 2] %>% {gsub("-", "_", .)} %>% {gsub("[()]", "", .)} %>% tolower()
features_wanted_names <- features[features_wanted, 2]
## Load the dataset
train <- read.table("UCI HAR Dataset/train/X_train.txt")[features_wanted]
train_activities <- read.table("UCI HAR Dataset/train/Y_train.txt")
train_subjects <- read.table("UCI HAR Dataset/train/subject_train.txt")
train <- cbind(train_subjects, train_activities, train)
test <- read.table("UCI HAR Dataset/test/X_test.txt")[features_wanted]
test_activities <- read.table("UCI HAR Dataset/test/Y_test.txt")
test_subjects <- read.table("UCI HAR Dataset/test/subject_test.txt")
test <- cbind(test_subjects, test_activities, test)
## Merge the datasets together, turn first two columns into factors and add descriptive labels (Step 1., 3. and 4.)
data <- rbind(train, test)
colnames(data) <- c("subject", "activity", features_wanted_names)
data$activity <- factor(data$activity, levels = activity_labels[, 1], labels = activity_labels[, 2])
data$subject <- as.factor(data$subject)
## Melt and cast to obtain tidy data with the means and standard deviations (Step 5.)
melt_data <- melt(data, id = c("subject", "activity"))
recast_data <- dcast(melt_data, subject + activity ~ variable, mean)
## Write the tidy data
write.table(recast_data, "tidy.txt", row.names = FALSE)
## END ## |
#' make_plot() creates the Curriculum Pacing visualization.
#' Below are the arguments of the function
#'
#' @param student_step_data A tibble or data.frame that contains data from PSLC DataShop in either student-step or student-problem format
#' @param problem_hierarchy_order_data A tibble or data.frame that has two columns called "Problem Hierarchy" and "Problem Hierarchy Order" where "Problem Hierarchy" contains all of the unique Problem Hierarchy values from the DataShop file and "Problem Hierarchy Order" contains integers that order the hierarchy values in a sorted order.
#' @param time_scale_type Type of the time scale ("Relative" or "Absolute")
#' @param time_scale_resolution Resolution of the time scale (Day, Week or Month)
#' @param min_time_unit Only used for relative time scale, minimum time unit for the data
#' @param max_time_unit Only used for relative time scale, maximum time unit for the data
#' @param min_datetime_unit Only used for absolute time scale, minimum time stamp for the data in YYYY-MM-DD HH:MM:SS format
#' @param max_datetime_unit Only used for absolute time scale, maximum time stamp for the data in YYYY-MM-DD HH:MM:SS format
#' @param plot_type Type of the plot, "Usage" or "Usage and performance". "Usage and performance" requires Corrects and Incorrects columns to be present in the data.
#'
#' @return A ggplot object that can be viewed or saved using ggsave()
#'
#'
make_plot <- function(student_step_data,
                      problem_hierarchy_order_data = NULL,
                      time_scale_type = c("Relative", "Absolute"),
                      time_scale_resolution = c("Day", "Week", "Month"),
                      min_time_unit = 1,
                      max_time_unit = 52,
                      min_datetime_unit = "1900-01-01 00:00:00",
                      max_datetime_unit = "3000-01-01 00:00:00",
                      plot_type = c("Usage", "Usage and performance")) {
  # Attach the required packages quietly.
  # NOTE(review): library() would be more idiomatic than require() here,
  # since a missing package should be a hard error, not a FALSE return.
  suppressMessages(suppressWarnings(require(dplyr)))
  suppressMessages(suppressWarnings(require(ggplot2)))
  suppressMessages(suppressWarnings(require(stringr)))
  suppressMessages(suppressWarnings(require(ggthemes)))
  suppressMessages(suppressWarnings(require(gtools)))
  suppressMessages(suppressWarnings(require(lubridate)))
  # Robustness fix: validate the choice arguments. match.arg() picks the
  # first option when the full default vector is passed (same effective
  # behaviour as before for default calls) and rejects invalid values
  # early instead of failing deep inside the pipeline.
  time_scale_type <- match.arg(time_scale_type)
  time_scale_resolution <- match.arg(time_scale_resolution)
  plot_type <- match.arg(plot_type)
  if (plot_type == "Usage and performance") {
    # Performance plots need graded attempts; fail fast if the columns
    # are entirely missing.
    stopifnot(!all(is.na(student_step_data$Corrects)), !all(is.na(student_step_data$Incorrects)))
  } else {
    # Usage-only plots never read these columns; fill them with NA so the
    # downstream summarise() code stays uniform.
    student_step_data <- student_step_data %>%
      mutate(
        Corrects = NA_real_,
        Incorrects = NA_real_
      )
  }
  if (is.null(problem_hierarchy_order_data)) {
    # No explicit ordering supplied: derive one by natural-sorting the
    # hierarchy labels. Bug fix: the original referenced an undefined
    # object `rdf` here; the labels must come from `student_step_data`.
    ord_df <- tibble(
      `Problem Hierarchy` = unique(student_step_data$`Problem Hierarchy`)
    ) %>%
      mutate(`Problem Hierarchy` = factor(`Problem Hierarchy`, gtools::mixedsort(`Problem Hierarchy`))) %>%
      arrange(`Problem Hierarchy`) %>%
      mutate(`Problem Hierarchy` = as.character(`Problem Hierarchy`))
  } else {
    ord_df <- problem_hierarchy_order_data %>%
      arrange(`Problem Hierarchy Order`)
  }
  # Re-level the hierarchy factor so the y axis follows the chosen order.
  df <- student_step_data %>%
    mutate(`Problem Hierarchy` = factor(`Problem Hierarchy`, ord_df$`Problem Hierarchy`))
  if (time_scale_type == "Relative") {
    # Relative scale: bin problem start times, then express each
    # student's bins as offsets from that student's first active bin
    # (1 = first period of activity).
    min_dt <- date(min(floor_date(df$`Problem Start Time`, str_to_lower(time_scale_resolution))))
    max_dt <- date(max(floor_date(df$`Problem Start Time`, str_to_lower(time_scale_resolution))))
    dt_ord_df <- tibble(dt = seq.Date(min_dt, max_dt, by = str_to_lower(time_scale_resolution))) %>%
      mutate(ord = row_number())
    plt_rdf <- df %>%
      mutate(date_binned = as_date(floor_date(`Problem Start Time`, str_to_lower(time_scale_resolution)))) %>%
      inner_join(dt_ord_df, by = c("date_binned" = "dt")) %>%
      group_by(`Anon Student Id`) %>%
      mutate(min_ord = min(ord)) %>%
      ungroup() %>%
      group_by(`Anon Student Id`) %>%
      mutate(rel_date_binned = ord - min_ord + 1)
    plt_df <- plt_rdf %>%
      rename(`Time Unit` = rel_date_binned) %>%
      filter(`Time Unit` <= max_time_unit, `Time Unit` >= min_time_unit) %>%
      group_by(`Time Unit`, `Problem Hierarchy`) %>%
      summarise(n = n_distinct(`Anon Student Id`),
                pct_correct = mean(Corrects / (Corrects + Incorrects) * 100)) %>%
      mutate(pct_correct = if_else(is.nan(pct_correct), NA_real_, pct_correct)) %>%
      ungroup() %>%
      mutate(`Time Unit` = factor(`Time Unit`, min_time_unit:max_time_unit))
  } else {
    # Absolute scale: keep calendar dates, restricted to the requested
    # [min_datetime_unit, max_datetime_unit] window.
    plt_rdf <- df %>%
      filter(
        as_datetime(`Problem Start Time`) <= as_datetime(max_datetime_unit) &
          as_datetime(`Problem Start Time`) >= as_datetime(min_datetime_unit)
      ) %>%
      mutate(date_binned = as_date(floor_date(`Problem Start Time`, str_to_lower(time_scale_resolution))))
    plt_df <- plt_rdf %>%
      rename(`Time Unit` = date_binned) %>%
      group_by(`Time Unit`, `Problem Hierarchy`) %>%
      summarise(n = n_distinct(`Anon Student Id`),
                pct_correct = mean(Corrects / (Corrects + Incorrects) * 100)) %>%
      mutate(pct_correct = if_else(is.nan(pct_correct), NA_real_, pct_correct)) %>%
      ungroup()
  }
  if (plot_type == "Usage and performance") {
    # Bubble plot: size = distinct students, colour = mean percent correct.
    plt <- plt_df %>%
      ggplot(aes(`Time Unit`, `Problem Hierarchy`)) +
      geom_point(aes(color = pct_correct, size = n)) +
      scale_color_gradient2(low = "#d35400", mid = "#f1c40f", high = "#27ae60", midpoint = 50) +
      theme_bw() +
      labs(x = "Time Unit",
           y = "Problem Hierarchy",
           size = "Number of\nStudents",
           color = "Percent\nCorrect",
           title = "Curriculum Pacing usage and performance plot",
           subtitle = "Usage and Performance (Number of Students and Percent Correct)") +
      theme(text = element_text(size = 15.5),
            axis.text.y = element_text(size = 9))
  } else {
    # Heat map of usage only.
    plt <- plt_df %>%
      ggplot(aes(`Time Unit`, `Problem Hierarchy`)) +
      geom_tile(aes(fill = n)) +
      scale_fill_continuous(low = "gray90", high = "gray10") +
      theme_bw() +
      labs(x = "Time Unit",
           y = "Problem Hierarchy",
           fill = "Number of\nstudents",
           title = "Curriculum pacing plot")
  }
  if (time_scale_type == "Relative") {
    # The relative axis is a factor with potentially many levels; show at
    # most 15 evenly spaced tick labels.
    plt <- plt +
      scale_x_discrete(breaks = function(x) {
        y <- as.integer(x)
        as.character(round(seq.int(min(y), max(y), length.out = 15)))
      }, drop = FALSE)
  }
  return(plt)
} | /CurriculumPacing/program/make_plot.R | no_license | DannyWeitekamp/WorkflowComponents | R | false | false | 6,758 | r | #' make_plot() creates the Curriculum Pacing visualization.
#' Below are the arguments of the function
#'
#' @param student_step_data A tibble or data.frame that contains data from PSLC DataShop in either student-step or student-problem format
#' @param problem_hierarchy_order_data A tibble or data.frame that has two columns called "Problem Hierarchy" and "Problem Hierarchy Order" where "Problem Hierarchy" contains all of the unique Problem Hierarchy values from the DataShop file and "Problem Hierarchy Order" contains integers that order the hierarchy values in a sorted order.
#' @param time_scale_type Type of the time scale ("Relative" or "Absolute")
#' @param time_scale_resolution Resolution of the time scale (Day, Week or Month)
#' @param min_time_unit Only used for relative time scale, minimum time unit for the data
#' @param max_time_unit Only used for relative time scale, maximum time unit for the data
#' @param min_datetime_unit Only used for absolute time scale, minimum time stamp for the data in YYYY-MM-DD HH:MM:SS format
#' @param max_datetime_unit Only used for absolute time scale, maximum time stamp for the data in YYYY-MM-DD HH:MM:SS format
#' @param plot_type Type of the plot, "Usage" or "Usage and performance". "Usage and performance" requires Corrects and Incorrects columns to be present in the data.
#'
#' @return A ggplot object that can be viewed or saved using ggsave()
#'
#'
make_plot <- function(student_step_data,
                      problem_hierarchy_order_data = NULL,
                      time_scale_type = c("Relative", "Absolute"),
                      time_scale_resolution = c("Day", "Week", "Month"),
                      min_time_unit = 1,
                      max_time_unit = 52,
                      min_datetime_unit = "1900-01-01 00:00:00",
                      max_datetime_unit = "3000-01-01 00:00:00",
                      plot_type = c("Usage", "Usage and performance")) {
  # Attach required packages quietly.
  # NOTE(review): require() returns FALSE instead of erroring when a package is
  # missing; library() would fail fast here. Kept to preserve original loading
  # behaviour.
  suppressMessages(suppressWarnings(require(dplyr)))
  suppressMessages(suppressWarnings(require(ggplot2)))
  suppressMessages(suppressWarnings(require(stringr)))
  suppressMessages(suppressWarnings(require(ggthemes)))
  suppressMessages(suppressWarnings(require(gtools)))
  suppressMessages(suppressWarnings(require(lubridate)))
  # Collapse the c(...) defaults to a single value. Without this, calling the
  # function without these arguments compares a length-2 vector with `==`
  # below, which errors inside if() on R >= 4.2.
  time_scale_type <- match.arg(time_scale_type)
  time_scale_resolution <- match.arg(time_scale_resolution)
  plot_type <- match.arg(plot_type)
  if (plot_type == "Usage and performance") {
    # Performance plots need at least some non-missing correctness counts.
    stopifnot(!all(is.na(student_step_data$Corrects)), !all(is.na(student_step_data$Incorrects)))
  } else {
    # Usage-only plots never display these columns; fill them with NA so the
    # summarise() calls below are well-defined either way.
    student_step_data <- student_step_data %>%
      mutate(
        Corrects = NA_real_,
        Incorrects = NA_real_
      )
  }
  # Determine the display order of the problem hierarchy.
  if (is.null(problem_hierarchy_order_data)) {
    # BUG FIX: the original referenced an undefined object `rdf`; the unique
    # hierarchy values must come from the input data itself.
    ord_df <- tibble(
      `Problem Hierarchy` = unique(student_step_data$`Problem Hierarchy`)
    ) %>%
      mutate(`Problem Hierarchy` = factor(`Problem Hierarchy`, gtools::mixedsort(`Problem Hierarchy`))) %>%
      arrange(`Problem Hierarchy`) %>%
      mutate(`Problem Hierarchy` = as.character(`Problem Hierarchy`))
  } else {
    ord_df <- problem_hierarchy_order_data %>%
      arrange(`Problem Hierarchy Order`)
  }
  df <- student_step_data %>%
    mutate(`Problem Hierarchy` = factor(`Problem Hierarchy`, ord_df$`Problem Hierarchy`))
  if (time_scale_type == "Relative") {
    # Relative scale: bin problem-start times at the chosen resolution, then
    # express each bin as an offset from every student's first active bin
    # (so every student starts at Time Unit 1).
    min_dt <- date(min(floor_date(df$`Problem Start Time`, str_to_lower(time_scale_resolution))))
    max_dt <- date(max(floor_date(df$`Problem Start Time`, str_to_lower(time_scale_resolution))))
    dt_ord_df <- tibble(dt = seq.Date(min_dt, max_dt, by = str_to_lower(time_scale_resolution))) %>%
      mutate(ord = row_number())
    plt_rdf <- df %>%
      mutate(date_binned = as_date(floor_date(`Problem Start Time`, str_to_lower(time_scale_resolution)))) %>%
      inner_join(dt_ord_df, by = c("date_binned" = "dt")) %>%
      group_by(`Anon Student Id`) %>%
      mutate(rel_date_binned = ord - min(ord) + 1) %>%
      ungroup()
    plt_df <- plt_rdf %>%
      rename(`Time Unit` = rel_date_binned) %>%
      filter(`Time Unit` <= max_time_unit, `Time Unit` >= min_time_unit) %>%
      group_by(`Time Unit`, `Problem Hierarchy`) %>%
      summarise(n = n_distinct(`Anon Student Id`),
                pct_correct = mean(Corrects / (Corrects + Incorrects) * 100)) %>%
      mutate(pct_correct = if_else(is.nan(pct_correct), NA_real_, pct_correct)) %>%
      ungroup() %>%
      mutate(`Time Unit` = factor(`Time Unit`, min_time_unit:max_time_unit))
  } else {
    # Absolute scale: keep rows inside the requested timestamp window, then
    # bin start times at the chosen resolution.
    plt_rdf <- df %>%
      filter(
        as_datetime(`Problem Start Time`) <= as_datetime(max_datetime_unit) &
          as_datetime(`Problem Start Time`) >= as_datetime(min_datetime_unit)
      ) %>%
      mutate(date_binned = as_date(floor_date(`Problem Start Time`, str_to_lower(time_scale_resolution))))
    plt_df <- plt_rdf %>%
      rename(`Time Unit` = date_binned) %>%
      group_by(`Time Unit`, `Problem Hierarchy`) %>%
      summarise(n = n_distinct(`Anon Student Id`),
                pct_correct = mean(Corrects / (Corrects + Incorrects) * 100)) %>%
      mutate(pct_correct = if_else(is.nan(pct_correct), NA_real_, pct_correct)) %>%
      ungroup()
  }
  # Build the plot: points coloured by percent-correct for the combined view,
  # otherwise a grey-scale tile map of distinct-student counts.
  if (plot_type == "Usage and performance") {
    plt <- plt_df %>%
      ggplot(aes(`Time Unit`, `Problem Hierarchy`)) +
      geom_point(aes(color = pct_correct, size = n)) +
      scale_color_gradient2(low = "#d35400", mid = "#f1c40f", high = "#27ae60", midpoint = 50) +
      theme_bw() +
      labs(x = "Time Unit",
           y = "Problem Hierarchy",
           size = "Number of\nStudents",
           color = "Percent\nCorrect",
           title = "Curriculum Pacing usage and performance plot",
           subtitle = "Usage and Performance (Number of Students and Percent Correct)") +
      theme(text = element_text(size = 15.5),
            axis.text.y = element_text(size = 9))
  } else {
    plt <- plt_df %>%
      ggplot(aes(`Time Unit`, `Problem Hierarchy`)) +
      geom_tile(aes(fill = n)) +
      scale_fill_continuous(low = "gray90", high = "gray10") +
      theme_bw() +
      labs(x = "Time Unit",
           y = "Problem Hierarchy",
           fill = "Number of\nstudents",
           title = "Curriculum pacing plot")
  }
  if (time_scale_type == "Relative") {
    # Thin the discrete x axis down to ~15 evenly spaced labels.
    plt <- plt +
      scale_x_discrete(breaks = function(x) {
        y <- as.integer(x)
        as.character(round(seq.int(min(y), max(y), length.out = 15)))
      }, drop = FALSE)
  }
  plt
}
# ---- Setup: packages, helpers, h2o cluster, and data ----------------------
library(tidyverse)
library(data.table)
library(h2o)
# Project helpers; presumably define metrics() and plotPred() used below —
# TODO confirm against code/tools.R and code/plott.R.
source('code/tools.R')
source('code/plott.R')
# start h2o session
h2o.init(nthreads=-1, max_mem_size="52G")
train = h2o.importFile(path = 'data/group_b/train.csv')
valid = h2o.importFile(path = 'data/group_b/valid.csv')
# log the label
train['TrueAnswer_log'] = log(train['TrueAnswer'])
valid['TrueAnswer_log'] = log(valid['TrueAnswer'])
# set X and y
y_true = 'TrueAnswer'
y_log = 'TrueAnswer_log'
# NOTE(review): predictors are picked by column position; this silently breaks
# if the CSV layout changes. Selecting by name would be safer.
X = names(train)[c(3, 10:59, 63)]
##################################################################
######################## Random Forest ###########################
##################################################################
# Random forest fitted on the log-transformed target; predictions are mapped
# back to the original scale with exp() before scoring.
model_rf_log <- h2o.randomForest(
  training_frame = train,
  validation_frame = valid,
  y = y_log,
  x = X,
  ntrees = 200,
  max_depth = 20,
  min_rows = 1,
  stopping_rounds = 5,
  stopping_metric = 'MSE',
  stopping_tolerance = 0.0001
)
# performance check
summary(model_rf_log)
valid['y_pred_rf'] = exp(h2o.predict(model_rf_log, valid))
metrics(valid['y_pred_rf'], valid[y_true])
# plot
# BUG FIX: this frame was assigned to `valid_dt_b` while every later line in
# the script reads `valid_dt`, which was never created.
valid_dt = as.data.table(valid)
valid_dt$y_pred_rf = as.data.table(valid$y_pred_rf)
# NOTE(review): the model label says 'xgb' although this plots the RF
# prediction — probably a copy-paste slip; confirm what plotPred() expects
# before changing it.
plotPred(valid_dt, group = 'GroupA-818', model = 'xgb', activity = FALSE)
##################################################################
########################### GBM ##################################
##################################################################
# GBM on the log target, mirroring the random-forest section above.
model_gbm <- h2o.gbm(training_frame=train,
model_id = 'model_gbm_log',
validation_frame=valid,
x = X,
y = y_log,
ntrees = 50,
max_depth = 20,
stopping_rounds = 5,
stopping_metric = 'MSE',
stopping_tolerance = 0.001)
# performance check
summary(model_gbm)
# exp() undoes the log transform applied to the training label.
valid['y_pred_gbm'] = exp(h2o.predict(model_gbm, valid))
metrics(valid['y_pred_gbm'], valid[y_true])
valid_dt$y_pred_gbm = as.data.table(valid$y_pred_gbm)
plotPred(valid_dt, group = 'GroupA-804', model = 'gbm', activity = FALSE)
# Export the GBM as a MOJO and re-import it (round-trip sanity check).
path <- "/home/dsun/Baseline/models_server/mojo/test"
mojo_destination <- h2o.download_mojo(model = model_gbm, path = path)
imported_model <- h2o.import_mojo('/home/dsun/Baseline/models_server/mojo/test/model_gbm_log.zip')
##################################################################
######################## XGBoost #################################
##################################################################
model_xgb <- h2o.xgboost(training_frame=train,
validation_frame=valid,
x = X,
y = y_log,
ntrees = 300,
max_depth = 10,
stopping_rounds = 5,
stopping_metric = 'MSE',
stopping_tolerance = 0.001,
verbose = FALSE)
# performance check
summary(model_xgb)
# NOTE(review): `grid_xgb` is defined further down this script; this line
# only works when the tuning section below has already been run in the same
# session — run-order hazard in top-to-bottom execution.
model = h2o.getModel(grid_xgb@model_ids[[1]])
# No exp() here: the grid below is trained on y_true (raw scale), so its
# predictions are presumably already on the original scale — TODO confirm.
valid['y_pred_xgb'] = (h2o.predict(model, valid))
metrics(valid['y_pred_xgb'], valid[y_true])
valid_dt$y_pred_xgb = as.data.table(valid$y_pred_xgb)
plotPred(valid_dt, group = 'GroupA-813', model = 'xgb', activity = TRUE)
################# Tuning the parameters #########################
# Random-discrete search space for XGBoost hyper-parameters.
hyper_params = list(
ntrees = c(200,300,400),
max_depth = seq(8,13,1),
learn_rate = seq(0.01, 0.2, 0.01),
sample_rate = seq(0.2,1,0.01),
col_sample_rate = seq(0.2,1,0.01),
col_sample_rate_per_tree = seq(0.2,1,0.01),
min_rows = seq(0,500,50),
reg_lambda = seq(0,1,0.1),
reg_alpha = seq(0,1,0.1)
)
search_criteria = list(
strategy = "RandomDiscrete",
max_runtime_secs = 3600,
max_models = 50,
seed = 1234,
stopping_rounds = 3,
stopping_metric = "MSE",
stopping_tolerance = 0.001
)
# Grid search on the RAW target (y_true), unlike model_xgb above (y_log).
grid_xgb <- h2o.grid(
hyper_params = hyper_params,
search_criteria = search_criteria,
algorithm = "xgboost",
x = X,
y = y_true,
training_frame = train,
validation_frame = valid,
max_runtime_secs = 1800,
stopping_rounds = 5,
stopping_tolerance = 0.001,
stopping_metric = "MSE",
score_tree_interval = 10,
seed = 1234
)
##################################################################
######################## Deep Learning ###########################
##################################################################
# Deep learning is trained directly on the raw target (y_true), so its
# predictions need no exp() back-transform.
model_deep <- h2o.deeplearning(
model_id = 'model_deep',
training_frame=train,
validation_frame=valid,
x=X,
y=y_true,
hidden=c(64,64),
variable_importances=T,
epochs=1000000, ## hopefully converges earlier...
score_validation_samples=10000, ## sample the validation dataset (faster)
stopping_rounds=15,
stopping_metric="MSE", ## could be "MSE","logloss","r2"
stopping_tolerance=0.0001,
verbose = FALSE
)
# performance check
summary(model_deep)
# Clamp negative predictions to zero.
# NOTE(review): h2o.predict() is called twice here — scoring the frame twice;
# computing it once into a temporary would halve the work.
valid['y_pred_deep'] = ifelse((h2o.predict(model_deep, valid))>0,
(h2o.predict(model_deep, valid)),
0)
metrics(valid['y_pred_deep'], valid[y_true])
valid_dt$y_pred_deep = as.data.table(valid$y_pred_deep)
plotPred(valid_dt, group = 'GroupA-817', model = 'deep', activity = FALSE)
################# Tuning the parameters #########################
# Random-discrete search space for the deep-learning grid.
hyper_params = list(
activation=c("Rectifier","RectifierWithDropout"),
hidden=list(c(10,10), c(20,20),c(50,50), c(64,64), c(8,8,8), c(16,16,16), c(30,30,30),c(25,25,25,25)),
input_dropout_ratio=c(0,0.05,0.1,0.2),
l1=seq(0,0.5,0.01),
l2=seq(0,0.5,0.01),
rate = seq(0.001, 0.1, 0.001),
rate_annealing=c(1e-8,1e-7,1e-6)
)
search_criteria = list(
strategy = "RandomDiscrete",
max_runtime_secs = 18000,
max_models = 100,
seed = 1234,
stopping_rounds = 10,
stopping_metric = "MSE",
stopping_tolerance = 0.0001
)
grid_deep <- h2o.grid(
hyper_params = hyper_params,
search_criteria = search_criteria,
algorithm = "deeplearning",
x = X,
y = y_true,
training_frame = train,
validation_frame = valid,
max_runtime_secs = 5400,
stopping_rounds = 10,
stopping_tolerance = 0.0001,
stopping_metric = "MSE",
seed = 1234
)
#####################################################################
########################### Ensemble ################################
#####################################################################
#### Average ####
# Simple unweighted mean of the four model predictions.
a = (valid_dt$y_pred_xgb+valid_dt$y_pred_rf+valid_dt$y_pred_deep+valid_dt$y_pred_gbm)/4
valid_dt$y_pred_avg = a
metrics(valid_dt$y_pred_avg, valid_dt$TrueAnswer)
#### Stack #####
# Base-model predictions on the TRAINING frame for a stacked GLM.
# NOTE(review): rf/gbm are exp()-transformed but xgb is not, even though
# model_xgb was also trained on the log target — verify this asymmetry is
# intentional (it is consistent with the grid-based xgb used earlier).
train['y_pred_rf'] = exp(h2o.predict(model_rf_log, train))
train['y_pred_gbm'] = exp(h2o.predict(model_gbm, train))
train['y_pred_xgb'] = (h2o.predict(model_xgb, train))
train['y_pred_deep'] = (h2o.predict(model_deep, train))
# check
metrics(train['y_pred_rf'], train[y_true])
# model
# NOTE(review): position-based selection of the four prediction columns;
# names(train)[c('y_pred_rf', ...)] by name would be safer.
X_ensemble = names(train)[67:70]
model_glm_ensemble = h2o.glm(
model_id="model_glm_ensemble",
training_frame=train,
validation_frame=valid,
y=y_true,
x=X_ensemble,
family = 'gaussian'
)
# performance check
summary(model_glm_ensemble)
valid['y_pred_stack'] = (h2o.predict(model_glm_ensemble, valid))
metrics(valid['y_pred_stack'], valid[y_true])
#########################################################################
###################### Save and Load ####################################
#########################################################################
# Save the model
# NOTE(review): `model` here is whatever was last assigned to that name
# earlier in the session (the grid-best xgb) — fragile; save an explicitly
# named model instead.
path <- h2o.saveModel(model, path="models_server/separate_models/group_b", force=TRUE)
model <- h2o.import_mojo('/home/dsun/Baseline/models_server/mojo/DeepLearning_grid_1_AutoML_20190528_031324_model_54.zip')
summary(model)
# NOTE(review): this block scores group_a data inside the group_b script —
# confirm the path is intended. Also `verbose = T` should be spelled TRUE.
pred = h2o.mojo_predict_csv(
input_csv_path = 'data/group_a/valid.csv',
mojo_zip_path = 'models_server/mojo/DeepLearning_grid_1_AutoML_20190528_031324_model_54.zip',
verbose = T
)
valid['y_pred'] = as.h2o(pred)
metrics(valid['y_pred'], valid['TrueAnswer'])
valid_dt = as.data.table(valid)
# NOTE(review): renaming by column position (68) — fragile.
names(valid_dt)[68] = 'y_pred_mike'
plotPred(valid_dt, group = 'GroupA-841', model = 'mike', activity = FALSE)
model <- h2o.loadModel('models_server/group_a/model_xgb')
summary(model)
valid['y_pred_xgb'] = exp(h2o.predict(model, valid))
metrics(valid['y_pred_xgb'], valid['TrueAnswer'])
valid_dt = as.data.table(valid)
plotPred(valid_dt, group = 'GroupA-926', model = 'xgb', activity = FALSE)
| /code/separate_models/group_b.R | no_license | Dean-Sun/Baseline | R | false | false | 8,496 | r | library(tidyverse)
# ---- Setup (duplicated copy of the group_b script) ------------------------
# NOTE(review): this copy does not attach tidyverse itself (its library()
# line appears to have been swallowed by the dataset separator above) —
# confirm tidyverse is loaded before running this copy standalone.
library(data.table)
library(h2o)
# Project helpers; presumably define metrics() and plotPred() used below —
# TODO confirm against code/tools.R and code/plott.R.
source('code/tools.R')
source('code/plott.R')
# start h2o session
h2o.init(nthreads=-1, max_mem_size="52G")
train = h2o.importFile(path = 'data/group_b/train.csv')
valid = h2o.importFile(path = 'data/group_b/valid.csv')
# log the label
train['TrueAnswer_log'] = log(train['TrueAnswer'])
valid['TrueAnswer_log'] = log(valid['TrueAnswer'])
# set X and y
y_true = 'TrueAnswer'
y_log = 'TrueAnswer_log'
# NOTE(review): predictors are picked by column position; this silently breaks
# if the CSV layout changes. Selecting by name would be safer.
X = names(train)[c(3, 10:59, 63)]
##################################################################
######################## Random Forest ###########################
##################################################################
# Random forest fitted on the log-transformed target; predictions are mapped
# back to the original scale with exp() before scoring.
model_rf_log <- h2o.randomForest(
  training_frame = train,
  validation_frame = valid,
  y = y_log,
  x = X,
  ntrees = 200,
  max_depth = 20,
  min_rows = 1,
  stopping_rounds = 5,
  stopping_metric = 'MSE',
  stopping_tolerance = 0.0001
)
# performance check
summary(model_rf_log)
valid['y_pred_rf'] = exp(h2o.predict(model_rf_log, valid))
metrics(valid['y_pred_rf'], valid[y_true])
# plot
# BUG FIX: this frame was assigned to `valid_dt_b` while every later line in
# the script reads `valid_dt`, which was never created.
valid_dt = as.data.table(valid)
valid_dt$y_pred_rf = as.data.table(valid$y_pred_rf)
# NOTE(review): the model label says 'xgb' although this plots the RF
# prediction — probably a copy-paste slip; confirm what plotPred() expects
# before changing it.
plotPred(valid_dt, group = 'GroupA-818', model = 'xgb', activity = FALSE)
##################################################################
########################### GBM ##################################
##################################################################
# GBM on the log target, mirroring the random-forest section above.
model_gbm <- h2o.gbm(training_frame=train,
model_id = 'model_gbm_log',
validation_frame=valid,
x = X,
y = y_log,
ntrees = 50,
max_depth = 20,
stopping_rounds = 5,
stopping_metric = 'MSE',
stopping_tolerance = 0.001)
# performance check
summary(model_gbm)
# exp() undoes the log transform applied to the training label.
valid['y_pred_gbm'] = exp(h2o.predict(model_gbm, valid))
metrics(valid['y_pred_gbm'], valid[y_true])
valid_dt$y_pred_gbm = as.data.table(valid$y_pred_gbm)
plotPred(valid_dt, group = 'GroupA-804', model = 'gbm', activity = FALSE)
# Export the GBM as a MOJO and re-import it (round-trip sanity check).
path <- "/home/dsun/Baseline/models_server/mojo/test"
mojo_destination <- h2o.download_mojo(model = model_gbm, path = path)
imported_model <- h2o.import_mojo('/home/dsun/Baseline/models_server/mojo/test/model_gbm_log.zip')
##################################################################
######################## XGBoost #################################
##################################################################
model_xgb <- h2o.xgboost(training_frame=train,
validation_frame=valid,
x = X,
y = y_log,
ntrees = 300,
max_depth = 10,
stopping_rounds = 5,
stopping_metric = 'MSE',
stopping_tolerance = 0.001,
verbose = FALSE)
# performance check
summary(model_xgb)
# NOTE(review): `grid_xgb` is defined further down this script; this line
# only works when the tuning section below has already been run in the same
# session — run-order hazard in top-to-bottom execution.
model = h2o.getModel(grid_xgb@model_ids[[1]])
# No exp() here: the grid below is trained on y_true (raw scale), so its
# predictions are presumably already on the original scale — TODO confirm.
valid['y_pred_xgb'] = (h2o.predict(model, valid))
metrics(valid['y_pred_xgb'], valid[y_true])
valid_dt$y_pred_xgb = as.data.table(valid$y_pred_xgb)
plotPred(valid_dt, group = 'GroupA-813', model = 'xgb', activity = TRUE)
################# Tuning the parameters #########################
# Random-discrete search space for XGBoost hyper-parameters.
hyper_params = list(
ntrees = c(200,300,400),
max_depth = seq(8,13,1),
learn_rate = seq(0.01, 0.2, 0.01),
sample_rate = seq(0.2,1,0.01),
col_sample_rate = seq(0.2,1,0.01),
col_sample_rate_per_tree = seq(0.2,1,0.01),
min_rows = seq(0,500,50),
reg_lambda = seq(0,1,0.1),
reg_alpha = seq(0,1,0.1)
)
search_criteria = list(
strategy = "RandomDiscrete",
max_runtime_secs = 3600,
max_models = 50,
seed = 1234,
stopping_rounds = 3,
stopping_metric = "MSE",
stopping_tolerance = 0.001
)
# Grid search on the RAW target (y_true), unlike model_xgb above (y_log).
grid_xgb <- h2o.grid(
hyper_params = hyper_params,
search_criteria = search_criteria,
algorithm = "xgboost",
x = X,
y = y_true,
training_frame = train,
validation_frame = valid,
max_runtime_secs = 1800,
stopping_rounds = 5,
stopping_tolerance = 0.001,
stopping_metric = "MSE",
score_tree_interval = 10,
seed = 1234
)
##################################################################
######################## Deep Learning ###########################
##################################################################
# Deep learning is trained directly on the raw target (y_true), so its
# predictions need no exp() back-transform.
model_deep <- h2o.deeplearning(
model_id = 'model_deep',
training_frame=train,
validation_frame=valid,
x=X,
y=y_true,
hidden=c(64,64),
variable_importances=T,
epochs=1000000, ## hopefully converges earlier...
score_validation_samples=10000, ## sample the validation dataset (faster)
stopping_rounds=15,
stopping_metric="MSE", ## could be "MSE","logloss","r2"
stopping_tolerance=0.0001,
verbose = FALSE
)
# performance check
summary(model_deep)
# Clamp negative predictions to zero.
# NOTE(review): h2o.predict() is called twice here — scoring the frame twice;
# computing it once into a temporary would halve the work.
valid['y_pred_deep'] = ifelse((h2o.predict(model_deep, valid))>0,
(h2o.predict(model_deep, valid)),
0)
metrics(valid['y_pred_deep'], valid[y_true])
valid_dt$y_pred_deep = as.data.table(valid$y_pred_deep)
plotPred(valid_dt, group = 'GroupA-817', model = 'deep', activity = FALSE)
################# Tuning the parameters #########################
# Random-discrete search space for the deep-learning grid.
hyper_params = list(
activation=c("Rectifier","RectifierWithDropout"),
hidden=list(c(10,10), c(20,20),c(50,50), c(64,64), c(8,8,8), c(16,16,16), c(30,30,30),c(25,25,25,25)),
input_dropout_ratio=c(0,0.05,0.1,0.2),
l1=seq(0,0.5,0.01),
l2=seq(0,0.5,0.01),
rate = seq(0.001, 0.1, 0.001),
rate_annealing=c(1e-8,1e-7,1e-6)
)
search_criteria = list(
strategy = "RandomDiscrete",
max_runtime_secs = 18000,
max_models = 100,
seed = 1234,
stopping_rounds = 10,
stopping_metric = "MSE",
stopping_tolerance = 0.0001
)
grid_deep <- h2o.grid(
hyper_params = hyper_params,
search_criteria = search_criteria,
algorithm = "deeplearning",
x = X,
y = y_true,
training_frame = train,
validation_frame = valid,
max_runtime_secs = 5400,
stopping_rounds = 10,
stopping_tolerance = 0.0001,
stopping_metric = "MSE",
seed = 1234
)
#####################################################################
########################### Ensemble ################################
#####################################################################
#### Average ####
# Simple unweighted mean of the four model predictions.
a = (valid_dt$y_pred_xgb+valid_dt$y_pred_rf+valid_dt$y_pred_deep+valid_dt$y_pred_gbm)/4
valid_dt$y_pred_avg = a
metrics(valid_dt$y_pred_avg, valid_dt$TrueAnswer)
#### Stack #####
# Base-model predictions on the TRAINING frame for a stacked GLM.
# NOTE(review): rf/gbm are exp()-transformed but xgb is not, even though
# model_xgb was also trained on the log target — verify this asymmetry is
# intentional (it is consistent with the grid-based xgb used earlier).
train['y_pred_rf'] = exp(h2o.predict(model_rf_log, train))
train['y_pred_gbm'] = exp(h2o.predict(model_gbm, train))
train['y_pred_xgb'] = (h2o.predict(model_xgb, train))
train['y_pred_deep'] = (h2o.predict(model_deep, train))
# check
metrics(train['y_pred_rf'], train[y_true])
# model
# NOTE(review): position-based selection of the four prediction columns;
# selecting by name would be safer.
X_ensemble = names(train)[67:70]
model_glm_ensemble = h2o.glm(
model_id="model_glm_ensemble",
training_frame=train,
validation_frame=valid,
y=y_true,
x=X_ensemble,
family = 'gaussian'
)
# performance check
summary(model_glm_ensemble)
valid['y_pred_stack'] = (h2o.predict(model_glm_ensemble, valid))
metrics(valid['y_pred_stack'], valid[y_true])
#########################################################################
###################### Save and Load ####################################
#########################################################################
# Save the model
# NOTE(review): `model` here is whatever was last assigned to that name
# earlier in the session (the grid-best xgb) — fragile; save an explicitly
# named model instead.
path <- h2o.saveModel(model, path="models_server/separate_models/group_b", force=TRUE)
model <- h2o.import_mojo('/home/dsun/Baseline/models_server/mojo/DeepLearning_grid_1_AutoML_20190528_031324_model_54.zip')
summary(model)
# NOTE(review): this block scores group_a data inside the group_b script —
# confirm the path is intended. Also `verbose = T` should be spelled TRUE.
pred = h2o.mojo_predict_csv(
input_csv_path = 'data/group_a/valid.csv',
mojo_zip_path = 'models_server/mojo/DeepLearning_grid_1_AutoML_20190528_031324_model_54.zip',
verbose = T
)
valid['y_pred'] = as.h2o(pred)
metrics(valid['y_pred'], valid['TrueAnswer'])
valid_dt = as.data.table(valid)
# NOTE(review): renaming by column position (68) — fragile.
names(valid_dt)[68] = 'y_pred_mike'
plotPred(valid_dt, group = 'GroupA-841', model = 'mike', activity = FALSE)
model <- h2o.loadModel('models_server/group_a/model_xgb')
summary(model)
valid['y_pred_xgb'] = exp(h2o.predict(model, valid))
metrics(valid['y_pred_xgb'], valid['TrueAnswer'])
valid_dt = as.data.table(valid)
plotPred(valid_dt, group = 'GroupA-926', model = 'xgb', activity = FALSE)
|
#' Access to toy examples bundled in this package
#'
#' Returns the paths to all available toy examples, or to a specific toy
#' example. Load via [readRDS()].
#'
#' @param name Name of the example, default: return all
#' @return A named vector of file system paths.
#'
#' @export
#' @importFrom stats setNames
#' @examples
#' toy_example()
#'
#' # Load example with results from Ye et al. (2009)
#' readRDS(toy_example("Tiny"))
toy_example <- function(name = NULL) {
  root <- system.file("extdata", package = "mlfit")
  if (is.null(name)) {
    # Robustness: only list .rds files, so a stray file in extdata cannot
    # produce a bogus example name (and a failing normalizePath() below).
    name <- dir(root, pattern = "[.]rds$")
    name <- gsub("[.]rds$", "", name)
  }
  # mustWork = TRUE makes an unknown example name fail loudly here rather
  # than returning a path to a nonexistent file.
  path <- normalizePath(file.path(root, paste0(name, ".rds")), mustWork = TRUE)
  setNames(path, name)
}
| /R/toy.R | no_license | cran/mlfit | R | false | false | 739 | r | #' Access to toy examples bundled in this package
#'
#' Lists the file-system paths of every toy example shipped with the
#' package, or resolves the path of one named example. The returned files
#' are RDS archives; read them with [readRDS()].
#'
#' @param name Name of the example; `NULL` (the default) returns all.
#' @return A named vector of file system paths.
#'
#' @export
#' @importFrom stats setNames
#' @examples
#' toy_example()
#'
#' # Load example with results from Ye et al. (2009)
#' readRDS(toy_example("Tiny"))
toy_example <- function(name = NULL) {
  example_dir <- system.file("extdata", package = "mlfit")
  # Without an explicit name, derive the example names from the bundled files.
  if (is.null(name)) {
    bundled <- dir(example_dir)
    name <- sub("[.]rds$", "", bundled)
  }
  rds_paths <- file.path(example_dir, paste0(name, ".rds"))
  # mustWork = TRUE: an unknown name errors here instead of yielding a
  # dangling path.
  setNames(normalizePath(rds_paths, mustWork = TRUE), name)
}
|
# Optional dependency bootstrap, kept disabled:
#if(!require("magick"))
#{
# install.packages("magick",repos= "https://mirrors.nics.utk.edu/cran/",
# dependencies=TRUE,INSTALL_opts = c('--no-lock'));
#}
#library("magick");
# Idiomatic R: `<-` for assignment, no trailing semicolons.
x <- 4 + 3
y <- FALSE
z <- 32
print("check this")
| /Hwk1/test.R | no_license | apangia/MATH988Test | R | false | false | 237 | r | #if(!require("magick"))
#{
# install.packages("magick",repos= "https://mirrors.nics.utk.edu/cran/",
# dependencies=TRUE,INSTALL_opts = c('--no-lock'));
#}
#library("magick");
# Idiomatic R: `<-` for assignment, no trailing semicolons.
x <- 4 + 3
y <- FALSE
z <- 32
print("check this")
|
# --------------------------------------------------------------------------- #
#                                                                             #
#                 METADATA QUERY MODULE                                       #
#                 & VARIABLE DICTIONARY GENERATION                            #
#                                                                             #
#                 SHINY APP                                                   #
#                                                                             #
# --------------------------------------------------------------------------- #
# Load packages
library(shiny)
library(RPostgres)
library(DBI)
library(markdown)
library(sqldf)
library(tidyr)
library(ggplot2)
library(dplyr)
library(forcats)
library(openxlsx)
library(DT)
library(shinythemes)
library(shinyjs)
library(RColorBrewer)
library(stringr)
# Read-only connection to the PostGIS database.
# SECURITY NOTE(review): database credentials are hard-coded in source —
# move them to environment variables or a config file outside version control.
# Also, neither connection is ever dbDisconnect()ed.
db <- 'bdsig'
host_db <- 'postsig'
db_port <- '5434'
db_user <- 'sig_consult'
db_password <- 'lecture'
con <- dbConnect(RPostgres::Postgres(), dbname = db, host=host_db,
port=db_port, user=db_user, password=db_password)
# Read-write connection (used for COMMENT ON statements).
db <- 'bdsig'
host_db <- 'postsig'
db_port <- '5434'
db_user <- 'sig'
db_password <- 'tetelle'
writing <- dbConnect(RPostgres::Postgres(), dbname = db, host=host_db,
port=db_port, user=db_user, password=db_password)
# ------------------------ SHINY APP CREATION ------------------------------- #
# --- UI SECTION ------------------------------------
# Top-level UI: a navbar with two tabs — (1) browse/query column metadata and
# export a variable dictionary, (2) bulk-import column labels from a file.
# NOTE(review): some user-facing French strings contain typos ("Module de
# d'interrogation", "Metre à jour", "n'exitant") — left untouched here since
# they are runtime strings, but worth fixing.
ui <- navbarPage("SIG - Méta-Données",windowTitle = "MétaDonnées SIG", collapsible = TRUE,
theme = shinytheme("cosmo"),
# First top-level tab
tabPanel(
"Interroger les méta-données",
# Title of panel #1
titlePanel("Module de d'interrogation des variables en base"),hr(),
# Two-pane layout (sidebar + main panel)
sidebarLayout(
# Left sidebar
sidebarPanel(
fluidRow(
column(
width = 6,
# All schemas, or a subset
radioButtons(
inputId = "select_schema",
label = "Schéma de données",
inline = FALSE,
choices = c("Tous les schémas", "Un ou plusieurs schéma(s)")
),
# Schema picker (shown only when a subset is requested)
conditionalPanel(
condition = "input.select_schema == 'Un ou plusieurs schéma(s)'",
selectInput(
"schema_bdd",
label = "Sélection de schéma(s) de données",
multiple = TRUE,
choices = dbGetQuery(con, "SELECT DISTINCT table_schema FROM information_schema.columns")
)
),
),
column(
width = 6,
# All tables of the schema, or a subset
conditionalPanel(
condition = "input.select_schema == 'Un ou plusieurs schéma(s)'",
radioButtons(
"select_table",
label = "Table de données",
choices = c("Toutes les tables du schéma", "Une ou plusieurs table(s)")
)
),
# Conditional table picker; its choices are narrowed to the selected
# schemas server-side (see the observe() in the server function).
conditionalPanel(
condition = "input.select_table == 'Une ou plusieurs table(s)'",
selectInput(
"table_bdd",
label = "Sélection de table(s) de données",
multiple = TRUE,
choices = dbGetQuery(con,
"SELECT DISTINCT table_name FROM information_schema.columns"
)
)
),
)
),
# Conditional variable picker (from the chosen schemas/tables) plus the
# .xlsx variable-dictionary export button.
hr(),
tags$b("Générer un dictionnaire des variables"),
br(),
em("Pour permettre la sélection choisir au moins un schéma et une table."),
br(),
conditionalPanel(condition = "input.select_table == 'Une ou plusieurs table(s)'",
br(),
em("Les variables sont choisies parmi les tables et schémas sélectionnés. Elles seront inclues dans le dictionnaire de variables généré au format .xlsx et téléchargeable ci-dessous."),
br(),br(),
selectInput("var_bdd", label = "Variables d'intérêt", choices = dbGetQuery(writing, "SELECT DISTINCT column_name FROM information_schema.columns"), multiple = TRUE),
downloadButton("download_dic_var", label = "Générer le dictionnaire (.xlsx)"))
),
# Main panel
mainPanel(
# Display the selected table's comment
fluidRow(
tags$b("Commentaires renseignés pour cette table de données : "),
br(),
tags$em("Affiche <NA> si aucun commentaire n'a été renseigné"),
br()
),
fluidRow(width = 8, offset = 2, align = "center",
h4(textOutput("com_table")),
hr()
),
# Attribute (column metadata) table view
dataTableOutput("visualisation")
)
)
),
tabPanel(
"Aide à la saisie des méta-données",
# Panel title
titlePanel("Renseigner des libellés depuis un fichier Excel"), hr(),
# Layout with a left-hand menu
sidebarLayout(
# Left menu
sidebarPanel(
# Section title
tags$h4(tags$b("Importer un fichier de libellés")),
hr(),
# Input file type
radioButtons("type_file", label = "Type de fichier", choices = c('Fichier Excel', 'Autre fichier')),
# General parameters
tags$b("Libellé des colonnes"),
checkboxInput(inputId = 'header', label = 'Libellés en première ligne', value = FALSE),
# Extra parameters for non-Excel (delimited text) files
conditionalPanel(condition = "input.type_file == 'Autre fichier'",radioButtons(inputId = 'sep', label = 'Séparateur de données', choices = c("Virgule"=',',"Point-Virgule"=';',"Tabulation"='\t', "Espace"=''), selected = ';')),
# Upload widget - EXCEL FILE
# NOTE(review): both fileInput()s below share the id "file"; duplicate
# input ids are invalid in Shiny even when shown conditionally — confirm
# this works as intended or give them distinct ids.
fluidRow(
column(width = 9,
conditionalPanel(condition = "input.type_file == 'Fichier Excel'", fileInput("file", ""))
),
column(width = 3,
conditionalPanel(condition = "input.type_file == 'Fichier Excel'", numericInput("sheet_num", label = "N° feuille", value = 1, min = 1)),
)
),
# Upload widget - other (delimited text) file
conditionalPanel(condition = "input.type_file == 'Autre fichier'", fileInput("file", "")),
# Column mapping: which file columns hold the variable names and labels,
# and which schema/table in the database to update.
hr(),
tags$h4(tags$b("Ajout dans la table d'attributs")), hr(),
column(width = 5,
selectInput("var_id", label = "Colonne des variables", choices = "", multiple = FALSE),
selectInput("label_id", label = "Colonne des libellés", choices = "", multiple = FALSE)
),
column(width = 2),
column(width = 5,
selectInput("schema_id", label = "Schéma de référence", choices = dbGetQuery(con, "SELECT DISTINCT table_schema FROM information_schema.columns"), multiple = FALSE),
selectInput("table_id", label = "Table de référence", choices = "", multiple = FALSE)
),
hr(),
br(),
actionButton("add", label = "Metre à jour la table d'attributs"),
conditionalPanel(
condition = "input.var_id != ''",
hr(),
tags$h4(tags$b("Détection des problèmes de correspondance")),
em("Variable issue du fichier Excel n'exitant pas dans la base de données du SIG. Si aucune ligne n'est présentée, toutes les variables du fichier Excel existent dans la base de données."),
DTOutput("tab_verif")
)
),
# Main (right-hand) panel
mainPanel(
# Preview of the imported file
tableOutput("table")
)
)
)
)
# --- SERVER SECTION -------------------------------
server <- function(input, output, session){
# SELECTION DE TABLE CONDITIONNELLEMENT au schรฉma choisi
observe({
x <- dbGetQuery(writing, "SELECT table_name, table_schema from information_schema.columns") %>%
filter(table_schema %in% (input$schema_bdd))
# Can also set the label and select items
updateSelectInput(session, "table_bdd",
label = "Sรฉlection de table(s) de donnรฉes",
choices = x$table_name
)
})
# SELECTION DES VARIABLES CONDITIONNELLEMENT AUX SCHEMAS ET TABLES CHOISI(E)S
observe({
x <- dbGetQuery(writing, "SELECT table_name, table_schema, column_name from information_schema.columns") %>%
filter(table_schema %in% (input$schema_bdd), table_name %in% (input$table_bdd))
# Can also set the label and select items
updateSelectInput(session, "var_bdd",
label = "Variables d'intรฉrรชt",
choices = x$column_name
)
})
# Crรฉation de la table d'attributs
tab_attrib <- reactive({
# Rรฉcupรฉration de la table d'attributs
temp <- dbGetQuery(writing, "SELECT col_description((table_schema||'.'||table_name)::regclass::oid, ordinal_position) as column_comment, * from information_schema.columns")
# Sรฉlection de schรฉma (si option choisie)
if(input$select_schema == "Un ou plusieurs schรฉma(s)"){
temp <- temp %>% filter(table_schema %in% (input$schema_bdd))
# Selection de table (si option chosie)
if(input$select_table == "Une ou plusieurs table(s)"){
temp <- temp %>% filter(table_name %in% (input$table_bdd))
}
}
# Libellรฉ plus propres + ordre des variables
temp %>%
select(column_name, column_comment, table_name, table_schema) %>%
rename("Schรฉma" = table_schema, "Table" = table_name, "Variable (nom)" = column_name, "libellรฉ / Descrptif court" = column_comment)
})
# Liste de tables pour le schรฉma sรฉlectionnรฉ
output$visualisation <- renderDataTable(
DT::datatable(
tab_attrib(),
options = list(pageLength = 15, scrollX = TRUE),
filter = "top",
rownames = FALSE)
)
# Commentaire de la table (premiรจre table du premier schรฉma)
com_obj <- reactive({
if(length(input$schema_bdd) == 1 & length(input$table_bdd) == 1){
# Crรฉation requรชte pour la table concernรฉe
req_com_table <- paste0("SELECT obj_description('", input$schema_bdd, ".", input$table_bdd, "'::regclass)")
# Rรฉcupรฉration du champ commentaire sous forme de table
com <- dbGetQuery(con, req_com_table[1])
# Sortie du texte
# cat(com[1,1])
com[1,1]
}else{
if(length(input$schema_bdd) < 1 | length(input$table_bdd) < 1){
com <- "--- Sรฉlectionner un schรฉma et une table de donnรฉes ---"
}else{
com <- "--- Ne sรฉlectionner qu'un seul schรฉma et une seule table pour afficher le commentaire ---"
}
}
})
# Sortie du texte
output$com_table <- renderText(com_obj())
# Sรฉlecteur des variables d'intรฉrรชt (pour export)
tab_dictionnaire <- reactive({
# Ensemble des attributs + sรฉlection des schรฉma et tables d'intรฉrรชt
dbGetQuery(writing, "SELECT col_description((table_schema||'.'||table_name)::regclass::oid, ordinal_position) as column_comment, * from information_schema.columns") %>%
filter(table_schema %in% (input$schema_bdd), table_name %in% (input$table_bdd), column_name %in% (input$var_bdd)) %>%
select(column_name, column_comment, table_name, table_schema) %>%
rename("Variable" = column_name, "Libellรฉ / Descriptif" = column_comment, "Table de donnรฉes" = table_name, "Shรฉma de donnรฉes" = table_schema)
})
# Sortie du dictionnaire de variables
output$download_dic_var <- downloadHandler(
filename = paste("Dictionnaire_variables", Sys.Date(),".xlsx", sep=""),
content = function(file) {
write.xlsx(tab_dictionnaire(), file, rowNames = FALSE, sep=";")
}
)
# IMPORT FICHIER - Transformation du fichier en jeu de donnรฉes
data <- reactive({
file1 <- input$file
if(is.null(file1)){return()
}else{
if(input$type_file == 'Fichier Excel'){
# if(is.null(input$sheet_names)){return()}else{read.xlsx(xlsxFile = file1$datapath, sheet = input$sheet_names, colNames = input$header)}
read.xlsx(xlsxFile = file1$datapath, sheet = input$sheet_num, colNames = input$header)
}else{
read.table(file=file1$datapath, sep=input$sep, header = input$header, stringsAsFactors = input$stringAsFactors)
}
}
})
# IMPORT FICHIER - Affichage table de donnรฉes
output$table <- renderTable({
if(is.null(data())){return ()}
data()
})
# IMPORT - Sรฉlectionner le libellรฉ de colonne "variable"
observe({
x <- data()
y <- names(x)
# Can also set the label and select items
updateSelectInput(session, "var_id",
label = "Colonne des variables",
choices = y
)
})
# IMPORT - Sรฉlectionner le libellรฉ de colonne "variable"
observe({
x <- data()
y <- names(x)
# Can also set the label and select items
updateSelectInput(session, "label_id",
label = "Colonne des libellรฉs",
choices = y
)
})
# IMPORT - Choix de la table (selon schรฉma sรฉlectionnรฉ)
observe({
x <- dbGetQuery(writing, "SELECT table_name, table_schema from information_schema.columns") %>%
filter(table_schema %in% (input$schema_id))
# Can also set the label and select items
updateSelectInput(session, "table_id",
label = "Table de rรฉfรฉrence",
choices = x$table_name
)
})
# Lancement des requรชtes SQL ร l'activation du boutton
observeEvent(input$add, {
# Rรฉcupรฉration de la table de donnรฉes
data <- data()
# Liste des variables en base
liste_var_bd <- data.frame(var = names(dbGetQuery(con, paste0("SELECT * FROM ", input$schema_id, ".", input$table_id))))
# Crรฉation d'une table avec deux variables (variable et libellรฉ)
temp <- data %>% select(input$var_id, input$label_id)
# Remplacement des guillemets
temp[,2] <- str_replace_all(temp[,2], "'", "-")
temp[,2] <- str_replace_all(temp[,2], '"', "-")
# Crรฉation de la requรชte
temp$requete <- paste0("COMMENT ON COLUMN ", input$schema_id, ".", input$table_id, ".", temp[,1], " IS '", temp[,2], "'")
# Conservation uniquement des variables existant en BDD
temp$var <- temp[,1]
temp2 <- inner_join(temp, liste_var_bd, by = "var")
# Initialisation du compteur
i <- 1
# Lancement de la boucle sur toutes les lignes
while(i <= nrow(temp2)){
dbGetQuery(writing, temp2[i,"requete"])
i <- i+1
}
# affichage notification
showNotification("Intรฉgration des mรฉta-donnรฉes rรฉalisรฉe", type = "message")
})
output$tab_verif <- renderDataTable(options = list(paging = FALSE, searching = FALSE), {
# Rรฉcupรฉration de la table de donnรฉes
data <- data()
# LIste des variables en base
liste_var_bd <- data.frame(var = names(dbGetQuery(con, paste0("SELECT * FROM ", input$schema_id, ".", input$table_id))))
liste_var_bd$dispo <- "Existe en BDSIG"
# Crรฉation d'une table avec deux variables (variable et libellรฉ)
temp <- data %>%
select(input$var_id, input$label_id)
temp$var <- temp[,1]
temp$indispo <- "N'existe pas dans la table choisie de la BDSIG (sera retirรฉe lors de l'intรฉgration des libellรฉs)"
left_join(temp, liste_var_bd, by = "var") %>%
filter(is.na(dispo)) %>%
select(var, indispo) %>%
rename(Variable = var, "Disponibilitรฉ en BDD" = indispo)
})
}
# --- CREATION APP -------------------------------
shinyApp(ui = ui, server = server)
| /App.R | no_license | Clement-Bader-bzh/ShinyApp---MetaDataSIG | R | false | false | 19,749 | r | # --------------------------------------------------------------------------- #
# #
# MODULE DE POUR INTERROGATION DES META-DONNEES #
# & GENERATION DICTIONNAIRE DE VARIABLE #
# #
# SHINY APP #
# #
# --------------------------------------------------------------------------- #
# Chargement des packages
library(shiny)
library(RPostgres)
library(DBI)
library(markdown)
library(sqldf)
library(tidyr)
library(ggplot2)
library(dplyr)
library(forcats)
library(openxlsx)
library(DT)
library(shinythemes)
library(shinyjs)
library(RColorBrewer)
library(stringr)
# Read-only connection to the PostgreSQL/PostGIS metadata database.
# NOTE(review): credentials are hard-coded in plain text in the source file;
# consider moving them to environment variables (Sys.getenv) or a config file
# kept out of version control.
db <- 'bdsig'
host_db <- 'postsig'
db_port <- '5434'
db_user <- 'sig_consult'
db_password <- 'lecture'
con <- dbConnect(RPostgres::Postgres(), dbname = db, host=host_db,
port=db_port, user=db_user, password=db_password)
# Read/write connection to the same server under a privileged account.
# `db`, `host_db` and `db_port` are reassigned with identical values; only the
# user/password differ. Same plain-text-credentials caveat as above.
db <- 'bdsig'
host_db <- 'postsig'
db_port <- '5434'
db_user <- 'sig'
db_password <- 'tetelle'
writing <- dbConnect(RPostgres::Postgres(), dbname = db, host=host_db,
port=db_port, user=db_user, password=db_password)
# ------------------------ CREATION SHINY APP ------------------------------- #
# --- PARTIE UI ------------------------------------
# UI definition: a two-tab navbar application.
#   Tab 1 ("Interroger les mรฉta-donnรฉes"): browse schemas/tables/columns and
#     export a variable dictionary as .xlsx.
#   Tab 2 ("Aide ร la saisie des mรฉta-donnรฉes"): import an Excel/CSV file of
#     variable labels and push them into the database as column comments.
# NOTE(review): `choices = dbGetQuery(...)` runs at UI build time, so the
# selector contents are frozen when the app process starts.
ui <- navbarPage("SIG - Mรฉta-Donnรฉes",windowTitle = "MรฉtaDonnรฉes SIG", collapsible = TRUE,
theme = shinytheme("cosmo"),
# First top-level tab: query the metadata
tabPanel(
"Interroger les mรฉta-donnรฉes",
# Panel title
titlePanel("Module de d'interrogation des variables en base"),hr(),
# Two-pane layout (sidebar + main panel)
sidebarLayout(
# Left-hand sidebar
sidebarPanel(
fluidRow(
column(
width = 6,
# All schemas vs. an explicit subset
radioButtons(
inputId = "select_schema",
label = "Schรฉma de donnรฉes",
inline = FALSE,
choices = c("Tous les schรฉmas", "Un ou plusieurs schรฉma(s)")
),
# Schema picker, shown only when a subset is requested
conditionalPanel(
condition = "input.select_schema == 'Un ou plusieurs schรฉma(s)'",
selectInput(
"schema_bdd",
label = "Sรฉlection de schรฉma(s) de donnรฉes",
multiple = TRUE,
choices = dbGetQuery(con, "SELECT DISTINCT table_schema FROM information_schema.columns")
)
),
),
column(
width = 6,
# All tables of the schema vs. an explicit subset
conditionalPanel(
condition = "input.select_schema == 'Un ou plusieurs schรฉma(s)'",
radioButtons(
"select_table",
label = "Table de donnรฉes",
choices = c("Toutes les tables du schรฉma", "Une ou plusieurs table(s)")
)
),
# Conditional table picker; its choices are narrowed server-side to the
# selected schema(s) (see the observe() in server)
conditionalPanel(
condition = "input.select_table == 'Une ou plusieurs table(s)'",
selectInput(
"table_bdd",
label = "Sรฉlection de table(s) de donnรฉes",
multiple = TRUE,
choices = dbGetQuery(con,
"SELECT DISTINCT table_name FROM information_schema.columns"
)
)
),
)
),
# Conditional variable picker among the chosen schemas/tables, plus the
# variable-dictionary export button
hr(),
tags$b("Gรฉnรฉrer un dictionnaire des variables"),
br(),
em("Pour permettre la sรฉlection choisir au moins un schรฉma et une table."),
br(),
conditionalPanel(condition = "input.select_table == 'Une ou plusieurs table(s)'",
br(),
em("Les variables sont choisies parmi les tables et schรฉmas sรฉlectionnรฉs. Elles seront inclues dans le dictionnaire de variables gรฉnรฉrรฉ au format .xlsx et tรฉlรฉchargeable ci-dessous."),
br(),br(),
selectInput("var_bdd", label = "Variables d'intรฉrรชt", choices = dbGetQuery(writing, "SELECT DISTINCT column_name FROM information_schema.columns"), multiple = TRUE),
downloadButton("download_dic_var", label = "Gรฉnรฉrer le dictionnaire (.xlsx)"))
),
# Main panel
mainPanel(
# Display of the selected table's comment
fluidRow(
tags$b("Commentaires renseignรฉs pour cette table de donnรฉes : "),
br(),
tags$em("Affiche <NA> si aucun commentaire n'a รฉtรฉ renseignรฉ"),
br()
),
fluidRow(width = 8, offset = 2, align = "center",
h4(textOutput("com_table")),
hr()
),
# Attribute browser (rendered server-side with DT)
dataTableOutput("visualisation")
)
)
),
# Second top-level tab: metadata entry helper
tabPanel(
"Aide ร la saisie des mรฉta-donnรฉes",
# Window title
titlePanel("Renseigner des libellรฉs depuis un fichier Excel"), hr(),
# Layout with a left-hand menu
sidebarLayout(
# Left-hand menu
sidebarPanel(
# Section title
tags$h4(tags$b("Importer un fichier de libellรฉs")),
hr(),
# Kind of file being imported
radioButtons("type_file", label = "Type de fichier", choices = c('Fichier Excel', 'Autre fichier')),
# General parsing options
tags$b("Libellรฉ des colonnes"),
checkboxInput(inputId = 'header', label = 'Libellรฉs en premiรจre ligne', value = FALSE),
# Delimiter options (delimited text files only)
conditionalPanel(condition = "input.type_file == 'Autre fichier'",radioButtons(inputId = 'sep', label = 'Sรฉparateur de donnรฉes', choices = c("Virgule"=',',"Point-Virgule"=';',"Tabulation"='\t', "Espace"=''), selected = ';')),
# Upload widget - EXCEL FILE
fluidRow(
column(width = 9,
conditionalPanel(condition = "input.type_file == 'Fichier Excel'", fileInput("file", ""))
),
column(width = 3,
conditionalPanel(condition = "input.type_file == 'Fichier Excel'", numericInput("sheet_num", label = "Nยฐ feuille", value = 1, min = 1)),
)
),
# Upload widget - other file types.
# NOTE(review): inputId "file" is reused from the Excel branch above;
# duplicate input IDs are invalid in Shiny - confirm and rename one of them.
conditionalPanel(condition = "input.type_file == 'Autre fichier'", fileInput("file", "")),
# Column selectors for the variable / label mapping
hr(),
tags$h4(tags$b("Ajout dans la table d'attributs")), hr(),
column(width = 5,
selectInput("var_id", label = "Colonne des variables", choices = "", multiple = FALSE),
selectInput("label_id", label = "Colonne des libellรฉs", choices = "", multiple = FALSE)
),
column(width = 2),
column(width = 5,
selectInput("schema_id", label = "Schรฉma de rรฉfรฉrence", choices = dbGetQuery(con, "SELECT DISTINCT table_schema FROM information_schema.columns"), multiple = FALSE),
selectInput("table_id", label = "Table de rรฉfรฉrence", choices = "", multiple = FALSE)
),
hr(),
br(),
actionButton("add", label = "Metre ร jour la table d'attributs"),
# Mismatch report: variables of the file absent from the target table
conditionalPanel(
condition = "input.var_id != ''",
hr(),
tags$h4(tags$b("Dรฉtection des problรจmes de correspondance")),
em("Variable issue du fichier Excel n'exitant pas dans la base de donnรฉes du SIG. Si aucune ligne n'est prรฉsentรฉe, toutes les variables du fichier Excel existent dans la base de donnรฉes."),
DTOutput("tab_verif")
)
),
# Main panel (right)
mainPanel(
# Preview of the imported table
tableOutput("table")
)
)
)
)
# --- PARTIE SERVER -------------------------------
# Server logic for the two tabs:
#  * metadata browser: keeps the table/variable selectors in sync with the
#    chosen schemas, renders the attribute table and table comment, and
#    builds the downloadable variable dictionary;
#  * metadata entry: parses the uploaded file and writes one
#    COMMENT ON COLUMN statement per variable that exists in the target table.
server <- function(input, output, session){
  # Narrow the table selector to the selected schema(s)
  observe({
    x <- dbGetQuery(writing, "SELECT table_name, table_schema from information_schema.columns") %>%
      filter(table_schema %in% (input$schema_bdd))
    updateSelectInput(session, "table_bdd",
                      label = "Sรฉlection de table(s) de donnรฉes",
                      choices = x$table_name)
  })
  # Narrow the variable selector to the selected schema(s) and table(s)
  observe({
    x <- dbGetQuery(writing, "SELECT table_name, table_schema, column_name from information_schema.columns") %>%
      filter(table_schema %in% (input$schema_bdd), table_name %in% (input$table_bdd))
    updateSelectInput(session, "var_bdd",
                      label = "Variables d'intรฉrรชt",
                      choices = x$column_name)
  })
  # Attribute table (column name, comment, table, schema), filtered by the
  # schema/table options chosen in the sidebar.
  tab_attrib <- reactive({
    # Every column of the database together with its comment
    temp <- dbGetQuery(writing, "SELECT col_description((table_schema||'.'||table_name)::regclass::oid, ordinal_position) as column_comment, * from information_schema.columns")
    # Restrict to the chosen schema(s), then table(s), when requested
    if(input$select_schema == "Un ou plusieurs schรฉma(s)"){
      temp <- temp %>% filter(table_schema %in% (input$schema_bdd))
      if(input$select_table == "Une ou plusieurs table(s)"){
        temp <- temp %>% filter(table_name %in% (input$table_bdd))
      }
    }
    # Friendlier labels + column order
    temp %>%
      select(column_name, column_comment, table_name, table_schema) %>%
      rename("Schรฉma" = table_schema, "Table" = table_name, "Variable (nom)" = column_name, "libellรฉ / Descrptif court" = column_comment)
  })
  # Attribute browser
  output$visualisation <- renderDataTable(
    DT::datatable(
      tab_attrib(),
      options = list(pageLength = 15, scrollX = TRUE),
      filter = "top",
      rownames = FALSE)
  )
  # Comment of the selected table. Only defined when exactly one schema and
  # one table are selected; otherwise a guidance message is returned.
  # (Scalar `&&`/`||` replace the element-wise `&`/`|` of the original.)
  com_obj <- reactive({
    if(length(input$schema_bdd) == 1 && length(input$table_bdd) == 1){
      # Fetch the table-level comment.
      # NOTE(review): schema/table names are interpolated into the SQL string;
      # prefer DBI::dbQuoteIdentifier() to rule out quoting/injection issues.
      req_com_table <- paste0("SELECT obj_description('", input$schema_bdd, ".", input$table_bdd, "'::regclass)")
      com <- dbGetQuery(con, req_com_table[1])
      com[1,1]
    }else{
      if(length(input$schema_bdd) < 1 || length(input$table_bdd) < 1){
        "--- Sรฉlectionner un schรฉma et une table de donnรฉes ---"
      }else{
        "--- Ne sรฉlectionner qu'un seul schรฉma et une seule table pour afficher le commentaire ---"
      }
    }
  })
  output$com_table <- renderText(com_obj())
  # Variable dictionary restricted to the selected schemas/tables/variables
  tab_dictionnaire <- reactive({
    dbGetQuery(writing, "SELECT col_description((table_schema||'.'||table_name)::regclass::oid, ordinal_position) as column_comment, * from information_schema.columns") %>%
      filter(table_schema %in% (input$schema_bdd), table_name %in% (input$table_bdd), column_name %in% (input$var_bdd)) %>%
      select(column_name, column_comment, table_name, table_schema) %>%
      rename("Variable" = column_name, "Libellรฉ / Descriptif" = column_comment, "Table de donnรฉes" = table_name, "Shรฉma de donnรฉes" = table_schema)
  })
  # Download handler for the variable dictionary.
  # BUGFIX: `filename` must be a function; a plain string is evaluated once
  # when the handler is created, freezing Sys.Date() at app-launch time.
  # (The stray `sep=";"` argument was dropped: it is meaningless for xlsx.)
  output$download_dic_var <- downloadHandler(
    filename = function() paste("Dictionnaire_variables", Sys.Date(), ".xlsx", sep=""),
    content = function(file) {
      write.xlsx(tab_dictionnaire(), file, rowNames = FALSE)
    }
  )
  # FILE IMPORT - parse the uploaded file into a data frame
  data <- reactive({
    file1 <- input$file
    if(is.null(file1)){
      return()
    }
    if(input$type_file == 'Fichier Excel'){
      read.xlsx(xlsxFile = file1$datapath, sheet = input$sheet_num, colNames = input$header)
    }else{
      # isTRUE() guards against input$stringAsFactors being NULL: no control
      # with that id exists in the UI, so the original passed NULL here.
      read.table(file=file1$datapath, sep=input$sep, header = input$header, stringsAsFactors = isTRUE(input$stringAsFactors))
    }
  })
  # FILE IMPORT - preview of the uploaded table
  output$table <- renderTable({
    if(is.null(data())){return ()}
    data()
  })
  # IMPORT - populate the "variable" column selector from the file's columns
  observe({
    updateSelectInput(session, "var_id",
                      label = "Colonne des variables",
                      choices = names(data()))
  })
  # IMPORT - populate the "label" column selector from the file's columns
  observe({
    updateSelectInput(session, "label_id",
                      label = "Colonne des libellรฉs",
                      choices = names(data()))
  })
  # IMPORT - populate the target-table selector from the selected schema
  observe({
    x <- dbGetQuery(writing, "SELECT table_name, table_schema from information_schema.columns") %>%
      filter(table_schema %in% (input$schema_id))
    updateSelectInput(session, "table_id",
                      label = "Table de rรฉfรฉrence",
                      choices = x$table_name)
  })
  # Run the COMMENT ON COLUMN statements when the button is pressed
  observeEvent(input$add, {
    # Uploaded data
    data <- data()
    # Variables present in the target database table
    liste_var_bd <- data.frame(var = names(dbGetQuery(con, paste0("SELECT * FROM ", input$schema_id, ".", input$table_id))))
    # Two-column table: variable name / label
    temp <- data %>% select(input$var_id, input$label_id)
    # Strip quotes so the label can be embedded in the SQL literal.
    # NOTE(review): this is string-built SQL; identifiers and labels are
    # interpolated directly. Prefer DBI::dbQuoteIdentifier()/dbQuoteString()
    # to rule out injection and quoting bugs.
    temp[,2] <- str_replace_all(temp[,2], "'", "-")
    temp[,2] <- str_replace_all(temp[,2], '"', "-")
    # Build one COMMENT statement per variable
    temp$requete <- paste0("COMMENT ON COLUMN ", input$schema_id, ".", input$table_id, ".", temp[,1], " IS '", temp[,2], "'")
    # Keep only the variables that actually exist in the database table
    temp$var <- temp[,1]
    temp2 <- inner_join(temp, liste_var_bd, by = "var")
    # Execute each statement.
    # BUGFIX: COMMENT is a statement, not a query - use dbExecute() rather
    # than dbGetQuery(). seq_len() keeps the loop safe when nrow() is 0.
    for (i in seq_len(nrow(temp2))) {
      dbExecute(writing, temp2[i, "requete"])
    }
    # User feedback
    showNotification("Intรฉgration des mรฉta-donnรฉes rรฉalisรฉe", type = "message")
  })
  # Report: variables of the uploaded file missing from the target table
  output$tab_verif <- renderDataTable(options = list(paging = FALSE, searching = FALSE), {
    data <- data()
    # Variables present in the target database table
    liste_var_bd <- data.frame(var = names(dbGetQuery(con, paste0("SELECT * FROM ", input$schema_id, ".", input$table_id))))
    liste_var_bd$dispo <- "Existe en BDSIG"
    # Two-column table: variable name / label
    temp <- data %>%
      select(input$var_id, input$label_id)
    temp$var <- temp[,1]
    temp$indispo <- "N'existe pas dans la table choisie de la BDSIG (sera retirรฉe lors de l'intรฉgration des libellรฉs)"
    # Keep only the rows with no match in the database
    left_join(temp, liste_var_bd, by = "var") %>%
      filter(is.na(dispo)) %>%
      select(var, indispo) %>%
      rename(Variable = var, "Disponibilitรฉ en BDD" = indispo)
  })
}
# --- APP CREATION -------------------------------
# Assemble and launch the Shiny app from the `ui` and `server` defined above.
shinyApp(ui = ui, server = server)
|
## plot3.R -- plot the three sub-metering series for 1-2 Feb 2007 and save
## them to plot3.png (480x480).
## Read the raw data; "?" marks missing values.
epc <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
## Convert Date to class Date.
## BUGFIX: the original called as.Date(t$Date, ...) -- `t` is base R's
## transpose function, not this data set.
epc$Date <- as.Date(epc$Date, "%d/%m/%Y")
## Filter data set from Feb. 1, 2007 to Feb. 2, 2007 (BUGFIX: subset epc, not t)
epct <- subset(epc, Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
## Remove incomplete observations
epct <- epct[complete.cases(epct), ]
## Combine Date and Time into one timestamp string
dateTime <- paste(epct$Date, epct$Time)
## Name the vector
dateTime <- setNames(dateTime, "DateTime")
## Remove the separate Date and Time columns (BUGFIX: names(epct), not names(t))
epct <- epct[, !(names(epct) %in% c("Date", "Time"))]
## Prepend the combined timestamp and convert it to POSIXct
epct <- cbind(dateTime, epct)
epct$dateTime <- as.POSIXct(dateTime)
## Open the PNG device directly.
## BUGFIX: dev.copy() copies an *existing* screen plot and fails when no
## device is open; png() draws straight to the file.
png(file = "plot3.png", height = 480, width = 480)
with(epct, {
  ## NOTE(review): the y label looks copy-pasted from plot2; the course figure
  ## uses "Energy sub metering" here -- confirm before changing the string.
  plot(Sub_metering_1 ~ dateTime, type = "l",
       ylab = "Global Active Power (kilowatts)", xlab = "")
  lines(Sub_metering_2 ~ dateTime, col = 'Red')
  lines(Sub_metering_3 ~ dateTime, col = 'Blue')
})
legend("topright", col = c("black", "red", "blue"), lwd = c(1, 1, 1),
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
## plot3.R (duplicate copy) -- plot the three sub-metering series for
## 1-2 Feb 2007 and save them to plot3.png (480x480).
## Read the raw data; "?" marks missing values.
epc <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
## Convert Date to class Date.
## BUGFIX: the original called as.Date(t$Date, ...) -- `t` is base R's
## transpose function, not this data set.
epc$Date <- as.Date(epc$Date, "%d/%m/%Y")
## Filter data set from Feb. 1, 2007 to Feb. 2, 2007 (BUGFIX: subset epc, not t)
epct <- subset(epc, Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
## Remove incomplete observations
epct <- epct[complete.cases(epct), ]
## Combine Date and Time into one timestamp string
dateTime <- paste(epct$Date, epct$Time)
## Name the vector
dateTime <- setNames(dateTime, "DateTime")
## Remove the separate Date and Time columns (BUGFIX: names(epct), not names(t))
epct <- epct[, !(names(epct) %in% c("Date", "Time"))]
## Prepend the combined timestamp and convert it to POSIXct
epct <- cbind(dateTime, epct)
epct$dateTime <- as.POSIXct(dateTime)
## Open the PNG device directly.
## BUGFIX: dev.copy() copies an *existing* screen plot and fails when no
## device is open; png() draws straight to the file.
png(file = "plot3.png", height = 480, width = 480)
with(epct, {
  ## NOTE(review): the y label looks copy-pasted from plot2; the course figure
  ## uses "Energy sub metering" here -- confirm before changing the string.
  plot(Sub_metering_1 ~ dateTime, type = "l",
       ylab = "Global Active Power (kilowatts)", xlab = "")
  lines(Sub_metering_2 ~ dateTime, col = 'Red')
  lines(Sub_metering_3 ~ dateTime, col = 'Blue')
})
legend("topright", col = c("black", "red", "blue"), lwd = c(1, 1, 1),
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
#################################
### Add cool/warm season days ###
#################################
# NOTE(review): setwd() with an absolute path makes this script
# machine-specific; consider passing the data directory as an argument or
# building paths with file.path() from a configurable root.
setwd("E:/Current/New or Updated Files/PennState/Research/IndividualGrowth")
# Capture/recapture growth records; the code below expects columns Days
# (first capture day), Days.1 (recapture day) and dT (interval length)
# -- TODO confirm against the CSV.
dat<-read.csv("Data/VBG_TimeInterval.csv")
############# Warm-season day counts: indicator-vector approach #############
summary(dat$Days.1)
# Study spans day 0 through day 1307 (1308 days total). Element k of
# `summerdays` corresponds to study day k - 1, i.e. day d lives at index d + 1.
# 1 indicates the average nightly low is above 40 deg F (warm season).
summerdays <- vector("numeric", length = 1308)
# First warm season (days 0-15)
summerdays[1:16] <- 1
# Second warm season (days 168-380)
summerdays[169:381] <- 1
# Third warm season (days 533-745)
summerdays[534:746] <- 1
# Fourth warm season (days 898-1110)
summerdays[899:1111] <- 1
# Fifth warm season (days 1263-1307)
summerdays[1264:1308] <- 1
# For each record, count warm-season (dS) and cool-season (dW) days in the
# inclusive capture interval [Days, Days.1].
# BUGFIX: day d lives at index d + 1, so the slice must be shifted by one.
# The original indexed summerdays[Days:Days.1], which silently drops day 0
# (R ignores index 0) and reads every interval one day early.
for (i in seq_len(nrow(dat))) {
  dat$dS[i] <- sum(summerdays[(dat$Days[i] + 1):(dat$Days.1[i] + 1)])
  dat$dW[i] <- dat$dT[i] - dat$dS[i] + 1
}
write.csv(dat, file="VBG_TimeInterval_SeasonalDaysDuration.csv")
############# Interval-overlap approach (replaces the broken season loop) ####
# The original ~200-line if/else ladder here was flagged by its author as not
# working ("Lame-o for loop approach that doesn't work"). It had two fatal
# defects:
#   * many conditions (e.g. `dat$Days.1<=167`) were missing the `[i]`
#     subscript, feeding a whole vector to scalar if();
#   * each trailing `else` bound only to the last `if` of its chain, so
#     several branches could fire for the same record.
# It is replaced by an equivalent, correct computation: count the days of each
# inclusive interval [Days, Days.1] that fall inside a warm season.
# Warm-season boundaries in study-day numbers, matching both the indicator
# vector above and the thresholds used by the original ladder:
# days 0-15, 168-380, 533-745, 898-1110, 1263-1307.
warm_start <- c(0, 168, 533, 898, 1263)
warm_end <- c(15, 380, 745, 1110, 1307)
# Number of warm-season days inside the inclusive day interval [d0, d1]:
# sum of the (clipped) overlaps with each warm-season window.
count_warm_days <- function(d0, d1) {
  sum(pmax(0, pmin(d1, warm_end) - pmax(d0, warm_start) + 1))
}
dat$dS <- mapply(count_warm_days, dat$Days, dat$Days.1)
# Cool-season days = inclusive interval length minus warm-season days.
dat$dW <- (dat$Days.1 - dat$Days + 1) - dat$dS
| /IndividualGrowth/Scripts/CoolWarmDays.R | no_license | munozmunozmunoz/VBG | R | false | false | 9,979 | r | #################################
### Add cool/warm season days ###
#################################
# Builds per-record counts of warm-season (dS) and cool-season (dW) days from
# a 0/1 indicator vector spanning the whole study period.
# NOTE(review): setwd() to an absolute local drive makes this non-portable;
# prefer paths relative to the project root.
setwd("E:/Current/New or Updated Files/PennState/Research/IndividualGrowth")
dat<-read.csv("Data/VBG_TimeInterval.csv")
#############Summer days vector approach
summary(dat$Days.1)
#Day 0 to day 1307 (1308 total days)
summerdays<-vector("numeric", length=1308)
#1 indicates average nightly low is above 40degF
#First Summer
summerdays[1:16]<-1
#Second Summer
summerdays[169:381]<-1
#Third Summer
summerdays[534:746]<-1
#Fourth Summer
summerdays[899:1111]<-1
#Fifth Summer
summerdays[1264:1308]<-1
#For each record, create number of summer days and number of winters days
# NOTE(review): Days runs 0..1307 but summerdays is 1-indexed; an index of 0
# is silently dropped by R, so day 0 never contributes -- presumably the
# intended index is (dat$Days[i]+1):(dat$Days.1[i]+1).  TODO confirm.
for(i in 1:dim(dat)[1]){
dat$dS[i]<-sum(summerdays[dat$Days[i]:dat$Days.1[i]])
# NOTE(review): dW = dT - dS + 1 implies dS + dW = dT + 1; confirm whether dT
# counts interval days or is inclusive of both capture days.
dat$dW[i]<-dat$dT[i]-dat$dS[i]+1
}
write.csv(dat, file="VBG_TimeInterval_SeasonalDaysDuration.csv")
##############Lame-o for loop approach that doesn't work
# NOTE(review): abandoned code -- the vectorised summerdays approach above is
# what produced the saved CSV.  Three concrete defects if this is ever revived:
#   1. Many conditions subscript only one operand, e.g.
#      `dat$Days.1[i] > 15 & dat$Days.1<=167`: the bare `dat$Days.1` is a whole
#      column, so if() receives a length>1 condition (a warning in old R, an
#      error in R >= 4.2).  Every such test needs `[i]` on both sides.
#   2. The branches are independent if(){} blocks followed by one else{}; in R
#      the else binds only to the immediately preceding if, so the final
#      "last warm season" branch runs whenever that ONE test is FALSE and
#      silently overwrites results assigned by earlier branches.  The chains
#      must be rewritten as if / else if / else.
#   3. Branch arithmetic is inconsistent, e.g. the first same-season case
#      omits the `+ 1` that the analogous later same-season cases include.
for(i in 1:dim(dat)[1]){ # Loop through all observations in order to calculate season days
if(dat$Days[i] <= 15){ # For individuals caught within the first warm season
if(dat$Days.1[i] <= 15){ # Recaught within first warm season
dat$dS[i]<-dat$Days.1[i]-dat$Days[i]
dat$dW[i]<-0
}
# NOTE(review): from here down `dat$Days.1` lacks `[i]` (defect 1 above).
if(dat$Days.1[i] > 15 & dat$Days.1<=167){ # Recaught within first cool season
dat$dS[i]<-15-dat$Days[i] + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i] + 1 -dat$Days[i]
}
if(dat$Days.1[i] > 167 & dat$Days.1<=380){ # Recaught within second warm season
dat$dS[i]<-15-dat$Days[i] + 1 + dat$Days.1[i]-168 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1 -dat$Days[i]
}
if(dat$Days.1[i] > 380 & dat$Days.1<=532){ # Recaught within second cool season
dat$dS[i]<-15-dat$Days[i] + 1 + 380-168 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1 -dat$Days[i]
}
if(dat$Days.1[i] > 532 & dat$Days.1<=745){ # Recaught within third warm season
dat$dS[i]<-15-dat$Days[i] + 1 + 380-168 + 1 + dat$Days.1[i]-533 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1 -dat$Days[i]
}
if(dat$Days.1[i] > 745 & dat$Days.1<=897){ # Recaught within third cool season
dat$dS[i]<-15-dat$Days[i] + 1 + 380-168 + 1 + 745-533 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1 -dat$Days[i]
}
if(dat$Days.1[i] > 897 & dat$Days.1<=1110){ # Recaught within fourth warm season
dat$dS[i]<-15-dat$Days[i] + 1 + 380-168 + 1 + 745-533 + 1 + dat$Days.1[i]-898 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1 -dat$Days[i]
}
if(dat$Days.1[i] > 1110 & dat$Days.1<=1262){ # Recaught within fourth cool season
dat$dS[i]<-15-dat$Days[i] + 1 + 380-168 + 1 + 745-533 + 1 + 1110-898 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1 -dat$Days[i]
}
# NOTE(review): this else pairs ONLY with the if() directly above (defect 2).
else{ # Recaught within last warm season
dat$dS[i]<-15-dat$Days[i] + 1 + 380-168 + 1 + 745-533 + 1 + 1110-898 + 1 + dat$Days.1[i]-1263 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1 -dat$Days[i]
}
}
if(dat$Days[i] > 15 & dat$Days<=167){ # For individuals caught within the first cool season
if(dat$Days.1[i] > 15 & dat$Days.1<=167){ # Recaught within first cool season
dat$dS[i]<-0
dat$dW[i]<-dat$Days.1[i] - dat$Days[i] + 1
}
if(dat$Days.1[i] > 167 & dat$Days.1<=380){ # Recaught within second warm season
dat$dS[i]<-dat$Days.1[i]-167 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1 -dat$Days[i]
}
if(dat$Days.1[i] > 380 & dat$Days.1<=532){ # Recaught within second cool season
dat$dS[i]<-380-168 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
if(dat$Days.1[i] > 532 & dat$Days.1<=745){ # Recaught within third warm season
dat$dS[i]<-380-168 + 1 + dat$Days.1[i]-533 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
if(dat$Days.1[i] > 745 & dat$Days.1<=897){ # Recaught within third cool season
dat$dS[i]<-380-168 + 1 + 745-533 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
if(dat$Days.1[i] > 897 & dat$Days.1<=1110){ # Recaught within fourth warm season
dat$dS[i]<-380-168 + 1 + 745-533 + 1 + dat$Days.1[i]-898 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
if(dat$Days.1[i] > 1110 & dat$Days.1<=1262){ # Recaught within fourth cool season
dat$dS[i]<-380-168 + 1 + 745-533 + 1 + 1110-898 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
else{ # Recaught within last warm season
dat$dS[i]<-380-168 + 1 + 745-533 + 1 + 1110-898 + 1 + dat$Days.1[i]-1263 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
}
if(dat$Days[i] > 167 & dat$Days<=380){ # For individuals caught within the second warm season
if(dat$Days.1[i] > 167 & dat$Days.1<=380){ # Recaught within second warm season
dat$dS[i]<-dat$Days.1[i]-dat$Days[i]
dat$dW[i]<-0
}
if(dat$Days.1[i] > 380 & dat$Days.1<=532){ # Recaught within second cool season
dat$dS[i]<-380-dat$Days[i] + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
if(dat$Days.1[i] > 532 & dat$Days.1<=745){ # Recaught within third warm season
dat$dS[i]<-380-dat$Days[i] + 1 + dat$Days.1[i]-533 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
if(dat$Days.1[i] > 745 & dat$Days.1<=897){ # Recaught within third cool season
dat$dS[i]<-380-dat$Days[i] + 1 + 745-533 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
if(dat$Days.1[i] > 897 & dat$Days.1<=1110){ # Recaught within fourth warm season
dat$dS[i]<-380-dat$Days[i] + 1 + 745-533 + 1 + dat$Days.1[i]-898 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
if(dat$Days.1[i] > 1110 & dat$Days.1<=1262){ # Recaught within fourth cool season
dat$dS[i]<-380-dat$Days[i] + 1 + 745-533 + 1 + 1110-898 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
else{ # Recaught within last warm season
dat$dS[i]<-380-dat$Days[i] + 1 + 745-533 + 1 + 1110-898 + 1 + dat$Days.1[i]-1263 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
}
if(dat$Days[i] > 380 & dat$Days<=532){ # For individuals caught in second cool season
if(dat$Days.1[i] > 380 & dat$Days.1<=532){ # Recaught within second cool season
dat$dS[i]<-0
dat$dW[i]<-dat$Days.1[i]-dat$Days[i]
}
if(dat$Days.1[i] > 532 & dat$Days.1<=745){ # Recaught within third warm season
dat$dS[i]<-dat$Days.1[i]-533 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
if(dat$Days.1[i] > 745 & dat$Days.1<=897){ # Recaught within third cool season
dat$dS[i]<-745-533 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
if(dat$Days.1[i] > 897 & dat$Days.1<=1110){ # Recaught within fourth warm season
dat$dS[i]<-745-533 + 1 + dat$Days.1[i]-898 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
if(dat$Days.1[i] > 1110 & dat$Days.1<=1262){ # Recaught within fourth cool season
dat$dS[i]<-745-533 + 1 + 1110-898 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
else{ # Recaught within last warm season
dat$dS[i]<-745-533 + 1 + 1110-898 + 1 + dat$Days.1[i]-1263 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
}
if(dat$Days[i] > 532 & dat$Days<=745){
if(dat$Days.1[i] > 532 & dat$Days.1<=745){ # Recaught within third warm season
dat$dS[i]<-dat$Days.1[i]-dat$Days[i] + 1
dat$dW[i]<-0
}
if(dat$Days.1[i] > 745 & dat$Days.1<=897){ # Recaught within third cool season
dat$dS[i]<-745-dat$Days[i] + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
if(dat$Days.1[i] > 897 & dat$Days.1<=1110){ # Recaught within fourth warm season
dat$dS[i]<-745-dat$Days[i] + 1 + dat$Days.1[i]-898 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
if(dat$Days.1[i] > 1110 & dat$Days.1<=1262){ # Recaught within fourth cool season
dat$dS[i]<-745-dat$Days[i] + 1 + 1110-898 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
else{ # Recaught within last warm season
dat$dS[i]<-745-dat$Days[i] + 1 + 1110-898 + 1 + dat$Days.1[i]-1263 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
}
if(dat$Days[i] > 745 & dat$Days<=897){
if(dat$Days.1[i] > 745 & dat$Days.1<=897){ # Recaught within third cool season
dat$dS[i]<-0
dat$dW[i]<-dat$Days.1[i] - dat$Days[i]
}
if(dat$Days.1[i] > 897 & dat$Days.1<=1110){ # Recaught within fourth warm season
dat$dS[i]<-dat$Days.1[i]-898 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
if(dat$Days.1[i] > 1110 & dat$Days.1<=1262){ # Recaught within fourth cool season
dat$dS[i]<-1110-898 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
else{ # Recaught within last warm season
dat$dS[i]<-1110-898 + 1 + dat$Days.1[i]-1263 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
}
if(dat$Days[i] > 897 & dat$Days<=1110){
if(dat$Days.1[i] > 897 & dat$Days.1<=1110){ # Recaught within fourth warm season
dat$dS[i]<-dat$Days.1[i]-dat$Days[i] + 1
dat$dW[i]<-0
}
if(dat$Days.1[i] > 1110 & dat$Days.1<=1262){ # Recaught within fourth cool season
dat$dS[i]<-1110-dat$Days[i] + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
else{ # Recaught within last warm season
dat$dS[i]<-1110-dat$Days[i] + 1 + dat$Days.1[i]-1263 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
}
if(dat$Days[i] > 1110 & dat$Days<=1262){
if(dat$Days.1[i] > 1110 & dat$Days.1<=1262){ # Recaught within fourth cool season
dat$dS[i]<-0
dat$dW[i]<-dat$Days.1[i]-dat$Days[i] + 1
}
else{ # Recaught within last warm season
dat$dS[i]<-dat$Days.1[i]-1263 + 1
dat$dW[i]<-dat$Days.1[i] - dat$dS[i]+ 1-dat$Days[i]
}
}
# NOTE(review): this final else pairs only with the if() directly above, so it
# fires for EVERY record not caught in the fourth cool season (defect 2).
else{ # Caught and recaught within last warm season
dat$dS[i]<-dat$Days.1[i]-dat$Days[i] + 1
dat$dW[i]<-0
}
}
|
############S#################
## TRAIT CHANGE OVER SPACE ##
############S#################
# Fits a multi-level Stan model of teabag mass loss against soil moisture.
# NOTE(review): rm(list = ls()) wipes the user's workspace -- run this only
# as a standalone script, never source it into an active session.
rm(list=ls())
#Detach packages####
# Detach every attached package except the base set, so the script starts
# from a clean search path.  No arguments; called for its side effect only.
detachAllPackages <- function() {
  # Packages attached in a default R session; these must never be detached.
  basic.packages <- c("package:stats","package:graphics","package:grDevices","package:utils","package:datasets","package:methods","package:base")
  # FIX: select attached packages with an anchored regex.  The original
  # ifelse(unlist(gregexpr("package:", search())) == 1, TRUE, FALSE) breaks if
  # any search() entry contains "package:" more than once (gregexpr then
  # returns extra positions and the mask no longer lines up with search()),
  # and the ifelse() wrapper around a logical was redundant.
  package.list <- search()[grepl("^package:", search())]
  package.list <- setdiff(package.list, basic.packages)
  # Looping over an empty vector is a no-op, so no length() guard is needed.
  for (package in package.list) detach(package, character.only = TRUE)
}
detachAllPackages()
#Question 1 - Differences between teas
####Open packages####
library(raster)
library(rgdal)
library(lme4)
library(nlme)
library(stringr)
library(plyr)
library(dplyr)
library(ggplot2)
require(gridExtra)  # NOTE(review): prefer library() -- require() only warns on a missing package
#library(brms)
library(rstan)
library(StanHeaders)
library(MuMIn)
library(MCMCglmm)
library(postMCMCglmm)
#### CHOOSE TRAIT NAME AND CLIMATE VARIABLE HERE ----
###Read in tea
# Combined teabag data; path is relative to the project root -- TODO confirm.
tea<-read.csv("scripts/users/hthomas/tea/combined_tea.csv", stringsAsFactors = FALSE)
#Remove daily tea - too confusing!
tea<-subset(tea,!grepl("CG_DT_HT",tea$Plot))
#Remove sub zero plots (subset() also drops rows where Loss is NA)
tea<-subset(tea,Loss>0)
# Censor implausible rooibos losses (> 50 %).  FIX: which() guards against NA
# in Tea_Type -- the original `tea[cond,]$Loss <- NA` errors on NA subscripts
# ("missing values are not allowed in subscripted assignments").
tea$Loss[which(tea$Tea_Type == "Rooibos" & tea$Loss > 0.5)] <- NA
#Make sure only using control plots
ambient<-subset(tea,Treatment=="None")
#Split into seasons to make things easier
summer<-subset(ambient,Season=="Summer")
year<-subset(ambient,Season=="Year")
winter<-subset(ambient,Season=="Winter")
## STAN MODEL - soil temperature ----
#soil temperature#
# Candidate response variables; i = 1 below selects "Loss".
var.list <- c("Loss", "Loss_Day", "k", "TBI_k", "TBI_S")
#Calculate mean burial length
#Get column number
i=1
var.num<-which(colnames(year)==var.list[i])
# Keep rows with a finite response and finite moisture.  NOTE(review): the
# first condition indexes the un-piped `year` by position; this only works
# because the piped data IS `year` (same rows, same order) -- fragile.
season_narm<-year %>%
filter(is.finite(year[,var.num]),is.finite(moisture_mean))
#Subset for tea types
#season_narm_r<-subset(season_narm,Tea_Type=="Rooibos") #AB NOTE: Keeping both tea types and including as interaction in model
season_narm_r <- season_narm #just so I don't have to rename everything
# AB: MULTIPLE OBSERVATION
# Flag plot/tea combinations with more than 4 observations.
season_narm_r <- ddply(season_narm_r, c("ESA_cell","Site","Plot","Tea_Type"), transform, NObsPlot = length(Loss))
season_narm_r$MultipleObs <- ifelse(season_narm_r$NObsPlot > 4, 1, 0)
# Multiple Sites
count.sites <- ddply(season_narm_r, c("ESA_cell"), summarise, n.sub = length(unique(Site)))
season_narm_r$MultipleSites <- ifelse(season_narm_r$ESA_cell %in% count.sites$ESA_cell[count.sites$n.sub > 1], 1, 0)
# Multiple plots per Site (more than 1)
count.plots <- ddply(season_narm_r, c("ESA_cell", "Site"), summarise, n.plots = length(unique(Plot)))
season_narm_r$MultiplePlots <- ifelse(season_narm_r$Site %in% count.plots$Site[count.plots$n.plots > 1], 1, 0)
#Add env.levels (original)
# NOTE(review): this envlevel is dead code -- it is recomputed from scratch
# (nestedness-based) immediately below and overwritten.
season_narm_r$envlevel<-ifelse(season_narm_r$moisture_mean_var_level=="Region",0,
ifelse(season_narm_r$moisture_mean_var_level=="Site",1,2))
#Add env.levels (alternative)
#Add env.levels (new - based on nestedness)
env.levels<- season_narm_r %>%
select(moisture_mean,ESA_cell,Site,Plot)
# envlevel: 0 = moisture constant within the region, 1 = varies among sites
# within a region, 2 = varies among plots within a site (later tests upgrade).
season_narm_r$envlevel<-0
env.levels2<-ddply(env.levels, c("ESA_cell"), summarise, n.plots = length(unique(moisture_mean)))
season_narm_r$envlevel <- ifelse(season_narm_r$ESA_cell %in% env.levels2$ESA_cell[env.levels2$n.plots > 1], 1, season_narm_r$envlevel)
env.levels2<-ddply(env.levels, c("ESA_cell","Site"), summarise, n.plots = length(unique(moisture_mean)))
season_narm_r$envlevel <- ifelse(season_narm_r$Site %in% env.levels2$Site[env.levels2$n.plots > 1], 2, season_narm_r$envlevel)
#Add categories
# NOTE(review): `MultiplePlots_Region` is never created anywhere in this
# script (only MultipleObs / MultipleSites / MultiplePlots exist), so
# season_narm_r$MultiplePlots_Region is NULL, the outer condition has length
# zero, and the whole nested ifelse() returns logical(0) -- this assignment
# fails at runtime.  Presumably `MultiplePlots` was intended; confirm.
# NOTE(review): the 7th and 9th tests are identical (envlevel == 2), so
# category 8 is unreachable and envlevel == 0 with both flags falls through to
# the "NA" string; the string literal also makes Cat a character column.
season_narm_r$Cat<-ifelse(season_narm_r$MultiplePlots_Region == 0 & season_narm_r$MultipleSites == 0, 1, #No nesting - automatically at plot level
ifelse(season_narm_r$MultiplePlots == 1 & season_narm_r$MultipleSites == 0 & season_narm_r$envlevel == 2,2, #Plot in site, plot level env data
ifelse(season_narm_r$MultiplePlots == 1 & season_narm_r$MultipleSites == 0 & season_narm_r$envlevel != 2,3, #Plot in site, site / region level env data
ifelse(season_narm_r$MultipleSites == 1 & season_narm_r$MultiplePlots == 0 & season_narm_r$envlevel == 2,4,
ifelse(season_narm_r$MultipleSites == 1 & season_narm_r$MultiplePlots == 0 & season_narm_r$envlevel == 1,4,
ifelse(season_narm_r$MultipleSites == 1 & season_narm_r$MultiplePlots == 0 & season_narm_r$envlevel == 0,5,
ifelse(season_narm_r$MultipleSites == 1 & season_narm_r$MultiplePlots == 1 & season_narm_r$envlevel == 2,6,
ifelse(season_narm_r$MultipleSites == 1 & season_narm_r$MultiplePlots == 1 & season_narm_r$envlevel == 1,7,
ifelse(season_narm_r$MultipleSites == 1 & season_narm_r$MultiplePlots == 1 & season_narm_r$envlevel == 2,8,"NA")))))))))
#Subset so only using data I want to check model
#season_narm_r<-subset(season_narm_r, Cat == 1 | Cat == 2 | Cat == 3)
# AB: REMOVE MISSING VALUES OF SOIL moisture AND TEMPERATURE FOR THE moisture X TEMPERATURE INTERACTION MODEL
season_narm_r <- season_narm_r[!is.na(season_narm_r$moisture_mean),]
#Add Region numbers
# NOTE(review): group_indices_() is deprecated in modern dplyr; the
# replacement is cur_group_id() inside mutate(), or dplyr::group_indices().
season_narm_r<-season_narm_r %>%
mutate(RegionNum = group_indices_(season_narm_r, .dots=c("ESA_cell","Tea_Type")))
#Reorder by site number
season_narm_r<-season_narm_r[order(season_narm_r$RegionNum),]
#Add Site numbers
season_narm_r<-season_narm_r %>%
mutate(SiteNum = group_indices_(season_narm_r, .dots=c("ESA_cell","Site","Tea_Type")))
#Reorder by site number
season_narm_r<-season_narm_r[order(season_narm_r$SiteNum),]
#Add Plot numbers
season_narm_r<-season_narm_r %>%
mutate(PlotNum = group_indices_(season_narm_r, .dots=c("ESA_cell","Site","Plot","Tea_Type"))) #AB NOTE: This now includes tea type as well! So there will be a unique plot number for each tea type within a plot
#Reorder by plot number
season_narm_r<-season_narm_r[order(season_narm_r$PlotNum),]
#Centre values - AB note: Either don't name this the same thing or save the amount you center by first so we can add it to the xhats later
moisture_cent_amount <- attr(scale(season_narm_r$moisture_mean, center = TRUE, scale = FALSE), 'scaled:center')
# NOTE(review): scale() returns a 1-column matrix, so moisture_mean and Days
# become matrix columns after these assignments; as.numeric() calls later in
# the script strip that, but other uses may behave unexpectedly.
season_narm_r$moisture_mean<-scale(season_narm_r$moisture_mean, center = TRUE, scale = FALSE)
days_cent_amount <- attr(scale(season_narm_r$Days, center = TRUE, scale = FALSE), 'scaled:center')
season_narm_r$Days<-scale(season_narm_r$Days, center = TRUE, scale = FALSE)
#AB: caluclate mean and sd per site - YOU CAN THINK ABOUT WHETHER YOU WANT THIS TO BE THE OVERALL MEAN OR THE MEAN OF MEANS - MEAN OF MEANS MIGHT BE BETTER IN THIS CASE
season_narm_r_sites<-season_narm_r %>%
group_by(SiteNum) %>%
summarise(moisture_mean_site = mean(moisture_mean),
moisture_sd_site = sd(moisture_mean))
season_narm_r$moisture_mean_site<-season_narm_r_sites$moisture_mean_site[match(season_narm_r$SiteNum, season_narm_r_sites$SiteNum)]
season_narm_r$moisture_sd_site<-season_narm_r_sites$moisture_sd_site[match(season_narm_r$SiteNum, season_narm_r_sites$SiteNum)]
# Sites with zero spread get the mean of the positive SDs; single-obs sites
# (SD = NA) get a small positive floor so Stan's normal() sigma stays > 0.
season_narm_r$moisture_sd_site[season_narm_r$moisture_sd_site==0 ] <- mean(season_narm_r$moisture_sd_site[season_narm_r$moisture_sd_site>0],na.rm = T)
season_narm_r$moisture_sd_site[is.na(season_narm_r$moisture_sd_site)] <- 0.01
#AB: caluclate mean and sd per region - YOU CAN THINK ABOUT WHETHER YOU WANT THIS TO BE THE OVERALL MEAN OR THE MEAN OF MEANS - MEAN OF MEANS MIGHT BE BETTER IN THIS CASE
season_narm_r_regions<-season_narm_r %>%
group_by(RegionNum) %>%
summarise(moisture_mean_region = mean(moisture_mean),
moisture_sd_region = sd(moisture_mean))
season_narm_r$moisture_mean_region<-season_narm_r_regions$moisture_mean_region[match(season_narm_r$RegionNum, season_narm_r_regions$RegionNum)]
season_narm_r$moisture_sd_region<-season_narm_r_regions$moisture_sd_region[match(season_narm_r$RegionNum, season_narm_r_regions$RegionNum)]
season_narm_r$moisture_sd_region[season_narm_r$moisture_sd_region==0] <- mean(season_narm_r$moisture_sd_region[season_narm_r$moisture_sd_region>0],na.rm = T)
season_narm_r$moisture_sd_region[is.na(season_narm_r$moisture_sd_region)] <- 0.01
#Add mean days per region
# NOTE(review): these group_by() calls leave season_narm_r grouped (no
# ungroup()); downstream column extractions still work but grouped verbs would
# not behave as expected.
season_narm_r<-season_narm_r %>%
group_by(SiteNum) %>%
mutate(SiteDays = mean(Days),
SiteDays_sd = sd(Days))
season_narm_r$SiteDays_sd[season_narm_r$SiteDays_sd==0 | is.na(season_narm_r$SiteDays_sd)] <- 0.001
#Add mean days per region
season_narm_r<-season_narm_r %>%
group_by(RegionNum) %>%
mutate(RegionDays = mean(Days),
RegionDays_sd = sd(Days))
season_narm_r$RegionDays_sd[season_narm_r$RegionDays_sd==0 | is.na(season_narm_r$RegionDays_sd)] <- 0.001
mean_burial<-mean(season_narm_r$Days)
# NOTE(review): min_soil/max_soil are leftover names from the temperature
# version -- they duplicate min_moisture/max_moisture and are unused below.
min_soil<-min(season_narm_r$moisture_mean,na.rm=TRUE)
max_soil<-max(season_narm_r$moisture_mean,na.rm=TRUE)
min_moisture<-min(season_narm_r$moisture_mean,na.rm=TRUE)
max_moisture<-max(season_narm_r$moisture_mean,na.rm=TRUE)
# NOTE(review): contrary to the AB comment, this builds a continuous grid of
# (centred) moisture values in 0.01 steps at the mean burial length.
xhats <- expand.grid(xhat1=seq(min_moisture, max_moisture,by=0.01), xhat3 = mean_burial) #AB: predicting soil moisture at 25% and 75% (assuming you will graph temperature as continuous) but of course you can change this to whatever you want
####Third attempt - adding temperature levels#######
# Data list for rstan::stan() (the name `jags.dat` is historical).  rstan only
# looks up list entries that the Stan data block declares; extras are ignored.
jags.dat<-list(
Nobs=nrow(season_narm_r),
NSite=length(unique(season_narm_r$SiteNum)),
NRegion=length(unique(season_narm_r$RegionNum)),
NPlot=length(unique(season_narm_r$PlotNum)),
NSiteDays=length(unique(season_narm_r$SiteDays)),
NRegionDays=length(unique(season_narm_r$RegionDays)),
NTea=length(unique(season_narm_r$Tea_Type)),
Region=season_narm_r$RegionNum,
Site=season_narm_r$SiteNum,
Plot=season_narm_r$PlotNum,
SiteDays=season_narm_r$SiteDays[!duplicated(season_narm_r$SiteNum)],
SiteDays_sd=season_narm_r$SiteDays_sd[!duplicated(season_narm_r$SiteNum)],
RegionDays=season_narm_r$RegionDays[!duplicated(season_narm_r$RegionNum)],
RegionDays_sd=season_narm_r$RegionDays_sd[!duplicated(season_narm_r$RegionNum)],
Site_short=season_narm_r$SiteNum[!duplicated(season_narm_r$PlotNum)],
Plot_short=unique(season_narm_r$PlotNum),
tea_type_site=ifelse(season_narm_r$Tea_Type[!duplicated(season_narm_r$SiteNum)]=="Green", 1, 2),
tea_type_region=ifelse(season_narm_r$Tea_Type[!duplicated(season_narm_r$RegionNum)]=="Green", 1, 2),
multobs_lobs=season_narm_r$MultipleObs,
multobs_lplot=season_narm_r$MultipleObs[!duplicated(season_narm_r$PlotNum)],
multsites_lobs=season_narm_r$MultipleSites,
multsites_lplot=season_narm_r$MultipleSites[!duplicated(season_narm_r$PlotNum)],
multsites_lsite=season_narm_r$MultipleSites[!duplicated(season_narm_r$SiteNum)],
multsites_lregion=season_narm_r$MultipleSites[!duplicated(season_narm_r$RegionNum)],
multplots_lobs=season_narm_r$MultiplePlots,
multplots_lplot=season_narm_r$MultiplePlots[!duplicated(season_narm_r$PlotNum)],
multplots_lsite=season_narm_r$MultiplePlots[!duplicated(season_narm_r$SiteNum)],
multplots_lregion=season_narm_r$MultiplePlots[!duplicated(season_narm_r$RegionNum)],
# NOTE(review): season_narm_r$MultiplePlots_Region does not exist, so the
# four entries below are NULL.  The Stan data block never declares them, so
# this is currently harmless -- but fix the source column before relying on a
# region-level plot flag.
multplots_region_lobs=season_narm_r$MultiplePlots_Region,
multplots_region_lplot=season_narm_r$MultiplePlots_Region[!duplicated(season_narm_r$PlotNum)],
multplots_region_lsite=season_narm_r$MultiplePlots_Region[!duplicated(season_narm_r$SiteNum)],
multplots_region_lregion=season_narm_r$MultiplePlots_Region[!duplicated(season_narm_r$RegionNum)],
traitobs=season_narm_r$Loss,
#temp_plot=as.numeric(season_narm_r[!duplicated(season_narm_r$PlotNum),]$moisture_mean),
#temp_site=as.numeric(season_narm_r[!duplicated(season_narm_r$SiteNum),]$moisture_mean),
# NOTE(review): the temp_* names actually carry MOISTURE summaries here
# (leftover naming from the temperature version of this script).
temp_mean_region=as.numeric(season_narm_r[!duplicated(season_narm_r$RegionNum),]$moisture_mean_region),
temp_sd_region=as.numeric(season_narm_r[!duplicated(season_narm_r$RegionNum),]$moisture_sd_region),
temp_mean_site=as.numeric(season_narm_r[!duplicated(season_narm_r$SiteNum),]$moisture_mean_site),
temp_sd_site=as.numeric(season_narm_r[!duplicated(season_narm_r$SiteNum),]$moisture_sd_site),
obs_envlevel=season_narm_r$envlevel,
plot_envlevel=season_narm_r[!duplicated(season_narm_r$PlotNum),]$envlevel,
site_envlevel=season_narm_r[!duplicated(season_narm_r$SiteNum),]$envlevel,
region_envlevel=season_narm_r[!duplicated(season_narm_r$RegionNum),]$envlevel,
meanT=mean(as.numeric(season_narm_r$moisture_mean[!duplicated(season_narm_r$ESA_cell)])),
# Prediction grid: xhat1 = centred moisture, xhat3 = mean burial length.
xhat1=xhats$xhat1,
xhat3=xhats$xhat3,
Nxhat=length(xhats$xhat1)
)
str(jags.dat)
# MODEL - ANNE EDITS####
# Writes the Stan program to disk.  The string below is runtime text and is
# left untouched; NOTE(review) on the Stan code inside it:
#  - generated quantities uses the deprecated `<-` assignment for teaDiff
#    (removed in recent Stan releases; use `=`), and the <lower,upper> bounds
#    there act as run-time checks, not constraints;
#  - `mu[i] = ... ;;` carries a harmless stray semicolon;
#  - the commented-out print() still refers to an old parameter name
#    (aMeanSite); the parameter is now aMeanRegion.
write("
data {
int<lower=0> Nobs; //Number of observations
int<lower=0> NRegion; //Number of regions
int<lower=0> NSite; //Number of sites
int<lower=0> NPlot; //Number of plots
int<lower=0> Nxhat; //No. predictor variables
int<lower=0> NTea; //No. of tea types
int<lower=0> NSiteDays; //No. of days
int<lower=0> NRegionDays; //No. of days
int<lower=1,upper=NPlot> Plot[Nobs]; //Plots (all observations)
int<lower=1,upper=NSite> Site[Nobs]; //Plots (all observations)
int<lower=1,upper=NRegion> Region[Nobs]; //Plots (all observations)
int<lower=1,upper=2> tea_type_site[NSite]; //Tea type (1=Green, 2=Rooibos)
int<lower=1,upper=2> tea_type_region[NRegion]; //Tea type (1=Green, 2=Rooibos)
int<lower=0,upper=1> multobs_lobs[Nobs]; //Are sites nested in region (all obs)
int<lower=0,upper=1> multobs_lplot[NPlot]; //Are sites nested in region (all obs)
int<lower=0,upper=1> multsites_lobs[Nobs]; //Are sites nested in region (all obs)
int<lower=0,upper=1> multsites_lplot[NPlot]; //Are sites nested in region (no. plots)
int<lower=0,upper=1> multsites_lsite[NSite]; //Are sites nested in region (no. plots)
int<lower=0,upper=1> multsites_lregion[NRegion]; //Are sites nested in region (no. plots)
int<lower=0,upper=1> multplots_lobs[Nobs]; //Are plots nested in site (all obs)
int<lower=0,upper=1> multplots_lplot[NPlot]; //Are plots nested in site (no plots)
int<lower=0,upper=2> obs_envlevel[Nobs];
int<lower=0,upper=2> site_envlevel[NSite];
int<lower=0,upper=2> region_envlevel[NRegion];
vector[Nobs] traitobs; //Mass Loss
vector[NSite] temp_mean_site; //Temperature (unique regions)
vector[NSite] temp_sd_site; //Temperature SD (unique regions)
vector[NRegion] temp_mean_region; //Temperature (unique regions)
vector[NRegion] temp_sd_region; //Temperature SD (unique regions)
vector[NSite] SiteDays; //
vector[NSite] SiteDays_sd; //
vector[NRegion] RegionDays; //
vector[NRegion] RegionDays_sd; //
vector[Nxhat] xhat1; //Predictor variables
vector[Nxhat] xhat3; //Predictor variables
}
parameters {
real<lower=-3,upper=3> as[NSite]; // Region effect
real<lower=-5,upper=5> ap[NPlot];
real<lower=-5,upper=5> aMeanRegion[NRegion];
real<lower=-2,upper=2> gamma0[NTea]; // intercept of relationship between mass loss and temp change
real<lower=-2,upper=2> gamma1[NTea]; // slope of temperature - loss relationship
real<lower=-2,upper=2> gamma2[NTea]; // slope of moisture - loss relationship
real<lower=-2,upper=2> gamma3[NTea]; // temperature - moisture interaction
real<lower=-2,upper=2> gamma4[NTea]; // temperature - moisture interaction
real<lower=0,upper=5> sigma_overall; //Error around loss- temp relationship
real<lower=0,upper=5> sigma_plot;
real<lower=0,upper=5> sigma_site;
real<lower=0,upper=5> sigma_region;
real<lower=0,upper=5> sigma_resid;
vector[NSite] temp_pred_site;
vector[NSite] days_pred_site;
vector[NRegion] temp_pred_region;
vector[NRegion] days_pred_region;
}
transformed parameters {
vector[Nobs] mu;
vector[Nobs] app;
vector[Nobs] ass;
vector[Nobs] arr;
for (i in 1:Nobs){
if((multobs_lobs[i]==1 && multplots_lobs[i]==1))
app[i] = ap[Plot[i]];
// set plot effects to 0 for plots that don't have multiple obs or are the only plot within a site
else app[i] = 0;
if(multsites_lobs[i] == 1)
ass[i] = as[Site[i]];
else ass[i] = 0;
if(multsites_lobs[i]==1 && obs_envlevel[i] >0)
arr[i] = 0;
else arr[i] = aMeanRegion[Region[i]];
mu[i] = app[i] + ass[i] + arr[i];;
}
//print(\"ap=\",ap[1:10],\"as=\",as[1:10],\"aMeanSite=\",aMeanSite[1:8],\"mu=\",mu[1:10])
}
model {
for (i in 1:Nobs){
traitobs[i] ~ normal(mu[i], sigma_resid);
}
//Set up plot and site random effects
for (i in 1:NPlot){
if(multobs_lplot[i]==1 && multplots_lplot[i]==1)
ap[i] ~ normal(0, sigma_plot);
}
//Bring in environmental data means and SD per region
for (i in 1:NRegion){
temp_pred_region[i] ~ normal(temp_mean_region[i], temp_sd_region[i]); //temp_mean_region and temp_sd are given as data
days_pred_region[i] ~ normal(RegionDays[i], RegionDays_sd[i]); //temp_mean_region and temp_sd are given as data
}
for (i in 1:NSite){
temp_pred_site[i] ~ normal(temp_mean_site[i], temp_sd_site[i]); //temp_mean_region and temp_sd are given as data
days_pred_site[i] ~ normal(SiteDays[i], SiteDays_sd[i]); //temp_mean_region and temp_sd are given as data
}
//Relationship between mass loss at the region level and temperature and moisture, per tea type
for (i in 1:NSite){
if(multsites_lsite[i] == 1 && site_envlevel[i] >0)
as[i] ~ normal(gamma0[tea_type_site[i]] + gamma1[tea_type_site[i]]*temp_pred_site[i] + gamma4[tea_type_site[i]]*days_pred_site[i], sigma_overall);
else as[i] ~ normal(0, sigma_site);
}
for (i in 1:NRegion){
if(multsites_lregion[i] == 1 && region_envlevel[i] >0)
aMeanRegion [i] ~ normal(0, sigma_region);
else aMeanRegion[i] ~ normal(gamma0[tea_type_region[i]] + gamma1[tea_type_region[i]]*temp_pred_region[i] + gamma4[tea_type_region[i]]*days_pred_region[i] , sigma_overall);
}
} //Close model
generated quantities{
matrix[Nxhat,NTea] preds; //matrix of predictions
real<lower=-5,upper=5> teaDiff;
for (i in 1:Nxhat){
for (j in 1:NTea){
preds[i,j] = (gamma0[j] + gamma1[j]*xhat1[i] + gamma4[j]*xhat3[i]); //predictions
}
}
teaDiff <- gamma0[1]-gamma0[2]; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
}
","scripts/users/hthomas/Tea/moisture_loss_3.stan")
# Check the Stan program parses, then fit with NUTS (2 chains, 15k iterations).
stanc('scripts/users/hthomas/Tea/moisture_loss_3.stan') #check model
options(mc.cores = parallel::detectCores())
# Two chains started from different values for the random effects.
initsA <- list(ap=rep(0.6,jags.dat$NPlot), aMeanRegion=rep(0.6,jags.dat$NRegion),as=rep(0.6,jags.dat$NSite))
initsB <- list(ap=rep(0.3,jags.dat$NPlot), aMeanRegion=rep(0.3,jags.dat$NRegion),as=rep(0.3,jags.dat$NSite))
inits <- list(initsA, initsB)
fit_space <- stan(file = 'scripts/users/hthomas/Tea/moisture_loss_3.stan', data = jags.dat, init=inits, iter = 15000, chains = 2, thin = 1, verbose = TRUE, control=list(adapt_delta=0.99,max_treedepth = 15), algorithm = "NUTS")
s <- summary(fit_space)
rownames(s$summary)
(s$summary)[202]  # NOTE(review): magic position into the flattened summary -- confirm which parameter this is meant to inspect
max(s$summary[,10],na.rm = TRUE) # max Rhat
hist(s$summary[,"Rhat"], breaks=100)
hist(s$summary[,"n_eff"])
print(fit_space)
stan_trace(fit_space, inc_warmup = TRUE, pars = c("gamma0","gamma1"))
# FIX: the model's region intercepts are named aMeanRegion (there is no
# aMeanSite parameter); the old name made stan_trace() error.
stan_trace(fit_space, inc_warmup = TRUE, pars = c("aMeanRegion[1]","aMeanRegion[2]"))
cout <- as.data.frame(s$summary)
# Split rownames like "aMeanRegion[3]" into a parameter name and an index.
cout$Param <- unlist(lapply(rownames(cout), function (x) {strsplit(x,split="[",fixed=T)}[[1]][1]))
cout$Number <- as.vector(sapply(strsplit(rownames(cout),"[^0-9]+",fixed=FALSE), "[", 2))
cout[cout$Rhat > 1.1 & !is.na(cout$Rhat),]  # parameters that have not converged
# FIX: was "aMeanSite", a parameter that does not exist in this model.
hist(cout$mean[cout$Param=="aMeanRegion"])
cout[cout$Param %in% c("gamma0","gamma1","gamma2","gamma3"),] #these will tell you about the "significance" of your environmental predictors
#Compare to raw data
# plot.compare <- ddply(season_narm_r[season_narm_r$MultipleObs==1,], c("Site","Plot","PlotNum","Tea_Type"), summarise,
# rawLoss = mean(Loss))
#
# plot.compare$StanEst <- cout$mean[match(plot.compare$PlotNum, cout$Number[cout$Param=="ap"])]
# ggplot(plot.compare)+
# geom_point(aes(x=rawLoss,y=StanEst,colour=Tea_Type))
# Raw mean loss per region/tea vs the fitted region intercept.
region.compare <- ddply(season_narm_r, c("RegionNum","Tea_Type"), summarise,
rawLoss = mean(Loss))
# FIX: the region intercepts are named aMeanRegion in the model; matching
# against "aMeanSite" found nothing, so StanEst was all NA.
region.compare$StanEst <- cout$mean[match(region.compare$RegionNum, cout$Number[cout$Param=="aMeanRegion"])]
ggplot(region.compare)+
geom_point(aes(x=rawLoss,y=StanEst,colour=Tea_Type))
# Graph predictions
predsout.space <- cout[cout$Param %in% c("preds"),]
# NOTE(review): rstan flattens matrix parameters column-major (preds[1,1],
# preds[2,1], ...), so labelling rows with rep(..., each = 2) assumes the
# opposite order -- verify against rownames(predsout.space) before trusting
# the moisture/tea pairing here and in Tea_TypeNum below.
predsout.space$moisture <- rep(jags.dat$xhat1, each=2)
predsout.space$MoistureBT <- predsout.space$moisture + moisture_cent_amount  # back-transformed (typo "MostureBT" fixed; column is not used downstream)
# FIX: jags.dat has no xhat2 and temp_x_cent_amount is never defined (leftovers
# from the temperature x moisture version of this script); the second line
# errored at runtime.  Disabled rather than deleted:
#predsout.space$Temp <- rep(jags.dat$xhat2, each=2)
#predsout.space$TempBT <- predsout.space$Temp + temp_x_cent_amount
predsout.space$Tea_TypeNum <- rep(c(1,2), times = (length(predsout.space$mean)/2))
predsout.space$Tea_Type <- ifelse(predsout.space$Tea_TypeNum==1,"Green","Rooibos")
save(predsout.space, file = "scripts/users/hthomas/Tea/Stan_outputs/moisture_preds_year.Rdata")
save(cout, file = "scripts/users/hthomas/Tea/Stan_outputs/moisture_fits_year.Rdata")
# Mass loss vs soil moisture: Stan predictions (ribbon = 95% CrI, line = mean)
# over the raw observations, one colour per tea type.
pdf("scripts/users/hthomas/Output_Images/Tea/moisture_med_year.pdf", width = 3, height = 3)
ggplot()+
geom_ribbon(data=predsout.space,aes(x=moisture+moisture_cent_amount,ymin=(`2.5%`),ymax=(`97.5%`),fill=factor(Tea_Type)),alpha=0.2)+
geom_point(data=season_narm_r,aes(x=moisture_mean+moisture_cent_amount,y=Loss,colour=factor(Tea_Type)),pch =16 ,alpha=0.6)+
geom_line(data=predsout.space,aes(x=moisture+moisture_cent_amount,y=mean, colour = Tea_Type), alpha=0.8, lwd = 1.5)+
theme_classic()+
coord_cartesian(ylim = c(0,1))+  # FIX: spell out ylim ("y" only worked via partial argument matching)
scale_colour_manual(values = c("darkgreen","red3"), name = "Tea Type")+
scale_fill_manual(values = c("darkgreen","red3"), name = "Tea Type")+
scale_linetype_manual(values = c("dashed","solid"), name = "moisture", labels = c("low","high"))+  # NOTE(review): no linetype aesthetic is mapped, so this scale is inert
labs(x = "Soil Moisture", y = "Mass Loss (%)")+  # FIX: x shows moisture; old label "Air Temperature" (with a mojibake degree sign) was left over from the temperature script
theme(legend.position = "none")
dev.off()
library(effects)
season_narm_r$Tea_Type<-as.factor(season_narm_r$Tea_Type)
#Compare to linear model
# FIX: renamed from `lm`, which shadowed stats::lm() for the rest of the session.
lmm_fit <- lmer(Loss ~ moisture_mean * Tea_Type + (moisture_mean|ESA_cell/Site/Plot), data = season_narm_r)
out<-as.data.frame(effect(c("moisture_mean","Tea_Type"),lmm_fit))
# Overlay the lmer fit (grey) on the Stan predictions; the outer () makes the
# assigned plot print immediately.
(moisture_only_year<-ggplot()+
geom_ribbon(data = out, mapping = aes(x = moisture_mean+moisture_cent_amount, ymin = lower, ymax = upper, group = Tea_Type),fill="grey", alpha=0.5) +
geom_line(data = out, mapping = aes(x = moisture_mean+moisture_cent_amount, y = fit, group = Tea_Type),colour = "grey") +
geom_ribbon(data=predsout.space,aes(x=moisture+moisture_cent_amount,ymin=(`2.5%`),ymax=(`97.5%`),fill=factor(Tea_Type)),alpha=0.2)+
geom_point(data=season_narm_r,aes(x=moisture_mean+moisture_cent_amount,y=Loss,colour=factor(Tea_Type)),pch =16 ,alpha=0.6)+
geom_line(data=predsout.space,aes(x=moisture+moisture_cent_amount,y=mean, colour = Tea_Type), alpha=0.8, lwd = 1.5)+
theme_classic()+
coord_cartesian(ylim = c(0,1))+  # FIX: spell out ylim (was partial-matched "y")
scale_colour_manual(values = c("darkgreen","red3"), name = "Tea Type")+
scale_fill_manual(values = c("darkgreen","red3"), name = "Tea Type")+
scale_linetype_manual(values = c("dashed","solid"), name = "moisture", labels = c("low","high"))+  # NOTE(review): inert -- no linetype aesthetic mapped
labs(x = "Soil Moisture", y = "Mass Loss (%)")+  # FIX: x is moisture (old label was mojibake "Air Temperature")
theme(legend.position = "none"))
save(moisture_only_year, file = "scripts/users/hthomas/Tea/moisture_only_year.Rdata")
| /3. New_Moisture_only_FINAL_YEAR.R | no_license | gejielin/Tundra_teabag_experiment | R | false | false | 24,780 | r | ############S#################
## TRAIT CHANGE OVER SPACE ##
############S#################
# NOTE(review): everything from here to the end of the file duplicates the
# top of this script (apparently an accidental re-paste).  If this section is
# actually run, rm(list = ls()) wipes every object built above.  Consider
# deleting the duplicated tail.
rm(list=ls())
#Detach packages####
# Detach every attached package except the base set, so the script starts
# from a clean search path.  (Duplicate of the definition earlier in the file.)
detachAllPackages <- function() {
  # Packages attached in a default R session; these must never be detached.
  basic.packages <- c("package:stats","package:graphics","package:grDevices","package:utils","package:datasets","package:methods","package:base")
  # FIX: anchored regex instead of ifelse(unlist(gregexpr(...)) == 1, ...),
  # which misaligns with search() if any entry matches "package:" twice.
  package.list <- search()[grepl("^package:", search())]
  package.list <- setdiff(package.list, basic.packages)
  for (package in package.list) detach(package, character.only = TRUE)
}
detachAllPackages()
#Question 1 - Differences between teas
####Open packages####
library(raster)
library(rgdal)
library(lme4)
library(nlme)
library(stringr)
library(plyr)
library(dplyr)
library(ggplot2)
require(gridExtra)  # NOTE(review): prefer library() -- require() only warns on a missing package
#library(brms)
library(rstan)
library(StanHeaders)
library(MuMIn)
library(MCMCglmm)
library(postMCMCglmm)
#### CHOOSE TRAIT NAME AND CLIMATE VARIABLE HERE ----
###Read in tea
tea<-read.csv("scripts/users/hthomas/tea/combined_tea.csv", stringsAsFactors = FALSE)
#Remove daily tea - too confusing!
tea<-subset(tea,!grepl("CG_DT_HT",tea$Plot))
#Remove sub zero plots (subset() also drops rows where Loss is NA)
tea<-subset(tea,Loss>0)
# Censor implausible rooibos losses (> 50 %).  FIX: which() guards against NA
# in Tea_Type -- the original `tea[cond,]$Loss <- NA` errors on NA subscripts.
tea$Loss[which(tea$Tea_Type == "Rooibos" & tea$Loss > 0.5)] <- NA
#Make sure only using control plots
ambient<-subset(tea,Treatment=="None")
#Split into seasons to make things easier
summer<-subset(ambient,Season=="Summer")
year<-subset(ambient,Season=="Year")
winter<-subset(ambient,Season=="Winter")
## STAN MODEL - soil temperature ----
#soil temperature#
# Candidate response variables; only var.list[i] (i=1, "Loss") is used below.
var.list <- c("Loss", "Loss_Day", "k", "TBI_k", "TBI_S")
#Calculate mean burial length
#Get column number
i=1
var.num<-which(colnames(year)==var.list[i])
# Keep full-year records with finite response and finite soil moisture.
season_narm<-year %>%
filter(is.finite(year[,var.num]),is.finite(moisture_mean))
#Subset for tea types
#season_narm_r<-subset(season_narm,Tea_Type=="Rooibos") #AB NOTE: Keeping both tea types and including as interaction in model
season_narm_r <- season_narm #just so I don't have to rename everything
# AB: MULTIPLE OBSERVATION
# Flag plot/tea combinations with more than 4 replicate bags.
season_narm_r <- ddply(season_narm_r, c("ESA_cell","Site","Plot","Tea_Type"), transform, NObsPlot = length(Loss))
season_narm_r$MultipleObs <- ifelse(season_narm_r$NObsPlot > 4, 1, 0)
# Multiple Sites
# Flag ESA grid cells containing more than one site.
count.sites <- ddply(season_narm_r, c("ESA_cell"), summarise, n.sub = length(unique(Site)))
season_narm_r$MultipleSites <- ifelse(season_narm_r$ESA_cell %in% count.sites$ESA_cell[count.sites$n.sub > 1], 1, 0)
# Multiple plots per Site (more than 1)
count.plots <- ddply(season_narm_r, c("ESA_cell", "Site"), summarise, n.plots = length(unique(Plot)))
season_narm_r$MultiplePlots <- ifelse(season_narm_r$Site %in% count.plots$Site[count.plots$n.plots > 1], 1, 0)
#Add env.levels (original)
# envlevel encodes the resolution of the moisture data: 0=region, 1=site, 2=plot.
season_narm_r$envlevel<-ifelse(season_narm_r$moisture_mean_var_level=="Region",0,
ifelse(season_narm_r$moisture_mean_var_level=="Site",1,2))
#Add env.levels (alternative)
#Add env.levels (new - based on nestedness)
# Recompute envlevel from how many distinct moisture values exist per cell/site;
# this overwrites the assignment just above.
env.levels<- season_narm_r %>%
select(moisture_mean,ESA_cell,Site,Plot)
season_narm_r$envlevel<-0
env.levels2<-ddply(env.levels, c("ESA_cell"), summarise, n.plots = length(unique(moisture_mean)))
season_narm_r$envlevel <- ifelse(season_narm_r$ESA_cell %in% env.levels2$ESA_cell[env.levels2$n.plots > 1], 1, season_narm_r$envlevel)
env.levels2<-ddply(env.levels, c("ESA_cell","Site"), summarise, n.plots = length(unique(moisture_mean)))
season_narm_r$envlevel <- ifelse(season_narm_r$Site %in% env.levels2$Site[env.levels2$n.plots > 1], 2, season_narm_r$envlevel)
#Add categories
# NOTE(review): MultiplePlots_Region is never created in this script - unless it
# comes from combined_tea.csv, this whole ifelse() yields NA.  Also note the
# envlevel == 2 condition appears twice (Cat 6 and Cat 8), so Cat 8 is
# unreachable; the last branch was probably meant to be envlevel == 0 - confirm.
season_narm_r$Cat<-ifelse(season_narm_r$MultiplePlots_Region == 0 & season_narm_r$MultipleSites == 0, 1, #No nesting - automatically at plot level
ifelse(season_narm_r$MultiplePlots == 1 & season_narm_r$MultipleSites == 0 & season_narm_r$envlevel == 2,2, #Plot in site, plot level env data
ifelse(season_narm_r$MultiplePlots == 1 & season_narm_r$MultipleSites == 0 & season_narm_r$envlevel != 2,3, #Plot in site, site / region level env data
ifelse(season_narm_r$MultipleSites == 1 & season_narm_r$MultiplePlots == 0 & season_narm_r$envlevel == 2,4,
ifelse(season_narm_r$MultipleSites == 1 & season_narm_r$MultiplePlots == 0 & season_narm_r$envlevel == 1,4,
ifelse(season_narm_r$MultipleSites == 1 & season_narm_r$MultiplePlots == 0 & season_narm_r$envlevel == 0,5,
ifelse(season_narm_r$MultipleSites == 1 & season_narm_r$MultiplePlots == 1 & season_narm_r$envlevel == 2,6,
ifelse(season_narm_r$MultipleSites == 1 & season_narm_r$MultiplePlots == 1 & season_narm_r$envlevel == 1,7,
ifelse(season_narm_r$MultipleSites == 1 & season_narm_r$MultiplePlots == 1 & season_narm_r$envlevel == 2,8,"NA")))))))))
#Subset so only using data I want to check model
#season_narm_r<-subset(season_narm_r, Cat == 1 | Cat == 2 | Cat == 3)
# AB: REMOVE MISSING VALUES OF SOIL moisture AND TEMPERATURE FOR THE moisture X TEMPERATURE INTERACTION MODEL
season_narm_r <- season_narm_r[!is.na(season_narm_r$moisture_mean),]
#Add Region numbers
# NOTE(review): group_indices_() is superseded/retired in modern dplyr; this
# script requires an old dplyr version to run - verify before re-running.
season_narm_r<-season_narm_r %>%
mutate(RegionNum = group_indices_(season_narm_r, .dots=c("ESA_cell","Tea_Type")))
#Reorder by site number
season_narm_r<-season_narm_r[order(season_narm_r$RegionNum),]
#Add Site numbers
season_narm_r<-season_narm_r %>%
mutate(SiteNum = group_indices_(season_narm_r, .dots=c("ESA_cell","Site","Tea_Type")))
#Reorder by site number
season_narm_r<-season_narm_r[order(season_narm_r$SiteNum),]
#Add Plot numbers
season_narm_r<-season_narm_r %>%
mutate(PlotNum = group_indices_(season_narm_r, .dots=c("ESA_cell","Site","Plot","Tea_Type"))) #AB NOTE: This now includes tea type as well! So there will be a unique plot number for each tea type within a plot
#Reorder by plot number
season_narm_r<-season_narm_r[order(season_narm_r$PlotNum),]
#Centre values - AB note: Either don't name this the same thing or save the amount you center by first so we can add it to the xhats later
# Centering offsets are stored so predictions can be back-transformed later.
moisture_cent_amount <- attr(scale(season_narm_r$moisture_mean, center = TRUE, scale = FALSE), 'scaled:center')
season_narm_r$moisture_mean<-scale(season_narm_r$moisture_mean, center = TRUE, scale = FALSE)
days_cent_amount <- attr(scale(season_narm_r$Days, center = TRUE, scale = FALSE), 'scaled:center')
season_narm_r$Days<-scale(season_narm_r$Days, center = TRUE, scale = FALSE)
#AB: caluclate mean and sd per site - YOU CAN THINK ABOUT WHETHER YOU WANT THIS TO BE THE OVERALL MEAN OR THE MEAN OF MEANS - MEAN OF MEANS MIGHT BE BETTER IN THIS CASE
season_narm_r_sites<-season_narm_r %>%
group_by(SiteNum) %>%
summarise(moisture_mean_site = mean(moisture_mean),
moisture_sd_site = sd(moisture_mean))
season_narm_r$moisture_mean_site<-season_narm_r_sites$moisture_mean_site[match(season_narm_r$SiteNum, season_narm_r_sites$SiteNum)]
season_narm_r$moisture_sd_site<-season_narm_r_sites$moisture_sd_site[match(season_narm_r$SiteNum, season_narm_r_sites$SiteNum)]
# Zero/missing SDs would break the Stan measurement-error model, so replace
# zeros with the mean positive SD and remaining NAs with a small constant.
season_narm_r$moisture_sd_site[season_narm_r$moisture_sd_site==0 ] <- mean(season_narm_r$moisture_sd_site[season_narm_r$moisture_sd_site>0],na.rm = T)
season_narm_r$moisture_sd_site[is.na(season_narm_r$moisture_sd_site)] <- 0.01
#AB: caluclate mean and sd per region - YOU CAN THINK ABOUT WHETHER YOU WANT THIS TO BE THE OVERALL MEAN OR THE MEAN OF MEANS - MEAN OF MEANS MIGHT BE BETTER IN THIS CASE
season_narm_r_regions<-season_narm_r %>%
group_by(RegionNum) %>%
summarise(moisture_mean_region = mean(moisture_mean),
moisture_sd_region = sd(moisture_mean))
season_narm_r$moisture_mean_region<-season_narm_r_regions$moisture_mean_region[match(season_narm_r$RegionNum, season_narm_r_regions$RegionNum)]
season_narm_r$moisture_sd_region<-season_narm_r_regions$moisture_sd_region[match(season_narm_r$RegionNum, season_narm_r_regions$RegionNum)]
season_narm_r$moisture_sd_region[season_narm_r$moisture_sd_region==0] <- mean(season_narm_r$moisture_sd_region[season_narm_r$moisture_sd_region>0],na.rm = T)
season_narm_r$moisture_sd_region[is.na(season_narm_r$moisture_sd_region)] <- 0.01
#Add mean days per region
# Mean/SD of burial length per site (SD floored at 0.001 for the Stan model).
season_narm_r<-season_narm_r %>%
group_by(SiteNum) %>%
mutate(SiteDays = mean(Days),
SiteDays_sd = sd(Days))
season_narm_r$SiteDays_sd[season_narm_r$SiteDays_sd==0 | is.na(season_narm_r$SiteDays_sd)] <- 0.001
#Add mean days per region
season_narm_r<-season_narm_r %>%
group_by(RegionNum) %>%
mutate(RegionDays = mean(Days),
RegionDays_sd = sd(Days))
season_narm_r$RegionDays_sd[season_narm_r$RegionDays_sd==0 | is.na(season_narm_r$RegionDays_sd)] <- 0.001
mean_burial<-mean(season_narm_r$Days)
min_soil<-min(season_narm_r$moisture_mean,na.rm=TRUE)
max_soil<-max(season_narm_r$moisture_mean,na.rm=TRUE)
min_moisture<-min(season_narm_r$moisture_mean,na.rm=TRUE)
max_moisture<-max(season_narm_r$moisture_mean,na.rm=TRUE)
# Prediction grid: moisture sequence at the (centred) mean burial length.
xhats <- expand.grid(xhat1=seq(min_moisture, max_moisture,by=0.01), xhat3 = mean_burial) #AB: predicting soil moisture at 25% and 75% (assuming you will graph temperature as continuous) but of course you can change this to whatever you want
####Third attempt - adding temperature levels#######
# Data list for the Stan model (name kept from an earlier JAGS version).
# NOTE(review): MultiplePlots_Region (used below) is never defined in this
# script - the multplots_region_* entries will be NULL unless it comes in
# with the raw data; the Stan model does not declare them, so Stan would
# ignore the extra/missing entries only if they are absent - confirm.
jags.dat<-list(
Nobs=nrow(season_narm_r),
NSite=length(unique(season_narm_r$SiteNum)),
NRegion=length(unique(season_narm_r$RegionNum)),
NPlot=length(unique(season_narm_r$PlotNum)),
NSiteDays=length(unique(season_narm_r$SiteDays)),
NRegionDays=length(unique(season_narm_r$RegionDays)),
NTea=length(unique(season_narm_r$Tea_Type)),
Region=season_narm_r$RegionNum,
Site=season_narm_r$SiteNum,
Plot=season_narm_r$PlotNum,
SiteDays=season_narm_r$SiteDays[!duplicated(season_narm_r$SiteNum)],
SiteDays_sd=season_narm_r$SiteDays_sd[!duplicated(season_narm_r$SiteNum)],
RegionDays=season_narm_r$RegionDays[!duplicated(season_narm_r$RegionNum)],
RegionDays_sd=season_narm_r$RegionDays_sd[!duplicated(season_narm_r$RegionNum)],
Site_short=season_narm_r$SiteNum[!duplicated(season_narm_r$PlotNum)],
Plot_short=unique(season_narm_r$PlotNum),
tea_type_site=ifelse(season_narm_r$Tea_Type[!duplicated(season_narm_r$SiteNum)]=="Green", 1, 2),
tea_type_region=ifelse(season_narm_r$Tea_Type[!duplicated(season_narm_r$RegionNum)]=="Green", 1, 2),
multobs_lobs=season_narm_r$MultipleObs,
multobs_lplot=season_narm_r$MultipleObs[!duplicated(season_narm_r$PlotNum)],
multsites_lobs=season_narm_r$MultipleSites,
multsites_lplot=season_narm_r$MultipleSites[!duplicated(season_narm_r$PlotNum)],
multsites_lsite=season_narm_r$MultipleSites[!duplicated(season_narm_r$SiteNum)],
multsites_lregion=season_narm_r$MultipleSites[!duplicated(season_narm_r$RegionNum)],
multplots_lobs=season_narm_r$MultiplePlots,
multplots_lplot=season_narm_r$MultiplePlots[!duplicated(season_narm_r$PlotNum)],
multplots_lsite=season_narm_r$MultiplePlots[!duplicated(season_narm_r$SiteNum)],
multplots_lregion=season_narm_r$MultiplePlots[!duplicated(season_narm_r$RegionNum)],
multplots_region_lobs=season_narm_r$MultiplePlots_Region,
multplots_region_lplot=season_narm_r$MultiplePlots_Region[!duplicated(season_narm_r$PlotNum)],
multplots_region_lsite=season_narm_r$MultiplePlots_Region[!duplicated(season_narm_r$SiteNum)],
multplots_region_lregion=season_narm_r$MultiplePlots_Region[!duplicated(season_narm_r$RegionNum)],
traitobs=season_narm_r$Loss,
#temp_plot=as.numeric(season_narm_r[!duplicated(season_narm_r$PlotNum),]$moisture_mean),
#temp_site=as.numeric(season_narm_r[!duplicated(season_narm_r$SiteNum),]$moisture_mean),
temp_mean_region=as.numeric(season_narm_r[!duplicated(season_narm_r$RegionNum),]$moisture_mean_region),
temp_sd_region=as.numeric(season_narm_r[!duplicated(season_narm_r$RegionNum),]$moisture_sd_region),
temp_mean_site=as.numeric(season_narm_r[!duplicated(season_narm_r$SiteNum),]$moisture_mean_site),
temp_sd_site=as.numeric(season_narm_r[!duplicated(season_narm_r$SiteNum),]$moisture_sd_site),
obs_envlevel=season_narm_r$envlevel,
plot_envlevel=season_narm_r[!duplicated(season_narm_r$PlotNum),]$envlevel,
site_envlevel=season_narm_r[!duplicated(season_narm_r$SiteNum),]$envlevel,
region_envlevel=season_narm_r[!duplicated(season_narm_r$RegionNum),]$envlevel,
meanT=mean(as.numeric(season_narm_r$moisture_mean[!duplicated(season_narm_r$ESA_cell)])),
xhat1=xhats$xhat1,
xhat3=xhats$xhat3,
Nxhat=length(xhats$xhat1)
)
str(jags.dat)
# MODEL - ANNE EDITS####
# Write the Stan model to disk.  The model is a hierarchical measurement-error
# regression of mass loss on soil moisture and burial length, with plot / site /
# region random effects that are switched on or off depending on the nesting
# flags (mult*_*) and the resolution of the environmental data (envlevel).
# NOTE(review): the Stan code below uses the deprecated `<-` assignment in the
# generated quantities block and a harmless double `;;`; it is left untouched
# here because the string is the model source written to file.
write("
data {
int<lower=0> Nobs; //Number of observations
int<lower=0> NRegion; //Number of regions
int<lower=0> NSite; //Number of sites
int<lower=0> NPlot; //Number of plots
int<lower=0> Nxhat; //No. predictor variables
int<lower=0> NTea; //No. of tea types
int<lower=0> NSiteDays; //No. of days
int<lower=0> NRegionDays; //No. of days
int<lower=1,upper=NPlot> Plot[Nobs]; //Plots (all observations)
int<lower=1,upper=NSite> Site[Nobs]; //Plots (all observations)
int<lower=1,upper=NRegion> Region[Nobs]; //Plots (all observations)
int<lower=1,upper=2> tea_type_site[NSite]; //Tea type (1=Green, 2=Rooibos)
int<lower=1,upper=2> tea_type_region[NRegion]; //Tea type (1=Green, 2=Rooibos)
int<lower=0,upper=1> multobs_lobs[Nobs]; //Are sites nested in region (all obs)
int<lower=0,upper=1> multobs_lplot[NPlot]; //Are sites nested in region (all obs)
int<lower=0,upper=1> multsites_lobs[Nobs]; //Are sites nested in region (all obs)
int<lower=0,upper=1> multsites_lplot[NPlot]; //Are sites nested in region (no. plots)
int<lower=0,upper=1> multsites_lsite[NSite]; //Are sites nested in region (no. plots)
int<lower=0,upper=1> multsites_lregion[NRegion]; //Are sites nested in region (no. plots)
int<lower=0,upper=1> multplots_lobs[Nobs]; //Are plots nested in site (all obs)
int<lower=0,upper=1> multplots_lplot[NPlot]; //Are plots nested in site (no plots)
int<lower=0,upper=2> obs_envlevel[Nobs];
int<lower=0,upper=2> site_envlevel[NSite];
int<lower=0,upper=2> region_envlevel[NRegion];
vector[Nobs] traitobs; //Mass Loss
vector[NSite] temp_mean_site; //Temperature (unique regions)
vector[NSite] temp_sd_site; //Temperature SD (unique regions)
vector[NRegion] temp_mean_region; //Temperature (unique regions)
vector[NRegion] temp_sd_region; //Temperature SD (unique regions)
vector[NSite] SiteDays; //
vector[NSite] SiteDays_sd; //
vector[NRegion] RegionDays; //
vector[NRegion] RegionDays_sd; //
vector[Nxhat] xhat1; //Predictor variables
vector[Nxhat] xhat3; //Predictor variables
}
parameters {
real<lower=-3,upper=3> as[NSite]; // Region effect
real<lower=-5,upper=5> ap[NPlot];
real<lower=-5,upper=5> aMeanRegion[NRegion];
real<lower=-2,upper=2> gamma0[NTea]; // intercept of relationship between mass loss and temp change
real<lower=-2,upper=2> gamma1[NTea]; // slope of temperature - loss relationship
real<lower=-2,upper=2> gamma2[NTea]; // slope of moisture - loss relationship
real<lower=-2,upper=2> gamma3[NTea]; // temperature - moisture interaction
real<lower=-2,upper=2> gamma4[NTea]; // temperature - moisture interaction
real<lower=0,upper=5> sigma_overall; //Error around loss- temp relationship
real<lower=0,upper=5> sigma_plot;
real<lower=0,upper=5> sigma_site;
real<lower=0,upper=5> sigma_region;
real<lower=0,upper=5> sigma_resid;
vector[NSite] temp_pred_site;
vector[NSite] days_pred_site;
vector[NRegion] temp_pred_region;
vector[NRegion] days_pred_region;
}
transformed parameters {
vector[Nobs] mu;
vector[Nobs] app;
vector[Nobs] ass;
vector[Nobs] arr;
for (i in 1:Nobs){
if((multobs_lobs[i]==1 && multplots_lobs[i]==1))
app[i] = ap[Plot[i]];
// set plot effects to 0 for plots that don't have multiple obs or are the only plot within a site
else app[i] = 0;
if(multsites_lobs[i] == 1)
ass[i] = as[Site[i]];
else ass[i] = 0;
if(multsites_lobs[i]==1 && obs_envlevel[i] >0)
arr[i] = 0;
else arr[i] = aMeanRegion[Region[i]];
mu[i] = app[i] + ass[i] + arr[i];;
}
//print(\"ap=\",ap[1:10],\"as=\",as[1:10],\"aMeanSite=\",aMeanSite[1:8],\"mu=\",mu[1:10])
}
model {
for (i in 1:Nobs){
traitobs[i] ~ normal(mu[i], sigma_resid);
}
//Set up plot and site random effects
for (i in 1:NPlot){
if(multobs_lplot[i]==1 && multplots_lplot[i]==1)
ap[i] ~ normal(0, sigma_plot);
}
//Bring in environmental data means and SD per region
for (i in 1:NRegion){
temp_pred_region[i] ~ normal(temp_mean_region[i], temp_sd_region[i]); //temp_mean_region and temp_sd are given as data
days_pred_region[i] ~ normal(RegionDays[i], RegionDays_sd[i]); //temp_mean_region and temp_sd are given as data
}
for (i in 1:NSite){
temp_pred_site[i] ~ normal(temp_mean_site[i], temp_sd_site[i]); //temp_mean_region and temp_sd are given as data
days_pred_site[i] ~ normal(SiteDays[i], SiteDays_sd[i]); //temp_mean_region and temp_sd are given as data
}
//Relationship between mass loss at the region level and temperature and moisture, per tea type
for (i in 1:NSite){
if(multsites_lsite[i] == 1 && site_envlevel[i] >0)
as[i] ~ normal(gamma0[tea_type_site[i]] + gamma1[tea_type_site[i]]*temp_pred_site[i] + gamma4[tea_type_site[i]]*days_pred_site[i], sigma_overall);
else as[i] ~ normal(0, sigma_site);
}
for (i in 1:NRegion){
if(multsites_lregion[i] == 1 && region_envlevel[i] >0)
aMeanRegion [i] ~ normal(0, sigma_region);
else aMeanRegion[i] ~ normal(gamma0[tea_type_region[i]] + gamma1[tea_type_region[i]]*temp_pred_region[i] + gamma4[tea_type_region[i]]*days_pred_region[i] , sigma_overall);
}
} //Close model
generated quantities{
matrix[Nxhat,NTea] preds; //matrix of predictions
real<lower=-5,upper=5> teaDiff;
for (i in 1:Nxhat){
for (j in 1:NTea){
preds[i,j] = (gamma0[j] + gamma1[j]*xhat1[i] + gamma4[j]*xhat3[i]); //predictions
}
}
teaDiff <- gamma0[1]-gamma0[2]; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
}
","scripts/users/hthomas/Tea/moisture_loss_3.stan")
stanc('scripts/users/hthomas/Tea/moisture_loss_3.stan') #check model
options(mc.cores = parallel::detectCores())
# Two chains with different starting values for the random effects.
initsA <- list(ap=rep(0.6,jags.dat$NPlot), aMeanRegion=rep(0.6,jags.dat$NRegion),as=rep(0.6,jags.dat$NSite))
initsB <- list(ap=rep(0.3,jags.dat$NPlot), aMeanRegion=rep(0.3,jags.dat$NRegion),as=rep(0.3,jags.dat$NSite))
inits <- list(initsA, initsB)
fit_space <- stan(file = 'scripts/users/hthomas/Tea/moisture_loss_3.stan', data = jags.dat, init=inits, iter = 15000, chains = 2, thin = 1, verbose = TRUE, control=list(adapt_delta=0.99,max_treedepth = 15), algorithm = "NUTS")
s = summary(fit_space)
rownames(s$summary)
# NOTE(review): magic index 202 - depends on the parameter ordering; fragile.
(s$summary)[202]
max(s$summary[,10],na.rm = T) # max Rhat
hist(s$summary[,"Rhat"], breaks=100)
hist(s$summary[,"n_eff"])
print(fit_space)
stan_trace(fit_space, inc_warmup = TRUE, pars = c("gamma0","gamma1"))
# NOTE(review): the model declares aMeanRegion, not aMeanSite - this trace call
# (and the aMeanSite references further down) will fail / return nothing.
stan_trace(fit_space, inc_warmup = TRUE, pars = c("aMeanSite[1]","aMeanSite[2]"))
cout <- as.data.frame(s$summary)
# Split "param[idx]" row names into parameter name and numeric index.
cout$Param <- unlist(lapply(rownames(cout), function (x) {strsplit(x,split="[",fixed=T)}[[1]][1]))
cout$Number <- as.vector(sapply(strsplit(rownames(cout),"[^0-9]+",fixed=FALSE), "[", 2))
cout[cout$Rhat > 1.1 & !is.na(cout$Rhat),]
hist(cout$mean[cout$Param=="aMeanSite"])
cout[cout$Param %in% c("gamma0","gamma1","gamma2","gamma3"),] #these will tell you about the "significance" of your environmental predictors
#gamma1 = temperature, gamma2 = moisture, gamma3 = temp X moisture interaction (for each tea type)
#Compare to raw data
# plot.compare <- ddply(season_narm_r[season_narm_r$MultipleObs==1,], c("Site","Plot","PlotNum","Tea_Type"), summarise,
# rawLoss = mean(Loss))
#
# plot.compare$StanEst <- cout$mean[match(plot.compare$PlotNum, cout$Number[cout$Param=="ap"])]
# ggplot(plot.compare)+
# geom_point(aes(x=rawLoss,y=StanEst,colour=Tea_Type))
region.compare <- ddply(season_narm_r, c("RegionNum","Tea_Type"), summarise,
rawLoss = mean(Loss))
# NOTE(review): "aMeanSite" does not exist in this model (it is aMeanRegion),
# so StanEst will be all NA here - confirm and rename.
region.compare$StanEst <- cout$mean[match(region.compare$RegionNum, cout$Number[cout$Param=="aMeanSite"])]
ggplot(region.compare)+
geom_point(aes(x=rawLoss,y=StanEst,colour=Tea_Type))
# Graph predictions
predsout.space <- cout[cout$Param %in% c("preds"),]
predsout.space$moisture <- rep(jags.dat$xhat1, each=2)
predsout.space$MostureBT <- predsout.space$moisture + moisture_cent_amount
# NOTE(review): jags.dat$xhat2 does not exist (only xhat1/xhat3) and
# temp_x_cent_amount is never defined, so the next two lines are leftovers from
# the temperature x moisture script and will error at run time - remove or fix.
predsout.space$Temp <- rep(jags.dat$xhat2, each=2)
predsout.space$TempBT <- predsout.space$Temp + temp_x_cent_amount
predsout.space$Tea_TypeNum <- rep(c(1,2), times = (length(predsout.space$mean)/2))
predsout.space$Tea_Type <- ifelse(predsout.space$Tea_TypeNum==1,"Green","Rooibos")
save(predsout.space, file = "scripts/users/hthomas/Tea/Stan_outputs/moisture_preds_year.Rdata")
save(cout, file = "scripts/users/hthomas/Tea/Stan_outputs/moisture_fits_year.Rdata")
# Write the Stan prediction figure: ribbon = 95% credible interval,
# line = posterior mean, points = raw bag-level mass loss.
pdf("scripts/users/hthomas/Output_Images/Tea/moisture_med_year.pdf", width = 3, height = 3)
ggplot()+
geom_ribbon(data=predsout.space,aes(x=moisture+moisture_cent_amount,ymin=(`2.5%`),ymax=(`97.5%`),fill=factor(Tea_Type)),alpha=0.2)+
geom_point(data=season_narm_r,aes(x=moisture_mean+moisture_cent_amount,y=Loss,colour=factor(Tea_Type)),pch =16 ,alpha=0.6)+
geom_line(data=predsout.space,aes(x=moisture+moisture_cent_amount,y=mean, colour = Tea_Type), alpha=0.8, lwd = 1.5)+
theme_classic()+
coord_cartesian(y = c(0,1))+
scale_colour_manual(values = c("darkgreen","red3"), name = "Tea Type")+
scale_fill_manual(values = c("darkgreen","red3"), name = "Tea Type")+
scale_linetype_manual(values = c("dashed","solid"), name = "moisture", labels = c("low","high"))+
# Fixed mojibake degree symbol ("ยฐ" -> "\u00b0").  NOTE(review): the x axis is
# soil moisture, so this label (inherited from the temperature script) looks
# wrong - confirm intended label/units.
labs(x = "Air Temperature (\u00b0C)", y = "Mass Loss (%)")+
theme(legend.position = "none")
dev.off()
library(effects)
season_narm_r$Tea_Type<-as.factor(season_narm_r$Tea_Type)
#Compare to linear model
# Fit the equivalent frequentist mixed model as a sanity check on the Stan fit.
# (Renamed from `lm` so the fitted object no longer masks stats::lm; both uses
# of the object are in this section.)
lm.fit <- lmer(Loss ~ moisture_mean * Tea_Type + (moisture_mean|ESA_cell/Site/Plot), data = season_narm_r)
out<-as.data.frame(effect(c("moisture_mean","Tea_Type"),lm.fit))
# Overlay: grey ribbon/line = lmer fit, coloured ribbon/line = Stan predictions,
# points = raw data.  Wrapping the assignment in () also prints the plot.
(moisture_only_year<-ggplot()+
geom_ribbon(data = out, mapping = aes(x = moisture_mean+moisture_cent_amount, ymin = lower, ymax = upper, group = Tea_Type),fill="grey", alpha=0.5) +
geom_line(data = out, mapping = aes(x = moisture_mean+moisture_cent_amount, y = fit, group = Tea_Type),colour = "grey") +
geom_ribbon(data=predsout.space,aes(x=moisture+moisture_cent_amount,ymin=(`2.5%`),ymax=(`97.5%`),fill=factor(Tea_Type)),alpha=0.2)+
geom_point(data=season_narm_r,aes(x=moisture_mean+moisture_cent_amount,y=Loss,colour=factor(Tea_Type)),pch =16 ,alpha=0.6)+
geom_line(data=predsout.space,aes(x=moisture+moisture_cent_amount,y=mean, colour = Tea_Type), alpha=0.8, lwd = 1.5)+
theme_classic()+
coord_cartesian(y = c(0,1))+
scale_colour_manual(values = c("darkgreen","red3"), name = "Tea Type")+
scale_fill_manual(values = c("darkgreen","red3"), name = "Tea Type")+
scale_linetype_manual(values = c("dashed","solid"), name = "moisture", labels = c("low","high"))+
# Fixed mojibake degree symbol; NOTE(review): x axis is soil moisture - confirm
# the intended label.
labs(x = "Air Temperature (\u00b0C)", y = "Mass Loss (%)")+
theme(legend.position = "none"))
save(moisture_only_year, file = "scripts/users/hthomas/Tea/moisture_only_year.Rdata")
|
# pipe.RIPseq.R
# do the A to Z alignment pipeline on a RIPseq sample
# pipe.RIPseq - run the full A-to-Z alignment pipeline on one RIP-seq sample.
#
#   sampleID        sample to process (validated by checkSampleNames)
#   annotationFile  annotation table handed to every pipeline stage
#   optionsFile     options table handed to every pipeline stage
#   ...             extra arguments forwarded to pipe.RIPpeaks only
#
# Called for its side effects (alignments, wiggle tracks, transcriptomes,
# peak tables); prints the elapsed time and returns NULL.
`pipe.RIPseq` <- function( sampleID=NULL, annotationFile="Annotation.txt", optionsFile="Options.txt",
                ...) {

    checkSampleNames( sampleID)
    target <- getAndSetTarget( optionsFile, verbose=T)

    # note the time we start
    startTime <- proc.time()

    # setup...
    ans0 <- pipe.PreAlignTasks( sampleID, annotationFile, optionsFile, dataType="RIP-seq")
    # do the alignment
    ans1 <- pipe.RIPalignment( sampleID, annotationFile, optionsFile)
    # make pileups but not a transcriptome
    ans2 <- pipe.AlignToWig( sampleID, annotationFile, optionsFile, dataType="RIP-seq")
    # make transcriptomes for all species...
    ans3 <- pipe.Transcriptome( sampleID, annotationFile, optionsFile, dataType="RIP-seq")
    # do the peak picking...  (was assigned to 'ans3' a second time, silently
    # discarding the transcriptome result; each stage now keeps its own result)
    ans4 <- pipe.RIPpeaks( sampleID, annotationFile, optionsFile, ...)
    # make any extra wanted display files
    ans5 <- pipe.PostAlignTasks( sampleID, annotationFile, optionsFile, dataType="RIP-seq")

    # done... report total time used
    cat( "\n", verboseOutputDivider, "\n\nRIP-seq PIPELINE DONE: ", sampleID, "\n\n")
    print( elapsedProcTime( startTime, proc.time(), N=ans1$nReadsIn))
    return()
}
| /R/pipe.RIPseq.R | no_license | robertdouglasmorrison/DuffyNGS | R | false | false | 1,193 | r | # pipe.RIPseq.R
# do the A to Z alignment pipeline on a RIPseq sample
# Run every stage of the RIP-seq pipeline for one sample, in order: setup,
# alignment, pileups, transcriptomes, peak picking, post-alignment display
# files.  Prints the total elapsed time and returns NULL.
`pipe.RIPseq` <- function( sampleID=NULL, annotationFile="Annotation.txt", optionsFile="Options.txt",
                ...) {

    checkSampleNames( sampleID)
    target <- getAndSetTarget( optionsFile, verbose=T)

    # remember when we started so the total run time can be reported at the end
    time.begin <- proc.time()

    # pipeline stages, run strictly in sequence; only the alignment result is
    # consulted afterwards (for the read count)
    res.setup <- pipe.PreAlignTasks( sampleID, annotationFile, optionsFile, dataType="RIP-seq")
    res.align <- pipe.RIPalignment( sampleID, annotationFile, optionsFile)
    res.wig   <- pipe.AlignToWig( sampleID, annotationFile, optionsFile, dataType="RIP-seq")
    res.trans <- pipe.Transcriptome( sampleID, annotationFile, optionsFile, dataType="RIP-seq")
    res.peaks <- pipe.RIPpeaks( sampleID, annotationFile, optionsFile, ...)
    res.post  <- pipe.PostAlignTasks( sampleID, annotationFile, optionsFile, dataType="RIP-seq")

    cat( "\n", verboseOutputDivider, "\n\nRIP-seq PIPELINE DONE: ", sampleID, "\n\n")
    print( elapsedProcTime( time.begin, proc.time(), N=res.align$nReadsIn))
    return()
}
|
## File Name: designMatrices.mfr_aux.R
## File Version: 9.255
## .generate.interactions
## Build the parameter table and linear constraints for facet (interaction)
## terms of a faceted item-response design.
##
##   X        - data frame of facet variables (e.g. item, step, rater)
##   facets   - facet specification (currently unused inside this function)
##   formulaA - model formula of main effects / interactions
##   mm       - reference model matrix; parameters appearing in colnames(mm)
##              are treated as freely estimated, all others as constrained
##
## Returns a list with
##   xsi.constraints - matrix mapping each constrained parameter (rows) onto
##                     estimated parameters (columns) via -1/+1 weights
##   xsi.table       - per-parameter lookup: name, facet, facet order,
##                     constraint flag and facet index
.generate.interactions <- function(X, facets, formulaA, mm )
{
    d1 <- d0 <- X
    # keep only facet columns that actually occur in the formula text
    h1 <- sapply( colnames(d1), FUN=function(vv){
                length(grep( vv, paste(formulaA) )) } )
    h1 <- colnames(d1)[ h1==0 ]
    d0 <- d0[, ! ( colnames(d1) %in% h1 ), drop=FALSE]
    # full dummy coding for every facet (no reference category dropped)
    M2 <- stats::model.matrix( formulaA, data=d1,
                contrasts.arg=lapply( d0, contrasts, contrasts=FALSE) )
    h2 <- colnames(M2)
    h1 <- colnames(mm)
    # map every model-matrix column name back onto its facet name(s)
    xsi.table <- data.frame( "parameter"=h2 )
    xsi.split <- sapply( xsi.table$parameter, FUN=function(ll){
            l1 <- as.vector( unlist( strsplit( paste(ll), split=":" ) ) )
            v1 <- l1
            for (ii in 1:length(l1) ){
                for (cc in colnames(X) ){
                    kk <- grep( cc, l1[ii] )
                    if (length(kk)>0){ v1[ii] <- cc }
                }
            }
            v1 <- paste0( v1, collapse=":" )
            return(v1)
        } )
    xsi.table$facet <- unlist(xsi.split)
    xsi.table$facet.order <- sapply( xsi.table$parameter, FUN=function(ll){
            length( as.vector( unlist( strsplit( paste(ll), split=":" ) ) ) ) } )
    # constraint = 1 when the parameter is absent from the reference matrix mm
    xsi.table$constraint <- 1 - 1*(xsi.table$parameter %in% h1)
    xsi.table$facet.index <- match( xsi.table$facet, unique( xsi.table$facet ) )
    b1 <- xsi.table[ xsi.table$constraint==1, "parameter" ]
    c1 <- xsi.table[ xsi.table$constraint==0, "parameter" ]
    xsi.constraints <- matrix( NA, nrow=length(b1), ncol=length(c1) )
    rownames(xsi.constraints) <- paste(b1)
    colnames(xsi.constraints) <- paste(c1)
    #--- loop over constrained terms, expressing each as a (signed) sum of
    #--- estimated parameters within the same facet / sub-facet
    for (bb in b1 ){
        v1 <- 0
        mult <- 1
        xsi.table.bb <- xsi.table[ xsi.table$parameter==bb, ]
        x0 <- x1 <- xsi.table[ xsi.table$facet %in% xsi.table.bb$facet, ]
        # main effects: minus the sum of the free parameters of the same facet
        if ( xsi.table.bb$facet.order==1){
            xsi.constraints[paste(bb),] <- 0
            xsi.constraints[ paste(bb), paste( x1[ x1$constraint==0, "parameter" ] ) ] <- -1
        }
        # special-cased item:step interaction
        if ( xsi.table.bb$facet=="item:step"){
            v1 <- 1
            xsi.constraints[paste(bb),] <- 0
            s2 <- unlist( strsplit( paste(xsi.table.bb$parameter), split=":" ) )
            s20 <- strsplit( paste(x1$parameter), split=":" )
            g1 <- unlist( lapply( s20, FUN=function(ll){
                        ll[1]==s2[1] } ) )
            x1 <- x1[ g1, ]
            mult <- 1
            varsc <- paste( x1[ x1$constraint==0, "parameter" ] )
            if ( length(varsc)==0){
                # fall back to matching on the second component
                g1 <- unlist( lapply( s20, FUN=function(ll){
                            ll[2]==s2[2] } ) )
                x1 <- x0[ g1, ]
                varsc <- paste(x1[ x1$constraint==0, "parameter" ])
                mult <- 1
                if ( length(varsc)==0){
                    # express via already-filled constraint rows, with sign flip
                    varsc <- x1[, "parameter" ]
                    varsc <- setdiff( varsc, paste(bb) )
                    h1 <- colSums( xsi.constraints[ varsc,, drop=FALSE] )
                    varsc <- names(h1)[ h1 !=0 ]
                    mult <- -1
                }
            }
            xsi.constraints[ paste(bb), varsc ] <- -1*mult
        }
        ### two-way interactions (other than item:step)
        if ( xsi.table.bb$facet.order==2 & v1==0 ){
            xsi.constraints[paste(bb),] <- 0
            s2 <- unlist( strsplit( paste(xsi.table.bb$parameter), split=":" ) )
            s20 <- strsplit( paste(x1$parameter), split=":" )
            g1 <- unlist( lapply( s20, FUN=function(ll){
                        ll[2]==s2[2] } ) )
            x1 <- x1[ g1, ]
            varsc <- x1[ x1$constraint==0, "parameter" ]
            if ( length(varsc)==0){
                g1 <- unlist( lapply( s20, FUN=function(ll){
                            ll[1]==s2[1] } ) )
                x1 <- x0[ g1, ]
                varsc <- x1[ x1$constraint==0, "parameter" ]
                if ( length(varsc)==0){
                    varsc <- x1[, "parameter" ]
                    varsc <- setdiff( varsc, paste(bb) )
                    h1 <- colSums( xsi.constraints[ varsc,, drop=FALSE] )
                    varsc <- names(h1)[ h1 !=0 ]
                    mult <- -1
                }
            }
            xsi.constraints[ paste(bb), paste( varsc ) ] <- -1 * mult
        }
        ### three-way interactions: try matching pairs of components in turn
        if ( xsi.table.bb$facet.order==3 & v1==0 ){
            mult <- 1
            xsi.constraints[paste(bb),] <- 0
            s2 <- unlist( strsplit( paste(xsi.table.bb$parameter), split=":" ) )
            s20 <- strsplit( paste(x1$parameter), split=":" )
            g1 <- unlist( lapply( s20, FUN=function(ll){
                        ( ll[2]==s2[2] ) & (ll[3]==s2[3] ) } ) )
            x1 <- x1[ g1, ]
            varsc <- x1[ x1$constraint==0, "parameter" ]
            if ( length(varsc)==0 ){
                g1 <- unlist( lapply( s20, FUN=function(ll){
                            ( ll[1]==s2[1] ) & (ll[3]==s2[3]) } ) )
                x1 <- x0[ g1, ]
                varsc <- x1[ x1$constraint==0, "parameter" ]
                if ( length(varsc)==0 ){
                    varsc <- x1[, "parameter" ]
                    varsc <- setdiff( varsc, paste(bb) )
                    h1 <- colSums( xsi.constraints[ varsc,, drop=FALSE] )
                    varsc <- names(h1)[ h1 !=0 ]
                    # fix: was an unqualified na.omit(); now namespace-qualified
                    # like the otherwise identical call further below
                    varsc <- stats::na.omit( varsc)
                    mult <- -1
                    if ( length(varsc)==0 ){
                        g1 <- unlist( lapply( s20, FUN=function(ll){
                                    ( ll[1]==s2[1] ) & (ll[2]==s2[2]) } ) )
                        x1 <- x0[ g1, ]
                        varsc <- x1[ x1$constraint==0, "parameter" ]
                        mult <- 1
                        if ( length(varsc)==0 ){
                            varsc <- x1[, "parameter" ]
                            varsc <- setdiff( varsc, paste(bb) )
                            h1 <- colSums( xsi.constraints[ varsc,, drop=FALSE] )
                            varsc <- names(h1)[ h1 !=0 ]
                            varsc <- stats::na.omit( varsc)
                            mult <- -1
                        }
                    }
                }
            }
            if ( length(varsc) > 0 ){
                xsi.constraints[ paste(bb), paste( varsc ) ] <- -1 * mult
            } else {
                xsi.constraints[ paste(bb), ] <- NA
            }
        }
    }
    # rows that picked up no constraint at all are marked entirely missing
    xsi.constraints[ rowSums( abs(xsi.constraints) )==0, ] <- NA
    res <- list( "xsi.constraints"=xsi.constraints, "xsi.table"=xsi.table )
    return(res)
}
######################
# rename item names
## Rename item identifiers in the dimnames of a matrix.
##
## matr:    matrix whose row names have the form "<item>-<suffix>" and whose
##          column names are either exactly "<item>" or "<item>:<suffix>"
## itemren: two-column table; column 1 = new item label, column 2 = old label
## cols:    should the column names be renamed as well?
.rename.items <- function( matr, itemren, cols=TRUE )
{
    #--- helper: rewrite a leading "<old><sep>" prefix into "<new><sep>"
    rename_prefix <- function( labels, sep )
    {
        old_pref <- paste0( itemren[, 2], sep )
        nc2 <- nchar(old_pref)
        # try every prefix length that occurs among the old labels
        for (nn in seq( min(nc2), max(nc2) ) ){
            i1 <- match( substring( labels, 1, nn ), old_pref )
            hit <- ! is.na(i1)
            labels[hit] <- paste0( itemren[ i1[hit], 1 ], sep,
                        substring( labels[hit], nn + 1, nchar(labels[hit]) ) )
        }
        return(labels)
    }
    #--- rows: "<item>-<suffix>"
    if ( ! is.null( rownames(matr) ) ){
        rownames(matr) <- rename_prefix( rownames(matr), "-" )
    }
    #--- columns: exact "<item>" matches first, then "<item>:<suffix>" prefixes
    if ( cols && ! is.null( colnames(matr) ) ){
        cM <- colnames(matr)
        ind <- match( cM, itemren[, 2] )
        hit <- ! is.na(ind)
        # bug fix: the old code assigned the full replacement column at the
        # positions returned by na.omit(match(...)) -- those index itemren,
        # not the label vector -- and recycled it; rename only the labels
        # that actually matched instead
        cM[hit] <- paste( itemren[ ind[hit], 1 ] )
        colnames(matr) <- rename_prefix( cM, ":" )
    }
    return(matr)
}
#############################################################
## Rename item identifiers in a vector of parameter labels.
##
## Labels are either exactly "<item>" or have the form "<item>:<suffix>".
## itemren is a two-column table; column 1 = new label, column 2 = old label.
.rename.items2 <- function( vec, itemren )
{
    cM <- vec
    if ( is.null(cM) ){
        # mirrors the original behavior: gsub() on NULL yields character(0)
        return( character(0) )
    }
    #--- exact matches "<item>"
    ind <- match( cM, itemren[, 2] )
    hit <- ! is.na(ind)
    # bug fix: the old code assigned at itemren row positions (not label
    # positions) and recycled the full replacement column; replace only
    # the labels that actually matched
    cM[hit] <- paste( itemren[ ind[hit], 1 ] )
    #--- prefix matches "<item>:<suffix>"
    old_pref <- paste0( itemren[, 2], ":" )
    nc2 <- nchar(old_pref)
    for (nn in seq( min(nc2), max(nc2) ) ){
        i1 <- match( substring( cM, 1, nn ), old_pref )
        i2 <- ! is.na(i1)
        cM[i2] <- paste0( itemren[ i1[i2], 1 ], ":",
                    substring( cM[i2], nn + 1, nchar(cM[i2]) ) )
    }
    return(cM)
}
#############################################################
## Rename facet levels inside composite row / column labels of a matrix.
##
## Row names are "-"-separated and column names ":"-separated tuples of
## facet levels; every component equal to an old level (column 2 of a
## facet.list entry) is replaced by the corresponding new level (column 1).
.rename.items3 <- function( matr, facet.list, I, cols=TRUE )
{
    #--- helper: apply all facet renamings to 'sep'-separated labels
    relabel <- function( labels, sep )
    {
        parts_list <- strsplit( paste(labels), split=sep )
        for (facet in facet.list){
            for (ii in seq_len( nrow(facet) ) ){
                parts_list <- lapply( parts_list, function(pp){
                        pp[ pp==facet[ii, 2] ] <- paste( facet[ii, 1] )
                        pp
                    } )
            }
        }
        unlist( lapply( parts_list, paste, collapse=sep ) )
    }
    rownames(matr) <- relabel( rownames(matr), "-" )
    if ( cols ){
        colnames(matr) <- relabel( colnames(matr), ":" )
    }
    return(matr)
}
#############################################################
## Rename facet levels inside "-"-separated parameter labels.
##
## All labels are arranged into a rectangular components matrix (they are
## assumed to split into the same number of parts); each component equal
## to an old level (column 2 of a facet.list entry) is replaced by the
## corresponding new level (column 1).
.rename.items2a <- function( vec, facet.list, I )
{
    cM <- vec
    if ( is.null(cM) ){
        return(cM)
    }
    parts_list <- strsplit( paste(cM), split="-" )
    NRM <- max( unlist( lapply( parts_list, length ) ) )
    # assumes every label has the same number of components -- TODO confirm
    parts <- matrix( unlist(parts_list), ncol=NRM, byrow=TRUE )
    if ( length(facet.list) > 0 ){
        for (ff in seq_along(facet.list) ){
            itemren <- facet.list[[ff]]
            for (nn in seq_len(NRM) ){
                ind1 <- match( parts[, nn], itemren[, 2] )
                sel <- ! is.na(ind1)
                # bug fix: the old code dropped NA positions with na.omit()
                # and then recycled the compacted replacement vector over
                # the whole column; replace only the entries that matched
                parts[ sel, nn ] <- paste( itemren[ ind1[sel], 1 ] )
            }
        }
        cM <- apply( parts, 1, FUN=function(ll){ paste0( ll, collapse="-") } )
    }
    return(cM)
}
#############################################################
## Rename facet levels inside "-"-separated parameter labels
## (variant of .rename.items2a without the timing instrumentation).
##
## All labels are arranged into a rectangular components matrix (they are
## assumed to split into the same number of parts); each component equal
## to an old level (column 2 of a facet.list entry) is replaced by the
## corresponding new level (column 1).
.rename.items2aa <- function( vec, facet.list, I )
{
    cM <- vec
    if ( is.null(cM) ){
        return(cM)
    }
    parts_list <- strsplit( paste(cM), split="-" )
    NRM <- max( unlist( lapply( parts_list, length ) ) )
    # assumes every label has the same number of components -- TODO confirm
    parts <- matrix( unlist(parts_list), ncol=NRM, byrow=TRUE )
    if ( length(facet.list) > 0 ){
        for (ff in seq_along(facet.list) ){
            itemren <- facet.list[[ff]]
            for (nn in seq_len(NRM) ){
                ind1 <- match( parts[, nn], itemren[, 2] )
                sel <- ! is.na(ind1)
                # bug fix: the old code dropped NA positions with na.omit()
                # and then recycled the compacted replacement vector over
                # the whole column; replace only the entries that matched
                parts[ sel, nn ] <- paste( itemren[ ind1[sel], 1 ] )
            }
        }
        cM <- apply( parts, 1, FUN=function(ll){ paste0( ll, collapse="-") } )
    }
    return(cM)
}
##################################################################
#############################################################
#############################################################
## Rename facet levels inside composite matrix labels, driven by xsi.table.
##
## Row labels are split on "-"; column labels are split on ":" and laid out
## according to the facet order of each unconstrained xsi.table row.
## facet.list entries are two-column tables (new label, old label).
##
## NOTE(review): the relabelled row names (rMM1/rM) are computed but never
## written back via rownames(matr) -- only the column names are updated.
## Looks unintentional; confirm against callers before changing.
.rename.items3a <- function( matr, facet.list, I, cols=TRUE,
xsi.table ){
### check for equalities in rM and cM in all entries!!!!
rM <- rownames(matr)
## split row labels into their "-"-separated components
rMsplit <- strsplit( paste(rM), split="-" )
RR <- length(rMsplit)
## assumes every row label has the same number of components -- TODO confirm
rMM <- matrix( unlist(rMsplit), nrow=RR, byrow=TRUE)
rMM.ncol <- ncol(rMM)
FF <- length(facet.list)
rMM1 <- rMM
NRM <- ncol(rMM1)
if (FF>0){
for (ff in 1:FF){
# ff <- 1 # facet ff
itemren <- facet.list[[ff]]
for (nn in 1:NRM){
ind1 <- match( rMM1[,nn], itemren[,2] )
ind1 <- stats::na.omit(ind1)
h1 <- paste(itemren[ ind1, 1] )
## NOTE(review): 'h1' is the NA-omitted match result; if only some
## entries of this column match, it is recycled over the whole
## column -- presumably all entries are expected to match; confirm.
if ( length(h1) > 0 ){
rMM1[,nn] <- h1
}
}
}
}
## reassemble the (relabelled) row labels; see NOTE above -- unused result
rMM1 <- apply( rMM1, 1, FUN=function(ll){ paste0( ll, collapse="-") } )
rM <- rMM1
if ( cols){
rM <- colnames(matr)
## flatten all ":"-separated column-label components into one vector
rMsplit <- unlist( strsplit( paste(rM), split=":" ) )
## only unconstrained parameters carry column labels here
xsi.table <- xsi.table[xsi.table$constraint==0,]
XT <- nrow(xsi.table)
F0 <- max(xsi.table$facet.order)
## (row, column) positions of each component in an XT x F0 layout:
## parameter xx occupies columns 1..facet.order[xx] of row xx
index <- sapply( 1:XT, FUN=function(xx){
m1 <- cbind( xx, 1:xsi.table[xx,"facet.order"] )
matrix( t(m1), ncol=1, byrow=FALSE)
} )
index <- matrix( unlist(index), ncol=2, byrow=T)
rMMsub <- matrix("", nrow=XT, ncol=F0)
rMMsub[ index ] <- rMsplit
FF <- length(facet.list)
if (FF>0){
for (ff in 1:FF){ # ff <- 1
itemren <- facet.list[[ff]]
I <- nrow(itemren)
for (ii in 1:I){ # ii <- 1
for (kk in 1:F0){# kk <- 3
## replace every component equal to the old level by the new one
rMMsub[ rMMsub[,kk]==itemren[ii,2], kk ] <- paste(itemren[ii,1])
}
}
}
} # end if FF>0
## glue each parameter's components back together with ":"
rM <- unlist( sapply( 1:XT, FUN=function(kk){
paste( rMMsub[ kk, seq(1, xsi.table$facet.order[kk] ) ], collapse=":" ) } )
)
colnames(matr) <- rM
}
return(matr)
}
################################################################################
################################################################################
## Rename facet levels inside ":"-separated parameter labels.
##
## The labels in 'vec' correspond row-wise to the (optionally filtered)
## rows of 'xsi.table'; each label splits into as many components as that
## parameter's facet order. Components equal to an old facet level
## (column 2 of a facet.list entry) are replaced by the new level
## (column 1). sel1: 0 = use all rows of xsi.table, 1 = only rows with
## constraint==0, 2 = only rows with constraint==1.
.rename.items2b <- function( vec, facet.list, I, xsi.table, sel1=0 )
{
    labels <- vec
    if ( is.null(labels) ){
        return(labels)
    }
    pieces <- unlist( strsplit( labels, split=":" ) )
    if (sel1==1){ xsi.table <- xsi.table[ xsi.table$constraint==0, ] }
    if (sel1==2){ xsi.table <- xsi.table[ xsi.table$constraint==1, ] }
    n_par <- nrow(xsi.table)
    max_order <- max( xsi.table$facet.order )
    # (row, column) positions of each label component in a rectangular
    # n_par x max_order layout; parameter xx occupies columns
    # 1..facet.order[xx] of row xx
    idx <- do.call( rbind, lapply( seq_len(n_par), function(xx){
                cbind( xx, seq_len( xsi.table[ xx, "facet.order" ] ) )
            } ) )
    parts <- matrix( "", nrow=n_par, ncol=max_order )
    parts[ idx ] <- pieces
    for (facet in facet.list){
        for (ii in seq_len( nrow(facet) ) ){
            for (kk in seq_len(max_order) ){
                parts[ parts[, kk]==facet[ii, 2], kk ] <- paste( facet[ii, 1] )
            }
        }
    }
    labels <- unlist( lapply( seq_len(n_par), function(kk){
                paste( parts[ kk, seq_len( xsi.table$facet.order[kk] ) ],
                    collapse=":" )
            } ) )
    return(labels)
}
| /R/designMatrices.mfr_aux.R | no_license | alexanderrobitzsch/TAM | R | false | false | 18,353 | r | ## File Name: designMatrices.mfr_aux.R
## File Version: 9.255
## Derive the linear constraints among facet parameters implied by the
## facet formula.
##
## X:        data frame with one column per facet variable
## facets:   facet specification (not used inside this function)
## formulaA: formula describing main effects / interactions of the facets
## mm:       design matrix; its column names are the freely estimated
##           parameters, every other dummy-coded parameter is constrained
##
## Returns a list with
##   xsi.constraints: (constrained x estimated) matrix expressing every
##       constrained parameter as a signed combination of estimated ones
##   xsi.table: one row per parameter of the fully dummy-coded model
##       matrix, with its facet combination, facet order and constraint flag
.generate.interactions <- function(X, facets, formulaA, mm )
{
d1 <- d0 <- X
## facet columns that do not occur in the formula are excluded from the
## contrasts list built below
h1 <- sapply( colnames(d1), FUN=function(vv){
length(grep( vv, paste(formulaA) )) } )
h1 <- colnames(d1)[ h1==0 ]
d0 <- d0[, ! ( colnames(d1) %in% h1 ), drop=FALSE]
## full dummy coding (contrasts=FALSE): one column per factor level,
## i.e. including the levels that are not freely estimated
M2 <- stats::model.matrix( #object=
formulaA, data=d1,
contrasts.arg=lapply( d0, contrasts, contrasts=FALSE) )
h2 <- colnames(M2)
h1 <- colnames(mm)
# extract facets
xsi.table <- data.frame( "parameter"=h2 )
## map every parameter label (e.g. "item2:rater3") to its facet
## combination (e.g. "item:rater") by pattern matching the facet names
xsi.split <- sapply( xsi.table$parameter, FUN=function(ll){
l1 <- as.vector( unlist( strsplit( paste(ll), split=":" ) ) )
v1 <- l1
for (ii in 1:length(l1) ){
for (cc in colnames(X) ){
kk <- grep( cc, l1[ii] )
if (length(kk)>0){ v1[ii] <- cc }
}
}
v1 <- paste0( v1, collapse=":" )
return(v1)
} )
xsi.table$facet <- unlist(xsi.split)
## facet.order: number of ":"-separated components (interaction order)
xsi.table$facet.order <- sapply( xsi.table$parameter, FUN=function(ll){
length( as.vector( unlist( strsplit( paste(ll), split=":" ) ) ) ) } )
## constraint == 1 for parameters NOT estimated in mm
xsi.table$constraint <- 1 - 1*(xsi.table$parameter %in% h1)
xsi.table$facet.index <- match( xsi.table$facet, unique( xsi.table$facet ) )
# xsi.table$orig.index <- seq(1,nrow(xsi.table))
# xsi.table[ order( paste( xsi.table$facet.index+100, xsi.table$parameter ) ), ]
facets.unique <- unique( xsi.table$facet )
b1 <- xsi.table[ xsi.table$constraint==1, "parameter" ]
c1 <- xsi.table[ xsi.table$constraint==0, "parameter" ]
## rows: constrained parameters, columns: estimated parameters
xsi.constraints <- matrix( NA, nrow=length(b1), ncol=length(c1) )
rownames(xsi.constraints) <- paste(b1)
colnames(xsi.constraints) <- paste(c1)
# b1 <- b1[3]
############################
# loop over terms
for (bb in b1 ){
#bb <- b1[9]
v1 <- 0
mult <- 1
xsi.table.bb <- xsi.table[ xsi.table$parameter==bb, ]
## all parameters belonging to the same facet combination as bb
x0 <- x1 <- xsi.table[ xsi.table$facet %in% xsi.table.bb$facet, ]
## main effects: bb equals minus the sum of the estimated parameters of
## its facet (sum-to-zero identification)
if ( xsi.table.bb$facet.order==1){
xsi.constraints[paste(bb),] <- 0
xsi.constraints[ paste(bb), paste( x1[ x1$constraint==0, "parameter" ] ) ] <- -1
}
## special handling for item-by-step parameters
if ( xsi.table.bb$facet=="item:step"){
v1 <- 1
xsi.constraints[paste(bb),] <- 0
s2 <- unlist( strsplit( paste(xsi.table.bb$parameter), split=":" ) )
s20 <- strsplit( paste(x1$parameter), split=":" )
## estimated parameters that share the same item as bb
g1 <- unlist( lapply( s20, FUN=function(ll){
ll[1]==s2[1] } ) )
x1 <- x1[ g1, ]
mult <- 1
# cat("......",bb,"......\n")
# print(x1)
varsc <- paste( x1[ x1$constraint==0, "parameter" ] )
## fall back: match on the step instead of the item
if ( length(varsc)==0){
g1 <- unlist( lapply( s20, FUN=function(ll){
ll[2]==s2[2] } ) )
x1 <- x0[ g1, ]
varsc <- paste(x1[ x1$constraint==0, "parameter" ])
mult <- 1
## last resort: express bb through already resolved constraint rows
if ( length(varsc)==0){
varsc <- x1[, "parameter" ]
varsc <- setdiff( varsc, paste(bb) )
h1 <- colSums( xsi.constraints[ varsc,, drop=FALSE] )
varsc <- names(h1)[ h1 !=0 ]
mult <- -1
}
}
xsi.constraints[ paste(bb), varsc ] <- -1*mult
}
##################
### order 2
## two-way interactions: match on the second facet component, then the
## first, then fall back to previously resolved constraint rows
if ( xsi.table.bb$facet.order==2 & v1==0 ){
xsi.constraints[paste(bb),] <- 0
s2 <- unlist( strsplit( paste(xsi.table.bb$parameter), split=":" ) )
s20 <- strsplit( paste(x1$parameter), split=":" )
g1 <- unlist( lapply( s20, FUN=function(ll){
ll[2]==s2[2] } ) )
x1 <- x1[ g1, ]
varsc <- x1[ x1$constraint==0, "parameter" ]
if ( length(varsc)==0){
g1 <- unlist( lapply( s20, FUN=function(ll){
ll[1]==s2[1] } ) )
x1 <- x0[ g1, ]
varsc <- x1[ x1$constraint==0, "parameter" ]
if ( length(varsc)==0){
varsc <- x1[, "parameter" ]
varsc <- setdiff( varsc, paste(bb) )
h1 <- colSums( xsi.constraints[ varsc,, drop=FALSE] )
varsc <- names(h1)[ h1 !=0 ]
mult <- -1
}
}
xsi.constraints[ paste(bb), paste( varsc ) ] <- -1 * mult
}
#########################
### order 3
## three-way interactions: successively relax which components must agree
## with bb, mirroring the order-2 logic with one extra fallback level
if ( xsi.table.bb$facet.order==3 & v1==0 ){
mult <- 1
xsi.constraints[paste(bb),] <- 0
s2 <- unlist( strsplit( paste(xsi.table.bb$parameter), split=":" ) )
s20 <- strsplit( paste(x1$parameter), split=":" )
g1 <- unlist( lapply( s20, FUN=function(ll){
( ll[2]==s2[2] ) & (ll[3]==s2[3] ) } ) )
x1 <- x1[ g1, ]
varsc <- x1[ x1$constraint==0, "parameter" ]
if ( length(varsc)==0 ){
g1 <- unlist( lapply( s20, FUN=function(ll){
( ll[1]==s2[1] ) & (ll[3]==s2[3]) } ) )
x1 <- x0[ g1, ]
varsc <- x1[ x1$constraint==0, "parameter" ]
if ( length(varsc)==0 ){
varsc <- x1[, "parameter" ]
varsc <- setdiff( varsc, paste(bb) )
h1 <- colSums( xsi.constraints[ varsc,, drop=FALSE] )
varsc <- names(h1)[ h1 !=0 ]
varsc <- na.omit( varsc)
mult <- -1
if ( length(varsc)==0 ){
g1 <- unlist( lapply( s20, FUN=function(ll){
( ll[1]==s2[1] ) & (ll[2]==s2[2]) } ) )
x1 <- x0[ g1, ]
varsc <- x1[ x1$constraint==0, "parameter" ]
mult <- 1
if ( length(varsc)==0 ){
# varsc <- setdiff( varsc, paste(bb) )
varsc <- x1[, "parameter" ]
varsc <- setdiff( varsc, paste(bb) )
h1 <- colSums( xsi.constraints[ varsc,, drop=FALSE] )
varsc <- names(h1)[ h1 !=0 ]
varsc <- stats::na.omit( varsc)
mult <- -1
}
}
}
}
if ( length(varsc) > 0 ){
xsi.constraints[ paste(bb), paste( varsc ) ] <- -1 * mult
} else {
## no resolvable constraint was found: mark the whole row as undefined
xsi.constraints[ paste(bb), ] <- NA
}
}
}
## rows in which nothing was filled in are marked as undefined
xsi.constraints[ rowSums( abs(xsi.constraints) )==0, ] <- NA
res <- list( "xsi.constraints"=xsi.constraints, "xsi.table"=xsi.table )
#print(res) ; stop("here")
return(res)
}
######################
# rename item names
## Rename item identifiers in the dimnames of a matrix.
##
## matr:    matrix whose row names have the form "<item>-<suffix>" and whose
##          column names are either exactly "<item>" or "<item>:<suffix>"
## itemren: two-column table; column 1 = new item label, column 2 = old label
## cols:    should the column names be renamed as well?
.rename.items <- function( matr, itemren, cols=TRUE )
{
    #--- helper: rewrite a leading "<old><sep>" prefix into "<new><sep>"
    rename_prefix <- function( labels, sep )
    {
        old_pref <- paste0( itemren[, 2], sep )
        nc2 <- nchar(old_pref)
        # try every prefix length that occurs among the old labels
        for (nn in seq( min(nc2), max(nc2) ) ){
            i1 <- match( substring( labels, 1, nn ), old_pref )
            hit <- ! is.na(i1)
            labels[hit] <- paste0( itemren[ i1[hit], 1 ], sep,
                        substring( labels[hit], nn + 1, nchar(labels[hit]) ) )
        }
        return(labels)
    }
    #--- rows: "<item>-<suffix>"
    if ( ! is.null( rownames(matr) ) ){
        rownames(matr) <- rename_prefix( rownames(matr), "-" )
    }
    #--- columns: exact "<item>" matches first, then "<item>:<suffix>" prefixes
    if ( cols && ! is.null( colnames(matr) ) ){
        cM <- colnames(matr)
        ind <- match( cM, itemren[, 2] )
        hit <- ! is.na(ind)
        # bug fix: the old code assigned the full replacement column at the
        # positions returned by na.omit(match(...)) -- those index itemren,
        # not the label vector -- and recycled it; rename only the labels
        # that actually matched instead
        cM[hit] <- paste( itemren[ ind[hit], 1 ] )
        colnames(matr) <- rename_prefix( cM, ":" )
    }
    return(matr)
}
#############################################################
## Rename item identifiers in a vector of parameter labels.
##
## Labels are either exactly "<item>" or have the form "<item>:<suffix>".
## itemren is a two-column table; column 1 = new label, column 2 = old label.
.rename.items2 <- function( vec, itemren )
{
    cM <- vec
    if ( is.null(cM) ){
        # mirrors the original behavior: gsub() on NULL yields character(0)
        return( character(0) )
    }
    #--- exact matches "<item>"
    ind <- match( cM, itemren[, 2] )
    hit <- ! is.na(ind)
    # bug fix: the old code assigned at itemren row positions (not label
    # positions) and recycled the full replacement column; replace only
    # the labels that actually matched
    cM[hit] <- paste( itemren[ ind[hit], 1 ] )
    #--- prefix matches "<item>:<suffix>"
    old_pref <- paste0( itemren[, 2], ":" )
    nc2 <- nchar(old_pref)
    for (nn in seq( min(nc2), max(nc2) ) ){
        i1 <- match( substring( cM, 1, nn ), old_pref )
        i2 <- ! is.na(i1)
        cM[i2] <- paste0( itemren[ i1[i2], 1 ], ":",
                    substring( cM[i2], nn + 1, nchar(cM[i2]) ) )
    }
    return(cM)
}
#############################################################
## Rename facet levels inside composite row / column labels of a matrix.
##
## Row names are "-"-separated and column names ":"-separated tuples of
## facet levels; every component equal to an old level (column 2 of a
## facet.list entry) is replaced by the corresponding new level (column 1).
.rename.items3 <- function( matr, facet.list, I, cols=TRUE )
{
    #--- helper: apply all facet renamings to 'sep'-separated labels
    relabel <- function( labels, sep )
    {
        parts_list <- strsplit( paste(labels), split=sep )
        for (facet in facet.list){
            for (ii in seq_len( nrow(facet) ) ){
                parts_list <- lapply( parts_list, function(pp){
                        pp[ pp==facet[ii, 2] ] <- paste( facet[ii, 1] )
                        pp
                    } )
            }
        }
        unlist( lapply( parts_list, paste, collapse=sep ) )
    }
    rownames(matr) <- relabel( rownames(matr), "-" )
    if ( cols ){
        colnames(matr) <- relabel( colnames(matr), ":" )
    }
    return(matr)
}
#############################################################
## Rename facet levels inside "-"-separated parameter labels.
##
## All labels are arranged into a rectangular components matrix (they are
## assumed to split into the same number of parts); each component equal
## to an old level (column 2 of a facet.list entry) is replaced by the
## corresponding new level (column 1).
.rename.items2a <- function( vec, facet.list, I )
{
    cM <- vec
    if ( is.null(cM) ){
        return(cM)
    }
    parts_list <- strsplit( paste(cM), split="-" )
    NRM <- max( unlist( lapply( parts_list, length ) ) )
    # assumes every label has the same number of components -- TODO confirm
    parts <- matrix( unlist(parts_list), ncol=NRM, byrow=TRUE )
    if ( length(facet.list) > 0 ){
        for (ff in seq_along(facet.list) ){
            itemren <- facet.list[[ff]]
            for (nn in seq_len(NRM) ){
                ind1 <- match( parts[, nn], itemren[, 2] )
                sel <- ! is.na(ind1)
                # bug fix: the old code dropped NA positions with na.omit()
                # and then recycled the compacted replacement vector over
                # the whole column; replace only the entries that matched
                parts[ sel, nn ] <- paste( itemren[ ind1[sel], 1 ] )
            }
        }
        cM <- apply( parts, 1, FUN=function(ll){ paste0( ll, collapse="-") } )
    }
    return(cM)
}
#############################################################
## Rename facet levels inside "-"-separated parameter labels
## (variant of .rename.items2a without the timing instrumentation).
##
## All labels are arranged into a rectangular components matrix (they are
## assumed to split into the same number of parts); each component equal
## to an old level (column 2 of a facet.list entry) is replaced by the
## corresponding new level (column 1).
.rename.items2aa <- function( vec, facet.list, I )
{
    cM <- vec
    if ( is.null(cM) ){
        return(cM)
    }
    parts_list <- strsplit( paste(cM), split="-" )
    NRM <- max( unlist( lapply( parts_list, length ) ) )
    # assumes every label has the same number of components -- TODO confirm
    parts <- matrix( unlist(parts_list), ncol=NRM, byrow=TRUE )
    if ( length(facet.list) > 0 ){
        for (ff in seq_along(facet.list) ){
            itemren <- facet.list[[ff]]
            for (nn in seq_len(NRM) ){
                ind1 <- match( parts[, nn], itemren[, 2] )
                sel <- ! is.na(ind1)
                # bug fix: the old code dropped NA positions with na.omit()
                # and then recycled the compacted replacement vector over
                # the whole column; replace only the entries that matched
                parts[ sel, nn ] <- paste( itemren[ ind1[sel], 1 ] )
            }
        }
        cM <- apply( parts, 1, FUN=function(ll){ paste0( ll, collapse="-") } )
    }
    return(cM)
}
##################################################################
#############################################################
#############################################################
## Rename facet levels inside composite matrix labels, driven by xsi.table.
##
## Row labels are split on "-"; column labels are split on ":" and laid out
## according to the facet order of each unconstrained xsi.table row.
## facet.list entries are two-column tables (new label, old label).
##
## NOTE(review): the relabelled row names (rMM1/rM) are computed but never
## written back via rownames(matr) -- only the column names are updated.
## Looks unintentional; confirm against callers before changing.
.rename.items3a <- function( matr, facet.list, I, cols=TRUE,
xsi.table ){
### check for equalities in rM and cM in all entries!!!!
rM <- rownames(matr)
## split row labels into their "-"-separated components
rMsplit <- strsplit( paste(rM), split="-" )
RR <- length(rMsplit)
## assumes every row label has the same number of components -- TODO confirm
rMM <- matrix( unlist(rMsplit), nrow=RR, byrow=TRUE)
rMM.ncol <- ncol(rMM)
FF <- length(facet.list)
rMM1 <- rMM
NRM <- ncol(rMM1)
if (FF>0){
for (ff in 1:FF){
# ff <- 1 # facet ff
itemren <- facet.list[[ff]]
for (nn in 1:NRM){
ind1 <- match( rMM1[,nn], itemren[,2] )
ind1 <- stats::na.omit(ind1)
h1 <- paste(itemren[ ind1, 1] )
## NOTE(review): 'h1' is the NA-omitted match result; if only some
## entries of this column match, it is recycled over the whole
## column -- presumably all entries are expected to match; confirm.
if ( length(h1) > 0 ){
rMM1[,nn] <- h1
}
}
}
}
## reassemble the (relabelled) row labels; see NOTE above -- unused result
rMM1 <- apply( rMM1, 1, FUN=function(ll){ paste0( ll, collapse="-") } )
rM <- rMM1
if ( cols){
rM <- colnames(matr)
## flatten all ":"-separated column-label components into one vector
rMsplit <- unlist( strsplit( paste(rM), split=":" ) )
## only unconstrained parameters carry column labels here
xsi.table <- xsi.table[xsi.table$constraint==0,]
XT <- nrow(xsi.table)
F0 <- max(xsi.table$facet.order)
## (row, column) positions of each component in an XT x F0 layout:
## parameter xx occupies columns 1..facet.order[xx] of row xx
index <- sapply( 1:XT, FUN=function(xx){
m1 <- cbind( xx, 1:xsi.table[xx,"facet.order"] )
matrix( t(m1), ncol=1, byrow=FALSE)
} )
index <- matrix( unlist(index), ncol=2, byrow=T)
rMMsub <- matrix("", nrow=XT, ncol=F0)
rMMsub[ index ] <- rMsplit
FF <- length(facet.list)
if (FF>0){
for (ff in 1:FF){ # ff <- 1
itemren <- facet.list[[ff]]
I <- nrow(itemren)
for (ii in 1:I){ # ii <- 1
for (kk in 1:F0){# kk <- 3
## replace every component equal to the old level by the new one
rMMsub[ rMMsub[,kk]==itemren[ii,2], kk ] <- paste(itemren[ii,1])
}
}
}
} # end if FF>0
## glue each parameter's components back together with ":"
rM <- unlist( sapply( 1:XT, FUN=function(kk){
paste( rMMsub[ kk, seq(1, xsi.table$facet.order[kk] ) ], collapse=":" ) } )
)
colnames(matr) <- rM
}
return(matr)
}
################################################################################
################################################################################
## Rename facet levels inside ":"-separated parameter labels.
##
## The labels in 'vec' correspond row-wise to the (optionally filtered)
## rows of 'xsi.table'; each label splits into as many components as that
## parameter's facet order. Components equal to an old facet level
## (column 2 of a facet.list entry) are replaced by the new level
## (column 1). sel1: 0 = use all rows of xsi.table, 1 = only rows with
## constraint==0, 2 = only rows with constraint==1.
.rename.items2b <- function( vec, facet.list, I, xsi.table, sel1=0 )
{
    labels <- vec
    if ( is.null(labels) ){
        return(labels)
    }
    pieces <- unlist( strsplit( labels, split=":" ) )
    if (sel1==1){ xsi.table <- xsi.table[ xsi.table$constraint==0, ] }
    if (sel1==2){ xsi.table <- xsi.table[ xsi.table$constraint==1, ] }
    n_par <- nrow(xsi.table)
    max_order <- max( xsi.table$facet.order )
    # (row, column) positions of each label component in a rectangular
    # n_par x max_order layout; parameter xx occupies columns
    # 1..facet.order[xx] of row xx
    idx <- do.call( rbind, lapply( seq_len(n_par), function(xx){
                cbind( xx, seq_len( xsi.table[ xx, "facet.order" ] ) )
            } ) )
    parts <- matrix( "", nrow=n_par, ncol=max_order )
    parts[ idx ] <- pieces
    for (facet in facet.list){
        for (ii in seq_len( nrow(facet) ) ){
            for (kk in seq_len(max_order) ){
                parts[ parts[, kk]==facet[ii, 2], kk ] <- paste( facet[ii, 1] )
            }
        }
    }
    labels <- unlist( lapply( seq_len(n_par), function(kk){
                paste( parts[ kk, seq_len( xsi.table$facet.order[kk] ) ],
                    collapse=":" )
            } ) )
    return(labels)
}
|
## These two functions 'makeCacheMatrix' and 'cacheSolve' work together
## to invert a matrix and to store the result of that inversion in cache.
## An example of how to use these functions is given at the end of this file (as a comment).
## These functions make the assumption that the matrix is invertible.
## If the matrix is not square, or is not invertible (singular),
## error messages will be printed to the screen (from the 'solve' function).
## The function 'makeCacheMatrix' creates a special "matrix" object
## that can cache its inverse.
## set and setinverse work on the first run through of the function
## to store the inverse in cache.
## get and getinverse work to retrieve the inverse from cache.
## Wrap a matrix in an object that can cache its inverse.
## The returned list exposes: set(y) to replace the matrix (clearing the
## cache), get() to read it, setinverse(inv) to store an inverse, and
## getinverse() to read the cached inverse (NULL until one is stored).
makeCacheMatrix <- function(x = matrix()) {
    cached_inverse <- NULL
    set <- function(y) {
        # replacing the matrix invalidates any previously cached inverse
        x <<- y
        cached_inverse <<- NULL
    }
    get <- function() {
        x
    }
    setinverse <- function(inverse) {
        cached_inverse <<- inverse
    }
    getinverse <- function() {
        cached_inverse
    }
    list(set = set,
         get = get,
         setinverse = setinverse,
         getinverse = getinverse)
}
## This function first checks to see if the results of the
## matrix inversion are already held in cache.
## If the results are not already within the cache it then
## uses the 'solve' function to invert the matrix
## and stores this result so that it can be called when needed.
## Return the inverse of a special "matrix" built by makeCacheMatrix.
## The inverse is computed with solve() on the first call and cached inside
## the object; later calls return the cached copy (announcing it via
## message("getting cached data")).
cacheSolve <- function(x, ...) {
    cached <- x$getinverse()
    if (is.null(cached)) {
        cached <- solve(x$get(), ...)
        x$setinverse(cached)
    } else {
        message("getting cached data")
    }
    cached
}
## note that the functions need to be run in the following manner;
## Example (not run) to invert a matrix X:
## p<-makeCacheMatrix(X)
## Xinv<-cacheSolve(p)
| /cachematrix.R | no_license | dtoher/NewProg2 | R | false | false | 1,793 | r | ## These two functions 'makeCacheMatrix' and 'cacheSolve' work together
## to invert a matrix and to store the result of that inversion in cache.
## An example of how to use these functions is given at the end of this file (as a comment).
## These functions make the assumption that the matrix is invertible.
## If the matrix is not square, or is not invertible (singular),
## error messages will be printed to the screen (from the 'solve' function).
## The function 'makeCacheMatrix' creates a special "matrix" object
## that can cache its inverse.
## set and setinverse work on the first run through of the function
## to store the inverse in cache.
## get and getinverse work to retrieve the inverse from cache.
## Wrap a matrix in an object that can cache its inverse.
## The returned list exposes: set(y) to replace the matrix (clearing the
## cache), get() to read it, setinverse(inv) to store an inverse, and
## getinverse() to read the cached inverse (NULL until one is stored).
makeCacheMatrix <- function(x = matrix()) {
    cached_inverse <- NULL
    set <- function(y) {
        # replacing the matrix invalidates any previously cached inverse
        x <<- y
        cached_inverse <<- NULL
    }
    get <- function() {
        x
    }
    setinverse <- function(inverse) {
        cached_inverse <<- inverse
    }
    getinverse <- function() {
        cached_inverse
    }
    list(set = set,
         get = get,
         setinverse = setinverse,
         getinverse = getinverse)
}
## This function first checks to see if the results of the
## matrix inversion are already held in cache.
## If the results are not already within the cache it then
## uses the 'solve' function to invert the matrix
## and stores this result so that it can be called when needed.
## Return the inverse of a special "matrix" built by makeCacheMatrix.
## The inverse is computed with solve() on the first call and cached inside
## the object; later calls return the cached copy (announcing it via
## message("getting cached data")).
cacheSolve <- function(x, ...) {
    cached <- x$getinverse()
    if (is.null(cached)) {
        cached <- solve(x$get(), ...)
        x$setinverse(cached)
    } else {
        message("getting cached data")
    }
    cached
}
## note that the functions need to be run in the following manner;
## Example (not run) to invert a matrix X:
## p<-makeCacheMatrix(X)
## Xinv<-cacheSolve(p)
|
## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix() builds a special "matrix" object that caches its inverse.
## Create a matrix wrapper that can cache its inverse.
##
## Returns a list of accessor functions:
##   set(y)            replace the stored matrix and drop the cached inverse
##   get()             return the stored matrix
##   setInverse(inv)   store a (pre-computed) inverse in the cache; called
##                     without an argument it computes solve(x) itself
##   getInverse()      return the cached inverse (NULL if not set yet)
makeCacheMatrix <- function(x = matrix()) {
    inv <- NULL
    set <- function(y) {
        x <<- y
        inv <<- NULL       # a new matrix invalidates the cached inverse
    }
    get <- function() x
    # Bug fix: setInverse() previously took no argument, so the companion
    # cacheSolve() call x$setInverse(m) failed with "unused argument".
    # The default value keeps the old zero-argument usage working.
    setInverse <- function(inverse = solve(x)) inv <<- inverse
    getInverse <- function() inv
    list(set = set,
         get = get,
         setInverse = setInverse,
         getInverse = getInverse)
}
## cacheSolve() returns the inverse of the special "matrix", using the cached value when available.
## Return the inverse of the special "matrix" created by makeCacheMatrix,
## computing it only on the first call and serving the cached copy after
## that (announced via message("getting cached data")).
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    m <- x$getInverse()
    ## Just return the inverse if it is already cached
    if (!is.null(m)) {
        message("getting cached data")
        return(m)
    }
    ## Get the matrix from our object
    data <- x$get()
    ## Bug fix: the old code computed solve(data) %*% data, which is the
    ## identity matrix, not the inverse. solve(data) alone is the inverse.
    m <- solve(data, ...)
    ## Store the inverse in the object's cache
    x$setInverse(m)
    ## Return the matrix
    m
}
| /cachematrix.R | no_license | GOKL69/ProjectAssignment2 | R | false | false | 984 | r | ## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix() builds a special "matrix" object that caches its inverse.
## Create a matrix wrapper that can cache its inverse.
##
## Returns a list of accessor functions:
##   set(y)            replace the stored matrix and drop the cached inverse
##   get()             return the stored matrix
##   setInverse(inv)   store a (pre-computed) inverse in the cache; called
##                     without an argument it computes solve(x) itself
##   getInverse()      return the cached inverse (NULL if not set yet)
makeCacheMatrix <- function(x = matrix()) {
    inv <- NULL
    set <- function(y) {
        x <<- y
        inv <<- NULL       # a new matrix invalidates the cached inverse
    }
    get <- function() x
    # Bug fix: setInverse() previously took no argument, so the companion
    # cacheSolve() call x$setInverse(m) failed with "unused argument".
    # The default value keeps the old zero-argument usage working.
    setInverse <- function(inverse = solve(x)) inv <<- inverse
    getInverse <- function() inv
    list(set = set,
         get = get,
         setInverse = setInverse,
         getInverse = getInverse)
}
## cacheSolve() returns the inverse of the special "matrix", using the cached value when available.
## Return the inverse of the special "matrix" created by makeCacheMatrix,
## computing it only on the first call and serving the cached copy after
## that (announced via message("getting cached data")).
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    m <- x$getInverse()
    ## Just return the inverse if it is already cached
    if (!is.null(m)) {
        message("getting cached data")
        return(m)
    }
    ## Get the matrix from our object
    data <- x$get()
    ## Bug fix: the old code computed solve(data) %*% data, which is the
    ## identity matrix, not the inverse. solve(data) alone is the inverse.
    m <- solve(data, ...)
    ## Store the inverse in the object's cache
    x$setInverse(m)
    ## Return the matrix
    m
}
|
# Regression tests for cdm_copy_to(): copy a `dm` object to every available
# test source. expect_error(..., NA) asserts that NO error is raised.
test_that("cdm_copy_to() works as it is intended?", {
# test copying `dm` w/o keys
map(
dbplyr:::test_srcs$get(),
~ expect_error(
cdm_copy_to(., cdm_test_obj),
NA
)
)
# test copying `dm` with keys but no setting of key constraints
map(
dbplyr:::test_srcs$get(),
~ expect_error(
cdm_copy_to(., dm_for_filter, set_key_constraints = FALSE),
NA
)
)
# test copying `dm` with keys including setting of key constraints
map(
dbplyr:::test_srcs$get(),
~ expect_error(
cdm_copy_to(., dm_for_filter, unique_table_names = TRUE),
NA
)
)
# copying with overwrite = TRUE is unsupported: expect the classed
# "no_overwrite" error with the exact (fixed, not regex) message text
map(
dbplyr:::test_srcs$get(),
~ expect_error(
cdm_copy_to(., dm_for_filter, overwrite = TRUE),
class = cdm_error("no_overwrite"),
error_txt_no_overwrite(),
fixed = TRUE
)
)
})
| /tests/testthat/test-copy-to.R | permissive | TSchiefer/dm | R | false | false | 834 | r | test_that("cdm_copy_to() works as it is intended?", {
# test copying `dm` w/o keys
map(
dbplyr:::test_srcs$get(),
~ expect_error(
cdm_copy_to(., cdm_test_obj),
NA
)
)
# test copying `dm` with keys but no setting of key constraints
map(
dbplyr:::test_srcs$get(),
~ expect_error(
cdm_copy_to(., dm_for_filter, set_key_constraints = FALSE),
NA
)
)
# test copying `dm` with keys including setting of key constraints
map(
dbplyr:::test_srcs$get(),
~ expect_error(
cdm_copy_to(., dm_for_filter, unique_table_names = TRUE),
NA
)
)
map(
dbplyr:::test_srcs$get(),
~ expect_error(
cdm_copy_to(., dm_for_filter, overwrite = TRUE),
class = cdm_error("no_overwrite"),
error_txt_no_overwrite(),
fixed = TRUE
)
)
})
|
## Compute TPR/FPR points of a graphical-lasso ROC curve over a grid of
## regularisation parameters.
##
## data_set:    observations, one column per variable; its sample covariance
##              is passed to glasso()
## theta:       true precision matrix; its non-zero pattern defines the true
##              edge set (via the project helper true_edge())
## lambdamin /
## lambdamax:   range of the regularisation parameter rho
## lambdacount: number of grid points
##
## Returns a data frame with columns TPR, FPR, Lambda (one row per lambda).
ROC_curve <- function(data_set, theta, lambdamin = 0, lambdamax = 1,
                      lambdacount = 200){
    # Bug fix: the old code passed the global variable 's' to glasso(),
    # silently ignoring the 'data_set' argument; use the sample covariance
    # of the supplied data instead.
    s <- stats::cov(data_set)
    lambda_seq <- seq(from = lambdamin, to = lambdamax, length.out = lambdacount)
    points <- data.frame(matrix(ncol = 3, nrow = lambdacount))
    points[, 3] <- lambda_seq
    trueedge <- true_edge(theta)
    for (i in seq_along(lambda_seq)){
        print(c('Calculating points:', i))
        # estimated edge set from the glasso precision matrix at this rho
        glassotry <- glasso(s, rho = lambda_seq[i])
        estimate_edge <- true_edge(glassotry$wi)
        confusion <- confusion_matrix(estimate_edge, trueedge)
        points[i, 1] <- confusion$TP_rate
        points[i, 2] <- confusion$FP_rate
    }
    colnames(points) <- c("TPR", "FPR", "Lambda")
    return(points)
}
| /2.2-Graphical-lasso/roc_glasso.R | no_license | lxw101019/ST443-Group-project | R | false | false | 859 | r | ROC_curve <- function(data_set, theta, lambdamin = 0, lambdamax = 1, lambdacount = 200){
numOfDims <- ncol(data_set)
max_lambda = lambdamax
min_lambda = lambdamin
points <- data.frame(matrix(ncol = 3, nrow = lambdacount))
lambda_seq <- seq(from = min_lambda, to = max_lambda, length.out= lambdacount)
#print(lambda_seq)
points[,3] <- lambda_seq
trueedge <- true_edge(theta)
for (i in seq(length(lambda_seq))){
print(c('Calculating points:',i))
glassotry <- glasso(s, rho = lambda_seq[i])
estimate_edge <- true_edge(glassotry$wi)
confusion <- confusion_matrix(estimate_edge, trueedge)
#print(confusion)
TPR <- confusion$TP_rate
FPR <- confusion$FP_rate
#print(TPR)
#print(FPR)
points[i,1] <- TPR
points[i,2] <- FPR
}
colnames(points) <- c("TPR","FPR","Lambda")
return(points)
}
|
# Exercise 8: Pulitzer Prizes
# Read in the data
pulitzer <- read.csv("data/pulitzer-circulation-data.csv", stringsAsFactors = FALSE)
# Install and load the needed libraries
# Be sure to comment out the install.packages function so it won't install it every time it runs
# Remember: you only need to install a package once
# install.packages("dplyr")
library(dplyr)
# View the data set. Start to understand what the data columns contain
# Be sure to comment out the function so it won't view every time you run the code.
# Use 'colnames' to print out the names of the columns
# Use 'str' to print what types of values are contained in each column
# Did any value type surprise you? Why do you think they are that type?
# Add a column in the data frame called 'Pulitzer.Prize.Change' that contains the difference between
# Pulitzer Prize Winners from 2004 to 2013 and Pulitzer Prize Winners from 1990 to 2003.
# What publication gained the most Pulitzer Prizes from 2004-2014?
# Be sure to use the pipe operator!
# Which publication with at least 5 Pulitzers won from 2004-2014 had the biggest decrease (negative) in Daily circulation numbers?
# This publication should have won a minimum of 5 Pulitzers, as well as having the biggest decrease in circulation
# Your turn! An important part of being a data scientist is asking questions.
# Create a question and use dplyr to figure out the answer.
| /ch10-dplyr/exercise-8/exercise.R | permissive | yprisma/INFO-201 | R | false | false | 1,463 | r | # Exercise 8: Pulitzer Prizes
# Read in the data
pulitzer <- read.csv("data/pulitzer-circulation-data.csv", stringsAsFactors = FALSE)
# Install and load the needed libraries
# Be sure to comment out the install.packages function so it won't install it every time it runs
# Remember: you only need to install a package once
# install.packages("dplyr")
library(dplyr)
# View the data set. Start to understand what the data columns contain
# Be sure to comment out the function so it won't view every time you run the code.
# Use 'colnames' to print out the names of the columns
# Use 'str' to print what types of values are contained in each column
# Did any value type surprise you? Why do you think they are that type?
# Add a column in the data frame called 'Pulitzer.Prize.Change' that contains the difference between
# Pulitzer Prize Winners from 2004 to 2013 and Pulitzer Prize Winners from 1990 to 2003.
# What publication gained the most Pulitzer Prizes from 2004-2014?
# Be sure to use the pipe operator!
# Which publication with at least 5 Pulitzers won from 2004-2014 had the biggest decrease (negative) in Daily circulation numbers?
# This publication should have won a minimum of 5 Pulitzers, as well as having the biggest decrease in circulation
# Your turn! An important part of being a data scientist is asking questions.
# Create a question and use dplyr to figure out the answer.
|
# clean.text(): normalize raw tweet text for text mining.
# Takes a character vector `txtclean` and returns it with Twitter artifacts
# (retweet markers, @mentions, hashtags, links), escape sequences,
# punctuation and digits replaced by spaces.
clean.text = function(txtclean)
{
# remove retweets ("RT @user ..." / "via @user ..." prefixes including the handles)
txtclean = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", " ", txtclean)
# remove @quotes (any remaining @mentions)
txtclean = gsub("@\\w+", " ", txtclean)
# remove hashtags (the whole tag, not just the '#')
txtclean = gsub("#\\S+", " ", txtclean)
# remove links (anything starting with "htt", i.e. http/https URLs)
txtclean = gsub("htt\\S+", " ", txtclean)
# remove \n
# NOTE(review): with fixed = TRUE this pattern is the literal two-character
# sequence backslash + n, NOT an actual newline character; presumably the
# input contains escaped control characters -- confirm, otherwise drop
# fixed = TRUE so the regex matches real newlines.
txtclean = gsub("\\n", " ", txtclean,fixed = TRUE)
# remove \r (literal backslash + r; see the note above)
txtclean = gsub("\\r", " ", txtclean,fixed = TRUE)
# remove \t (literal backslash + t; see the note above)
txtclean = gsub("\\t", " ", txtclean,fixed = TRUE)
# replace everything except letters, digits, whitespace and apostrophes
txtclean = gsub("[^[:alnum:][:space:]']", " ", txtclean)
# remove numbers
txtclean = gsub("[[:digit:]]", " ", txtclean)
# remove blank spaces at the beginning
# NOTE(review): "^ " strips a single leading space only; use "^ +" (or
# trimws) if runs of spaces should be removed.
txtclean = gsub("^ ", "", txtclean)
# remove blank spaces at the end (single trailing space only; see note above)
txtclean = gsub(" $", "", txtclean)
return(txtclean)
} | /R/clean.text.R | no_license | bertuccio/SNA-Elections | R | false | false | 807 | r | clean.text = function(txtclean)
{
# remove retweets
txtclean = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", " ", txtclean)
# remove @quotes
txtclean = gsub("@\\w+", " ", txtclean)
# remove hashtags
txtclean = gsub("#\\S+", " ", txtclean)
# remove links
txtclean = gsub("htt\\S+", " ", txtclean)
# remove \n
txtclean = gsub("\\n", " ", txtclean,fixed = TRUE)
# remove \r
txtclean = gsub("\\r", " ", txtclean,fixed = TRUE)
# remove \t
txtclean = gsub("\\t", " ", txtclean,fixed = TRUE)
txtclean = gsub("[^[:alnum:][:space:]']", " ", txtclean)
# remove numbers
txtclean = gsub("[[:digit:]]", " ", txtclean)
# remove blank spaces at the beginning
txtclean = gsub("^ ", "", txtclean)
# remove blank spaces at the end
txtclean = gsub(" $", "", txtclean)
return(txtclean)
} |
\name{nSurvival}
\alias{nSurvival}
\alias{nEvents}
\alias{zn2hr}
\alias{hrn2z}
\alias{hrz2n}
\alias{print.nSurvival}
\alias{Survival sample size}
\title{3.4: Time-to-event sample size calculation (Lachin-Foulkes)}
\description{\code{nSurvival()} is used to calculate the sample size for a clinical trial with a time-to-event endpoint. The Lachin and Foulkes (1986) method is used.
\code{nEvents} uses the Schoenfeld (1981) approximation to provide sample size and power in terms of the underlying hazard ratio and the number of events observed in a survival analysis.
The functions \code{hrz2n()}, \code{hrn2z()} and \code{zn2hr()} also use the Schoenfeld approximation to provide simple translations between hazard ratios, z-values and the number of events in an analysis; input variables can be given as vectors.
}
\usage{
nSurvival(lambda1=1/12, lambda2=1/24, Ts=24, Tr=12, eta = 0, ratio = 1,
alpha = 0.025, beta = 0.10, sided = 1, approx = FALSE,
type = c("rr", "rd"), entry = c("unif", "expo"), gamma = NA)
\method{print}{nSurvival}(x,...)
nEvents(hr = .6, alpha = .025, beta = .1, ratio = 1, sided = 1,
hr0 = 1, n = 0, tbl = FALSE)
hrn2z(hr, n, ratio=1, hr0=1, hr1=.7)
hrz2n(hr, z, ratio=1, hr0=1)
zn2hr(z, n, ratio=1, hr0=1, hr1=.7)
}
\arguments{
\item{lambda1, lambda2}{event hazard rate for placebo and treatment
group respectively.}
\item{eta}{equal dropout hazard rate for both groups.}
\item{ratio}{randomization ratio between placebo and treatment
group. Default is balanced design, i.e., randomization ratio is 1.}
\item{Ts}{maximum study duration.}
\item{Tr}{accrual (recruitment) duration.}
\item{alpha}{type I error rate. Default is 0.025 since 1-sided testing is default.}
\item{beta}{type II error rate. Default is 0.10 (90\% power). Not needed for \code{nEvents()} if n is provided.}
\item{sided}{one or two-sided test? Default is one-sided test.}
\item{approx}{logical. If \code{TRUE}, the approximation sample size
formula for risk difference is used.}
\item{type}{type of sample size calculation: risk ratio (\dQuote{rr}) or risk
difference (\dQuote{rd}).}
\item{entry}{patient entry type: uniform entry (\code{"unif"}) or exponential
entry (\code{"expo"}).}
\item{gamma}{rate parameter for exponential entry. \code{NA} if entry type is
\code{"unif"} (uniform). A non-zero value is supplied if entry type is
\code{"expo"} (exponential).}
\item{x}{An object of class "nSurvival" returned by \code{nSurvival()}
(optional: used for output; "months" or "years" would be the 'usual' choices).}
\item{hr}{Hazard ratio. For \code{nEvents}, this is the hazard ratio under the alternative hypothesis (>0).}
\item{hr0}{Hazard ratio under the null hypothesis (>0, for \code{nEvents}, \code{!= hr}).}
\item{hr1}{Hazard ratio under the alternate hypothesis for \code{hrn2z, zn2hr} (>0, \code{!= hr0})}
\item{n}{Number of events. For \code{nEvents} may be input to compute power rather than sample size.}
\item{tbl}{Indicator of whether or not scalar (vector) or tabular output is desired for \code{nEvents()}.}
\item{z}{A z-statistic.}
\item{...}{Allows additional arguments for \code{print.nSurvival()}.}
}
\details{
\code{nSurvival()} produces an object of class "nSurvival" with
the number of subjects and events for a set of
pre-specified trial parameters, such as accrual duration and follow-up
period. The calculation is based on Lachin and Foulkes (1986) method and can
be used for risk ratio or risk difference. The function also considers
non-uniform (exponential) entry as well as uniform entry.
If the logical \code{approx} is \code{TRUE}, the variance under alternative
hypothesis is used to replace the variance under null hypothesis.
For non-uniform entry, a non-zero value of \code{gamma} for exponential entry
must be supplied. For positive \code{gamma}, the entry distribution is
convex, whereas for negative \code{gamma}, the entry distribution is concave.
\code{nEvents()} uses the Schoenfeld (1981) method to approximate the number of events \code{n} (given \code{beta}) or the power (given \code{n}). Arguments may be vectors or scalars, but any vectors must have the same length.
The functions \code{hrz2n}, \code{hrn2z} and \code{zn2hr} also all apply the Schoenfeld approximation for proportional hazards modelling.
This approximation is based on the asymptotic normal distribution of the logrank statistic; related statistics are likewise asymptotically normal.
Let \eqn{\lambda} denote the underlying hazard ratio (\code{lambda1/lambda2} in terms of the arguments to \code{nSurvival}). Further, let \eqn{n} denote the number of events observed when computing the statistic of interest and \eqn{r} the ratio of the sample size in an experimental group relative to a control. The estimated natural logarithm of the hazard ratio from a proportional hazards ratio is approximately normal with a mean of \eqn{log{\lambda}} and variance \eqn{(1+r)^2/nr}.
Let \eqn{z} denote a logrank statistic (or a Wald statistic or score statistic from a proportional hazards regression model).
The same asymptotic theory implies \eqn{z} is asymptotically equivalent to a normalized estimate of the hazard ratio \eqn{\lambda} and thus \eqn{z} is asymptotically normal with variance 1 and mean \deqn{\frac{\sqrt{nr} log{\lambda}}{1+r}.}
Plugging the estimated hazard ratio into the above equation allows approximating any one of the following based on the other two: the estimate hazard ratio, the number of events and the z-statistic.
That is,
\deqn{\hat{\lambda}= \exp(z(1+r)/\sqrt{rn})}
\deqn{z=log(\hat{\lambda})\sqrt{nr}/(1+r)}
\deqn{n= (z(1+r)/log(\hat{\lambda}))^2/r.}
\code{hrz2n()} translates an observed interim hazard ratio and interim z-value into the number of events required for the Z-value and hazard ratio to correspond to each other. \code{hrn2z()} translates a hazard ratio and number of events into an approximate corresponding Z-value. \code{zn2hr()} translates a Z-value and number of events into an approximate corresponding hazard ratio. Each of these functions has a default assumption of an underlying hazard ratio of 1 which can be changed using the argument \code{hr0}. \code{hrn2z()} and \code{zn2hr()} also have an argument \code{hr1} which is only used to compute the sign of the computed Z-value in the case of \code{hrn2z()} and whether or not a z-value > 0 corresponds to a hazard ratio > or < the null hazard ratio \code{hr0}.
}
\value{
\code{nSurvival} produces a list with the following component returned:
\item{type}{As input.}
\item{entry}{As input.}
\item{n}{Sample size required (computed).}
\item{nEvents}{Number of events required (computed).}
\item{lambda1}{As input.}
\item{lambda2}{As input.}
\item{eta}{As input.}
\item{ratio}{As input.}
\item{gamma}{As input.}
\item{alpha}{As input.}
\item{beta}{As input.}
\item{sided}{As input.}
\item{Ts}{As input.}
\item{Tr}{As input.}
\code{nEvents} produces a scalar or vector of sample sizes (or powers) when \code{tbl=FALSE} or, when \code{tbl=TRUE} a data frame of values with the following columns:
\item{hr}{As input.}
\item{n}{If \code{n[1]=0} on input (default), output contains the number of events need to obtain the input Type I and II error. If \code{n[1]>0} on input, the input value is returned.}
\item{alpha}{As input.}
\item{beta}{If \code{n[1]=0} on input (default), \code{beta} is output as input. Otherwise, this is the computed Type II error based on the input \code{n}.}
\item{Power}{One minus the output \code{beta}. When \code{tbl=FALSE, n[1]>0}, this is the value or vector of values returned.}
\item{delta}{Standardized effect size represented by input difference between null and alternative hypothesis hazard ratios.}
\item{ratio}{Ratio of experimental to control sample size where 'experimental' is the same as the group with hazard represented in the numerator of the hazard ratio.}
\item{se}{Estimated standard error for the observed log(hazard ratio) with the given sample size.}
\code{hrz2n} outputs a number of events required to approximately have the input hazard ratio, z-statistic and sample size correspond.
\code{hrn2z} outputs an approximate z-statistic corresponding to an input hazard ratio and number of events.
\code{zn2hr} outputs an approximate hazard ratio corresponding to an input z-statistic and number of events.
}
\seealso{\link{gsDesign package overview}, \link{Plots for group sequential designs}, \link{gsDesign}, \link{gsHR}}
\author{Shanhong Guan \email{shanhong.guan@gmail.com}, Keaven Anderson \email{keaven_anderson@merck.com}}
\references{
Lachin JM and Foulkes MA (1986),
Evaluation of Sample Size and Power for Analyses of Survival
with Allowance for Nonuniform Patient Entry, Losses to Follow-Up,
Noncompliance, and Stratification. \emph{Biometrics}, 42, 507-519.
Schoenfeld D (1981),
The Asymptotic Properties of Nonparametric Tests for Comparing Survival Distributions. \emph{Biometrika},
68, 316-319.
}
\examples{
# consider a trial with
# 2 year maximum follow-up
# 6 month uniform enrollment
# Treatment/placebo hazards = 0.1/0.2 per 1 person-year
# drop out hazard 0.1 per 1 person-year
# alpha = 0.025 (1-sided)
# power = 0.9 (default beta=.1)
ss <- nSurvival(lambda1=.2 , lambda2=.1, eta = .1, Ts = 2, Tr = .5,
sided=1, alpha=.025)
# group sequential translation with default bounds
# note that delta1 is log hazard ratio; used later in gsBoundSummary summary
x<-gsDesign(k = 5, test.type = 2, n.fix=ss$nEvents, nFixSurv=ss$n,
delta1=log(ss$lambda2/ss$lambda1))
# boundary plot
plot(x)
# effect size plot
plot(x, plottype = "hr")
# total sample size
x$nSurv
# number of events at analyses
x$n.I
# print the design
x
# overall design summary
cat(summary(x))
# tabular summary of bounds
gsBoundSummary(x,deltaname="HR",Nname="Events",logdelta=TRUE)
# approximate number of events required using Schoenfeld's method
# for 2 different hazard ratios
nEvents(hr=c(.5, .6), tbl=TRUE)
# vector output
nEvents(hr=c(.5, .6))
# approximate power using Schoenfeld's method
# given 2 sample sizes and hr=.6
nEvents(hr=.6, n=c(50, 100), tbl=TRUE)
# vector output
nEvents(hr=.6, n=c(50, 100))
# approximate hazard ratio corresponding to 100 events and z-statistic of 2
zn2hr(n=100,z=2)
# same when hr0 is 1.1
zn2hr(n=100,z=2,hr0=1.1)
# same when hr0 is .9 and hr1 is greater than hr0
zn2hr(n=100,z=2,hr0=.9,hr1=1)
# approximate number of events corresponding to z-statistic of 2 and
# estimated hazard ratio of .5 (or 2)
hrz2n(hr=.5,z=2)
hrz2n(hr=2,z=2)
# approximate z statistic corresponding to 75 events
# and estimated hazard ratio of .6 (or 1/.6)
# assuming 2-to-1 randomization of experimental to control
hrn2z(hr=.6,n=75,ratio=2)
hrn2z(hr=1/.6,n=75,ratio=2)
}
\keyword{design}
| /man/nSurvival.Rd | no_license | AEBilgrau/gsDesign | R | false | false | 10,883 | rd | \name{nSurvival}
\alias{nSurvival}
\alias{nEvents}
\alias{zn2hr}
\alias{hrn2z}
\alias{hrz2n}
\alias{print.nSurvival}
\alias{Survival sample size}
\title{3.4: Time-to-event sample size calculation (Lachin-Foulkes)}
\description{\code{nSurvival()} is used to calculate the sample size for a clinical trial with a time-to-event endpoint. The Lachin and Foulkes (1986) method is used.
\code{nEvents} uses the Schoenfeld (1981) approximation to provide sample size and power in terms of the underlying hazard ratio and the number of events observed in a survival analysis.
The functions \code{hrz2n()}, \code{hrn2z()} and \code{zn2hr()} also use the Schoenfeld approximation to provide simple translations between hazard ratios, z-values and the number of events in an analysis; input variables can be given as vectors.
}
\usage{
nSurvival(lambda1=1/12, lambda2=1/24, Ts=24, Tr=12, eta = 0, ratio = 1,
alpha = 0.025, beta = 0.10, sided = 1, approx = FALSE,
type = c("rr", "rd"), entry = c("unif", "expo"), gamma = NA)
\method{print}{nSurvival}(x,...)
nEvents(hr = .6, alpha = .025, beta = .1, ratio = 1, sided = 1,
hr0 = 1, n = 0, tbl = FALSE)
hrn2z(hr, n, ratio=1, hr0=1, hr1=.7)
hrz2n(hr, z, ratio=1, hr0=1)
zn2hr(z, n, ratio=1, hr0=1, hr1=.7)
}
\arguments{
\item{lambda1, lambda2}{event hazard rate for placebo and treatment
group respectively.}
\item{eta}{equal dropout hazard rate for both groups.}
\item{ratio}{randomization ratio between placebo and treatment
group. Default is balanced design, i.e., randomization ratio is 1.}
\item{Ts}{maximum study duration.}
\item{Tr}{accrual (recruitment) duration.}
\item{alpha}{type I error rate. Default is 0.025 since 1-sided testing is default.}
\item{beta}{type II error rate. Default is 0.10 (90\% power). Not needed for \code{nEvents()} if n is provided.}
\item{sided}{one or two-sided test? Default is one-sided test.}
\item{approx}{logical. If \code{TRUE}, the approximation sample size
formula for risk difference is used.}
\item{type}{type of sample size calculation: risk ratio (\dQuote{rr}) or risk
difference (\dQuote{rd}).}
\item{entry}{patient entry type: uniform entry (\code{"unif"}) or exponential
entry (\code{"expo"}).}
\item{gamma}{rate parameter for exponential entry. \code{NA} if entry type is
\code{"unif"} (uniform). A non-zero value is supplied if entry type is
\code{"expo"} (exponential).}
\item{x}{An object of class "nSurvival" returned by \code{nSurvival()}
(optional: used for output; "months" or "years" would be the 'usual' choices).}
\item{hr}{Hazard ratio. For \code{nEvents}, this is the hazard ratio under the alternative hypothesis (>0).}
\item{hr0}{Hazard ratio under the null hypothesis (>0, for \code{nEvents}, \code{!= hr}).}
\item{hr1}{Hazard ratio under the alternate hypothesis for \code{hrn2z, zn2hr} (>0, \code{!= hr0})}
\item{n}{Number of events. For \code{nEvents} may be input to compute power rather than sample size.}
\item{tbl}{Indicator of whether or not scalar (vector) or tabular output is desired for \code{nEvents()}.}
\item{z}{A z-statistic.}
\item{...}{Allows additional arguments for \code{print.nSurvival()}.}
}
\details{
\code{nSurvival()} produces an object of class "nSurvival" with
the number of subjects and events for a set of
pre-specified trial parameters, such as accrual duration and follow-up
period. The calculation is based on Lachin and Foulkes (1986) method and can
be used for risk ratio or risk difference. The function also considers
non-uniform (exponential) entry as well as uniform entry.
If the logical \code{approx} is \code{TRUE}, the variance under alternative
hypothesis is used to replace the variance under null hypothesis.
For non-uniform entry, a non-zero value of \code{gamma} for exponential entry
must be supplied. For positive \code{gamma}, the entry distribution is
convex, whereas for negative \code{gamma}, the entry distribution is concave.
\code{nEvents()} uses the Schoenfeld (1981) method to approximate the number of events \code{n} (given \code{beta}) or the power (given \code{n}). Arguments may be vectors or scalars, but any vectors must have the same length.
The functions \code{hrz2n}, \code{hrn2z} and \code{zn2hr} also all apply the Schoenfeld approximation for proportional hazards modelling.
This approximation is based on the asymptotic normal distribution of the logrank statistic; related statistics are likewise asymptotically normal.
Let \eqn{\lambda} denote the underlying hazard ratio (\code{lambda1/lambda2} in terms of the arguments to \code{nSurvival}). Further, let \eqn{n} denote the number of events observed when computing the statistic of interest and \eqn{r} the ratio of the sample size in an experimental group relative to a control. The estimated natural logarithm of the hazard ratio from a proportional hazards ratio is approximately normal with a mean of \eqn{log{\lambda}} and variance \eqn{(1+r)^2/nr}.
Let \eqn{z} denote a logrank statistic (or a Wald statistic or score statistic from a proportional hazards regression model).
The same asymptotic theory implies \eqn{z} is asymptotically equivalent to a normalized estimate of the hazard ratio \eqn{\lambda} and thus \eqn{z} is asymptotically normal with variance 1 and mean \deqn{\frac{\sqrt{nr} log{\lambda}}{1+r}.}
Plugging the estimated hazard ratio into the above equation allows approximating any one of the following based on the other two: the estimate hazard ratio, the number of events and the z-statistic.
That is,
\deqn{\hat{\lambda}= \exp(z(1+r)/\sqrt{rn})}
\deqn{z=log(\hat{\lambda})\sqrt{nr}/(1+r)}
\deqn{n= (z(1+r)/log(\hat{\lambda}))^2/r.}
\code{hrz2n()} translates an observed interim hazard ratio and interim z-value into the number of events required for the Z-value and hazard ratio to correspond to each other. \code{hrn2z()} translates a hazard ratio and number of events into an approximate corresponding Z-value. \code{zn2hr()} translates a Z-value and number of events into an approximate corresponding hazard ratio. Each of these functions has a default assumption of an underlying hazard ratio of 1 which can be changed using the argument \code{hr0}. \code{hrn2z()} and \code{zn2hr()} also have an argument \code{hr1} which is only used to compute the sign of the computed Z-value in the case of \code{hrn2z()} and whether or not a z-value > 0 corresponds to a hazard ratio > or < the null hazard ratio \code{hr0}.
}
\value{
\code{nSurvival} produces a list with the following component returned:
\item{type}{As input.}
\item{entry}{As input.}
\item{n}{Sample size required (computed).}
\item{nEvents}{Number of events required (computed).}
\item{lambda1}{As input.}
\item{lambda2}{As input.}
\item{eta}{As input.}
\item{ratio}{As input.}
\item{gamma}{As input.}
\item{alpha}{As input.}
\item{beta}{As input.}
\item{sided}{As input.}
\item{Ts}{As input.}
\item{Tr}{As input.}
\code{nEvents} produces a scalar or vector of sample sizes (or powers) when \code{tbl=FALSE} or, when \code{tbl=TRUE} a data frame of values with the following columns:
\item{hr}{As input.}
\item{n}{If \code{n[1]=0} on input (default), output contains the number of events need to obtain the input Type I and II error. If \code{n[1]>0} on input, the input value is returned.}
\item{alpha}{As input.}
\item{beta}{If \code{n[1]=0} on input (default), \code{beta} is output as input. Otherwise, this is the computed Type II error based on the input \code{n}.}
\item{Power}{One minus the output \code{beta}. When \code{tbl=FALSE, n[1]>0}, this is the value or vector of values returned.}
\item{delta}{Standardized effect size represented by input difference between null and alternative hypothesis hazard ratios.}
\item{ratio}{Ratio of experimental to control sample size where 'experimental' is the same as the group with hazard represented in the numerator of the hazard ratio.}
\item{se}{Estimated standard error for the observed log(hazard ratio) with the given sample size.}
\code{hrz2n} outputs a number of events required to approximately have the input hazard ratio, z-statistic and sample size correspond.
\code{hrn2z} outputs an approximate z-statistic corresponding to an input hazard ratio and number of events.
\code{zn2hr} outputs an approximate hazard ratio corresponding to an input z-statistic and number of events.
}
\seealso{\link{gsDesign package overview}, \link{Plots for group sequential designs}, \link{gsDesign}, \link{gsHR}}
\author{Shanhong Guan \email{shanhong.guan@gmail.com}, Keaven Anderson \email{keaven_anderson@merck.com}}
\references{
Lachin JM and Foulkes MA (1986),
Evaluation of Sample Size and Power for Analyses of Survival
with Allowance for Nonuniform Patient Entry, Losses to Follow-Up,
Noncompliance, and Stratification. \emph{Biometrics}, 42, 507-519.
Schoenfeld D (1981),
The Asymptotic Properties of Nonparametric Tests for Comparing Survival Distributions. \emph{Biometrika},
68, 316-319.
}
\examples{
# consider a trial with
# 2 year maximum follow-up
# 6 month uniform enrollment
# Treatment/placebo hazards = 0.1/0.2 per 1 person-year
# drop out hazard 0.1 per 1 person-year
# alpha = 0.025 (1-sided)
# power = 0.9 (default beta=.1)
ss <- nSurvival(lambda1=.2 , lambda2=.1, eta = .1, Ts = 2, Tr = .5,
sided=1, alpha=.025)
# group sequential translation with default bounds
# note that delta1 is log hazard ratio; used later in gsBoundSummary summary
x<-gsDesign(k = 5, test.type = 2, n.fix=ss$nEvents, nFixSurv=ss$n,
delta1=log(ss$lambda2/ss$lambda1))
# boundary plot
plot(x)
# effect size plot
plot(x, plottype = "hr")
# total sample size
x$nSurv
# number of events at analyses
x$n.I
# print the design
x
# overall design summary
cat(summary(x))
# tabular summary of bounds
gsBoundSummary(x,deltaname="HR",Nname="Events",logdelta=TRUE)
# approximate number of events required using Schoenfeld's method
# for 2 different hazard ratios
nEvents(hr=c(.5, .6), tbl=TRUE)
# vector output
nEvents(hr=c(.5, .6))
# approximate power using Schoenfeld's method
# given 2 sample sizes and hr=.6
nEvents(hr=.6, n=c(50, 100), tbl=TRUE)
# vector output
nEvents(hr=.6, n=c(50, 100))
# approximate hazard ratio corresponding to 100 events and z-statistic of 2
zn2hr(n=100,z=2)
# same when hr0 is 1.1
zn2hr(n=100,z=2,hr0=1.1)
# same when hr0 is .9 and hr1 is greater than hr0
zn2hr(n=100,z=2,hr0=.9,hr1=1)
# approximate number of events corresponding to z-statistic of 2 and
# estimated hazard ratio of .5 (or 2)
hrz2n(hr=.5,z=2)
hrz2n(hr=2,z=2)
# approximate z statistic corresponding to 75 events
# and estimated hazard ratio of .6 (or 1/.6)
# assuming 2-to-1 randomization of experimental to control
hrn2z(hr=.6,n=75,ratio=2)
hrn2z(hr=1/.6,n=75,ratio=2)
}
\keyword{design}
|
# Plot total murders against state population from the dslabs `murders`
# data set: each state is drawn as its abbreviation, colored by region.
library(tidyverse)
library(dslabs)

# Load the `murders` data set shipped with dslabs
data(murders)

# Bare top-level expression, so the plot is printed when run interactively
murders %>%
  ggplot(aes(x = population, y = total, label = abb, color = region)) +
  geom_label()
| /myfirstscript.r | no_license | My-source395/Yobro | R | false | false | 140 | r | library(tidyverse)
library(dslabs)
# Load the `murders` data set shipped with dslabs
data(murders)
# Plot total murders against state population; each state is drawn as its
# abbreviation and colored by region. Bare top-level expression, so the
# plot is printed when run interactively.
murders %>%
ggplot(aes(population, total, label = abb, color = region))+
geom_label()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_sample_hist.R
\name{plot_sample_hist}
\alias{plot_sample_hist}
\title{Create a sample histogram from a population}
\usage{
plot_sample_hist(pop, samples, var_name, sample_size)
}
\arguments{
\item{pop}{the virtual population as a tibble}
\item{samples}{the samples as a tibble}
\item{var_name}{the name of the column for the variable that is being generated}
\item{sample_size}{a vector of the sample sizes}
}
\value{
a list of the sample histogram plots
}
\description{
This function creates a grid of sample distributions from a population for different sample sizes.
The plotted sample is the first replication from the samples (rep == 1). The other replicates
are used for plotting the sampling distribution (applying the \code{plot_sampling_hist} function).
}
\examples{
pop <- generate_virtual_pop(100, height, rnorm, 0, 1)
samples <- draw_samples(pop, 3, c(1, 10))
plot_sample_hist(pop, samples, height, c(1, 10))
}
| /man/plot_sample_hist.Rd | permissive | tguo9/samplingsimulatorr | R | false | true | 1,009 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_sample_hist.R
\name{plot_sample_hist}
\alias{plot_sample_hist}
\title{Create a sample histogram from a population}
\usage{
plot_sample_hist(pop, samples, var_name, sample_size)
}
\arguments{
\item{pop}{the virtual population as a tibble}
\item{samples}{the samples as a tibble}
\item{var_name}{the name of the column for the variable that is being generated}
\item{sample_size}{a vector of the sample sizes}
}
\value{
a list of the sample histogram plots
}
\description{
This function creates a grid of sample distributions from a population for different sample sizes.
The plotted sample is the first replication from the samples (rep == 1). The other replicates
are used for plotting the sampling distribution (applying the \code{plot_sampling_hist} function).
}
\examples{
pop <- generate_virtual_pop(100, height, rnorm, 0, 1)
samples <- draw_samples(pop, 3, c(1, 10))
plot_sample_hist(pop, samples, height, c(1, 10))
}
|
# Homework: matrix exercises (bare expressions auto-print when run interactively).
#1
# 3x3 matrix, filled column-wise (matrix() default byrow = FALSE)
A=matrix(c(1,5,-2,1,2,-1,3,6,-3),ncol=3)
# matrix product A . A . A (i.e. A cubed; %*% is matrix multiplication, not elementwise)
A%*%A%*%A
# append a new column equal to the sum of columns 2 and 3 ...
A=cbind(A,A[,2]+A[,3])
# ... then drop the original third column
A=A[,-3]
#2
# 15x3 matrix with constant columns (10, -10, 10)
a=cbind(rep(10,15),rep(-10,15),rep(10,15))
# 3x3 cross-product t(a) %*% a
t(a)%*%a
#3
# 6x6 0/1 matrix with ones on the sub- and superdiagonal (tridiagonal, zero diagonal)
aa=matrix(c(0,1,0,0,0,0,1,0,1,0,0,0,0,1,0,1,0,0,0,0,1,0,1,0,0,0,0,1,0,1,0,0,0,0,1,0),ncol=6)
aa
# row(aa) / col(aa) give each entry's row and column index
row(aa)
col(aa)
# rebuild aa as the matrix of index differences: entry (i, j) is j - i
aa=matrix(rep(1,36),ncol=6)
aa=col(aa)-row(aa)
aa
#4
# 5x5 matrix whose (i, j) entry is i + j - 2
b=matrix(c(0,1,2,3,4,1,2,3,4,5,2,3,4,5,6,3,4,5,6,7,4,5,6,7,8),ncol=5)
# open the help page for outer() (hint: b could be built as outer(0:4, 0:4, "+"))
?outer
| /Week_One_Probility/R Code HomeWork Exercise Two.R | no_license | keruicao/Boot-Camp-R-Code | R | false | false | 388 | r | #1
# 3x3 matrix, filled column-wise (matrix() default byrow = FALSE)
A=matrix(c(1,5,-2,1,2,-1,3,6,-3),ncol=3)
# matrix product A . A . A (i.e. A cubed; %*% is matrix multiplication, not elementwise)
A%*%A%*%A
# append a new column equal to the sum of columns 2 and 3 ...
A=cbind(A,A[,2]+A[,3])
# ... then drop the original third column
A=A[,-3]
#2
# 15x3 matrix with constant columns (10, -10, 10)
a=cbind(rep(10,15),rep(-10,15),rep(10,15))
# 3x3 cross-product t(a) %*% a
t(a)%*%a
#3
# 6x6 0/1 matrix with ones on the sub- and superdiagonal (tridiagonal, zero diagonal)
aa=matrix(c(0,1,0,0,0,0,1,0,1,0,0,0,0,1,0,1,0,0,0,0,1,0,1,0,0,0,0,1,0,1,0,0,0,0,1,0),ncol=6)
aa
# row(aa) / col(aa) give each entry's row and column index
row(aa)
col(aa)
# rebuild aa as the matrix of index differences: entry (i, j) is j - i
aa=matrix(rep(1,36),ncol=6)
aa=col(aa)-row(aa)
aa
#4
# 5x5 matrix whose (i, j) entry is i + j - 2
b=matrix(c(0,1,2,3,4,1,2,3,4,5,2,3,4,5,6,3,4,5,6,7,4,5,6,7,8),ncol=5)
# open the help page for outer() (hint: b could be built as outer(0:4, 0:4, "+"))
?outer
|
# Author: Martin Papenberg
# Year: 2019
# Reproduces the run time computation reported in the manuscript
# "Using anticlustering to partition data sets into equivalent parts"
# (Papenberg & Klau, 2019): run time of the anticlustering methods by N.
library(anticlust)

K <- 2
N <- c(seq(10, 30, 2), 4:10 * 10)
M <- 2

# One row per sample size; one column per method's measured run time
runtimes <- cbind(N, matrix(ncol = 3, nrow = length(N)))
colnames(runtimes) <- c("N", "Exact ILP", "ILP/Preclustering", "Exchange method")

for (idx in seq_along(N)) {
  n_current <- N[idx]
  # Fresh random data set for this sample size
  features <- matrix(rnorm(n_current * M), ncol = M)
  # The exact ILP is only run for small N (it becomes infeasible quickly)
  if (n_current < 40) {
    start <- Sys.time()
    anticlustering(features, K = K, method = "ilp")
    runtimes[idx, "Exact ILP"] <- difftime(Sys.time(), start, units = "s")
  }
  # ILP with preclustering scales somewhat further
  if (n_current < 80) {
    start <- Sys.time()
    anticlustering(features, K = K, method = "ilp", preclustering = TRUE)
    runtimes[idx, "ILP/Preclustering"] <- difftime(Sys.time(), start, units = "s")
  }
  # The heuristic exchange method (default) is timed for every N
  start <- Sys.time()
  anticlustering(features, K = K)
  runtimes[idx, "Exchange method"] <- difftime(Sys.time(), start, units = "s")
}

round(runtimes, 2)

# Write table to file
write.table(runtimes, "runtimes.csv", quote = FALSE, row.names = FALSE)
| /1_Running_Time/runtime.R | permissive | m-Py/anticlust-code | R | false | false | 1,244 | r |
# Author: Martin Papenberg
# Year: 2019
# Reproduces the run time computation reported in the manuscript
# "Using anticlustering to partition data sets into equivalent parts"
# (Papenberg & Klau, 2019): run time of the anticlustering methods by N.
library(anticlust)

K <- 2
N <- c(seq(10, 30, 2), 4:10 * 10)
M <- 2

# One row per sample size; one column per method's measured run time
runtimes <- cbind(N, matrix(ncol = 3, nrow = length(N)))
colnames(runtimes) <- c("N", "Exact ILP", "ILP/Preclustering", "Exchange method")

for (idx in seq_along(N)) {
  n_current <- N[idx]
  # Fresh random data set for this sample size
  features <- matrix(rnorm(n_current * M), ncol = M)
  # The exact ILP is only run for small N (it becomes infeasible quickly)
  if (n_current < 40) {
    start <- Sys.time()
    anticlustering(features, K = K, method = "ilp")
    runtimes[idx, "Exact ILP"] <- difftime(Sys.time(), start, units = "s")
  }
  # ILP with preclustering scales somewhat further
  if (n_current < 80) {
    start <- Sys.time()
    anticlustering(features, K = K, method = "ilp", preclustering = TRUE)
    runtimes[idx, "ILP/Preclustering"] <- difftime(Sys.time(), start, units = "s")
  }
  # The heuristic exchange method (default) is timed for every N
  start <- Sys.time()
  anticlustering(features, K = K)
  runtimes[idx, "Exchange method"] <- difftime(Sys.time(), start, units = "s")
}

round(runtimes, 2)

# Write table to file
write.table(runtimes, "runtimes.csv", quote = FALSE, row.names = FALSE)
|
##########################################################################################################################################
# Band plot of fraction means (gplots::bandplot).
# bandplot() splits a time series into sections, computes the mean of each
# section, and overlays smoothed mean +/- 1 and +/- 2 standard-deviation
# bands on a scatter plot. Key arguments (see ?bandplot):
#   x, y      : time index and series values
#   method    : "fraction" (split into fractions of the data; default,
#               width = 1/5), "nobs", "width" or "range"
#   width     : section width for the chosen method (default 1/5)
#   n = 50    : number of points at which the bands are evaluated
#   sd, sd.lwd, sd.col : which bands to draw and their line width/colour
# Demo: 1000 normal deviates whose standard deviation grows linearly with
# the time index, so the bands should widen from left to right.
##########################################################################################################################################

.libPaths("C://Users//yjang//R")  # user library location (Windows path)
install.packages("gplots")        # NOTE(review): re-installs on every run; consider requireNamespace() instead
library(gplots)

x <- 1:1000
# Standard deviation increases from ~1 to 2 across the series
y <- rnorm(1000, mean = 1, sd = 1 + x/1000)

bandplot(x, y, main = "Band Plot by Fraction")
legend("bottomleft", c("m +/- 2d", "m +/- d", "Mean"),
       col = c("magenta", "blue", "red"), lwd = c(2, 2, 2), cex = 0.6)

graphics.off()  # close all open graphics devices
########################################################################################################################################## | /R_Time_Series_Data_Analysis/Ch03_Graphics_of_Time_Series_data/07_Fraction_Mean.R | no_license | Fintecuriosity11/Time_Series_Analysis | R | false | false | 3,119 | r | ##########################################################################################################################################
##########################################################################################################################################
# Band plot of fraction means (gplots::bandplot).
# bandplot() splits a time series into sections, computes the mean of each
# section, and overlays smoothed mean +/- 1 and +/- 2 standard-deviation
# bands on a scatter plot. Key arguments (see ?bandplot):
#   x, y      : time index and series values
#   method    : "fraction" (split into fractions of the data; default,
#               width = 1/5), "nobs", "width" or "range"
#   width     : section width for the chosen method (default 1/5)
#   n = 50    : number of points at which the bands are evaluated
#   sd, sd.lwd, sd.col : which bands to draw and their line width/colour
# Demo: 1000 normal deviates whose standard deviation grows linearly with
# the time index, so the bands should widen from left to right.
##########################################################################################################################################

.libPaths("C://Users//yjang//R")  # user library location (Windows path)
install.packages("gplots")        # NOTE(review): re-installs on every run; consider requireNamespace() instead
library(gplots)

x <- 1:1000
# Standard deviation increases from ~1 to 2 across the series
y <- rnorm(1000, mean = 1, sd = 1 + x/1000)

bandplot(x, y, main = "Band Plot by Fraction")
legend("bottomleft", c("m +/- 2d", "m +/- d", "Mean"),
       col = c("magenta", "blue", "red"), lwd = c(2, 2, 2), cex = 0.6)

graphics.off()  # close all open graphics devices
library(Sleuth2)
### Name: ex0918
### Title: Speed of Evolution
### Aliases: ex0918
### Keywords: datasets
### ** Examples
# Show the structure (variables and their types) of the ex0918
# "Speed of Evolution" data set bundled with the Sleuth2 package.
str(ex0918)
| /data/genthat_extracted_code/Sleuth2/examples/ex0918.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 142 | r | library(Sleuth2)
### Name: ex0918
### Title: Speed of Evolution
### Aliases: ex0918
### Keywords: datasets
### ** Examples
# Show the structure (variables and their types) of the ex0918
# "Speed of Evolution" data set bundled with the Sleuth2 package.
str(ex0918)
|
# Batch SARIMAX forecasting for nine South American countries.
# For each country the script (1) runs bsarimax_as_function() -- sourced
# from ./R/utils.R -- on the country's Excel data, (2) merges the resulting
# cross-validation RMSEs with benchmark RMSEs read from the country's
# "m_analysis" workbook, and (3) saves everything to a per-country .rds.
source('./R/utils.R')
library(xts)
library(forecast)
library(tidyverse)
library(readxl)
library(timetk)
library(tictoc)
library(lubridate)
# Global forecast settings shared by all countries
final_forecast_horizon <- c(2019, 12)
h_max = 8 # last rgdp data is 2017 Q4
number_of_cv = 8
train_span = 16
country_names <- c("Argentina", "Bolivia", "Brasil", "Chile", "Colombia",
"Ecuador", "Peru", "Paraguay", "Uruguay")
for (name in country_names) {
country_name <- name
# Per-country input and output paths
data_path <- paste0("./data/excel_data/", country_name,".xlsx")
m_analysis_path <- paste0("data/", country_name,"_m_analysis_rgdp.xlsx")
rds_file_name = paste0("./data/sarimax_objects_", country_name,".rds")
# Run the SARIMAX estimation/forecast routine, timed with tictoc
tic()
myres <- bsarimax_as_function(data_path = data_path, number_of_cv = number_of_cv,
train_span = train_span, h_max = h_max,
final_forecast_horizon = final_forecast_horizon,
outer_cv_round = 0, s4xreg = FALSE)
toc()
# Unpack forecasts: levels vs year-on-year growth, each under two weighting
# schemes (names suggest level-accuracy vs yoy-accuracy weights -- confirm
# against bsarimax_as_function in utils.R)
var_lag_order_season <- myres$var_lag_order_season
level_fc_using_accu_level_weights <- myres$expo_final_rgdp_and_w_fc
level_fc_using_accu_yoy_weights <- myres$expo_final_rgdp_and_yoyw_fc
yoy_fc_using_accu_level_weights <- myres$yoy_growth_expo_final_rgdp_and_w_fc
yoy_fc_using_accu_yoy_weights <- myres$yoy_growth_expo_final_rgdp_and_yoyw_fc
# Cross-validation RMSEs: models conditional on each exogenous regressor
# ("_conditional_on_x") vs the unconditional rgdp model
cv_rmse_yoy_rgdp_conditional_on_x <- myres$cv_all_x_rmse_each_h_yoy
cv_rmse_yoy_rgdp <- myres$cv_rmse_each_h_rgdp_yoy
cv_rmse_level_rgdp_conditional_on_x <- myres$cv_all_x_rmse_each_h
cv_rmse_level_rgdp <- myres$cv_rmse_each_h_rgdp
# Rename the first 8 columns: one RMSE per forecast horizon 1..h_max
names(cv_rmse_level_rgdp_conditional_on_x)[1:8] <- paste0("level_rmse_", 1:8)
names(cv_rmse_yoy_rgdp_conditional_on_x)[1:8] <- paste0("yoy_rmse_", 1:8)
names(cv_rmse_yoy_rgdp)[1:8] <- paste0("yoy_rmse_", 1:8)
names(cv_rmse_level_rgdp)[1:8] <- paste0("level_rmse_", 1:8)
# Benchmark RMSEs produced by an external analysis workbook; the numeric
# columns are rescaled by 0.01 below (presumably percent -> fraction --
# TODO confirm), and (variable, lag) keys are parsed out of cond_exo
m_arg <- read_excel(m_analysis_path)
m_all_rmse <- m_arg[, c("cond_exo", "rmse1", "rmse2", "rmse3", "rmse4", "rmse5", "rmse6", "rmse7", "rmse8")]
m_to_compare_rmse <- m_all_rmse %>%
mutate_if(is.double, function(x) 0.01 * x) %>%
mutate(pre_variable = str_remove(cond_exo, "S4.l"),
variable = str_extract(pre_variable, "([^\\s]+)"),
lag = ifelse(str_detect(pre_variable, "LS"),
ifelse(str_detect(pre_variable, "L2S"), 2, 1) , 0)
) %>%
select(-pre_variable) %>%
mutate(variable = ifelse(variable == "_NONE", "rgdp", variable))
# Join this script's CV RMSEs with the benchmark RMSEs on (variable, lag)
compare_rmse <- rbind(cv_rmse_level_rgdp, cv_rmse_level_rgdp_conditional_on_x) %>%
left_join(m_to_compare_rmse, by = c("variable", "lag")) %>%
dplyr::select( -c(cond_exo))
compare_rmse_yoy <- rbind(cv_rmse_yoy_rgdp,
cv_rmse_yoy_rgdp_conditional_on_x)
# Bundle and persist all per-country results
country_objects <- list(
var_lag_order_season = var_lag_order_season,
compare_rmse = compare_rmse,
compare_rmse_yoy = compare_rmse_yoy,
yoy_fc_using_accu_level_weights = yoy_fc_using_accu_level_weights,
yoy_fc_using_accu_yoy_weights = yoy_fc_using_accu_yoy_weights,
level_fc_using_accu_level_weights = level_fc_using_accu_level_weights,
level_fc_using_accu_yoy_weights = level_fc_using_accu_yoy_weights,
extended_x_data_ts = myres$mdata_ext_ts,
rgdp_ts_in_arima = myres$rgdp_ts_in_arima)
saveRDS(country_objects, file = rds_file_name)
}
| /R/loop_countries_sarimax_demetra_fns.R | no_license | Allisterh/bridge_sarimax | R | false | false | 3,373 | r | source('./R/utils.R')
library(xts)
library(forecast)
library(tidyverse)
library(readxl)
library(timetk)
library(tictoc)
library(lubridate)
final_forecast_horizon <- c(2019, 12)
h_max = 8 # last rgdp data is 2017 Q4
number_of_cv = 8
train_span = 16
country_names <- c("Argentina", "Bolivia", "Brasil", "Chile", "Colombia",
"Ecuador", "Peru", "Paraguay", "Uruguay")
for (name in country_names) {
country_name <- name
data_path <- paste0("./data/excel_data/", country_name,".xlsx")
m_analysis_path <- paste0("data/", country_name,"_m_analysis_rgdp.xlsx")
rds_file_name = paste0("./data/sarimax_objects_", country_name,".rds")
tic()
myres <- bsarimax_as_function(data_path = data_path, number_of_cv = number_of_cv,
train_span = train_span, h_max = h_max,
final_forecast_horizon = final_forecast_horizon,
outer_cv_round = 0, s4xreg = FALSE)
toc()
var_lag_order_season <- myres$var_lag_order_season
level_fc_using_accu_level_weights <- myres$expo_final_rgdp_and_w_fc
level_fc_using_accu_yoy_weights <- myres$expo_final_rgdp_and_yoyw_fc
yoy_fc_using_accu_level_weights <- myres$yoy_growth_expo_final_rgdp_and_w_fc
yoy_fc_using_accu_yoy_weights <- myres$yoy_growth_expo_final_rgdp_and_yoyw_fc
cv_rmse_yoy_rgdp_conditional_on_x <- myres$cv_all_x_rmse_each_h_yoy
cv_rmse_yoy_rgdp <- myres$cv_rmse_each_h_rgdp_yoy
cv_rmse_level_rgdp_conditional_on_x <- myres$cv_all_x_rmse_each_h
cv_rmse_level_rgdp <- myres$cv_rmse_each_h_rgdp
names(cv_rmse_level_rgdp_conditional_on_x)[1:8] <- paste0("level_rmse_", 1:8)
names(cv_rmse_yoy_rgdp_conditional_on_x)[1:8] <- paste0("yoy_rmse_", 1:8)
names(cv_rmse_yoy_rgdp)[1:8] <- paste0("yoy_rmse_", 1:8)
names(cv_rmse_level_rgdp)[1:8] <- paste0("level_rmse_", 1:8)
m_arg <- read_excel(m_analysis_path)
m_all_rmse <- m_arg[, c("cond_exo", "rmse1", "rmse2", "rmse3", "rmse4", "rmse5", "rmse6", "rmse7", "rmse8")]
m_to_compare_rmse <- m_all_rmse %>%
mutate_if(is.double, function(x) 0.01 * x) %>%
mutate(pre_variable = str_remove(cond_exo, "S4.l"),
variable = str_extract(pre_variable, "([^\\s]+)"),
lag = ifelse(str_detect(pre_variable, "LS"),
ifelse(str_detect(pre_variable, "L2S"), 2, 1) , 0)
) %>%
select(-pre_variable) %>%
mutate(variable = ifelse(variable == "_NONE", "rgdp", variable))
compare_rmse <- rbind(cv_rmse_level_rgdp, cv_rmse_level_rgdp_conditional_on_x) %>%
left_join(m_to_compare_rmse, by = c("variable", "lag")) %>%
dplyr::select( -c(cond_exo))
compare_rmse_yoy <- rbind(cv_rmse_yoy_rgdp,
cv_rmse_yoy_rgdp_conditional_on_x)
country_objects <- list(
var_lag_order_season = var_lag_order_season,
compare_rmse = compare_rmse,
compare_rmse_yoy = compare_rmse_yoy,
yoy_fc_using_accu_level_weights = yoy_fc_using_accu_level_weights,
yoy_fc_using_accu_yoy_weights = yoy_fc_using_accu_yoy_weights,
level_fc_using_accu_level_weights = level_fc_using_accu_level_weights,
level_fc_using_accu_yoy_weights = level_fc_using_accu_yoy_weights,
extended_x_data_ts = myres$mdata_ext_ts,
rgdp_ts_in_arima = myres$rgdp_ts_in_arima)
saveRDS(country_objects, file = rds_file_name)
}
|
build_demographics_age_pnad <- function(Data){
  # Harmonize the age variable across PNAD survey years.
  #
  # The raw variable holding age changes name across survey years (v0156,
  # v2105, v0173, v2805, v0805, v8005). This function looks up the correct
  # column for the survey year of `Data`, copies it into a new `age` column,
  # and recodes the missing-value sentinel 999 to NA.
  #
  # Args:
  #   Data: a data.table of PNAD microdata, as expected by
  #         harmonizePNAD:::get_metadata(); modified by reference and also
  #         returned.
  # Returns: `Data` with an `age` column added.
  metadata <- harmonizePNAD:::get_metadata(Data)
  # Lookup table: survey year -> name of the raw age variable in that year
  year_var <- data.frame(
    year = c(1973, 1976:1979, 1981:1990, 1992, 1993, 1995:1999, 2001:2009, 2011:2015),
    var = c("v0156", "v2105", "v0173", "v2805", "v2805", rep("v0805", 10), rep("v8005", 21)),
    stringsAsFactors = FALSE
  )
  # Match on the year column explicitly; the original compared the whole
  # data.frame to a scalar, which only worked because `year` is column 1.
  year_i <- which(year_var$year == metadata$year)
  var_i <- year_var[year_i, "var"]
  harmonizePNAD:::check_necessary_vars(Data, var_i)
  # Copy the year-specific raw column into `age`; get() avoids eval(parse())
  Data[, age := get(var_i)]
  Data[age == 999, age := NA]  # 999 is the missing-value code in these files
  if (metadata$year == 1973) {
    warning("PNAD 1973: information only for persons aged 10+. Age is truncated at 99 years")
  }
  if (metadata$year == 1976) {
    warning("PNAD 1976: age is truncated at 99 years")
  }
  Data
}
| /R/build_demographics_age_pnad.R | no_license | arthurwelle/harmonizePNAD | R | false | false | 907 | r | build_demographics_age_pnad <- function(Data){
metadata = harmonizePNAD:::get_metadata(Data)
year_var <- data.frame(
year = c(1973, 1976:1979, 1981:1990, 1992, 1993, 1995:1999, 2001:2009, 2011:2015),
var = c("v0156", "v2105", "v0173", "v2805", "v2805", rep("v0805", 10), rep("v8005",21)),
stringsAsFactors = F
)
year_i = which(year_var == metadata$year)
var_i = year_var[year_i, "var"]
harmonizePNAD:::check_necessary_vars(Data, var_i)
Data[, age := eval(parse(text = var_i))]
Data[age == 999, age := NA]
if(metadata$year == 1973) {
warning("PNAD 1973: information only for persons aged 10+. Age is truncated at 99 years")
}
if(metadata$year == 1976) {
warning("PNAD 1976: age is truncated at 99 years")
}
Data
}
|
# Generate some scatterplots on raw counts data from DamID experiment
library(data.table)
library(dplyr)
library(ggplot2)
library(GenomicRanges)
library(GGally)
# NOTE(review): rm(list = ls()) and setwd() in a script are fragile; they
# clobber the caller's workspace and assume a specific machine layout.
rm(list = ls())
setwd(paste0("~/IMG/Projects/",
"HP1.Lamin.Polycomb.DNA.contacts.Effect.on.expression/",
"DamID-seq.HP1.PC.Lam.WBr.Nrn.Glia.Fb/final_variant/",
"BioHMM2.qn.full.PC.HMM3/"))
load("scatters.RData")
DATA <- fread("../CSV/01.Raw.Counts.csv")
# Keep the first 4 annotation columns; scale every sample column to
# counts-per-million (column / column sum * 1e6).
DATA2 <- cbind(DATA[, 1:4],
as.data.frame(
sapply(DATA[, 5:ncol(DATA)], function(col){
col/sum(col, na.rm = T) * 1e6
})
))
# remove some ugly outliers
# (thresholds 500/1000 CPM appear hand-picked per sample -- TODO confirm)
DATA2$DAM.FB.m_25mkM4HT.1.all[DATA2$DAM.FB.m_25mkM4HT.1.all > 500] <- NA
DATA2$LAM.FB.m_25mkM4HT.1.all[DATA2$LAM.FB.m_25mkM4HT.1.all > 500] <- NA
DATA2$LAM.FB.m_25mkM4HT.2.all[DATA2$LAM.FB.m_25mkM4HT.2.all > 500] <- NA
DATA2$PC.FB.m_25mkM4HT.1.all[DATA2$PC.FB.m_25mkM4HT.1.all > 1000] <- NA
DATA2$PC.FB.m_25mkM4HT.2.all[DATA2$PC.FB.m_25mkM4HT.2.all > 1000] <- NA
# DATA2$HP1.FB.m_25mkM4HT.2.all[DATA2$HP1.FB.m_25mkM4HT.2.all > 600] <- NA
ScatCor <- function(data, lastcol = 4, pref) {
  # Draw a matrix of pairwise scatter plots with Pearson correlations for the
  # sample columns of `data` (columns after the first `lastcol` annotation
  # columns), using GGally::ggpairs, and print it to the active device.
  #
  # Args:
  #   data:    data.frame whose columns (lastcol+1)..ncol(data) are numeric.
  #   lastcol: number of leading annotation columns to skip (default 4).
  #   pref:    file-name prefix; currently unused. The original code built a
  #            PNG file name from it but never opened a png() device, so
  #            nothing was ever written to disk. Kept for interface
  #            compatibility; wire up png()/dev.off() if file output is wanted.
  corplot <- ggpairs(
    data[, (lastcol + 1):ncol(data)],
    title = "Scatter Plots and Pearson Correlations",
    upper = list(
      continuous = wrap("cor", size = 15)),
    lower = list(
      continuous = wrap("smooth", colour = "blue")
    ),
    diag = NULL) +
    theme_grey(base_size = 20)
  print(corplot)
}
ScatterPlotting <- function(dataSet) {
# For every sample group (column names matching "<A>.<B>....<rep>.all",
# ignoring the first 4 annotation columns), plot replicate 1 against
# replicate 2 into a PNG, annotated with Pearson and Spearman correlations.
for (j in unique(sub("^([^.]+\\.[^.]+)\\..*(\\d)\\.all$", "\\1", names(dataSet)[-c(1:4)], perl=T))) {
# All columns of this group, sorted so replicate 1 comes first
repSet <- sort(grep(j, names(dataSet), value=T))
if (length(repSet) > 1) {
png(filename = paste("scatter_on_", j, ".png", sep=""), width = 1200, height = 1200)
# Common axis limit: the larger of the two replicate maxima
up.brd <- max(max(dataSet[[repSet[2]]], na.rm=T), max(dataSet[[repSet[1]]], na.rm=T))
par(mar = c(6,5,4,2) + 0.1)
par(cex=1.5)
Cor.P <- round(cor(dataSet[[repSet[1]]], dataSet[[repSet[2]]], method="pearson", use="pairwise.complete.obs"), digits=2)
Cor.S <- round(cor(dataSet[[repSet[1]]], dataSet[[repSet[2]]], method="spearman", use="pairwise.complete.obs"), digits=2)
# NOTE(review): the text(...) call below is passed as an argument to plot()
# and seems to rely on lazy evaluation to annotate the panel after the plot
# frame exists -- fragile; confirm it renders, or move it after plot().
plot(x=dataSet[[repSet[1]]], y=dataSet[[repSet[2]]], cex.axis = 3,
xlim = c(0, 0.9*up.brd), ylim = c(0, 0.9*up.brd),
# xlab=sub("^([^.]+\\.[^.]+)\\..*(\\d)\\.all$", "\\1.\\2", repSet[1]),
# ylab=sub("^([^.]+\\.[^.]+)\\..*(\\d)\\.all$", "\\1.\\2", repSet[2]),
xlab = "",
ylab = "",
text(x= up.brd*0.15, y=up.brd*0.85, paste0("Pearson: ", Cor.P, "\nSpearman: ", Cor.S), cex = 3),
mgp = c(3, 2.4, 0))
# x <- c(0, max(dataSet[[repSet[1]]], na.rm=T)); y <- c(0, max(dataSet[[repSet[2]]], na.rm=T))
# lines(x, y, col = "red")
# print(ggplot(dataSet, aes(dataSet[[i]], dataSet[[x]]))+geom_point(alpha=1/10, colour="red", size=4) + xlab(i) + ylab(x) + geom_text(data = data.frame(), size = 4, hjust=0, aes(min(dataSet[, i], na.rm=T), max(dataSet[, x], na.rm=T)*0.75, label =c(paste("Pearson.Cor = ", Cor.P, "\n\n", sep=""), paste("Spearman.Cor = ", Cor.S, sep="")))) + theme_bw())
rm(Cor.P)
rm(Cor.S)
dev.off()
} else {
print(paste("Skip make the Scatter Plots from", j, sep=" "))
}
}
}
# names(DATA) <- gsub("^([^.]+\\.[^.]+)\\..*(\\d)\\.all$", "\\1.\\2", names(DATA))
# Write one replicate-vs-replicate scatter PNG per sample group
ScatterPlotting(DATA2)
| /damid.corplots.R | no_license | foriin/Brains.DamID | R | false | false | 3,612 | r | # Generate some scatterplots on raw counts data from DamID experiment
library(data.table)
library(dplyr)
library(ggplot2)
library(GenomicRanges)
library(GGally)
rm(list = ls())
setwd(paste0("~/IMG/Projects/",
"HP1.Lamin.Polycomb.DNA.contacts.Effect.on.expression/",
"DamID-seq.HP1.PC.Lam.WBr.Nrn.Glia.Fb/final_variant/",
"BioHMM2.qn.full.PC.HMM3/"))
load("scatters.RData")
DATA <- fread("../CSV/01.Raw.Counts.csv")
DATA2 <- cbind(DATA[, 1:4],
as.data.frame(
sapply(DATA[, 5:ncol(DATA)], function(col){
col/sum(col, na.rm = T) * 1e6
})
))
# remove some ugly outliers
DATA2$DAM.FB.m_25mkM4HT.1.all[DATA2$DAM.FB.m_25mkM4HT.1.all > 500] <- NA
DATA2$LAM.FB.m_25mkM4HT.1.all[DATA2$LAM.FB.m_25mkM4HT.1.all > 500] <- NA
DATA2$LAM.FB.m_25mkM4HT.2.all[DATA2$LAM.FB.m_25mkM4HT.2.all > 500] <- NA
DATA2$PC.FB.m_25mkM4HT.1.all[DATA2$PC.FB.m_25mkM4HT.1.all > 1000] <- NA
DATA2$PC.FB.m_25mkM4HT.2.all[DATA2$PC.FB.m_25mkM4HT.2.all > 1000] <- NA
# DATA2$HP1.FB.m_25mkM4HT.2.all[DATA2$HP1.FB.m_25mkM4HT.2.all > 600] <- NA
ScatCor <- function(data, lastcol = 4, pref) {
  # Draw a matrix of pairwise scatter plots with Pearson correlations for the
  # sample columns of `data` (columns after the first `lastcol` annotation
  # columns), using GGally::ggpairs, and print it to the active device.
  #
  # Args:
  #   data:    data.frame whose columns (lastcol+1)..ncol(data) are numeric.
  #   lastcol: number of leading annotation columns to skip (default 4).
  #   pref:    file-name prefix; currently unused. The original code built a
  #            PNG file name from it but never opened a png() device, so
  #            nothing was ever written to disk. Kept for interface
  #            compatibility; wire up png()/dev.off() if file output is wanted.
  corplot <- ggpairs(
    data[, (lastcol + 1):ncol(data)],
    title = "Scatter Plots and Pearson Correlations",
    upper = list(
      continuous = wrap("cor", size = 15)),
    lower = list(
      continuous = wrap("smooth", colour = "blue")
    ),
    diag = NULL) +
    theme_grey(base_size = 20)
  print(corplot)
}
ScatterPlotting <- function(dataSet) {
# For every sample group (column names matching "<A>.<B>....<rep>.all",
# ignoring the first 4 annotation columns), plot replicate 1 against
# replicate 2 into a PNG, annotated with Pearson and Spearman correlations.
for (j in unique(sub("^([^.]+\\.[^.]+)\\..*(\\d)\\.all$", "\\1", names(dataSet)[-c(1:4)], perl=T))) {
# All columns of this group, sorted so replicate 1 comes first
repSet <- sort(grep(j, names(dataSet), value=T))
if (length(repSet) > 1) {
png(filename = paste("scatter_on_", j, ".png", sep=""), width = 1200, height = 1200)
# Common axis limit: the larger of the two replicate maxima
up.brd <- max(max(dataSet[[repSet[2]]], na.rm=T), max(dataSet[[repSet[1]]], na.rm=T))
par(mar = c(6,5,4,2) + 0.1)
par(cex=1.5)
Cor.P <- round(cor(dataSet[[repSet[1]]], dataSet[[repSet[2]]], method="pearson", use="pairwise.complete.obs"), digits=2)
Cor.S <- round(cor(dataSet[[repSet[1]]], dataSet[[repSet[2]]], method="spearman", use="pairwise.complete.obs"), digits=2)
# NOTE(review): the text(...) call below is passed as an argument to plot()
# and seems to rely on lazy evaluation to annotate the panel after the plot
# frame exists -- fragile; confirm it renders, or move it after plot().
plot(x=dataSet[[repSet[1]]], y=dataSet[[repSet[2]]], cex.axis = 3,
xlim = c(0, 0.9*up.brd), ylim = c(0, 0.9*up.brd),
# xlab=sub("^([^.]+\\.[^.]+)\\..*(\\d)\\.all$", "\\1.\\2", repSet[1]),
# ylab=sub("^([^.]+\\.[^.]+)\\..*(\\d)\\.all$", "\\1.\\2", repSet[2]),
xlab = "",
ylab = "",
text(x= up.brd*0.15, y=up.brd*0.85, paste0("Pearson: ", Cor.P, "\nSpearman: ", Cor.S), cex = 3),
mgp = c(3, 2.4, 0))
# x <- c(0, max(dataSet[[repSet[1]]], na.rm=T)); y <- c(0, max(dataSet[[repSet[2]]], na.rm=T))
# lines(x, y, col = "red")
# print(ggplot(dataSet, aes(dataSet[[i]], dataSet[[x]]))+geom_point(alpha=1/10, colour="red", size=4) + xlab(i) + ylab(x) + geom_text(data = data.frame(), size = 4, hjust=0, aes(min(dataSet[, i], na.rm=T), max(dataSet[, x], na.rm=T)*0.75, label =c(paste("Pearson.Cor = ", Cor.P, "\n\n", sep=""), paste("Spearman.Cor = ", Cor.S, sep="")))) + theme_bw())
rm(Cor.P)
rm(Cor.S)
dev.off()
} else {
print(paste("Skip make the Scatter Plots from", j, sep=" "))
}
}
}
# names(DATA) <- gsub("^([^.]+\\.[^.]+)\\..*(\\d)\\.all$", "\\1.\\2", names(DATA))
# Write one replicate-vs-replicate scatter PNG per sample group
ScatterPlotting(DATA2)
|
# generate imputed time series values from sci diabetes extract data
# function to output values
# numericValueColumnIndex = the column that the numeric value of interest is in (hba1c, sbp etc)
generateImputedTimeSeriesData <- function(inputFrame, input_deathData, startTime, endTime, input_binLengthMonths, label, numericValueColumnIndex) {
# inputFrame = cleanBMIData; input_deathData = deathData; startTime = runInStartDate; endTime = runInEndDate, input_binLengthMonths = 2; label = 'BMI_test'; numericValueColumnIndex = 8
library(data.table)
library(imputeTS)
# Count the number of distinct IDs in a vector.
id_per_location <- function(ID) {
  unique_ids <- unique(ID)
  length(unique_ids)
}
flagMove <- function(ID, charL) {
  # For a sequence of location codes, flag where consecutive entries differ
  # and report each entry's successor.
  #
  # Args:
  #   ID:    unused; kept for interface compatibility with existing callers.
  #   charL: character vector of location codes (length >= 2 assumed, as in
  #          the original implementation).
  # Returns a list of three parallel vectors:
  #   [[1]] the location codes as character,
  #   [[2]] each entry's next location ("spacer" for the last entry),
  #   [[3]] numeric move flag: difference of the factor coding between an
  #         entry and the next (non-zero = moved), 0 for the last entry.
  charLreport <- charL
  charLnumeric <- as.numeric(factor(charL))
  testFrame <- data.frame(charLreport, charLnumeric)
  testFrame$flagMove <- 0
  # seq_len(n - 1) makes the intent explicit; the original "1:n - 1" only
  # worked because index 0 is silently dropped when subsetting in R.
  testFrame$flagMove[seq_len(nrow(testFrame) - 1)] <- diff(testFrame$charLnumeric)
  testFrame$nextL <- c("spacer")
  testFrame$nextL[1:(nrow(testFrame)-1)] <- charLreport[2:length(charLreport)]
  testFrame$charLreport <- as.character(factor(charL))
  outputList <- list(testFrame$charLreport, testFrame$nextL, testFrame$flagMove)
  return(outputList)
}
# Convert "YYYY-MM-DD" date strings to Unix epoch seconds (midnight GMT).
returnUnixDateTime <- function(date) {
  as.numeric(as.POSIXct(date, format = "%Y-%m-%d", tz = "GMT"))
}
findSimilarDrugs <- function(inputFrame) {
# Normalize free-text prescription names into canonical drug names: brand
# names are mapped to their generic (e.g. Glucophage -> Metformin, Lantus ->
# Insulin Glargine), and consumables are grouped ("Test Strips", "Needle").
# Rows whose DrugName is "Disposable" are dropped. The returned frame has
# the same columns as the input, with DrugName replaced by the canonical
# name. NOTE: the match strings below are behavior-bearing literals -- do
# not edit them casually.
# inputFrame <- interestSet
# inputFrame <- inputFrame[1:10000,]
inputFrame$DrugName.original <- inputFrame$DrugName
inputFrame$DrugNameNew <- inputFrame$DrugName
inputFrame <- subset(inputFrame, DrugNameNew != "Disposable")
inputFrame$DrugNameNew[grep("Glucose", inputFrame$DrugName, ignore.case = TRUE)] <- "Glucose"
inputFrame$DrugNameNew[grep("Glucogel", inputFrame$DrugName, ignore.case = TRUE)] <- "Glucose"
inputFrame$DrugNameNew[grep("Glucagen Hypokit", inputFrame$DrugName, ignore.case = TRUE)] <- "Glucagon"
inputFrame$DrugNameNew[grep("Optium Plus", inputFrame$DrugName, ignore.case = TRUE)] <- "Test Strips"
inputFrame$DrugNameNew[grep("Metformin", inputFrame$DrugName, ignore.case = TRUE)] <- "Metformin"
inputFrame$DrugNameNew[grep("Glucophage", inputFrame$DrugName, ignore.case = TRUE)] <- "Metformin"
inputFrame$DrugNameNew[grep("Gliclazide", inputFrame$DrugName, ignore.case = TRUE)] <- "Gliclazide"
inputFrame$DrugNameNew[grep("Diamicron", inputFrame$DrugName, ignore.case = TRUE)] <- "Gliclazide"
inputFrame$DrugNameNew[grep("Rosiglitazone", inputFrame$DrugName, ignore.case = TRUE)] <- "Rosiglitazone"
inputFrame$DrugNameNew[grep("Avandia", inputFrame$DrugName, ignore.case = TRUE)] <- "Rosiglitazone"
inputFrame$DrugNameNew[grep("Linagliptin", inputFrame$DrugName, ignore.case = TRUE)] <- "Linagliptin"
inputFrame$DrugNameNew[grep("Victoza", inputFrame$DrugName, ignore.case = TRUE)] <- "Liraglutide"
inputFrame$DrugNameNew[grep("Liraglutide", inputFrame$DrugName, ignore.case = TRUE)] <- "Liraglutide"
inputFrame$DrugNameNew[grep("Pioglitazone", inputFrame$DrugName, ignore.case = TRUE)] <- "Pioglitazone"
inputFrame$DrugNameNew[grep("Sitagliptin", inputFrame$DrugName, ignore.case = TRUE)] <- "Sitagliptin"
inputFrame$DrugNameNew[grep("Januvia", inputFrame$DrugName, ignore.case = TRUE)] <- "Sitagliptin"
inputFrame$DrugNameNew[grep("Dapagliflozin", inputFrame$DrugName, ignore.case = TRUE)] <- "Dapagliflozin"
inputFrame$DrugNameNew[grep("Humalog Mix25", inputFrame$DrugName, ignore.case = TRUE)] <- "Humalog Mix 25"
inputFrame$DrugNameNew[grep("Lantus", inputFrame$DrugName, ignore.case = TRUE)] <- "Insulin Glargine"
inputFrame$DrugNameNew[grep("Levemir", inputFrame$DrugName, ignore.case = TRUE)] <- "Insulin Detemir"
inputFrame$DrugNameNew[grep("Insulatard", inputFrame$DrugName, ignore.case = TRUE)] <- "Insulatard"
inputFrame$DrugNameNew[grep("Actrapid", inputFrame$DrugName, ignore.case = TRUE)] <- "Actrapid"
inputFrame$DrugNameNew[grep("Humalog 100units/ml solution", inputFrame$DrugName, ignore.case = TRUE)] <- "Humalog"
inputFrame$DrugNameNew[grep("Novorapid", inputFrame$DrugName, ignore.case = TRUE)] <- "Novorapid"
inputFrame$DrugNameNew[grep("Novomix 30", inputFrame$DrugName, ignore.case = TRUE)] <- "Novomix 30"
inputFrame$DrugNameNew[grep("Mixtard 30", inputFrame$DrugName, ignore.case = TRUE)] <- "Mixtard 30"
inputFrame$DrugNameNew[grep("Mixtard 20", inputFrame$DrugName, ignore.case = TRUE)] <- "Mixtard 20"
inputFrame$DrugNameNew[grep("Humulin M3", inputFrame$DrugName, ignore.case = TRUE)] <- "Humulin M3"
inputFrame$DrugNameNew[grep("Humalog Mix50", inputFrame$DrugName, ignore.case = TRUE)] <- "Humalog Mix50"
inputFrame$DrugNameNew[grep("strip", inputFrame$DrugName, ignore.case = TRUE)] <- "Test Strips"
inputFrame$DrugNameNew[grep("Bd-Microfine", inputFrame$DrugName, ignore.case = TRUE)] <- "Needle"
inputFrame$DrugNameNew[grep("Needle", inputFrame$DrugName, ignore.case = TRUE)] <- "Needle"
# Swap the canonical names into DrugName and drop the working columns
outputFrame <- inputFrame
outputFrame$DrugName.original <- NULL
outputFrame$DrugName <- outputFrame$DrugNameNew
outputFrame$DrugNameNew <- NULL
return(outputFrame)
}
# generate node and link files
cleanTSdata <- inputFrame
# cleanHbA1cData <- read.csv("~/R/GlCoSy/SD_workingSource/hba1cDTclean.csv", sep=",", header = TRUE, row.names = NULL)
cleanTSdata$timeSeriesDataPoint <- cleanTSdata[, numericValueColumnIndex]
timeSeriesData <- cleanTSdata
timeSeriesDataDT <- data.table(timeSeriesData)
# load and process mortality data
deathData <- input_deathData
# deathData <- read.csv("~/R/GlCoSy/SDsource/diagnosisDateDeathDate.txt", sep=",")
deathData$unix_deathDate <- returnUnixDateTime(deathData$DeathDate)
deathData$unix_deathDate[is.na(deathData$unix_deathDate)] <- 0
deathData$isDead <- ifelse(deathData$unix_deathDate > 0, 1, 0)
deathData$unix_diagnosisDate <- returnUnixDateTime(deathData$DateOfDiagnosisDiabetes_Date)
deathDataDT <- data.table(deathData)
# set runin period of interest
startRuninPeriod <- startTime
endRuninPeriod <- endTime
# startRuninPeriod <- '2010-01-01'
# endRuninPeriod <- '2015-01-01'
# testDeathDate <- '2013-01-01'
interestSetDT <- timeSeriesDataDT[dateplustime1 > returnUnixDateTime(startRuninPeriod) &
dateplustime1 < returnUnixDateTime(endRuninPeriod)]
interestSetDF <- data.frame(interestSetDT)
###############################
## start data manipulation
###############################
# scale time to 0 to 1 range
interestSetDT$dateplustime1.original <- interestSetDT$dateplustime1
interestSetDT$dateplustime1 <- (interestSetDT$dateplustime1 - min(interestSetDT$dateplustime1)) / (max(interestSetDT$dateplustime1) - min(interestSetDT$dateplustime1))
interestSetDT <- transform(interestSetDT,id=as.numeric(factor(LinkId)))
# set time bins
# bin length in months
binLengthMonths = input_binLengthMonths
binLengthSeconds = (60*60*24*(365.25 / 12)) * binLengthMonths
unixRunInDuration = returnUnixDateTime(endRuninPeriod) - returnUnixDateTime(startRuninPeriod)
unixRunInDurationYears = round(unixRunInDuration / (60*60*24*365.25), 0)
numberOfBins = round(unixRunInDuration / binLengthSeconds, 0)
sequence <- seq(0, 1 , (1/numberOfBins))
# generate bag of drugs frame
timesetWordFrame <- as.data.frame(matrix(nrow = length(unique(interestSetDT$LinkId)), ncol = (length(sequence)-1) ))
colnames(timesetWordFrame) <- c(1:(length(sequence)-1))
timesetWordFrame$LinkId <- 0
medianFrame <- as.data.frame(matrix(nrow = length(unique(interestSetDT$LinkId)), ncol = 1 ))
colnames(medianFrame) <- c("median")
# function to generate drugwords for each time interval
returnIntervals <- function(LinkId, timeSeriesDataPoint, dateplustime1, sequence, id) {
# timeSeriesDataPoint <- subset(interestSetDT, id == 2)$timeSeriesDataPoint; dateplustime1 <- subset(interestSetDT, id == 2)$dateplustime1; id = 2; LinkId <- subset(interestSetDT, id == 2)$LinkId
inputSet <- data.table(timeSeriesDataPoint, dateplustime1)
## add nil values to fill time slots without any drugs
nilFrame <- as.data.frame(matrix(nrow = length(sequence), ncol = ncol(inputSet)))
colnames(nilFrame) <- colnames(inputSet)
nilFrame$timeSeriesDataPoint <- 0
nilFrame$dateplustime1 <- sequence
outputSet <- rbind(nilFrame, inputSet)
dataBreaks <- split(outputSet$timeSeriesDataPoint, cut(outputSet$dateplustime1, breaks = sequence))
outputVector <- c(rep(0, length(sequence)- 1))
# returns either 0, or the median of all values in the time bin
for (kk in seq(1, length(dataBreaks), 1)) {
values <- dataBreaks[[kk]]
if (length(values) == 1) { outputVector[kk] = 0}
if (length(values) > 0) { outputVector[kk] = quantile(values[values > 0])[3]}
}
return(c(outputVector, LinkId[1]))
}
print(max(interestSetDT$id))
for (j in seq(1, max(interestSetDT$id), 1)) {
# for (j in seq(1 ,1000, )) {
if(j%%100 == 0) {print(j)}
injectionSet <- interestSetDT[id == j]
timesetWordFrame[j, ] <- returnIntervals(injectionSet$LinkId, injectionSet$timeSeriesDataPoint, injectionSet$dateplustime1, sequence, j)
medianFrame$median[j] <- quantile(injectionSet$hba1cNumeric)[3]
}
# write out timesetWordFrame for analysis
# write.table(timesetWordFrame, file = "~/R/_workingDirectory/bagOfDrugs/local_py/dataFiles/10y_30increments_2004-2014_hba1c_TS.csv", sep=",", row.names = FALSE)
# timesetWordFrame <- read.csv("~/R/_workingDirectory/bagOfDrugs/local_py/hba1c_TS.csv")
# last value carry forward imputation of values
timesetWordFrame[,1][is.na(timesetWordFrame[,1])] <- 0
interpolatedTS <- as.data.frame(matrix(nrow = nrow(timesetWordFrame), ncol = (ncol(timesetWordFrame) - 1)))
for (jj in seq(1, nrow(timesetWordFrame), 1)) {
if(jj%%1000 == 0) {print(jj)}
testVector <- c(0, timesetWordFrame[jj, 1:(ncol(timesetWordFrame) - 1)])
# interpolatedTS[jj, ] <- na.interpolation(as.numeric(timesetWordFrame[jj, 1:(ncol(timesetWordFrame) - 1)]), option ="linear")
# interpolatedTS[jj, ] <- na.interpolation(as.numeric(testVector), option ="linear")[2: length(testVector)]
interpolatedTS[jj, ] <- na.locf(as.numeric(testVector), option ="locf")[2: length(testVector)]
}
interpolatedTS$LinkId <- timesetWordFrame$LinkId
interpolatedTS$median <- medianFrame$median
# write out timesetWordFrame for analysis
# write.table(interpolatedTS, file = "~/R/_workingDirectory/bagOfDrugs/local_py/dataFiles/interpolatedTS_hba1c_10y_30increments_2004-2014_locf.csv", sep=",", row.names = FALSE)
# write.table(drugWordFrame, file = "~/R/GlCoSy/MLsource/drugWordFrame_withID_2005_2015.csv", sep=",")
# drugWordFrame <- read.csv("~/R/GlCoSy/MLsource/drugWordFrame.csv", stringsAsFactors = F, row.names = NULL); drugWordFrame$row.names <- NULL
# here do analysis to select rows (IDs) for later analysis
# mortality outcome at 2017-01-01
interpolatedTS_mortality <- merge(interpolatedTS, deathData, by.x = "LinkId", by.y= "LinkId")
# type 2 diabetes only
interpolatedTS_mortality <- subset(interpolatedTS_mortality, DiabetesMellitusType_Mapped == 'Type 2 Diabetes Mellitus')
# type 1 diabetes only
# interpolatedTS_mortality <- subset(interpolatedTS_mortality, DiabetesMellitusType_Mapped == 'Type 1 Diabetes Mellitus')
# remove those dead before end of FU
# analysis frame = those who are not dead, or those who have died after the end of the runin period. ie all individuals in analysis alive at the end of the runin period
interpolatedTS_mortality <- subset(interpolatedTS_mortality, isDead == 0 | (isDead == 1 & unix_deathDate > returnUnixDateTime(endRuninPeriod)) )
# remove those diagnosed after the end of the runin period
interpolatedTS_mortality <- subset(interpolatedTS_mortality, unix_diagnosisDate <= returnUnixDateTime(endRuninPeriod) )
# remove those diagnosed after the start of the runin period
interpolatedTS_mortality <- subset(interpolatedTS_mortality, unix_diagnosisDate <= returnUnixDateTime(startRuninPeriod) )
interpolatedTS_mortality$age_at_startOfFollowUp <- (returnUnixDateTime(startRuninPeriod) - returnUnixDateTime(as.character(interpolatedTS_mortality$BirthDate))) / (60*60*24*365.25)
# remove those diagnosed after the beginning of the runin period ie all in analysis have had DM throughout followup period
# drugWordFrame_mortality <- subset(drugWordFrame_mortality, unix_diagnosisDate <= returnUnixDateTime(startRuninPeriod) )
interpolatedTS_forAnalysis <- interpolatedTS_mortality[, 2:length(sequence)]
mean = apply(interpolatedTS_forAnalysis, 1, mean)
stdev = apply(interpolatedTS_forAnalysis, 1, sd)
cv = stdev / mean
values_plusID_forExport <- data.frame(interpolatedTS_forAnalysis, interpolatedTS_mortality$LinkId, interpolatedTS_mortality$unix_deathDate, interpolatedTS_mortality$age_at_startOfFollowUp, interpolatedTS_mortality$median, cv)
write.table(values_plusID_forExport, file = paste("~/R/_workingDirectory/nEqOneTrial/sourceData/interpolatedTS_", label, "_", unixRunInDurationYears,"y_", numberOfBins, "increments_", startRuninPeriod, "_to_", endRuninPeriod, "_locf.csv", sep=""), sep=",", row.names = FALSE)
y_vector <- interpolatedTS_mortality$isDead
y_vector_isType1 <- ifelse(interpolatedTS_mortality$DiabetesMellitusType_Mapped == 'Type 1 Diabetes Mellitus', 1, 0)
y_vector_deadAt_1_year <- ifelse(interpolatedTS_mortality$isDead == 1 & interpolatedTS_mortality$unix_deathDate < (returnUnixDateTime(endRuninPeriod) + (1 * 365.25 * 24 * 60 * 60)), 1, 0)
y_vector_deadAt_2_year <- ifelse(interpolatedTS_mortality$isDead == 1 & interpolatedTS_mortality$unix_deathDate < (returnUnixDateTime(endRuninPeriod) + (2 * 365.25 * 24 * 60 * 60)), 1, 0)
y_vector_deadAt_3_year <- ifelse(interpolatedTS_mortality$isDead == 1 & interpolatedTS_mortality$unix_deathDate < (returnUnixDateTime(endRuninPeriod) + (3 * 365.25 * 24 * 60 * 60)), 1, 0)
y_vector_deadAt_4_year <- ifelse(interpolatedTS_mortality$isDead == 1 & interpolatedTS_mortality$unix_deathDate < (returnUnixDateTime(endRuninPeriod) + (4 * 365.25 * 24 * 60 * 60)), 1, 0)
y_vector_deadAt_5_year <- ifelse(interpolatedTS_mortality$isDead == 1 & interpolatedTS_mortality$unix_deathDate < (returnUnixDateTime(endRuninPeriod) + (5 * 365.25 * 24 * 60 * 60)), 1, 0)
# write out sequence for analysis
# write.table(interpolatedTS_forAnalysis, file = "~/R/_workingDirectory/bagOfDrugs/local_py/hba1c_5y_30increments_2008-2013_locf_T1.csv", sep=",", row.names = FALSE)
# write out sequence for analysis with LinkId
# write.table(timesetWordFrame_mortality, file = "~/R/GlCoSy/MLsource/hba1c_5y_30increments_2008-2013_chained_y_rawWithId.csv", sep=",", row.names = FALSE)
# write out dep variable (y)
#write.table(y_vector, file = "~/R/GlCoSy/MLsource/hba1c_5y_mortality_y_10y_2002to2012_6mBins_10y_chained_y.csv", sep = ",", row.names = FALSE)
#write.table(y_vector_isType1, file = "~/R/GlCoSy/MLsource/isType1_for_hb1ac_10y_2002to2012_6mBins_10y_chained_y.csv", sep = ",", row.names = FALSE)
#
write.table(y_vector_deadAt_1_year, paste("~/R/_workingDirectory/nEqOneTrial/sourceData/", label, "_1y_mortality", unixRunInDurationYears,"y_", numberOfBins, "increments_", startRuninPeriod, "_to_", endRuninPeriod, "_locf.csv", sep=""), sep = ",", row.names = FALSE)
#write.table(y_vector_deadAt_2_year, file = "~/R/GlCoSy/MLsource/hba1c_2y_mortality_y_10y_2002to2012_6mBins_10y_chained_y.csv", sep = ",", row.names = FALSE)
write.table(y_vector_deadAt_3_year, paste("~/R/_workingDirectory/nEqOneTrial/sourceData/", label, "_3y_mortality", unixRunInDurationYears,"y_", numberOfBins, "increments_", startRuninPeriod, "_to_", endRuninPeriod, "_locf.csv", sep=""), sep = ",", row.names = FALSE)
#
write.table(y_vector_deadAt_4_year, paste("~/R/_workingDirectory/nEqOneTrial/sourceData/", label,"_4y_mortality", unixRunInDurationYears,"y_", numberOfBins, "increments_", startRuninPeriod, "_to_", endRuninPeriod, "_locf.csv", sep=""), sep = ",", row.names = FALSE)
}
##
# Execution code: load the cleaned source extracts, then build the imputed
# time-series matrices (plus mortality outcome vectors) for each measure.
# load in raw data required
deathData <- read.csv("~/R/GlCoSy/SDsource/diagnosisDateDeathDate.txt", sep=",")
cleanHbA1cData <- read.csv("~/R/GlCoSy/SD_workingSource/hba1cDTclean.csv", sep=",", header = TRUE, row.names = NULL)
cleanSBPData <- read.csv("~/R/GlCoSy/SD_workingSource/SBPsetDTclean.csv", sep=",", header = TRUE, row.names = NULL)
cleanDBPData <- read.csv("~/R/GlCoSy/SD_workingSource/DBPsetDTclean.csv", sep=",", header = TRUE, row.names = NULL)
cleanBMIData <- read.csv("~/R/GlCoSy/SD_workingSource/BMIsetDTclean.csv", sep=",", header = TRUE, row.names = NULL)
##
# set variables: 5-year run-in window, binned into 2-month increments
# (use `<-` for assignment, per R convention)
runInStartDate <- "2010-01-01"
runInEndDate <- "2015-01-01"
binLength_months <- 2
# final argument = index of the column holding the numeric measurement
# (column 8 for the HbA1c/SBP/DBP extracts, column 6 for the BMI extract)
generateImputedTimeSeriesData(cleanHbA1cData, deathData, runInStartDate, runInEndDate, binLength_months, "hba1c", 8)
generateImputedTimeSeriesData(cleanSBPData, deathData, runInStartDate, runInEndDate, binLength_months, "SBP", 8)
generateImputedTimeSeriesData(cleanDBPData, deathData, runInStartDate, runInEndDate, binLength_months, "DBP", 8)
generateImputedTimeSeriesData(cleanBMIData, deathData, runInStartDate, runInEndDate, binLength_months, "BMI_test", 6)
| /archived_201117/bagOfValues_asFunction.R | no_license | csainsbury/nEqOneTrial | R | false | false | 17,022 | r | # generate imputed time series values from sci diabetes extract data
# function to output values
# numericValueColumnIndex = the column that the numeric value of interest is in (hba1c, sbp etc)
# Build a per-patient, per-time-bin median value matrix for one measure
# (HbA1c / SBP / DBP / BMI), LOCF-impute the gaps, attach mortality outcomes,
# and write the results out as csv files.
#   inputFrame              - cleaned extract; one row per measurement
#   input_deathData         - diagnosis / death date table (one row per LinkId)
#   startTime / endTime     - run-in window bounds, "YYYY-MM-DD" strings
#   input_binLengthMonths   - width of each time bin in months
#   label                   - tag used in the output file names
#   numericValueColumnIndex - column of inputFrame holding the numeric value
generateImputedTimeSeriesData <- function(inputFrame, input_deathData, startTime, endTime, input_binLengthMonths, label, numericValueColumnIndex) {
# inputFrame = cleanBMIData; input_deathData = deathData; startTime = runInStartDate; endTime = runInEndDate, input_binLengthMonths = 2; label = 'BMI_test'; numericValueColumnIndex = 8
library(data.table)
library(imputeTS)
# Count the number of distinct IDs in a vector.
id_per_location <- function(ID) {
distinct_ids <- unique(ID)
length(distinct_ids)
}
# For each position in a character sequence, report the current label, the
# next label ("spacer" for the final position), and the change in factor code
# at the next step (0 = no move).  Returns list(current, next, move_flag).
# NOTE: the ID argument is unused; it is kept to preserve the call signature.
flagMove <- function(ID, charL) {
charLreport <- as.character(factor(charL))
charLnumeric <- as.numeric(factor(charL))
n <- length(charL)
moveFlag <- rep(0, n)
# seq_len(n - 1) is empty for n <= 1, so short input is safe; the original
# `1:nrow(testFrame)-1` parsed as `(1:n) - 1` and only worked because the
# 0 index is silently dropped
moveFlag[seq_len(n - 1)] <- diff(charLnumeric)
nextL <- rep("spacer", n)
# charLreport[-1] is empty for n == 1, avoiding the reversed 2:1 range bug
nextL[seq_len(n - 1)] <- charLreport[-1]
list(charLreport, nextL, moveFlag)
}
# Convert "YYYY-MM-DD" date strings to unix epoch seconds (midnight, GMT).
returnUnixDateTime <- function(date) {
parsed <- as.POSIXct(date, format = "%Y-%m-%d", tz = "GMT")
as.numeric(parsed)
}
# Normalise free-text drug names onto a canonical vocabulary (e.g. every
# "Glucophage"/"Metformin ..." product becomes "Metformin"; strips/needles are
# collapsed to generic categories).  Rows named "Disposable" are dropped.
# All patterns are matched against the ORIGINAL DrugName column, so when more
# than one pattern matches a row, the LAST matching rule below wins.
# NOTE(review): this helper does not appear to be called anywhere inside
# generateImputedTimeSeriesData - presumably a leftover from the bag-of-drugs
# pipeline; confirm before removing.
findSimilarDrugs <- function(inputFrame) {
# inputFrame <- interestSet
# inputFrame <- inputFrame[1:10000,]
inputFrame$DrugName.original <- inputFrame$DrugName
inputFrame$DrugNameNew <- inputFrame$DrugName
inputFrame <- subset(inputFrame, DrugNameNew != "Disposable")
inputFrame$DrugNameNew[grep("Glucose", inputFrame$DrugName, ignore.case = TRUE)] <- "Glucose"
inputFrame$DrugNameNew[grep("Glucogel", inputFrame$DrugName, ignore.case = TRUE)] <- "Glucose"
inputFrame$DrugNameNew[grep("Glucagen Hypokit", inputFrame$DrugName, ignore.case = TRUE)] <- "Glucagon"
inputFrame$DrugNameNew[grep("Optium Plus", inputFrame$DrugName, ignore.case = TRUE)] <- "Test Strips"
inputFrame$DrugNameNew[grep("Metformin", inputFrame$DrugName, ignore.case = TRUE)] <- "Metformin"
inputFrame$DrugNameNew[grep("Glucophage", inputFrame$DrugName, ignore.case = TRUE)] <- "Metformin"
inputFrame$DrugNameNew[grep("Gliclazide", inputFrame$DrugName, ignore.case = TRUE)] <- "Gliclazide"
inputFrame$DrugNameNew[grep("Diamicron", inputFrame$DrugName, ignore.case = TRUE)] <- "Gliclazide"
inputFrame$DrugNameNew[grep("Rosiglitazone", inputFrame$DrugName, ignore.case = TRUE)] <- "Rosiglitazone"
inputFrame$DrugNameNew[grep("Avandia", inputFrame$DrugName, ignore.case = TRUE)] <- "Rosiglitazone"
inputFrame$DrugNameNew[grep("Linagliptin", inputFrame$DrugName, ignore.case = TRUE)] <- "Linagliptin"
inputFrame$DrugNameNew[grep("Victoza", inputFrame$DrugName, ignore.case = TRUE)] <- "Liraglutide"
inputFrame$DrugNameNew[grep("Liraglutide", inputFrame$DrugName, ignore.case = TRUE)] <- "Liraglutide"
inputFrame$DrugNameNew[grep("Pioglitazone", inputFrame$DrugName, ignore.case = TRUE)] <- "Pioglitazone"
inputFrame$DrugNameNew[grep("Sitagliptin", inputFrame$DrugName, ignore.case = TRUE)] <- "Sitagliptin"
inputFrame$DrugNameNew[grep("Januvia", inputFrame$DrugName, ignore.case = TRUE)] <- "Sitagliptin"
inputFrame$DrugNameNew[grep("Dapagliflozin", inputFrame$DrugName, ignore.case = TRUE)] <- "Dapagliflozin"
inputFrame$DrugNameNew[grep("Humalog Mix25", inputFrame$DrugName, ignore.case = TRUE)] <- "Humalog Mix 25"
inputFrame$DrugNameNew[grep("Lantus", inputFrame$DrugName, ignore.case = TRUE)] <- "Insulin Glargine"
inputFrame$DrugNameNew[grep("Levemir", inputFrame$DrugName, ignore.case = TRUE)] <- "Insulin Detemir"
inputFrame$DrugNameNew[grep("Insulatard", inputFrame$DrugName, ignore.case = TRUE)] <- "Insulatard"
inputFrame$DrugNameNew[grep("Actrapid", inputFrame$DrugName, ignore.case = TRUE)] <- "Actrapid"
inputFrame$DrugNameNew[grep("Humalog 100units/ml solution", inputFrame$DrugName, ignore.case = TRUE)] <- "Humalog"
inputFrame$DrugNameNew[grep("Novorapid", inputFrame$DrugName, ignore.case = TRUE)] <- "Novorapid"
inputFrame$DrugNameNew[grep("Novomix 30", inputFrame$DrugName, ignore.case = TRUE)] <- "Novomix 30"
inputFrame$DrugNameNew[grep("Mixtard 30", inputFrame$DrugName, ignore.case = TRUE)] <- "Mixtard 30"
inputFrame$DrugNameNew[grep("Mixtard 20", inputFrame$DrugName, ignore.case = TRUE)] <- "Mixtard 20"
inputFrame$DrugNameNew[grep("Humulin M3", inputFrame$DrugName, ignore.case = TRUE)] <- "Humulin M3"
inputFrame$DrugNameNew[grep("Humalog Mix50", inputFrame$DrugName, ignore.case = TRUE)] <- "Humalog Mix50"
inputFrame$DrugNameNew[grep("strip", inputFrame$DrugName, ignore.case = TRUE)] <- "Test Strips"
inputFrame$DrugNameNew[grep("Bd-Microfine", inputFrame$DrugName, ignore.case = TRUE)] <- "Needle"
inputFrame$DrugNameNew[grep("Needle", inputFrame$DrugName, ignore.case = TRUE)] <- "Needle"
# replace the raw name column with the normalised one and drop the helpers
outputFrame <- inputFrame
outputFrame$DrugName.original <- NULL
outputFrame$DrugName <- outputFrame$DrugNameNew
outputFrame$DrugNameNew <- NULL
return(outputFrame)
}
# generate node and link files
# Work on a copy of the input and expose the measure of interest under the
# generic column name `timeSeriesDataPoint`, used by everything below.
cleanTSdata <- inputFrame
# cleanHbA1cData <- read.csv("~/R/GlCoSy/SD_workingSource/hba1cDTclean.csv", sep=",", header = TRUE, row.names = NULL)
cleanTSdata$timeSeriesDataPoint <- cleanTSdata[, numericValueColumnIndex]
timeSeriesData <- cleanTSdata
timeSeriesDataDT <- data.table(timeSeriesData)
# load and process mortality data
deathData <- input_deathData
# deathData <- read.csv("~/R/GlCoSy/SDsource/diagnosisDateDeathDate.txt", sep=",")
# unix_deathDate == 0 encodes "still alive" (NA death dates are zeroed first)
deathData$unix_deathDate <- returnUnixDateTime(deathData$DeathDate)
deathData$unix_deathDate[is.na(deathData$unix_deathDate)] <- 0
deathData$isDead <- ifelse(deathData$unix_deathDate > 0, 1, 0)
deathData$unix_diagnosisDate <- returnUnixDateTime(deathData$DateOfDiagnosisDiabetes_Date)
deathDataDT <- data.table(deathData)
# set runin period of interest
startRuninPeriod <- startTime
endRuninPeriod <- endTime
# startRuninPeriod <- '2010-01-01'
# endRuninPeriod <- '2015-01-01'
# testDeathDate <- '2013-01-01'
# keep only measurements falling strictly inside the run-in window
interestSetDT <- timeSeriesDataDT[dateplustime1 > returnUnixDateTime(startRuninPeriod) &
dateplustime1 < returnUnixDateTime(endRuninPeriod)]
# NOTE(review): interestSetDF appears unused below - confirm before removing
interestSetDF <- data.frame(interestSetDT)
###############################
## start data manipulation
###############################
# scale time to 0 to 1 range
interestSetDT$dateplustime1.original <- interestSetDT$dateplustime1
interestSetDT$dateplustime1 <- (interestSetDT$dateplustime1 - min(interestSetDT$dateplustime1)) / (max(interestSetDT$dateplustime1) - min(interestSetDT$dateplustime1))
# id = dense 1..n patient index used to drive the per-patient loop below
interestSetDT <- transform(interestSetDT,id=as.numeric(factor(LinkId)))
# set time bins
# bin length in months
binLengthMonths = input_binLengthMonths
binLengthSeconds = (60*60*24*(365.25 / 12)) * binLengthMonths
unixRunInDuration = returnUnixDateTime(endRuninPeriod) - returnUnixDateTime(startRuninPeriod)
unixRunInDurationYears = round(unixRunInDuration / (60*60*24*365.25), 0)
numberOfBins = round(unixRunInDuration / binLengthSeconds, 0)
# bin edges on the scaled [0, 1] time axis
sequence <- seq(0, 1 , (1/numberOfBins))
# generate bag of drugs frame
# one row per patient, one column per time bin, plus a LinkId column
timesetWordFrame <- as.data.frame(matrix(nrow = length(unique(interestSetDT$LinkId)), ncol = (length(sequence)-1) ))
colnames(timesetWordFrame) <- c(1:(length(sequence)-1))
timesetWordFrame$LinkId <- 0
medianFrame <- as.data.frame(matrix(nrow = length(unique(interestSetDT$LinkId)), ncol = 1 ))
colnames(medianFrame) <- c("median")
# function to generate drugwords for each time interval
# Bin one patient's measurements onto the time grid and return the median of
# the positive values in each bin, with the patient's LinkId appended as the
# final element.
#   LinkId              - patient id vector (only the first element is used)
#   timeSeriesDataPoint - measurement values
#   dateplustime1       - measurement times, already scaled to [0, 1]
#   sequence            - bin edges spanning [0, 1]
#   id                  - unused; kept for call-signature compatibility
# Every interior bin edge contributes one zero placeholder (nilFrame), so no
# bin is empty in the split.  A bin holding only the placeholder has no
# positive values, so the quantile is taken of an empty vector (NA in
# practice - TODO confirm); those NAs are LOCF-imputed downstream.
returnIntervals <- function(LinkId, timeSeriesDataPoint, dateplustime1, sequence, id) {
# a plain data.frame suffices here (no data.table features are used), which
# makes the helper independent of the data.table package
inputSet <- data.frame(timeSeriesDataPoint, dateplustime1)
## add nil (zero) placeholder values so that no time slot is empty
nilFrame <- as.data.frame(matrix(nrow = length(sequence), ncol = ncol(inputSet)))
colnames(nilFrame) <- colnames(inputSet)
nilFrame$timeSeriesDataPoint <- 0
nilFrame$dateplustime1 <- sequence
outputSet <- rbind(nilFrame, inputSet)
dataBreaks <- split(outputSet$timeSeriesDataPoint, cut(outputSet$dateplustime1, breaks = sequence))
outputVector <- rep(0, length(sequence) - 1)
# per bin: median (index 3 of the quantile() output) of the real, positive values
# NOTE(review): the original also had `if (length(values) == 1) outputVector[kk] = 0`
# immediately before the assignment below; it was dead code (always overwritten
# by the length > 0 branch) and has been removed without changing behaviour.
for (kk in seq_along(dataBreaks)) {
values <- dataBreaks[[kk]]
if (length(values) > 0) { outputVector[kk] <- quantile(values[values > 0])[3] }
}
return(c(outputVector, LinkId[1]))
}
# Build one row of binned values per patient, plus the patient's overall
# median for the whole run-in window.
print(max(interestSetDT$id))
for (j in seq_len(max(interestSetDT$id))) {
if (j %% 100 == 0) { print(j) }
injectionSet <- interestSetDT[id == j]
timesetWordFrame[j, ] <- returnIntervals(injectionSet$LinkId, injectionSet$timeSeriesDataPoint, injectionSet$dateplustime1, sequence, j)
# BUG FIX: the overall median must use the generic timeSeriesDataPoint column
# created at the top of this function; the original read
# injectionSet$hba1cNumeric, which only exists in the HbA1c extract (NULL for
# the SBP/DBP/BMI runs, making every exported median NA for those measures).
medianFrame$median[j] <- quantile(injectionSet$timeSeriesDataPoint)[3]
}
# write out timesetWordFrame for analysis
# write.table(timesetWordFrame, file = "~/R/_workingDirectory/bagOfDrugs/local_py/dataFiles/10y_30increments_2004-2014_hba1c_TS.csv", sep=",", row.names = FALSE)
# timesetWordFrame <- read.csv("~/R/_workingDirectory/bagOfDrugs/local_py/hba1c_TS.csv")
# last value carry forward imputation of values
# replace NA in the first time bin with 0 (the LOCF pass below fills the rest)
timesetWordFrame[,1][is.na(timesetWordFrame[,1])] <- 0
interpolatedTS <- as.data.frame(matrix(nrow = nrow(timesetWordFrame), ncol = (ncol(timesetWordFrame) - 1)))
# row-wise last-observation-carried-forward; the leading 0 prepended to
# testVector guarantees na.locf always has a value to carry, and is stripped
# off again by the [2: length(testVector)] subset
for (jj in seq(1, nrow(timesetWordFrame), 1)) {
if(jj%%1000 == 0) {print(jj)}
testVector <- c(0, timesetWordFrame[jj, 1:(ncol(timesetWordFrame) - 1)])
# interpolatedTS[jj, ] <- na.interpolation(as.numeric(timesetWordFrame[jj, 1:(ncol(timesetWordFrame) - 1)]), option ="linear")
# interpolatedTS[jj, ] <- na.interpolation(as.numeric(testVector), option ="linear")[2: length(testVector)]
interpolatedTS[jj, ] <- na.locf(as.numeric(testVector), option ="locf")[2: length(testVector)]
}
interpolatedTS$LinkId <- timesetWordFrame$LinkId
interpolatedTS$median <- medianFrame$median
# write out timesetWordFrame for analysis
# write.table(interpolatedTS, file = "~/R/_workingDirectory/bagOfDrugs/local_py/dataFiles/interpolatedTS_hba1c_10y_30increments_2004-2014_locf.csv", sep=",", row.names = FALSE)
# write.table(drugWordFrame, file = "~/R/GlCoSy/MLsource/drugWordFrame_withID_2005_2015.csv", sep=",")
# drugWordFrame <- read.csv("~/R/GlCoSy/MLsource/drugWordFrame.csv", stringsAsFactors = F, row.names = NULL); drugWordFrame$row.names <- NULL
# here do analysis to select rows (IDs) for later analysis
# mortality outcome at 2017-01-01
interpolatedTS_mortality <- merge(interpolatedTS, deathData, by.x = "LinkId", by.y= "LinkId")
# type 2 diabetes only
interpolatedTS_mortality <- subset(interpolatedTS_mortality, DiabetesMellitusType_Mapped == 'Type 2 Diabetes Mellitus')
# type 1 diabetes only
# interpolatedTS_mortality <- subset(interpolatedTS_mortality, DiabetesMellitusType_Mapped == 'Type 1 Diabetes Mellitus')
# remove those dead before end of FU
# analysis frame = those who are not dead, or those who have died after the end of the runin period. ie all individuals in analysis alive at the end of the runin period
interpolatedTS_mortality <- subset(interpolatedTS_mortality, isDead == 0 | (isDead == 1 & unix_deathDate > returnUnixDateTime(endRuninPeriod)) )
# remove those diagnosed after the end of the runin period
interpolatedTS_mortality <- subset(interpolatedTS_mortality, unix_diagnosisDate <= returnUnixDateTime(endRuninPeriod) )
# remove those diagnosed after the start of the runin period
interpolatedTS_mortality <- subset(interpolatedTS_mortality, unix_diagnosisDate <= returnUnixDateTime(startRuninPeriod) )
interpolatedTS_mortality$age_at_startOfFollowUp <- (returnUnixDateTime(startRuninPeriod) - returnUnixDateTime(as.character(interpolatedTS_mortality$BirthDate))) / (60*60*24*365.25)
# remove those diagnosed after the beginning of the runin period ie all in analysis have had DM throughout followup period
# drugWordFrame_mortality <- subset(drugWordFrame_mortality, unix_diagnosisDate <= returnUnixDateTime(startRuninPeriod) )
# time-series columns only (column 1 is LinkId after the merge)
interpolatedTS_forAnalysis <- interpolatedTS_mortality[, 2:length(sequence)]
# per-row mean / sd / coefficient of variation across the run-in bins
# NOTE(review): `mean` and `=` assignment shadow base::mean - consider renaming
mean = apply(interpolatedTS_forAnalysis, 1, mean)
stdev = apply(interpolatedTS_forAnalysis, 1, sd)
cv = stdev / mean
values_plusID_forExport <- data.frame(interpolatedTS_forAnalysis, interpolatedTS_mortality$LinkId, interpolatedTS_mortality$unix_deathDate, interpolatedTS_mortality$age_at_startOfFollowUp, interpolatedTS_mortality$median, cv)
write.table(values_plusID_forExport, file = paste("~/R/_workingDirectory/nEqOneTrial/sourceData/interpolatedTS_", label, "_", unixRunInDurationYears,"y_", numberOfBins, "increments_", startRuninPeriod, "_to_", endRuninPeriod, "_locf.csv", sep=""), sep=",", row.names = FALSE)
# outcome flags: died within N years of the end of the run-in window
y_vector <- interpolatedTS_mortality$isDead
y_vector_isType1 <- ifelse(interpolatedTS_mortality$DiabetesMellitusType_Mapped == 'Type 1 Diabetes Mellitus', 1, 0)
y_vector_deadAt_1_year <- ifelse(interpolatedTS_mortality$isDead == 1 & interpolatedTS_mortality$unix_deathDate < (returnUnixDateTime(endRuninPeriod) + (1 * 365.25 * 24 * 60 * 60)), 1, 0)
y_vector_deadAt_2_year <- ifelse(interpolatedTS_mortality$isDead == 1 & interpolatedTS_mortality$unix_deathDate < (returnUnixDateTime(endRuninPeriod) + (2 * 365.25 * 24 * 60 * 60)), 1, 0)
y_vector_deadAt_3_year <- ifelse(interpolatedTS_mortality$isDead == 1 & interpolatedTS_mortality$unix_deathDate < (returnUnixDateTime(endRuninPeriod) + (3 * 365.25 * 24 * 60 * 60)), 1, 0)
y_vector_deadAt_4_year <- ifelse(interpolatedTS_mortality$isDead == 1 & interpolatedTS_mortality$unix_deathDate < (returnUnixDateTime(endRuninPeriod) + (4 * 365.25 * 24 * 60 * 60)), 1, 0)
y_vector_deadAt_5_year <- ifelse(interpolatedTS_mortality$isDead == 1 & interpolatedTS_mortality$unix_deathDate < (returnUnixDateTime(endRuninPeriod) + (5 * 365.25 * 24 * 60 * 60)), 1, 0)
# write out sequence for analysis
# write.table(interpolatedTS_forAnalysis, file = "~/R/_workingDirectory/bagOfDrugs/local_py/hba1c_5y_30increments_2008-2013_locf_T1.csv", sep=",", row.names = FALSE)
# write out sequence for analysis with LinkId
# write.table(timesetWordFrame_mortality, file = "~/R/GlCoSy/MLsource/hba1c_5y_30increments_2008-2013_chained_y_rawWithId.csv", sep=",", row.names = FALSE)
# write out dep variable (y)
#write.table(y_vector, file = "~/R/GlCoSy/MLsource/hba1c_5y_mortality_y_10y_2002to2012_6mBins_10y_chained_y.csv", sep = ",", row.names = FALSE)
#write.table(y_vector_isType1, file = "~/R/GlCoSy/MLsource/isType1_for_hb1ac_10y_2002to2012_6mBins_10y_chained_y.csv", sep = ",", row.names = FALSE)
#
# only the 1y / 3y / 4y outcome vectors are currently exported
write.table(y_vector_deadAt_1_year, paste("~/R/_workingDirectory/nEqOneTrial/sourceData/", label, "_1y_mortality", unixRunInDurationYears,"y_", numberOfBins, "increments_", startRuninPeriod, "_to_", endRuninPeriod, "_locf.csv", sep=""), sep = ",", row.names = FALSE)
#write.table(y_vector_deadAt_2_year, file = "~/R/GlCoSy/MLsource/hba1c_2y_mortality_y_10y_2002to2012_6mBins_10y_chained_y.csv", sep = ",", row.names = FALSE)
write.table(y_vector_deadAt_3_year, paste("~/R/_workingDirectory/nEqOneTrial/sourceData/", label, "_3y_mortality", unixRunInDurationYears,"y_", numberOfBins, "increments_", startRuninPeriod, "_to_", endRuninPeriod, "_locf.csv", sep=""), sep = ",", row.names = FALSE)
#
write.table(y_vector_deadAt_4_year, paste("~/R/_workingDirectory/nEqOneTrial/sourceData/", label,"_4y_mortality", unixRunInDurationYears,"y_", numberOfBins, "increments_", startRuninPeriod, "_to_", endRuninPeriod, "_locf.csv", sep=""), sep = ",", row.names = FALSE)
}
##
# execution code
# load in raw data required
deathData <- read.csv("~/R/GlCoSy/SDsource/diagnosisDateDeathDate.txt", sep=",")
cleanHbA1cData <- read.csv("~/R/GlCoSy/SD_workingSource/hba1cDTclean.csv", sep=",", header = TRUE, row.names = NULL)
cleanSBPData <- read.csv("~/R/GlCoSy/SD_workingSource/SBPsetDTclean.csv", sep=",", header = TRUE, row.names = NULL)
cleanDBPData <- read.csv("~/R/GlCoSy/SD_workingSource/DBPsetDTclean.csv", sep=",", header = TRUE, row.names = NULL)
cleanBMIData <- read.csv("~/R/GlCoSy/SD_workingSource/BMIsetDTclean.csv", sep=",", header = TRUE, row.names = NULL)
##
# set variables
# 5-year run-in window, binned into 2-month increments
runInStartDate = "2010-01-01"
runInEndDate = "2015-01-01"
binLength_months = 2
# final argument = index of the column holding the numeric measurement
# (column 8 for the HbA1c/SBP/DBP extracts, column 6 for the BMI extract)
generateImputedTimeSeriesData(cleanHbA1cData, deathData, runInStartDate, runInEndDate, binLength_months, "hba1c", 8)
generateImputedTimeSeriesData(cleanSBPData, deathData, runInStartDate, runInEndDate, binLength_months, "SBP", 8)
generateImputedTimeSeriesData(cleanDBPData, deathData, runInStartDate, runInEndDate, binLength_months, "DBP", 8)
generateImputedTimeSeriesData(cleanBMIData, deathData, runInStartDate, runInEndDate, binLength_months, "BMI_test", 6)
|
# install.packages("ggdendro")
# install.packages("reshape2")
# install.packages("grid")
# install.packages("dendextend")
library(dplyr)
projects <- data.frame(
#id.s = c(1:58),
pro_involved = c("+C", "1D", "+D", "+C", "+C", "+D", "+D", "+D", "1D", "1D", "+C", "+D", "+C", "+D", "+C", "+C", "+C", "+D", "+C", "+C", "+C", "1D", "+D", "+C", "1D", "+C", "+D", "+C", "+C", "+C", "+C", "+D", "+C", "+D", "+D", "+D", "+D", "+C", "+D", "+C", "+C", "+C", "+C", "+C", "+C", "+D", "+C", "+D", "+C", "+C", "+C", "+C", "+C", "+C", "+C", "+C", "+C", "+C"),
pro_focus = c("P&S", "OE&A", "I&DM", "I&DM", "C", "OE&A", "OE&A", "OE&A", "OE&A", "OE&A", "P&S", "P&S", "C", "OE&A", "P&S", "OE&A", "C", "I&DM", "C", "I&DM", "OE&A", "I&DM", "C", "P&S", "I&DM", "C", "OE&A", "OE&A", "I&DM", "I&DM", "OE&A", "I&DM", "I&DM", "OE&A", "OE&A", "OE&A", "OE&A", "C", "I&DM", "I&DM", "I&DM", "P&S", "I&DM", "P&S", "I&DM", "OE&A", "C", "P&S", "P&S", "OE&A", "OE&A", "C", "OE&A", "I&DM", "OE&A", "OE&A", "I&DM", "P&S"),
pro_complexity = c("H/H", "L/L", "L/H", "H/H", "H/H", "H/L", "H/L", "H/H", "H/H", "H/H", "L/H", "H/L", "L/H", "H/H", "L/H", "H/H", "L/H", "H/H", "H/H", "H/H", "L/H", "H/H", "H/L", "H/H", "H/L", "H/H", "H/H", "H/L", "H/L", "H/H", "L/L", "L/H", "H/L", "L/L", "H/H", "L/L", "H/H", "H/H", "L/H", "L/H", "H/H", "H/H", "H/H", "L/H", "H/H", "H/H", "H/L", "H/L", "H/H", "H/L", "L/L", "L/L", "H/L", "L/L", "H/L", "H/L", "L/H", "H/H"),
pro_impact = c("+R", "-C", "+R", "-C", "+R", "-C", "+R", "-C", "O", "O", "+R", "O", "+R", "O", "O", "-C", "+R", "O", "+R", "O", "-C", "O", "+R", "+R", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "-C", "-C", "-C", "+R", "O", "-C", "O", "+R", "O", "O", "O", "-C", "+R", "+R", "+R", "+R", "O", "-C", "-C", "-C", "-C", "-C", "O", "+R"),
pro_mode = c("explore", "explore", "explore", "exploit", "exploit", "exploit", "explore", "exploit", "exploit", "exploit", "explore", "exploit", "exploit", "exploit", "exploit", "exploit", "exploit", "exploit", "explore", "explore", "exploit", "exploit", "explore", "explore", "exploit", "explore", "exploit", "exploit", "exploit", "explore", "explore", "exploit", "exploit", "exploit", "exploit", "exploit", "exploit", "explore", "exploit", "exploit", "explore", "exploit", "exploit", "exploit", "explore", "exploit", "explore", "explore", "exploit", "exploit", "exploit", "exploit", "exploit", "exploit", "exploit", "exploit", "exploit", "explore"),
pro_pathway = c("CX", "CX", "I&S", "I&S", "CX", "iterate", "new", "I&S", "I&S", "I&S", "CX", "CX", "CX", "I&S", "new", "new", "new", "I&S", "new", "I&S", "I&S", "I&S", "CX", "I&S", "I&S", "new", "I&S", "I&S", "new", "new", "I&S", "I&S", "I&S", "I&S", "I&S", "I&S", "I&S", "CX", "CX", "I&S", "CX", "CX", "new", "CX", "CX", "I&S", "CX", "CX", "CX", "I&S", "CX", "I&S", "new", "I&S", "I&S", "I&S", "I&S", "iterate"),
stringsAsFactors=TRUE
)
#----- Dissimilarity Matrix -----#
library(cluster)
# to perform different types of hierarchical clustering
# package functions used: daisy(), diana(), clusplot()
# Gower distance handles the categorical project attributes; with factor
# columns only it reduces to simple matching - TODO confirm column types
gower.dist <- daisy(projects[ ,1:6], metric = c("gower"))
# class(gower.dist)
## dissimilarity , dist
#------------ DIVISIVE CLUSTERING ------------#
divisive.clust <- diana(as.matrix(gower.dist),
diss = TRUE, keep.diss = TRUE)
plot(divisive.clust,
main = "Divisive")
#------------ AGGLOMERATIVE CLUSTERING ------------#
aggl.clust.c <- hclust(gower.dist, method = "complete")
plot(aggl.clust.c,
main = "Agglomerative, complete linkages")
# Cluster stats comes in a list form, it is more convenient to look at it as a table
# This code below will produce a dataframe with observations in columns and variables in row
# Not quite tidy data, but it's nicer to look at
library(fpc)
# Summarise clustering quality statistics for cuts of 2..k clusters from `tree`.
#   dist - dissimilarity object/matrix the tree was built from
#   tree - hclust/diana tree that cutree() understands
#   k    - largest number of clusters to evaluate
# Returns a data.frame with one column per cut ("Test 1" = 2 clusters, ...);
# rows are the fpc::cluster.stats measures followed by the per-cluster sizes
# (clusters absent from a given cut are reported as size 0).
cstats.table <- function(dist, tree, k) {
  clust.assess <- c("cluster.number","n","within.cluster.ss","average.within","average.between",
                    "wb.ratio","dunn2","avg.silwidth")
  clust.size <- c("cluster.size")
  stats.names <- c()
  row.clust <- c()
  output.stats <- matrix(ncol = k, nrow = length(clust.assess))
  cluster.sizes <- matrix(ncol = k, nrow = k)
  for (i in seq_len(k)) {
    row.clust[i] <- paste("Cluster-", i, " size")
  }
  for (i in 2:k) {
    stats.names[i] <- paste("Test", i - 1)
    # compute the (expensive) statistics once per cut; the original re-ran
    # cluster.stats() for every individual statistic and every cluster size
    cut.stats <- cluster.stats(d = dist, clustering = cutree(tree, k = i))
    output.stats[, i] <- unlist(cut.stats[clust.assess])
    # pad with NA for clusters beyond the current cut (replaced by 0 below);
    # the original's no-op `dim(cluster.sizes[d, i]) <- ...` line was removed
    cluster.sizes[, i] <- unlist(cut.stats[clust.size])[seq_len(k)]
  }
  output.stats.df <- data.frame(output.stats)
  cluster.sizes <- data.frame(cluster.sizes)
  cluster.sizes[is.na(cluster.sizes)] <- 0
  rows.all <- c(clust.assess, row.clust)
  # rownames(output.stats.df) <- clust.assess
  output <- rbind(output.stats.df, cluster.sizes)[ ,-1]
  colnames(output) <- stats.names[2:k]
  rownames(output) <- rows.all
  is.num <- vapply(output, is.numeric, logical(1))
  output[is.num] <- lapply(output[is.num], round, 2)
  output
}
# I am capping the maximum number of clusters at 10 (passed as k below)
# but for sure, we can do more
# I want to choose a reasonable number, based on which I will be able to see basic differences between customer groups
stats.df.divisive <- cstats.table(gower.dist, divisive.clust, 10)
stats.df.divisive
stats.df.aggl <- cstats.table(gower.dist, aggl.clust.c, 10)
stats.df.aggl
# --------- Choosing the number of clusters ---------#
# Using "Elbow" and "Silhouette" methods to identify the best number of clusters
library(ggplot2)
# Elbow
# Divisive clustering
# NOTE(review): theme_bw() is added AFTER theme(plot.title = ...), so the
# complete theme resets the hjust tweak - reorder if the centred title matters
ggplot(data = data.frame(t(stats.df.divisive)), aes(x=cluster.number, y=within.cluster.ss)) + geom_point()+
geom_line()+
ggtitle("") +
labs(x = "Num.of clusters", y = "Within sum of squares") +
theme(plot.title = element_text(hjust = 0.5)) +
theme_bw(base_size=20)
# Silhouette
ggplot(data = data.frame(t(stats.df.divisive)), aes(x=cluster.number, y=avg.silwidth)) + geom_point()+
geom_line()+
ggtitle("Divisive clustering") +
labs(x = "Num.of clusters", y = "Average silhouette width") +
theme(plot.title = element_text(hjust = 0.5))
# Agglomerative clustering
# Elbow
ggplot(data = data.frame(t(stats.df.aggl)), aes(x=cluster.number, y=within.cluster.ss)) + geom_point()+
geom_line()+
ggtitle("") +
labs(x = "Num.of clusters", y = "Within sum of squares") +
theme(plot.title = element_text(hjust = 0.5)) +
theme_bw(base_size=20)
# Silhouette
ggplot(data = data.frame(t(stats.df.aggl)), aes(x=cluster.number, y=avg.silwidth)) + geom_point()+
geom_line()+
ggtitle("Agglomorative clustering") +
labs(x = "Num.of clusters", y = "Average silhouette width") +
theme(plot.title = element_text(hjust = 0.5))
# Finally, assign the chosen cluster number (k = 3) to each observation
clust.num <- cutree(divisive.clust, k = 3)
# derive the id column from the data instead of hard-coding 58 rows;
# the duplicate re-assignment of id.s has been removed (same value)
id.s <- seq_len(nrow(projects))
projects.cl <- cbind(id.s, projects, clust.num)
clust.aggl.num <- cutree(aggl.clust.c, k = 3)
projects.aggl.cl <- cbind(id.s, projects, clust.aggl.num)
#projects.cl <- cbind(projects, clust.num)
library("ggplot2")
library("reshape2")
library("purrr")
library("dplyr")
library("dendextend")
# Coloured dendrogram (k = 3 branch colours) via dendextend
dendro <- as.dendrogram(aggl.clust.c)
dendro.col <- dendro %>%
set("branches_k_color", k = 3,
value = c("gold3", "darkcyan", "cyan3")) %>%
set("branches_lwd", 0.6) %>%
set("labels_colors",
value = c("darkslategray")) %>%
set("labels_cex", 0.5)
ggd1 <- as.ggdend(dendro.col)
ggplot(ggd1, theme = theme_minimal()) +
labs(x = "Num. observations", y = "Height", title = "Dendrogram (aggl), k = 3")
# Create a radial plot
ggplot(ggd1, labels = T) +
scale_y_reverse(expand = c(0.2, 0)) +
coord_polar(theta="x")
# cust.order <- order.dendrogram(dendro)
# projects.cl.ord <- projects.cl[cust.order, ]
# 1 variable per row
# factors have to be converted to characters in order not to be dropped
cust.long <- melt(data.frame(lapply(projects.cl, as.character), stringsAsFactors=FALSE),
id.vars = c("id.s", "clust.num"), factorsAsStrings=T)
cust.aggl.long <- melt(data.frame(lapply(projects.aggl.cl, as.character), stringsAsFactors=FALSE),
id.vars = c("id.s", "clust.aggl.num"), factorsAsStrings=T)
# count distinct projects per (cluster, variable, level) combination
cust.long.q <- cust.long %>%
group_by(clust.num, variable, value) %>%
mutate(count = n_distinct(id.s)) %>%
distinct(clust.num, variable, value, count)
cust.aggl.long.q <- cust.aggl.long %>%
group_by(clust.aggl.num, variable, value) %>%
mutate(count = n_distinct(id.s)) %>%
distinct(clust.aggl.num, variable, value, count)
# convert counts to within-cluster, within-variable shares for the heatmaps
cust.long.p <- cust.long.q %>%
group_by(clust.num, variable) %>%
mutate(perc = count / sum(count)) %>%
arrange(clust.num)
cust.aggl.long.p <- cust.aggl.long.q %>%
group_by(clust.aggl.num, variable) %>%
mutate(perc = count / sum(count)) %>%
arrange(clust.aggl.num)
# heatmap of level shares per cluster; the horizontal lines separate the six
# variable groups listed in the ordered factor levels below
heatmap.p <- ggplot(cust.long.p, aes(x = clust.num, y = factor(value, levels = c("1D","+D","+C",
"C", "P&S", "I&DM", "OE&A",
"L/L","L/H", "H/L", "H/H",
"+R","-C","O",
"exploit","explore",
"I&S","CX","iterate","new"),
ordered = T))) +
geom_tile(aes(fill = perc), alpha = 0.85)+
labs(title = "Distribution of characteristics across clusters", x = "Cluster number", y = NULL) +
geom_hline(yintercept = 3.5) +
geom_hline(yintercept = 7.5) +
geom_hline(yintercept = 11.5) +
geom_hline(yintercept = 14.5) +
geom_hline(yintercept = 16.5) +
scale_fill_gradient2(low = "darkslategray1", mid = "yellow", high = "turquoise4")
heatmap.p
# same heatmap for the agglomerative clustering assignment
heatmap.aggl.p <- ggplot(cust.aggl.long.p, aes(x = clust.aggl.num, y = factor(value, levels = c("1D","+D","+C",
"C", "P&S", "I&DM", "OE&A",
"L/L","L/H", "H/L", "H/H",
"+R","-C","O",
"exploit","explore",
"I&S","CX","iterate","new"),
ordered = T))) +
geom_tile(aes(fill = perc), alpha = 0.85)+
labs(title = "Distribution of characteristics across (aggl) clusters", x = "Cluster number", y = NULL) +
geom_hline(yintercept = 3.5) +
geom_hline(yintercept = 7.5) +
geom_hline(yintercept = 11.5) +
geom_hline(yintercept = 14.5) +
geom_hline(yintercept = 16.5) +
scale_fill_gradient2(low = "darkslategray1", mid = "yellow", high = "turquoise4")
heatmap.aggl.p
| /app.R | no_license | tobias-heuser/classification-digitalisation-projects | R | false | false | 11,201 | r | # install.packages("ggdendro")
# install.packages("reshape2")
# install.packages("grid")
# install.packages("dendextend")
library(dplyr)
projects <- data.frame(
#id.s = c(1:58),
pro_involved = c("+C", "1D", "+D", "+C", "+C", "+D", "+D", "+D", "1D", "1D", "+C", "+D", "+C", "+D", "+C", "+C", "+C", "+D", "+C", "+C", "+C", "1D", "+D", "+C", "1D", "+C", "+D", "+C", "+C", "+C", "+C", "+D", "+C", "+D", "+D", "+D", "+D", "+C", "+D", "+C", "+C", "+C", "+C", "+C", "+C", "+D", "+C", "+D", "+C", "+C", "+C", "+C", "+C", "+C", "+C", "+C", "+C", "+C"),
pro_focus = c("P&S", "OE&A", "I&DM", "I&DM", "C", "OE&A", "OE&A", "OE&A", "OE&A", "OE&A", "P&S", "P&S", "C", "OE&A", "P&S", "OE&A", "C", "I&DM", "C", "I&DM", "OE&A", "I&DM", "C", "P&S", "I&DM", "C", "OE&A", "OE&A", "I&DM", "I&DM", "OE&A", "I&DM", "I&DM", "OE&A", "OE&A", "OE&A", "OE&A", "C", "I&DM", "I&DM", "I&DM", "P&S", "I&DM", "P&S", "I&DM", "OE&A", "C", "P&S", "P&S", "OE&A", "OE&A", "C", "OE&A", "I&DM", "OE&A", "OE&A", "I&DM", "P&S"),
pro_complexity = c("H/H", "L/L", "L/H", "H/H", "H/H", "H/L", "H/L", "H/H", "H/H", "H/H", "L/H", "H/L", "L/H", "H/H", "L/H", "H/H", "L/H", "H/H", "H/H", "H/H", "L/H", "H/H", "H/L", "H/H", "H/L", "H/H", "H/H", "H/L", "H/L", "H/H", "L/L", "L/H", "H/L", "L/L", "H/H", "L/L", "H/H", "H/H", "L/H", "L/H", "H/H", "H/H", "H/H", "L/H", "H/H", "H/H", "H/L", "H/L", "H/H", "H/L", "L/L", "L/L", "H/L", "L/L", "H/L", "H/L", "L/H", "H/H"),
pro_impact = c("+R", "-C", "+R", "-C", "+R", "-C", "+R", "-C", "O", "O", "+R", "O", "+R", "O", "O", "-C", "+R", "O", "+R", "O", "-C", "O", "+R", "+R", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "-C", "-C", "-C", "+R", "O", "-C", "O", "+R", "O", "O", "O", "-C", "+R", "+R", "+R", "+R", "O", "-C", "-C", "-C", "-C", "-C", "O", "+R"),
pro_mode = c("explore", "explore", "explore", "exploit", "exploit", "exploit", "explore", "exploit", "exploit", "exploit", "explore", "exploit", "exploit", "exploit", "exploit", "exploit", "exploit", "exploit", "explore", "explore", "exploit", "exploit", "explore", "explore", "exploit", "explore", "exploit", "exploit", "exploit", "explore", "explore", "exploit", "exploit", "exploit", "exploit", "exploit", "exploit", "explore", "exploit", "exploit", "explore", "exploit", "exploit", "exploit", "explore", "exploit", "explore", "explore", "exploit", "exploit", "exploit", "exploit", "exploit", "exploit", "exploit", "exploit", "exploit", "explore"),
pro_pathway = c("CX", "CX", "I&S", "I&S", "CX", "iterate", "new", "I&S", "I&S", "I&S", "CX", "CX", "CX", "I&S", "new", "new", "new", "I&S", "new", "I&S", "I&S", "I&S", "CX", "I&S", "I&S", "new", "I&S", "I&S", "new", "new", "I&S", "I&S", "I&S", "I&S", "I&S", "I&S", "I&S", "CX", "CX", "I&S", "CX", "CX", "new", "CX", "CX", "I&S", "CX", "CX", "CX", "I&S", "CX", "I&S", "new", "I&S", "I&S", "I&S", "I&S", "iterate"),
stringsAsFactors=TRUE
)
#----- Dissimilarity Matrix -----#
library(cluster)
# to perform different types of hierarchical clustering
# package functions used: daisy(), diana(), clusplot()
gower.dist <- daisy(projects[ ,1:6], metric = c("gower"))
# class(gower.dist)
## dissimilarity , dist
#------------ DIVISIVE CLUSTERING ------------#
divisive.clust <- diana(as.matrix(gower.dist),
diss = TRUE, keep.diss = TRUE)
plot(divisive.clust,
main = "Divisive")
#------------ AGGLOMERATIVE CLUSTERING ------------#
aggl.clust.c <- hclust(gower.dist, method = "complete")
plot(aggl.clust.c,
main = "Agglomerative, complete linkages")
# Cluster stats comes in a list form, it is more convenient to look at it as a table
# This code below will produce a dataframe with observations in columns and variables in row
# Not quite tidy data, but it's nicer to look at
library(fpc)
# Build a comparison table of clustering quality statistics for k = 2..k.
#
# For each candidate number of clusters the tree is cut with cutree() and
# fpc::cluster.stats() is queried for a fixed set of assessment metrics plus
# the per-cluster sizes.  The result is a data.frame with one column per
# candidate ("Test 1" = 2 clusters, "Test 2" = 3 clusters, ...) and one row
# per metric / cluster-size label, rounded to 2 decimals.
#
# @param dist a dissimilarity object (here from cluster::daisy()), forwarded
#   to cluster.stats()
# @param tree a clustering tree accepted by cutree() (hclust or diana)
# @param k    the maximum number of clusters to evaluate
cstats.table <- function(dist, tree, k) {
# Names of the cluster.stats() components extracted, in fixed row order.
clust.assess <- c("cluster.number","n","within.cluster.ss","average.within","average.between",
"wb.ratio","dunn2","avg.silwidth")
clust.size <- c("cluster.size")
stats.names <- c()
row.clust <- c()
# One column per candidate k; column 1 is never filled and is dropped below.
output.stats <- matrix(ncol = k, nrow = length(clust.assess))
cluster.sizes <- matrix(ncol = k, nrow = k)
# Row labels for the per-cluster size rows appended under the metrics.
for(i in c(1:k)){
row.clust[i] <- paste("Cluster-", i, " size")
}
# Evaluate each candidate number of clusters (2..k).
# NOTE(review): cluster.stats(d = dist, clustering = cutree(tree, k = i)) is
# recomputed inside both inner loops for the same i; hoisting it once per i
# would avoid substantial redundant work.
for(i in c(2:k)){
stats.names[i] <- paste("Test", i-1)
for(j in seq_along(clust.assess)){
output.stats[j, i] <- unlist(cluster.stats(d = dist, clustering = cutree(tree, k = i))[clust.assess])[j]
}
for(d in 1:k) {
# Size of cluster d when cutting into i clusters (NA when d > i, zeroed later).
cluster.sizes[d, i] <- unlist(cluster.stats(d = dist, clustering = cutree(tree, k = i))[clust.size])[d]
# NOTE(review): the next two lines appear to be no-ops -- setting dim on a
# single extracted cell and then extracting a value that is discarded.
# Confirm before removing.
dim(cluster.sizes[d, i]) <- c(length(cluster.sizes[i]), 1)
cluster.sizes[d, i]
}
}
output.stats.df <- data.frame(output.stats)
cluster.sizes <- data.frame(cluster.sizes)
# Clusters that do not exist for a small k have NA size; report them as 0.
cluster.sizes[is.na(cluster.sizes)] <- 0
rows.all <- c(clust.assess, row.clust)
# rownames(output.stats.df) <- clust.assess
# Stack metrics on top of sizes and drop the unused first column (k = 1).
output <- rbind(output.stats.df, cluster.sizes)[ ,-1]
colnames(output) <- stats.names[2:k]
rownames(output) <- rows.all
# Round every numeric column to 2 decimals for display.
is.num <- sapply(output, is.numeric)
output[is.num] <- lapply(output[is.num], round, 2)
output
}
# I am capping the maximum amount of clusters by 7
# but for sure, we can do more
# I want to choose a reasonable number, based on which I will be able to see basic differences between customer groups
stats.df.divisive <- cstats.table(gower.dist, divisive.clust, 10)
stats.df.divisive
stats.df.aggl <- cstats.table(gower.dist, aggl.clust.c, 10)
stats.df.aggl
# --------- Choosing the number of clusters ---------#
# Using "Elbow" and "Silhouette" methods to identify the best number of clusters
library(ggplot2)
# Elbow
# Divisive clustering
ggplot(data = data.frame(t(stats.df.divisive)), aes(x=cluster.number, y=within.cluster.ss)) + geom_point()+
geom_line()+
ggtitle("") +
labs(x = "Num.of clusters", y = "Within sum of squares") +
theme(plot.title = element_text(hjust = 0.5)) +
theme_bw(base_size=20)
# Silhouette
ggplot(data = data.frame(t(stats.df.divisive)), aes(x=cluster.number, y=avg.silwidth)) + geom_point()+
geom_line()+
ggtitle("Divisive clustering") +
labs(x = "Num.of clusters", y = "Average silhouette width") +
theme(plot.title = element_text(hjust = 0.5))
# Agglomerative clustering
# Elbow
ggplot(data = data.frame(t(stats.df.aggl)), aes(x=cluster.number, y=within.cluster.ss)) + geom_point()+
geom_line()+
ggtitle("") +
labs(x = "Num.of clusters", y = "Within sum of squares") +
theme(plot.title = element_text(hjust = 0.5)) +
theme_bw(base_size=20)
# Silhouette
ggplot(data = data.frame(t(stats.df.aggl)), aes(x=cluster.number, y=avg.silwidth)) + geom_point()+
geom_line()+
ggtitle("Agglomorative clustering") +
labs(x = "Num.of clusters", y = "Average silhouette width") +
theme(plot.title = element_text(hjust = 0.5))
# Finally, assigning the cluster number to the observation
clust.num <- cutree(divisive.clust, k = 3)
id.s = c(1:58)
projects.cl <- cbind(id.s, projects, clust.num)
clust.aggl.num <- cutree(aggl.clust.c, k = 3)
id.s = c(1:58)
projects.aggl.cl <- cbind(id.s, projects, clust.aggl.num)
#projects.cl <- cbind(projects, clust.num)
library("ggplot2")
library("reshape2")
library("purrr")
library("dplyr")
library("dendextend")
dendro <- as.dendrogram(aggl.clust.c)
dendro.col <- dendro %>%
set("branches_k_color", k = 3,
value = c("gold3", "darkcyan", "cyan3")) %>%
set("branches_lwd", 0.6) %>%
set("labels_colors",
value = c("darkslategray")) %>%
set("labels_cex", 0.5)
ggd1 <- as.ggdend(dendro.col)
ggplot(ggd1, theme = theme_minimal()) +
labs(x = "Num. observations", y = "Height", title = "Dendrogram (aggl), k = 3")
# Create a radial plot
ggplot(ggd1, labels = T) +
scale_y_reverse(expand = c(0.2, 0)) +
coord_polar(theta="x")
# cust.order <- order.dendrogram(dendro)
# projects.cl.ord <- projects.cl[cust.order, ]
# 1 variable per row
# factors have to be converted to characters in order not to be dropped
cust.long <- melt(data.frame(lapply(projects.cl, as.character), stringsAsFactors=FALSE),
id.vars = c("id.s", "clust.num"), factorsAsStrings=T)
cust.aggl.long <- melt(data.frame(lapply(projects.aggl.cl, as.character), stringsAsFactors=FALSE),
id.vars = c("id.s", "clust.aggl.num"), factorsAsStrings=T)
cust.long.q <- cust.long %>%
group_by(clust.num, variable, value) %>%
mutate(count = n_distinct(id.s)) %>%
distinct(clust.num, variable, value, count)
cust.aggl.long.q <- cust.aggl.long %>%
group_by(clust.aggl.num, variable, value) %>%
mutate(count = n_distinct(id.s)) %>%
distinct(clust.aggl.num, variable, value, count)
cust.long.p <- cust.long.q %>%
group_by(clust.num, variable) %>%
mutate(perc = count / sum(count)) %>%
arrange(clust.num)
cust.aggl.long.p <- cust.aggl.long.q %>%
group_by(clust.aggl.num, variable) %>%
mutate(perc = count / sum(count)) %>%
arrange(clust.aggl.num)
heatmap.p <- ggplot(cust.long.p, aes(x = clust.num, y = factor(value, levels = c("1D","+D","+C",
"C", "P&S", "I&DM", "OE&A",
"L/L","L/H", "H/L", "H/H",
"+R","-C","O",
"exploit","explore",
"I&S","CX","iterate","new"),
ordered = T))) +
geom_tile(aes(fill = perc), alpha = 0.85)+
labs(title = "Distribution of characteristics across clusters", x = "Cluster number", y = NULL) +
geom_hline(yintercept = 3.5) +
geom_hline(yintercept = 7.5) +
geom_hline(yintercept = 11.5) +
geom_hline(yintercept = 14.5) +
geom_hline(yintercept = 16.5) +
scale_fill_gradient2(low = "darkslategray1", mid = "yellow", high = "turquoise4")
heatmap.p
heatmap.aggl.p <- ggplot(cust.aggl.long.p, aes(x = clust.aggl.num, y = factor(value, levels = c("1D","+D","+C",
"C", "P&S", "I&DM", "OE&A",
"L/L","L/H", "H/L", "H/H",
"+R","-C","O",
"exploit","explore",
"I&S","CX","iterate","new"),
ordered = T))) +
geom_tile(aes(fill = perc), alpha = 0.85)+
labs(title = "Distribution of characteristics across (aggl) clusters", x = "Cluster number", y = NULL) +
geom_hline(yintercept = 3.5) +
geom_hline(yintercept = 7.5) +
geom_hline(yintercept = 11.5) +
geom_hline(yintercept = 14.5) +
geom_hline(yintercept = 16.5) +
scale_fill_gradient2(low = "darkslategray1", mid = "yellow", high = "turquoise4")
heatmap.aggl.p
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/auxiliary.R
\name{sampleGenotypesHWE}
\alias{sampleGenotypesHWE}
\title{Sample genotypes}
\usage{
sampleGenotypesHWE(numberOfContributors, populationLadder,
contributorNames = NULL)
}
\arguments{
\item{numberOfContributors}{The number of profiles which should be sampled.}
\item{populationLadder}{A \link{tibble} containing the markers, regions, and allele frequencies of the population.}
\item{contributorNames}{A vector of names. The length should be the number of profiles.}
}
\value{
A list of tibbles, one for each sampled profile.
}
\description{
Sample genotypes, given the number of profiles and a population, assuming the population was in HWE.
}
| /man/sampleGenotypesHWE.Rd | permissive | svilsen/MPSMixtures | R | false | true | 738 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/auxiliary.R
\name{sampleGenotypesHWE}
\alias{sampleGenotypesHWE}
\title{Sample genotypes}
\usage{
sampleGenotypesHWE(numberOfContributors, populationLadder,
contributorNames = NULL)
}
\arguments{
\item{numberOfContributors}{The number of profiles which should be sampled.}
\item{populationLadder}{A \link{tibble} containing the markers, regions, and allele frequencies of the population.}
\item{contributorNames}{A vector of names. The length should be the number of profiles.}
}
\value{
A list of tibbles, one for each sampled profile.
}
\description{
Sample genotypes, given the number of profiles and a population, assuming the population was in HWE.
}
|
talk_once <- function(.f, msg = "") {
  # Wrap `.f` so that `msg` is emitted (via cat_red_bullet()) only the first
  # time the returned function is called; every later call runs `.f` silently.
  # The flag lives in this enclosing environment, so each wrapper produced by
  # talk_once() talks exactly once, independently of the others.
  already_talked <- FALSE
  function(...) {
    if (!already_talked) {
      already_talked <<- TRUE
      cat_red_bullet(msg)
    }
    .f(...)
  }
}
#' Create a Dockerfile for your App
#'
#' Build a container containing your Shiny App. `add_dockerfile()` and
#' `add_dockerfile_with_renv()` and `add_dockerfile_with_renv()` creates
#' a generic Dockerfile, while `add_dockerfile_shinyproxy()`,
#' `add_dockerfile_with_renv_shinyproxy()` , `add_dockerfile_with_renv_shinyproxy()` and
#' `add_dockerfile_heroku()` creates platform specific Dockerfile.
#'
#' @inheritParams add_module
#'
#' @param path path to the DESCRIPTION file to use as an input.
#' @param output name of the Dockerfile output.
#' @param from The FROM of the Dockerfile. Default is
#'
#' FROM rocker/verse
#'
#' without renv.lock file passed
#' `R.Version()$major`.`R.Version()$minor` is used as tag
#'
#' @param as The AS of the Dockerfile. Default it NULL.
#' @param port The `options('shiny.port')` on which to run the App.
#' Default is 80.
#' @param host The `options('shiny.host')` on which to run the App.
#' Default is 0.0.0.0.
#' @param sysreqs boolean. If TRUE, RUN statements to install packages
#' system requirements will be included in the Dockerfile.
#' @param repos character. The URL(s) of the repositories to use for `options("repos")`.
#' @param expand boolean. If `TRUE` each system requirement will have its own `RUN` line.
#' @param open boolean. Should the Dockerfile/README/README be open after creation? Default is `TRUE`.
#' @param build_golem_from_source boolean. If `TRUE` no tar.gz is created and
#' the Dockerfile directly mount the source folder.
#' @param update_tar_gz boolean. If `TRUE` and `build_golem_from_source` is also `TRUE`,
#' an updated tar.gz is created.
#' @param extra_sysreqs character vector. Extra debian system requirements.
#'
#' @export
#' @rdname dockerfiles
#'
#'
#' @examples
#' \donttest{
#' # Add a standard Dockerfile
#' if (interactive() & requireNamespace("dockerfiler")) {
#' add_dockerfile()
#' }
#' # Create a 'deploy' folder containing everything needed to deploy
#' # the golem using docker based on {renv}
#' if (interactive() & requireNamespace("dockerfiler")) {
#' add_dockerfile_with_renv(
#' # lockfile = "renv.lock", # uncomment to use existing renv.lock file
#' output_dir = "deploy"
#' )
#' }
#' # Add a Dockerfile for ShinyProxy
#' if (interactive() & requireNamespace("dockerfiler")) {
#' add_dockerfile_shinyproxy()
#' }
#'
#' # Create a 'deploy' folder containing everything needed to deploy
#' # the golem with ShinyProxy using docker based on {renv}
#' if (interactive() & requireNamespace("dockerfiler")) {
#' add_dockerfile_with_renv(
#' # lockfile = "renv.lock",# uncomment to use existing renv.lock file
#' output_dir = "deploy"
#' )
#' }
#'
#' # Add a Dockerfile for Heroku
#' if (interactive() & requireNamespace("dockerfiler")) {
#' add_dockerfile_heroku()
#' }
#' }
#' @return The `{dockerfiler}` object, invisibly.
add_dockerfile <- function(
path = "DESCRIPTION",
output = "Dockerfile",
pkg = get_golem_wd(),
from = paste0(
"rocker/verse:",
R.Version()$major,
".",
R.Version()$minor
),
as = NULL,
port = 80,
host = "0.0.0.0",
sysreqs = TRUE,
repos = c(CRAN = "https://cran.rstudio.com/"),
expand = FALSE,
open = TRUE,
update_tar_gz = TRUE,
build_golem_from_source = TRUE,
extra_sysreqs = NULL) {
# Thin public wrapper: forwards every argument unchanged to add_dockerfile_(),
# which is wrapped in talk_once() so the "not recommended anymore" notice is
# printed only on the first call of the session.
add_dockerfile_(
path = path,
output = output,
pkg = pkg,
from = from,
as = as,
port = port,
host = host,
sysreqs = sysreqs,
repos = repos,
expand = expand,
open = open,
update_tar_gz = update_tar_gz,
build_golem_from_source = build_golem_from_source,
extra_sysreqs = extra_sysreqs
)
}
# Internal worker behind add_dockerfile().  Generates a Dockerfile from the
# package DESCRIPTION, exposes the chosen port and sets the container CMD to
# run the golem app.  Wrapped in talk_once() so the deprecation-style notice
# below is printed only on the first call.
add_dockerfile_ <- talk_once(
function(
path = "DESCRIPTION",
output = "Dockerfile",
pkg = get_golem_wd(),
from = paste0(
"rocker/verse:",
R.Version()$major,
".",
R.Version()$minor
),
as = NULL,
port = 80,
host = "0.0.0.0",
sysreqs = TRUE,
repos = c(CRAN = "https://cran.rstudio.com/"),
expand = FALSE,
open = TRUE,
update_tar_gz = TRUE,
build_golem_from_source = TRUE,
extra_sysreqs = NULL) {
# Keep the generated Dockerfile out of the built R package.
where <- fs_path(pkg, output)
usethis_use_build_ignore(
basename(where)
)
# Build the dockerfiler object from DESCRIPTION (deps, sysreqs, COPY/RUN lines).
dock <- dockerfiler_dock_from_desc(
path = path,
FROM = from,
AS = as,
sysreqs = sysreqs,
repos = repos,
expand = expand,
build_from_source = build_golem_from_source,
update_tar_gz = update_tar_gz,
extra_sysreqs = extra_sysreqs
)
dock$EXPOSE(port)
# CMD launches the app; %3$s is the package name read from DESCRIPTION[1].
dock$CMD(
sprintf(
"R -e \"options('shiny.port'=%s,shiny.host='%s');library(%3$s);%3$s::run_app()\"",
port,
host,
read.dcf(path)[1]
)
)
dock$write(output)
if (open) {
rstudioapi_navigateToFile(output)
}
# Tell the user what was created and, if relevant, about the tar.gz to keep.
alert_build(
path = path,
output = output,
build_golem_from_source = build_golem_from_source
)
return(invisible(dock))
},
"golem::add_dockerfile() is not recommended anymore.\nPlease use golem::add_dockerfile_with_renv() instead."
)
#' @export
#' @rdname dockerfiles
add_dockerfile_shinyproxy <- function(
path = "DESCRIPTION",
output = "Dockerfile",
pkg = get_golem_wd(),
from = paste0(
"rocker/verse:",
R.Version()$major,
".",
R.Version()$minor
),
as = NULL,
sysreqs = TRUE,
repos = c(CRAN = "https://cran.rstudio.com/"),
expand = FALSE,
open = TRUE,
update_tar_gz = TRUE,
build_golem_from_source = TRUE,
extra_sysreqs = NULL) {
# Thin public wrapper: forwards every argument unchanged to
# add_dockerfile_shinyproxy_(), whose talk_once() wrapper prints the
# "not recommended anymore" notice only on the first call of the session.
# Note: no port/host arguments here -- ShinyProxy requires port 3838.
add_dockerfile_shinyproxy_(
path = path,
output = output,
pkg = pkg,
from = from,
as = as,
sysreqs = sysreqs,
repos = repos,
expand = expand,
open = open,
update_tar_gz = update_tar_gz,
build_golem_from_source = build_golem_from_source,
extra_sysreqs = extra_sysreqs
)
}
# Internal worker behind add_dockerfile_shinyproxy().  Same pipeline as
# add_dockerfile_() but hard-wired for ShinyProxy: port 3838 and an
# exec-form CMD (JSON array), as ShinyProxy expects.  Wrapped in talk_once()
# so the notice below is printed only on the first call.
add_dockerfile_shinyproxy_ <- talk_once(
function(
path = "DESCRIPTION",
output = "Dockerfile",
pkg = get_golem_wd(),
from = paste0(
"rocker/verse:",
R.Version()$major,
".",
R.Version()$minor
),
as = NULL,
sysreqs = TRUE,
repos = c(CRAN = "https://cran.rstudio.com/"),
expand = FALSE,
open = TRUE,
update_tar_gz = TRUE,
build_golem_from_source = TRUE,
extra_sysreqs = NULL) {
where <- fs_path(pkg, output)
# Keep the generated Dockerfile out of the built R package.
usethis_use_build_ignore(output)
dock <- dockerfiler_dock_from_desc(
path = path,
FROM = from,
AS = as,
sysreqs = sysreqs,
repos = repos,
expand = expand,
build_from_source = build_golem_from_source,
update_tar_gz = update_tar_gz,
extra_sysreqs = extra_sysreqs
)
# ShinyProxy talks to the container on 3838; %1$s is the package name.
dock$EXPOSE(3838)
dock$CMD(sprintf(
" [\"R\", \"-e\", \"options('shiny.port'=3838,shiny.host='0.0.0.0');library(%1$s);%1$s::run_app()\"]",
read.dcf(path)[1]
))
dock$write(output)
if (open) {
rstudioapi_navigateToFile(output)
}
alert_build(
path,
output,
build_golem_from_source = build_golem_from_source
)
return(invisible(dock))
},
"golem::add_dockerfile_shinyproxy() is not recommended anymore.\nPlease use golem::add_dockerfile_with_renv_shinyproxy() instead."
)
#' @export
#' @rdname dockerfiles
add_dockerfile_heroku <- function(
path = "DESCRIPTION",
output = "Dockerfile",
pkg = get_golem_wd(),
from = paste0(
"rocker/verse:",
R.Version()$major,
".",
R.Version()$minor
),
as = NULL,
sysreqs = TRUE,
repos = c(CRAN = "https://cran.rstudio.com/"),
expand = FALSE,
open = TRUE,
update_tar_gz = TRUE,
build_golem_from_source = TRUE,
extra_sysreqs = NULL) {
# Thin public wrapper: forwards every argument unchanged to
# add_dockerfile_heroku_(), whose talk_once() wrapper prints the
# "not recommended anymore" notice only on the first call of the session.
# Note: no port/host arguments here -- Heroku injects the port via $PORT.
add_dockerfile_heroku_(
path = path,
output = output,
pkg = pkg,
from = from,
as = as,
sysreqs = sysreqs,
repos = repos,
expand = expand,
open = open,
update_tar_gz = update_tar_gz,
build_golem_from_source = build_golem_from_source,
extra_sysreqs = extra_sysreqs
)
}
# Internal worker behind add_dockerfile_heroku().  Same pipeline as
# add_dockerfile_() but tailored for Heroku: the CMD reads the runtime port
# from the $PORT environment variable (no EXPOSE), and the heroku CLI
# commands needed to deploy are printed afterwards.  Wrapped in talk_once()
# so the notice below is printed only on the first call.
add_dockerfile_heroku_ <- talk_once(
function(
path = "DESCRIPTION",
output = "Dockerfile",
pkg = get_golem_wd(),
from = paste0(
"rocker/verse:",
R.Version()$major,
".",
R.Version()$minor
),
as = NULL,
sysreqs = TRUE,
repos = c(CRAN = "https://cran.rstudio.com/"),
expand = FALSE,
open = TRUE,
update_tar_gz = TRUE,
build_golem_from_source = TRUE,
extra_sysreqs = NULL) {
where <- fs_path(pkg, output)
# Keep the generated Dockerfile out of the built R package.
usethis_use_build_ignore(output)
dock <- dockerfiler_dock_from_desc(
path = path,
FROM = from,
AS = as,
sysreqs = sysreqs,
repos = repos,
expand = expand,
build_from_source = build_golem_from_source,
update_tar_gz = update_tar_gz,
extra_sysreqs = extra_sysreqs
)
# $PORT is substituted by Heroku at container start; %1$s is the package name.
dock$CMD(
sprintf(
"R -e \"options('shiny.port'=$PORT,shiny.host='0.0.0.0');library(%1$s);%1$s::run_app()\"",
read.dcf(path)[1]
)
)
dock$write(output)
alert_build(
path = path,
output = output,
build_golem_from_source = build_golem_from_source
)
# Heroku app names cannot contain dots: "pkg-1.0.0" -> "pkg-1-0-0".
apps_h <- gsub(
"\\.",
"-",
sprintf(
"%s-%s",
read.dcf(path)[1],
read.dcf(path)[1, ][["Version"]]
)
)
# Print the deployment recipe for the user to run manually.
cli_cat_rule("From your command line, run:")
cli_cat_line("heroku container:login")
cli_cat_line(
sprintf("heroku create %s", apps_h)
)
cli_cat_line(
sprintf("heroku container:push web --app %s", apps_h)
)
cli_cat_line(
sprintf("heroku container:release web --app %s", apps_h)
)
cli_cat_line(
sprintf("heroku open --app %s", apps_h)
)
cat_red_bullet("Be sure to have the heroku CLI installed.")
cat_red_bullet(
sprintf("You can replace %s with another app name.", apps_h)
)
if (open) {
rstudioapi_navigateToFile(output)
}
usethis_use_build_ignore(files = output)
return(invisible(dock))
},
"
golem::add_dockerfile_heroku() is not recommended anymore.\nPlease use golem::add_dockerfile_with_renv_heroku() instead.
"
)
# Notify the user that the Dockerfile was created and, when the golem is not
# built from source inside the image, remind them to keep the package tar.gz
# next to the Dockerfile.
#
# @param path path to the DESCRIPTION file (used for package name/version).
# @param output path of the Dockerfile that was written.
# @param build_golem_from_source if FALSE, the image expects a prebuilt tar.gz.
alert_build <- function(
path,
output,
build_golem_from_source) {
cat_created(output, "Dockerfile")
if (!build_golem_from_source) {
cat_red_bullet(
sprintf(
"Be sure to keep your %s_%s.tar.gz file (generated using `pkgbuild::build(vignettes = FALSE)` ) in the same folder as the %s file generated",
read.dcf(path)[1],
read.dcf(path)[1, ][["Version"]],
basename(output)
)
)
}
}
| /R/add_dockerfiles.R | permissive | ThinkR-open/golem | R | false | false | 10,906 | r | talk_once <- function(.f, msg = "") {
talk <- TRUE
function(...) {
if (talk) {
talk <<- FALSE
cat_red_bullet(msg)
}
.f(...)
}
}
#' Create a Dockerfile for your App
#'
#' Build a container containing your Shiny App. `add_dockerfile()` and
#' `add_dockerfile_with_renv()` and `add_dockerfile_with_renv()` creates
#' a generic Dockerfile, while `add_dockerfile_shinyproxy()`,
#' `add_dockerfile_with_renv_shinyproxy()` , `add_dockerfile_with_renv_shinyproxy()` and
#' `add_dockerfile_heroku()` creates platform specific Dockerfile.
#'
#' @inheritParams add_module
#'
#' @param path path to the DESCRIPTION file to use as an input.
#' @param output name of the Dockerfile output.
#' @param from The FROM of the Dockerfile. Default is
#'
#' FROM rocker/verse
#'
#' without renv.lock file passed
#' `R.Version()$major`.`R.Version()$minor` is used as tag
#'
#' @param as The AS of the Dockerfile. Default it NULL.
#' @param port The `options('shiny.port')` on which to run the App.
#' Default is 80.
#' @param host The `options('shiny.host')` on which to run the App.
#' Default is 0.0.0.0.
#' @param sysreqs boolean. If TRUE, RUN statements to install packages
#' system requirements will be included in the Dockerfile.
#' @param repos character. The URL(s) of the repositories to use for `options("repos")`.
#' @param expand boolean. If `TRUE` each system requirement will have its own `RUN` line.
#' @param open boolean. Should the Dockerfile/README/README be open after creation? Default is `TRUE`.
#' @param build_golem_from_source boolean. If `TRUE` no tar.gz is created and
#' the Dockerfile directly mount the source folder.
#' @param update_tar_gz boolean. If `TRUE` and `build_golem_from_source` is also `TRUE`,
#' an updated tar.gz is created.
#' @param extra_sysreqs character vector. Extra debian system requirements.
#'
#' @export
#' @rdname dockerfiles
#'
#'
#' @examples
#' \donttest{
#' # Add a standard Dockerfile
#' if (interactive() & requireNamespace("dockerfiler")) {
#' add_dockerfile()
#' }
#' # Crete a 'deploy' folder containing everything needed to deploy
#' # the golem using docker based on {renv}
#' if (interactive() & requireNamespace("dockerfiler")) {
#' add_dockerfile_with_renv(
#' # lockfile = "renv.lock", # uncomment to use existing renv.lock file
#' output_dir = "deploy"
#' )
#' }
#' # Add a Dockerfile for ShinyProxy
#' if (interactive() & requireNamespace("dockerfiler")) {
#' add_dockerfile_shinyproxy()
#' }
#'
#' # Crete a 'deploy' folder containing everything needed to deploy
#' # the golem with ShinyProxy using docker based on {renv}
#' if (interactive() & requireNamespace("dockerfiler")) {
#' add_dockerfile_with_renv(
#' # lockfile = "renv.lock",# uncomment to use existing renv.lock file
#' output_dir = "deploy"
#' )
#' }
#'
#' # Add a Dockerfile for Heroku
#' if (interactive() & requireNamespace("dockerfiler")) {
#' add_dockerfile_heroku()
#' }
#' }
#' @return The `{dockerfiler}` object, invisibly.
add_dockerfile <- function(
path = "DESCRIPTION",
output = "Dockerfile",
pkg = get_golem_wd(),
from = paste0(
"rocker/verse:",
R.Version()$major,
".",
R.Version()$minor
),
as = NULL,
port = 80,
host = "0.0.0.0",
sysreqs = TRUE,
repos = c(CRAN = "https://cran.rstudio.com/"),
expand = FALSE,
open = TRUE,
update_tar_gz = TRUE,
build_golem_from_source = TRUE,
extra_sysreqs = NULL) {
add_dockerfile_(
path = path,
output = output,
pkg = pkg,
from = from,
as = as,
port = port,
host = host,
sysreqs = sysreqs,
repos = repos,
expand = expand,
open = open,
update_tar_gz = update_tar_gz,
build_golem_from_source = build_golem_from_source,
extra_sysreqs = extra_sysreqs
)
}
add_dockerfile_ <- talk_once(
function(
path = "DESCRIPTION",
output = "Dockerfile",
pkg = get_golem_wd(),
from = paste0(
"rocker/verse:",
R.Version()$major,
".",
R.Version()$minor
),
as = NULL,
port = 80,
host = "0.0.0.0",
sysreqs = TRUE,
repos = c(CRAN = "https://cran.rstudio.com/"),
expand = FALSE,
open = TRUE,
update_tar_gz = TRUE,
build_golem_from_source = TRUE,
extra_sysreqs = NULL) {
where <- fs_path(pkg, output)
usethis_use_build_ignore(
basename(where)
)
dock <- dockerfiler_dock_from_desc(
path = path,
FROM = from,
AS = as,
sysreqs = sysreqs,
repos = repos,
expand = expand,
build_from_source = build_golem_from_source,
update_tar_gz = update_tar_gz,
extra_sysreqs = extra_sysreqs
)
dock$EXPOSE(port)
dock$CMD(
sprintf(
"R -e \"options('shiny.port'=%s,shiny.host='%s');library(%3$s);%3$s::run_app()\"",
port,
host,
read.dcf(path)[1]
)
)
dock$write(output)
if (open) {
rstudioapi_navigateToFile(output)
}
alert_build(
path = path,
output = output,
build_golem_from_source = build_golem_from_source
)
return(invisible(dock))
},
"golem::add_dockerfile() is not recommended anymore.\nPlease use golem::add_dockerfile_with_renv() instead."
)
#' @export
#' @rdname dockerfiles
add_dockerfile_shinyproxy <- function(
path = "DESCRIPTION",
output = "Dockerfile",
pkg = get_golem_wd(),
from = paste0(
"rocker/verse:",
R.Version()$major,
".",
R.Version()$minor
),
as = NULL,
sysreqs = TRUE,
repos = c(CRAN = "https://cran.rstudio.com/"),
expand = FALSE,
open = TRUE,
update_tar_gz = TRUE,
build_golem_from_source = TRUE,
extra_sysreqs = NULL) {
add_dockerfile_shinyproxy_(
path = path,
output = output,
pkg = pkg,
from = from,
as = as,
sysreqs = sysreqs,
repos = repos,
expand = expand,
open = open,
update_tar_gz = update_tar_gz,
build_golem_from_source = build_golem_from_source,
extra_sysreqs = extra_sysreqs
)
}
add_dockerfile_shinyproxy_ <- talk_once(
function(
path = "DESCRIPTION",
output = "Dockerfile",
pkg = get_golem_wd(),
from = paste0(
"rocker/verse:",
R.Version()$major,
".",
R.Version()$minor
),
as = NULL,
sysreqs = TRUE,
repos = c(CRAN = "https://cran.rstudio.com/"),
expand = FALSE,
open = TRUE,
update_tar_gz = TRUE,
build_golem_from_source = TRUE,
extra_sysreqs = NULL) {
where <- fs_path(pkg, output)
usethis_use_build_ignore(output)
dock <- dockerfiler_dock_from_desc(
path = path,
FROM = from,
AS = as,
sysreqs = sysreqs,
repos = repos,
expand = expand,
build_from_source = build_golem_from_source,
update_tar_gz = update_tar_gz,
extra_sysreqs = extra_sysreqs
)
dock$EXPOSE(3838)
dock$CMD(sprintf(
" [\"R\", \"-e\", \"options('shiny.port'=3838,shiny.host='0.0.0.0');library(%1$s);%1$s::run_app()\"]",
read.dcf(path)[1]
))
dock$write(output)
if (open) {
rstudioapi_navigateToFile(output)
}
alert_build(
path,
output,
build_golem_from_source = build_golem_from_source
)
return(invisible(dock))
},
"golem::add_dockerfile_shinyproxy() is not recommended anymore.\nPlease use golem::add_dockerfile_with_renv_shinyproxy() instead."
)
#' @export
#' @rdname dockerfiles
add_dockerfile_heroku <- function(
path = "DESCRIPTION",
output = "Dockerfile",
pkg = get_golem_wd(),
from = paste0(
"rocker/verse:",
R.Version()$major,
".",
R.Version()$minor
),
as = NULL,
sysreqs = TRUE,
repos = c(CRAN = "https://cran.rstudio.com/"),
expand = FALSE,
open = TRUE,
update_tar_gz = TRUE,
build_golem_from_source = TRUE,
extra_sysreqs = NULL) {
add_dockerfile_heroku_(
path = path,
output = output,
pkg = pkg,
from = from,
as = as,
sysreqs = sysreqs,
repos = repos,
expand = expand,
open = open,
update_tar_gz = update_tar_gz,
build_golem_from_source = build_golem_from_source,
extra_sysreqs = extra_sysreqs
)
}
add_dockerfile_heroku_ <- talk_once(
function(
path = "DESCRIPTION",
output = "Dockerfile",
pkg = get_golem_wd(),
from = paste0(
"rocker/verse:",
R.Version()$major,
".",
R.Version()$minor
),
as = NULL,
sysreqs = TRUE,
repos = c(CRAN = "https://cran.rstudio.com/"),
expand = FALSE,
open = TRUE,
update_tar_gz = TRUE,
build_golem_from_source = TRUE,
extra_sysreqs = NULL) {
where <- fs_path(pkg, output)
usethis_use_build_ignore(output)
dock <- dockerfiler_dock_from_desc(
path = path,
FROM = from,
AS = as,
sysreqs = sysreqs,
repos = repos,
expand = expand,
build_from_source = build_golem_from_source,
update_tar_gz = update_tar_gz,
extra_sysreqs = extra_sysreqs
)
dock$CMD(
sprintf(
"R -e \"options('shiny.port'=$PORT,shiny.host='0.0.0.0');library(%1$s);%1$s::run_app()\"",
read.dcf(path)[1]
)
)
dock$write(output)
alert_build(
path = path,
output = output,
build_golem_from_source = build_golem_from_source
)
apps_h <- gsub(
"\\.",
"-",
sprintf(
"%s-%s",
read.dcf(path)[1],
read.dcf(path)[1, ][["Version"]]
)
)
cli_cat_rule("From your command line, run:")
cli_cat_line("heroku container:login")
cli_cat_line(
sprintf("heroku create %s", apps_h)
)
cli_cat_line(
sprintf("heroku container:push web --app %s", apps_h)
)
cli_cat_line(
sprintf("heroku container:release web --app %s", apps_h)
)
cli_cat_line(
sprintf("heroku open --app %s", apps_h)
)
cat_red_bullet("Be sure to have the heroku CLI installed.")
cat_red_bullet(
sprintf("You can replace %s with another app name.", apps_h)
)
if (open) {
rstudioapi_navigateToFile(output)
}
usethis_use_build_ignore(files = output)
return(invisible(dock))
},
"
golem::add_dockerfile_heroku() is not recommended anymore.\nPlease use golem::add_dockerfile_with_renv_heroku() instead.
"
)
alert_build <- function(
path,
output,
build_golem_from_source) {
cat_created(output, "Dockerfile")
if (!build_golem_from_source) {
cat_red_bullet(
sprintf(
"Be sure to keep your %s_%s.tar.gz file (generated using `pkgbuild::build(vignettes = FALSE)` ) in the same folder as the %s file generated",
read.dcf(path)[1],
read.dcf(path)[1, ][["Version"]],
basename(output)
)
)
}
}
|
#' Empirical samples from response propensity
#'
#' Draw samples from power prior and updated posterior and later used to
#' evaluate quality indicators.
#'
#' @param n.dat new survey data sets at wave levels
#' @param h.dat historical survey data sets
#' @param s.score historcial-level similarity scores dependent on feature-level weights
#' @param a shape1 parameter with default value 1
#' @param b shape2 parameter with default value 1
#' @param N the number of observations (replication) with default 10000
#' @param svy.ref historical reference exists with 1 and 0 otherwise
#'
#' @return a list from wave 0 to the end
rho.sample <- function(n.dat, h.dat, s.score, a = 1, b = 1, svy.ref, N = 10000) {
  # Draw empirical samples of the response propensity.
  #
  # Wave-0 samples are drawn from the power prior (one Beta per stratum, one
  # row of p.para each); wave 1..W samples are drawn from the stratum-level
  # posteriors returned by pposterior(), split by wave.
  #
  # @param n.dat   new survey data sets at wave levels
  # @param h.dat   historical survey data sets
  # @param s.score historical-level similarity scores
  # @param a,b     Beta shape1/shape2 hyperparameters (defaults 1, 1)
  # @param svy.ref 1 when a historical reference exists, 0 otherwise
  # @param N       number of replications per stratum (default 10000)
  # @return a named list: w0 (prior draws) followed by one data.frame per wave;
  #   each data.frame has one row per stratum and N columns of draws.
  #
  # Bug fix: the prior parameters were previously computed from `n.hat`, which
  # is not an argument of this function (and is undefined); the intended
  # object is the `n.dat` argument.
  p.para <- pprior(n.dat, h.dat, s.score, a, b, svy.ref)
  res.0 <- as.data.frame(do.call(rbind, lapply(seq_len(nrow(p.para)), function(i) {
    stats::rbeta(N, shape1 = p.para[i, "shape1"], shape2 = p.para[i, "shape2"], ncp = 0)
  })))
  colnames(res.0) <- c(1:N)
  post.para <- pposterior(n.dat, h.dat, s.score, a, b, svy.ref)
  lst.w <- lapply(split(post.para, post.para$Wave), function(w) {
    # One Beta draw matrix per wave: rows are strata 1..max(w$Strata).
    res <- as.data.frame(do.call(rbind, lapply(seq_len(max(w$Strata)), function(j) {
      stats::rbeta(N, shape1 = w[j, "shape1"], shape2 = w[j, "shape2"], ncp = 0)
    })))
    colnames(res) <- c(1:N)
    res
  })
  c(list(w0 = res.0), lst.w)
}
| /R/03sampling.R | no_license | ShiyaWu/ElicitExpertPrior | R | false | false | 1,675 | r | #' Empirical samples from response propensity
#'
#' Draw samples from power prior and updated posterior and later used to
#' evaluate quality indicators.
#'
#' @param n.dat new survey data sets at wave levels
#' @param h.dat historical survey data sets
#' @param s.score historcial-level similarity scores dependent on feature-level weights
#' @param a shape1 parameter with default value 1
#' @param b shape2 parameter with default value 1
#' @param N the number of observations (replication) with default 10000
#' @param svy.ref historical reference exists with 1 and 0 otherwise
#'
#' @return a list from wave 0 to the end
rho.sample <- function(n.dat, h.dat, s.score, a = 1, b = 1, svy.ref, N = 10000) {
  # Draw empirical samples of the response propensity.
  #
  # Wave-0 samples are drawn from the power prior (one Beta per stratum, one
  # row of p.para each); wave 1..W samples are drawn from the stratum-level
  # posteriors returned by pposterior(), split by wave.
  #
  # @param n.dat   new survey data sets at wave levels
  # @param h.dat   historical survey data sets
  # @param s.score historical-level similarity scores
  # @param a,b     Beta shape1/shape2 hyperparameters (defaults 1, 1)
  # @param svy.ref 1 when a historical reference exists, 0 otherwise
  # @param N       number of replications per stratum (default 10000)
  # @return a named list: w0 (prior draws) followed by one data.frame per wave;
  #   each data.frame has one row per stratum and N columns of draws.
  #
  # Bug fix: the prior parameters were previously computed from `n.hat`, which
  # is not an argument of this function (and is undefined); the intended
  # object is the `n.dat` argument.
  p.para <- pprior(n.dat, h.dat, s.score, a, b, svy.ref)
  res.0 <- as.data.frame(do.call(rbind, lapply(seq_len(nrow(p.para)), function(i) {
    stats::rbeta(N, shape1 = p.para[i, "shape1"], shape2 = p.para[i, "shape2"], ncp = 0)
  })))
  colnames(res.0) <- c(1:N)
  post.para <- pposterior(n.dat, h.dat, s.score, a, b, svy.ref)
  lst.w <- lapply(split(post.para, post.para$Wave), function(w) {
    # One Beta draw matrix per wave: rows are strata 1..max(w$Strata).
    res <- as.data.frame(do.call(rbind, lapply(seq_len(max(w$Strata)), function(j) {
      stats::rbeta(N, shape1 = w[j, "shape1"], shape2 = w[j, "shape2"], ncp = 0)
    })))
    colnames(res) <- c(1:N)
    res
  })
  c(list(w0 = res.0), lst.w)
}
|
samp_int_vec <- function(x = 1, y = 1:10) {
  # For each element of `y`, draw one integer uniformly from the inclusive
  # range x..y[i] (the range runs downwards when x > y[i], which is fine
  # because x:y[i] counts down).
  #
  # The x == y[i] case must stay special-cased: sample(n, 1) on a scalar n
  # samples from 1:n rather than returning n itself.
  #
  # Fixes: `1:length(y)` errored on empty `y` (the loop hit `if (NA)`);
  # seq_along() makes empty input return an empty vector.  Output is now
  # preallocated instead of grown element by element.
  #
  # @param x an integer scalar, the common endpoint of every range
  # @param y an integer vector of per-draw endpoints
  # @return a vector the same length as `y`
  out <- integer(length(y))
  for (i in seq_along(y)) {
    out[i] <- if (x == y[i]) x else sample(x:y[i], 1)
  }
  out
}
# Draw one value uniformly from each integer range x[i]:y.
#
# x: a vector; out[i] is drawn from x[i]:y (increasing or decreasing)
# y: a single value (shared end of every range)
#
# The x[i] == y case is special-cased because sample(n, 1) on a scalar n
# would draw from 1:n instead of returning n itself.
samp_vec_int <- function(x = 1:10, y = 1) {
  # Preallocate (the original grew 'out' with repeated assignment) and use
  # seq_along() so an empty 'x' yields an empty result instead of an error.
  out <- integer(length(x))
  for (i in seq_along(x)) {
    if (x[i] != y) {
      out[i] <- sample(x[i]:y, 1, replace = TRUE)
    } else {
      out[i] <- x[i]
    }
  }
  out
}
# Draw one value uniformly from each elementwise integer range x[i]:y[i].
#
# x, y: equal-length vectors; out[i] is drawn from x[i]:y[i] (increasing or
#       decreasing).  The x[i] == y[i] case is special-cased because
#       sample(n, 1) on a scalar n would draw from 1:n instead of returning n.
samp_vec_vec <- function(x = 1:10, y = 1:10) {
  # Fail loudly instead of silently producing NAs when lengths differ.
  stopifnot(length(x) == length(y))
  # Preallocate (the original grew 'out') and use seq_along() so empty
  # input yields an empty result.
  out <- integer(length(y))
  for (i in seq_along(y)) {
    if (x[i] != y[i]) {
      out[i] <- sample(x[i]:y[i], 1, replace = TRUE)
    } else {
      out[i] <- x[i]
    }
  }
  out
}
## Command-line configuration for the "mod_2a" ABC simulations.
## Positional arguments:
##   args[1] chromosomes sampled per population (nchr)
##   args[2] locus length in bp (ll)
##   args[3] number of loci per simulation (nloci)
##   args[4] per-bp recombination rate (recomb)
##   args[5] number of ABC simulations (nsims)
args<-commandArgs(trailingOnly=TRUE)
ms<-"/opt/software/genetics/ms/ms" # path to the ms coalescent simulator binary
cpd<-"./" # directory holding compute_ss.py (summary-statistics script)
mod<-"mod_2a"
nchr<-as.character(args[1])
tgen<-25 # time units per generation; divides event times below to get generations
mu<-1.5e-8 # per-bp mutation rate
recomb<-as.numeric(args[4])
ll<-as.numeric(args[2])#locus length
nsims<-as.numeric(args[5])#number of ABC simulations
nloci<-as.numeric(args[3])#loci to simulate in each sim
# Output file stem encodes the model and its settings.
out<-paste(mod,"_ll",as.character(ll),"_nl",as.character(nloci),"_r",as.character(recomb),"_nc",nchr,sep="")
##PARAMETERS
## Prior draws -- one value per ABC simulation -- for the demographic model.
#Ne Present Time
Ne1BO<-sample(300:32000,nsims,replace=T)
Ne2BO<-sample(300:32000,nsims,replace=T)
Ne3BO<-sample(300:32000,nsims,replace=T)
Ne4BO<-sample(300:32000,nsims,replace=T)
NeST<-sample(300:32000,nsims,replace=T)
Ne1NT<-sample(300:32000,nsims,replace=T)
Ne2NT<-sample(300:32000,nsims,replace=T)
#Migrations
#Mig SUBPOP BO; SUBPOP NT
# NOTE(review): the inline comment says "loguniform", but exp(log(a)) == a,
# so runif(min=exp(log(1e-4)), max=exp(log(0.1))) is a PLAIN uniform draw on
# [1e-4, 0.1].  A log-uniform draw would be exp(runif(n, log(1e-4), log(0.1))).
# The same applies to every migration prior below -- confirm intent before
# interpreting ABC results.
MigBO<-runif(nsims,min=exp(log(10^-4)), max=exp(log(0.1))) #loguniform
MigNT<-runif(nsims,min=exp(log(10^-4)), max=exp(log(0.1)))
#Mig ST-subPop NT
Mig56<-runif(nsims,min=exp(log(10^-5)), max=exp(log(0.1)))
Mig65<-runif(nsims,min=exp(log(10^-5)), max=exp(log(0.1)))
Mig57<-runif(nsims,min=exp(log(10^-5)), max=exp(log(0.1)))
Mig75<-runif(nsims,min=exp(log(10^-5)), max=exp(log(0.1)))
#Mig ST popAnc NT
MigSTNT<-runif(nsims,min=exp(log(10^-5)), max=exp(log(0.1)))
MigNTST<-runif(nsims,min=exp(log(10^-5)), max=exp(log(0.1)))
#Mig BO-ST
MigBOST<-runif(nsims,min=exp(log(10^-6)), max=exp(log(10^-2)))
MigSTBO<-runif(nsims,min=exp(log(10^-6)), max=exp(log(10^-2)))
#Bottleneck Intensity Borneo
# Ancestral size drawn elementwise between the current Ne1BO and 320000;
# rBO is the bottleneck intensity (current/ancestral size ratio).
NeancBO<-samp_vec_int(Ne1BO,320000)
rBO<-Ne1BO/NeancBO
#Ne Ancient
NeancST<-samp_vec_int(NeST,100000)
Neanc1NT<-samp_vec_int(Ne1NT,320000)
Neanc2NT<-samp_vec_int(Ne2NT,320000)
NeancNT<-sample(1000:100000,nsims,replace=T)
#Events Times
tsep4BO<-sample(8750:400000,nsims,replace=T) #T end Bott. BO
BottDur<-sample(250:100000,nsims,replace=T) #Duration of Bottleneck
tbottend<-tsep4BO+BottDur
tsepBOST<-sample(1500000:4000000,nsims,replace=T)
# Migration stop time drawn elementwise between bottleneck end and BO/ST split.
tStopMig<-samp_vec_vec(tbottend,tsepBOST)
tBotNT<-sample(250:100000,nsims,replace=T)
tBotST<-tBotNT
tstrNT<-sample(100000:1500000,nsims,replace=T)
# NT/ST separation drawn elementwise between NT structuring and BO/ST split.
tsepNTST<-samp_vec_vec(tstrNT,tsepBOST)
##SCALED PARAMETERS
# ms works in coalescent units scaled by a reference population size, here
# Ne1BO: theta = 4*N0*mu*L and rho = 4*N0*r*(L-1).  Population sizes become
# fractions of Ne1BO (so sNe1BO is identically 1 by construction), migration
# rates become 4*Ne1BO*m, and event times are expressed in units of
# 4*Ne1BO generations (after converting raw times to generations via tgen).
theta<-4*Ne1BO*mu*ll
srec<-4*Ne1BO*(recomb*(ll-1))
sNe1BO<-Ne1BO*4*mu*ll/theta
sNe2BO<-Ne2BO*4*mu*ll/theta
sNe3BO<-Ne3BO*4*mu*ll/theta
sNe4BO<-Ne4BO*4*mu*ll/theta
sNeST<-NeST*4*mu*ll/theta
sNe1NT<-Ne1NT*4*mu*ll/theta
sNe2NT<-Ne2NT*4*mu*ll/theta
sMigBO<-MigBO*4*Ne1BO
sMigNT<-MigNT*4*Ne1BO
sMig56<-Mig56*4*Ne1BO
sMig65<-Mig65*4*Ne1BO
sMig57<-Mig57*4*Ne1BO
sMig75<-Mig75*4*Ne1BO
sMigSTNT<-MigSTNT*4*Ne1BO
sMigNTST<-MigNTST*4*Ne1BO
sMigBOST<-MigBOST*4*Ne1BO
sMigSTBO<-MigSTBO*4*Ne1BO
sNeancBO<-NeancBO*4*mu*ll/theta
sNeancST<-NeancST*4*mu*ll/theta
sNeancNT<-NeancNT*4*mu*ll/theta
sNeanc1NT<-Neanc1NT*4*mu*ll/theta
sNeanc2NT<-Neanc2NT*4*mu*ll/theta
stsep4BO<-(tsep4BO/tgen)/(4*Ne1BO)
stbottend<-(tbottend/tgen)/(4*Ne1BO)
stsepBOST<-(tsepBOST/tgen)/(4*Ne1BO)
stStopMig<-(tStopMig/tgen)/(4*Ne1BO)
stBotNT<-(tBotNT/tgen)/(4*Ne1BO)
stBotST<-stBotNT
ststrNT<-(tstrNT/tgen)/(4*Ne1BO)
stsepNTST<-(tsepNTST/tgen)/(4*Ne1BO)
# Record both the raw and the ms-scaled parameter draws for the ABC step.
partable<-cbind(Ne1BO,Ne2BO,Ne3BO,Ne4BO,NeST,Ne1NT,Ne2NT,MigBO,MigNT,Mig56,Mig65,Mig57,Mig75,MigSTNT,MigNTST,MigBOST,MigSTBO,NeancBO,rBO,NeancST,Neanc1NT,Neanc2NT,
tsep4BO,BottDur,tbottend,tsepBOST,tStopMig,tBotNT,tBotST,tstrNT,tsepNTST)
colnames(partable)<-c("Ne1BO","Ne2BO","Ne3BO","Ne4BO","NeST","Ne1NT","Ne2NT","MigBO","MigNT","Mig56","Mig65","Mig57","Mig75","MigSTNT","MigNTST","MigBOST","MigSTBO","NeancBO","rBO","NeancST","Neanc1NT","Neanc2NT",
"tsep4BO","BottDur","tbottend","tsepBOST","tStopMig","tBotNT","tBotST","tstrNT","tsepNTST")
partablescaled<-cbind(sNe1BO,sNe2BO,sNe3BO,sNe4BO,sNeST,sNe1NT,sNe2NT,sMigBO,sMigNT,sMig56,sMig65,sMig57,sMig75,sMigSTNT,sMigNTST,sMigBOST,sMigSTBO,sNeancBO,rBO,sNeancST,sNeanc1NT,sNeanc2NT,stsep4BO,stbottend,stsepBOST,stStopMig,stBotNT,stBotST,ststrNT,stsepNTST)
write.table(partable,paste(out,".1.param",sep=""),row.names=F,quote=F,sep="\t")
write.table(partablescaled,paste(out,".1.paramscaled",sep=""),row.names=F,col.names=T,quote=F,sep="\t")
## Build and run one ms command per ABC simulation.  Demographic events
## (-ej population join, -en size change, -em migration change) must be
## given to ms in chronological order, so the event strings in 's' are
## reordered by their event times in 's1' before being concatenated.
i<-1 # NOTE(review): redundant; immediately overwritten by the loop index
for (i in 1:nsims){
# Event flag strings for simulation i.
s<-c()
s[1]<-paste(" -ej ",as.character(stsep4BO[i])," 4 1 ",sep="")
s[2]<-paste(" -ej ",as.character(stsep4BO[i])," 3 1 ",sep="")
s[3]<-paste(" -ej ",as.character(stsep4BO[i])," 2 1 ",sep="")
s[4]<-paste(" -en ",as.character(stsep4BO[i])," 1 ", as.character(rBO[i]),sep="")
s[5]<-paste(" -em ",as.character(stStopMig[i])," 1 5 ", as.character(sMigBOST[i]),sep="")
s[6]<-paste(" -em ",as.character(stStopMig[i])," 5 1 ", as.character(sMigSTBO[i]),sep="")
s[7]<-paste(" -en ",as.character(stBotST[i])," 5 ", as.character(sNeancST[i]),sep="")
s[8]<-paste(" -en ",as.character(stBotNT[i])," 6 ", as.character(sNeanc1NT[i]),sep="")
s[9]<-paste(" -en ",as.character(stBotNT[i])," 7 ", as.character(sNeanc2NT[i]),sep="")
s[10]<-paste(" -en ",as.character(stbottend[i])," 1 ", as.character(sNeancBO[i]),sep="")
s[11]<-paste(" -ej ",as.character(stsepBOST[i])," 1 5 ",sep="")
s[12]<-paste(" -ej ",as.character(ststrNT[i])," 7 6 ",sep="")
s[13]<-paste(" -en ",as.character(ststrNT[i])," 6 ", as.character(sNeancNT[i]),sep="")
s[14]<-paste(" -em ",as.character(ststrNT[i])," 5 6 ", as.character(sMigSTNT[i]),sep="")
s[15]<-paste(" -em ",as.character(ststrNT[i])," 6 5 ", as.character(sMigNTST[i]),sep="")
s[16]<-paste(" -ej ",as.character(stsepNTST[i])," 6 5 ",sep="")
# s1[k] is the (scaled) event time of s[k]; used only for sorting below.
s1<-c()
s1[1]<-stsep4BO[i]
s1[2]<-stsep4BO[i]
s1[3]<-stsep4BO[i]
s1[4]<-stsep4BO[i]
s1[5]<-stStopMig[i]
s1[6]<-stStopMig[i]
s1[7]<-stBotST[i]
s1[8]<-stBotNT[i]
s1[9]<-stBotNT[i]
s1[10]<-stbottend[i]
s1[11]<-stsepBOST[i]
s1[12]<-ststrNT[i]
s1[13]<-ststrNT[i]
s1[14]<-ststrNT[i]
s1[15]<-ststrNT[i]
s1[16]<-stsepNTST[i]
# Chronological ordering of the event flags.
sid<-sort(s1,index.return=T)
s_sort<-s[sid$ix]
part1<-paste(s_sort,collapse="")
# Full ms command line: 7 populations with nchr chromosomes each, theta/rho
# from the scaled draws, plus the sorted demographic events.
li1<-paste(ms," ",as.character(7*as.numeric(nchr))," ",as.character(nloci)," -t ",as.character(theta[i])," -r ",as.character(srec[i])," ",as.character(ll)," -I 7 ",nchr," ",nchr," ",nchr," ",nchr," ",nchr," ",nchr," ",nchr," -n 1 ",as.character(sNe1BO[i])," -n 2 ",as.character(sNe2BO[i])," -n 3 ",as.character(sNe3BO[i])," -n 4 ",as.character(sNe4BO[i])," -n 5 ",as.character(sNeST[i])," -n 6 ",as.character(sNe1NT[i])," -n 7 ",as.character(sNe2NT[i])," -m 1 2 ",as.character(sMigBO[i])," -m 2 1 ",as.character(sMigBO[i])," -m 1 3 ",as.character(sMigBO[i])," -m 3 1 ",as.character(sMigBO[i])," -m 1 4 ",as.character(sMigBO[i])," -m 4 1 ",as.character(sMigBO[i])," -m 2 3 ",as.character(sMigBO[i])," -m 3 2 ",as.character(sMigBO[i])," -m 2 4 ",as.character(sMigBO[i])," -m 4 2 ",as.character(sMigBO[i])," -m 3 4 ",as.character(sMigBO[i])," -m 4 3 ",as.character(sMigBO[i])," -m 5 6 ",as.character(sMig56[i])," -m 6 5 ",as.character(sMig65[i])," -m 5 7 ",as.character(sMig57[i])," -m 7 5 ",as.character(sMig75[i])," -m 6 7 ",as.character(sMigNT[i])," -m 7 6 ",as.character(sMigNT[i]), part1, sep="")
print(i)
#print(li1)
# Pipe ms output into the summary-statistics script; first simulation
# creates the output table (>), subsequent ones append (>>).
if (i==1){
system(paste(li1," | ",cpd,"compute_ss.py -np 7 -nc ",nchr," -w 30 -b 100 -s > ",out,".1.tab",sep=""))
}else{
system(paste(li1," | ",cpd,"compute_ss.py -np 7 -nc ",nchr," -w 30 -b 100 -s >> ",out,".1.tab",sep=""))
}
}
| /Orangos/mod_2a.r | no_license | jiangchb/ABC-FDSS | R | false | false | 7,776 | r | samp_int_vec<-function(x=1,y=1:10){
#x is an integer, y is a vector
# Draws one value uniformly from each integer range x:y[i].  The x == y[i]
# case is handled separately because sample(n, 1) on a scalar n would draw
# from 1:n instead of returning n itself.
# NOTE(review): growing 'out' inside the loop and using 1:length(y) / T are
# discouraged idioms; preallocation and seq_along(y) would be preferable.
out<-c()
for (i in 1:length(y)){
if (x!=y[i]){
out[i]<-sample(x:y[i],1,replace=T)
}else{
out[i]<-x
}
}
return(out)
}
# Draw one value uniformly from each integer range x[i]:y.
#
# x: a vector; out[i] is drawn from x[i]:y (increasing or decreasing)
# y: a single value (shared end of every range)
#
# The x[i] == y case is special-cased because sample(n, 1) on a scalar n
# would draw from 1:n instead of returning n itself.
samp_vec_int <- function(x = 1:10, y = 1) {
  # Preallocate (the original grew 'out' with repeated assignment) and use
  # seq_along() so an empty 'x' yields an empty result instead of an error.
  out <- integer(length(x))
  for (i in seq_along(x)) {
    if (x[i] != y) {
      out[i] <- sample(x[i]:y, 1, replace = TRUE)
    } else {
      out[i] <- x[i]
    }
  }
  out
}
# Draw one value uniformly from each elementwise integer range x[i]:y[i].
#
# x, y: equal-length vectors; out[i] is drawn from x[i]:y[i] (increasing or
#       decreasing).  The x[i] == y[i] case is special-cased because
#       sample(n, 1) on a scalar n would draw from 1:n instead of returning n.
samp_vec_vec <- function(x = 1:10, y = 1:10) {
  # Fail loudly instead of silently producing NAs when lengths differ.
  stopifnot(length(x) == length(y))
  # Preallocate (the original grew 'out') and use seq_along() so empty
  # input yields an empty result.
  out <- integer(length(y))
  for (i in seq_along(y)) {
    if (x[i] != y[i]) {
      out[i] <- sample(x[i]:y[i], 1, replace = TRUE)
    } else {
      out[i] <- x[i]
    }
  }
  out
}
## Command-line configuration for the "mod_2a" ABC simulations.
## Positional arguments:
##   args[1] chromosomes sampled per population (nchr)
##   args[2] locus length in bp (ll)
##   args[3] number of loci per simulation (nloci)
##   args[4] per-bp recombination rate (recomb)
##   args[5] number of ABC simulations (nsims)
args<-commandArgs(trailingOnly=TRUE)
ms<-"/opt/software/genetics/ms/ms" # path to the ms coalescent simulator binary
cpd<-"./" # directory holding compute_ss.py (summary-statistics script)
mod<-"mod_2a"
nchr<-as.character(args[1])
tgen<-25 # time units per generation; divides event times below to get generations
mu<-1.5e-8 # per-bp mutation rate
recomb<-as.numeric(args[4])
ll<-as.numeric(args[2])#locus length
nsims<-as.numeric(args[5])#number of ABC simulations
nloci<-as.numeric(args[3])#loci to simulate in each sim
# Output file stem encodes the model and its settings.
out<-paste(mod,"_ll",as.character(ll),"_nl",as.character(nloci),"_r",as.character(recomb),"_nc",nchr,sep="")
##PARAMETERS
## Prior draws -- one value per ABC simulation -- for the demographic model.
#Ne Present Time
Ne1BO<-sample(300:32000,nsims,replace=T)
Ne2BO<-sample(300:32000,nsims,replace=T)
Ne3BO<-sample(300:32000,nsims,replace=T)
Ne4BO<-sample(300:32000,nsims,replace=T)
NeST<-sample(300:32000,nsims,replace=T)
Ne1NT<-sample(300:32000,nsims,replace=T)
Ne2NT<-sample(300:32000,nsims,replace=T)
#Migrations
#Mig SUBPOP BO; SUBPOP NT
# NOTE(review): the inline comment says "loguniform", but exp(log(a)) == a,
# so runif(min=exp(log(1e-4)), max=exp(log(0.1))) is a PLAIN uniform draw on
# [1e-4, 0.1].  A log-uniform draw would be exp(runif(n, log(1e-4), log(0.1))).
# The same applies to every migration prior below -- confirm intent before
# interpreting ABC results.
MigBO<-runif(nsims,min=exp(log(10^-4)), max=exp(log(0.1))) #loguniform
MigNT<-runif(nsims,min=exp(log(10^-4)), max=exp(log(0.1)))
#Mig ST-subPop NT
Mig56<-runif(nsims,min=exp(log(10^-5)), max=exp(log(0.1)))
Mig65<-runif(nsims,min=exp(log(10^-5)), max=exp(log(0.1)))
Mig57<-runif(nsims,min=exp(log(10^-5)), max=exp(log(0.1)))
Mig75<-runif(nsims,min=exp(log(10^-5)), max=exp(log(0.1)))
#Mig ST popAnc NT
MigSTNT<-runif(nsims,min=exp(log(10^-5)), max=exp(log(0.1)))
MigNTST<-runif(nsims,min=exp(log(10^-5)), max=exp(log(0.1)))
#Mig BO-ST
MigBOST<-runif(nsims,min=exp(log(10^-6)), max=exp(log(10^-2)))
MigSTBO<-runif(nsims,min=exp(log(10^-6)), max=exp(log(10^-2)))
#Bottleneck Intensity Borneo
# Ancestral size drawn elementwise between the current Ne1BO and 320000;
# rBO is the bottleneck intensity (current/ancestral size ratio).
NeancBO<-samp_vec_int(Ne1BO,320000)
rBO<-Ne1BO/NeancBO
#Ne Ancient
NeancST<-samp_vec_int(NeST,100000)
Neanc1NT<-samp_vec_int(Ne1NT,320000)
Neanc2NT<-samp_vec_int(Ne2NT,320000)
NeancNT<-sample(1000:100000,nsims,replace=T)
#Events Times
tsep4BO<-sample(8750:400000,nsims,replace=T) #T end Bott. BO
BottDur<-sample(250:100000,nsims,replace=T) #Duration of Bottleneck
tbottend<-tsep4BO+BottDur
tsepBOST<-sample(1500000:4000000,nsims,replace=T)
# Migration stop time drawn elementwise between bottleneck end and BO/ST split.
tStopMig<-samp_vec_vec(tbottend,tsepBOST)
tBotNT<-sample(250:100000,nsims,replace=T)
tBotST<-tBotNT
tstrNT<-sample(100000:1500000,nsims,replace=T)
# NT/ST separation drawn elementwise between NT structuring and BO/ST split.
tsepNTST<-samp_vec_vec(tstrNT,tsepBOST)
##SCALED PARAMETERS
# ms works in coalescent units scaled by a reference population size, here
# Ne1BO: theta = 4*N0*mu*L and rho = 4*N0*r*(L-1).  Population sizes become
# fractions of Ne1BO (so sNe1BO is identically 1 by construction), migration
# rates become 4*Ne1BO*m, and event times are expressed in units of
# 4*Ne1BO generations (after converting raw times to generations via tgen).
theta<-4*Ne1BO*mu*ll
srec<-4*Ne1BO*(recomb*(ll-1))
sNe1BO<-Ne1BO*4*mu*ll/theta
sNe2BO<-Ne2BO*4*mu*ll/theta
sNe3BO<-Ne3BO*4*mu*ll/theta
sNe4BO<-Ne4BO*4*mu*ll/theta
sNeST<-NeST*4*mu*ll/theta
sNe1NT<-Ne1NT*4*mu*ll/theta
sNe2NT<-Ne2NT*4*mu*ll/theta
sMigBO<-MigBO*4*Ne1BO
sMigNT<-MigNT*4*Ne1BO
sMig56<-Mig56*4*Ne1BO
sMig65<-Mig65*4*Ne1BO
sMig57<-Mig57*4*Ne1BO
sMig75<-Mig75*4*Ne1BO
sMigSTNT<-MigSTNT*4*Ne1BO
sMigNTST<-MigNTST*4*Ne1BO
sMigBOST<-MigBOST*4*Ne1BO
sMigSTBO<-MigSTBO*4*Ne1BO
sNeancBO<-NeancBO*4*mu*ll/theta
sNeancST<-NeancST*4*mu*ll/theta
sNeancNT<-NeancNT*4*mu*ll/theta
sNeanc1NT<-Neanc1NT*4*mu*ll/theta
sNeanc2NT<-Neanc2NT*4*mu*ll/theta
stsep4BO<-(tsep4BO/tgen)/(4*Ne1BO)
stbottend<-(tbottend/tgen)/(4*Ne1BO)
stsepBOST<-(tsepBOST/tgen)/(4*Ne1BO)
stStopMig<-(tStopMig/tgen)/(4*Ne1BO)
stBotNT<-(tBotNT/tgen)/(4*Ne1BO)
stBotST<-stBotNT
ststrNT<-(tstrNT/tgen)/(4*Ne1BO)
stsepNTST<-(tsepNTST/tgen)/(4*Ne1BO)
# Record both the raw and the ms-scaled parameter draws for the ABC step.
partable<-cbind(Ne1BO,Ne2BO,Ne3BO,Ne4BO,NeST,Ne1NT,Ne2NT,MigBO,MigNT,Mig56,Mig65,Mig57,Mig75,MigSTNT,MigNTST,MigBOST,MigSTBO,NeancBO,rBO,NeancST,Neanc1NT,Neanc2NT,
tsep4BO,BottDur,tbottend,tsepBOST,tStopMig,tBotNT,tBotST,tstrNT,tsepNTST)
colnames(partable)<-c("Ne1BO","Ne2BO","Ne3BO","Ne4BO","NeST","Ne1NT","Ne2NT","MigBO","MigNT","Mig56","Mig65","Mig57","Mig75","MigSTNT","MigNTST","MigBOST","MigSTBO","NeancBO","rBO","NeancST","Neanc1NT","Neanc2NT",
"tsep4BO","BottDur","tbottend","tsepBOST","tStopMig","tBotNT","tBotST","tstrNT","tsepNTST")
partablescaled<-cbind(sNe1BO,sNe2BO,sNe3BO,sNe4BO,sNeST,sNe1NT,sNe2NT,sMigBO,sMigNT,sMig56,sMig65,sMig57,sMig75,sMigSTNT,sMigNTST,sMigBOST,sMigSTBO,sNeancBO,rBO,sNeancST,sNeanc1NT,sNeanc2NT,stsep4BO,stbottend,stsepBOST,stStopMig,stBotNT,stBotST,ststrNT,stsepNTST)
write.table(partable,paste(out,".1.param",sep=""),row.names=F,quote=F,sep="\t")
write.table(partablescaled,paste(out,".1.paramscaled",sep=""),row.names=F,col.names=T,quote=F,sep="\t")
## Build and run one ms command per ABC simulation.  Demographic events
## (-ej population join, -en size change, -em migration change) must be
## given to ms in chronological order, so the event strings in 's' are
## reordered by their event times in 's1' before being concatenated.
i<-1 # NOTE(review): redundant; immediately overwritten by the loop index
for (i in 1:nsims){
# Event flag strings for simulation i.
s<-c()
s[1]<-paste(" -ej ",as.character(stsep4BO[i])," 4 1 ",sep="")
s[2]<-paste(" -ej ",as.character(stsep4BO[i])," 3 1 ",sep="")
s[3]<-paste(" -ej ",as.character(stsep4BO[i])," 2 1 ",sep="")
s[4]<-paste(" -en ",as.character(stsep4BO[i])," 1 ", as.character(rBO[i]),sep="")
s[5]<-paste(" -em ",as.character(stStopMig[i])," 1 5 ", as.character(sMigBOST[i]),sep="")
s[6]<-paste(" -em ",as.character(stStopMig[i])," 5 1 ", as.character(sMigSTBO[i]),sep="")
s[7]<-paste(" -en ",as.character(stBotST[i])," 5 ", as.character(sNeancST[i]),sep="")
s[8]<-paste(" -en ",as.character(stBotNT[i])," 6 ", as.character(sNeanc1NT[i]),sep="")
s[9]<-paste(" -en ",as.character(stBotNT[i])," 7 ", as.character(sNeanc2NT[i]),sep="")
s[10]<-paste(" -en ",as.character(stbottend[i])," 1 ", as.character(sNeancBO[i]),sep="")
s[11]<-paste(" -ej ",as.character(stsepBOST[i])," 1 5 ",sep="")
s[12]<-paste(" -ej ",as.character(ststrNT[i])," 7 6 ",sep="")
s[13]<-paste(" -en ",as.character(ststrNT[i])," 6 ", as.character(sNeancNT[i]),sep="")
s[14]<-paste(" -em ",as.character(ststrNT[i])," 5 6 ", as.character(sMigSTNT[i]),sep="")
s[15]<-paste(" -em ",as.character(ststrNT[i])," 6 5 ", as.character(sMigNTST[i]),sep="")
s[16]<-paste(" -ej ",as.character(stsepNTST[i])," 6 5 ",sep="")
# s1[k] is the (scaled) event time of s[k]; used only for sorting below.
s1<-c()
s1[1]<-stsep4BO[i]
s1[2]<-stsep4BO[i]
s1[3]<-stsep4BO[i]
s1[4]<-stsep4BO[i]
s1[5]<-stStopMig[i]
s1[6]<-stStopMig[i]
s1[7]<-stBotST[i]
s1[8]<-stBotNT[i]
s1[9]<-stBotNT[i]
s1[10]<-stbottend[i]
s1[11]<-stsepBOST[i]
s1[12]<-ststrNT[i]
s1[13]<-ststrNT[i]
s1[14]<-ststrNT[i]
s1[15]<-ststrNT[i]
s1[16]<-stsepNTST[i]
# Chronological ordering of the event flags.
sid<-sort(s1,index.return=T)
s_sort<-s[sid$ix]
part1<-paste(s_sort,collapse="")
# Full ms command line: 7 populations with nchr chromosomes each, theta/rho
# from the scaled draws, plus the sorted demographic events.
li1<-paste(ms," ",as.character(7*as.numeric(nchr))," ",as.character(nloci)," -t ",as.character(theta[i])," -r ",as.character(srec[i])," ",as.character(ll)," -I 7 ",nchr," ",nchr," ",nchr," ",nchr," ",nchr," ",nchr," ",nchr," -n 1 ",as.character(sNe1BO[i])," -n 2 ",as.character(sNe2BO[i])," -n 3 ",as.character(sNe3BO[i])," -n 4 ",as.character(sNe4BO[i])," -n 5 ",as.character(sNeST[i])," -n 6 ",as.character(sNe1NT[i])," -n 7 ",as.character(sNe2NT[i])," -m 1 2 ",as.character(sMigBO[i])," -m 2 1 ",as.character(sMigBO[i])," -m 1 3 ",as.character(sMigBO[i])," -m 3 1 ",as.character(sMigBO[i])," -m 1 4 ",as.character(sMigBO[i])," -m 4 1 ",as.character(sMigBO[i])," -m 2 3 ",as.character(sMigBO[i])," -m 3 2 ",as.character(sMigBO[i])," -m 2 4 ",as.character(sMigBO[i])," -m 4 2 ",as.character(sMigBO[i])," -m 3 4 ",as.character(sMigBO[i])," -m 4 3 ",as.character(sMigBO[i])," -m 5 6 ",as.character(sMig56[i])," -m 6 5 ",as.character(sMig65[i])," -m 5 7 ",as.character(sMig57[i])," -m 7 5 ",as.character(sMig75[i])," -m 6 7 ",as.character(sMigNT[i])," -m 7 6 ",as.character(sMigNT[i]), part1, sep="")
print(i)
#print(li1)
# Pipe ms output into the summary-statistics script; first simulation
# creates the output table (>), subsequent ones append (>>).
if (i==1){
system(paste(li1," | ",cpd,"compute_ss.py -np 7 -nc ",nchr," -w 30 -b 100 -s > ",out,".1.tab",sep=""))
}else{
system(paste(li1," | ",cpd,"compute_ss.py -np 7 -nc ",nchr," -w 30 -b 100 -s >> ",out,".1.tab",sep=""))
}
}
|
#' Server side for rendering cases plot (main plot)
#'
#'@import plotly
#'
#'@noRd
#'
render_case <- function(){
  renderPlotly({
    # Total confirmed cases per country across the whole data set.
    case_totals <- summarise(
      group_by(covid_asean_df, country),
      confirmed_cases = sum(new_cases)
    )
    # Bar-style plotly figure of totals, with the mode bar hidden.
    fig <- plot_ly(case_totals, x = ~country, y = ~confirmed_cases)
    config(fig, displayModeBar = F)
  })
}
| /covidasean.Rcheck/00_pkg_src/covidasean/R/render_case.R | permissive | etc5523-2020/r-package-assessment-dedi0003 | R | false | false | 345 | r | #' Server side for rendering cases plot (main plot)
#'
#'@import plotly
#'
#'@noRd
#'
render_case <- function(){
  renderPlotly({
    # Total confirmed cases per country across the whole data set.
    case_totals <- summarise(
      group_by(covid_asean_df, country),
      confirmed_cases = sum(new_cases)
    )
    # Bar-style plotly figure of totals, with the mode bar hidden.
    fig <- plot_ly(case_totals, x = ~country, y = ~confirmed_cases)
    config(fig, displayModeBar = F)
  })
}
|
## Shiny word-prediction app: global setup.
library(shiny)
library(data.table)
library(NLP)
library(tm)
# NOTE(review): hard-coded, machine-specific absolute path -- the app only
# runs from this one machine.  Prefer a relative path.
setwd("/Users/christiankukuk/Documents/R workspace/capstone/result")
#Read in Dataset
# n-gram frequency table; keyed on the word columns so the Word_Count*
# functions below can do fast keyed joins via Dataset[list(...)].
Dataset <- fread("ngram.txt")
setkeyv(Dataset, c('w1', 'w2', 'w3', 'w4', 'freq'))
# Normalise free text for n-gram lookup: lower-case it, collapse runs of
# whitespace (tm::stripWhitespace), and strip every character that is not
# a Unicode letter or whitespace.
#
# Text: a character string.  Returns the cleaned string.
Translate_Input <- function(Text){
  Mod_Input <- tolower(Text)
  Mod_Input <- stripWhitespace(Mod_Input)
  # \p{L} requires perl = TRUE; TRUE/FALSE spelled out (T/F are reassignable).
  Mod_Input <- gsub("[^\\p{L}\\s]+", "", Mod_Input, ignore.case = FALSE, perl = TRUE)
  return(Mod_Input)
}
# Normalise free text exactly as Translate_Input() does, then split it on
# single spaces into a character vector of words.
#
# Text: a character string.  Returns a character vector of cleaned words.
Split_Translate_Input <- function(Text){
  # Delegate to Translate_Input() instead of duplicating its three
  # cleaning steps (the original repeated them verbatim).
  Mod_Input <- Translate_Input(Text)
  Split_Trans_Input <- unlist(strsplit(Mod_Input, " "))
  return(Split_Trans_Input)
}
# Predict the next word from a single context word (lookup on key columns
# w1 = "<s>", w2 = word of the keyed data.table 'Dataset').
#
# TextInputA: character vector whose first element is the cleaned context word.
# Side effects: overwrites the globals 'NgramsTable' and 'AlternativeGuess'
# via '<<-' -- presumably read elsewhere in the app (e.g. server.R) to show
# the top-5 candidates; TODO confirm.
# Returns: the highest-frequency candidate, or an apology string on a miss.
Word_Count1 <- function(TextInputA){
# Keyed join: all rows whose (w1, w2) match ("<s>", input word).
NgramsTable <<- Dataset[list("<s>", TextInputA[1])]
# Drop sentence-boundary candidates; sort by descending frequency.
NgramsTable <<- NgramsTable[NgramsTable$w3!="<s>", ]
NgramsTable <<- NgramsTable[order(NgramsTable$freq, decreasing=TRUE), ]
#List Alternatives
# Top-5 distinct candidates with their relative frequency (percent).
AlternativeGuess <<- as.data.frame(NgramsTable)
AlternativeGuess <<- AlternativeGuess[1:5, c("w3", "freq")]
AlternativeGuess <<- AlternativeGuess[!is.na(AlternativeGuess$freq), ]
AlternativeGuess <<- AlternativeGuess[!duplicated(AlternativeGuess), ]
if(nrow(AlternativeGuess)==0){
AlternativeGuess <<- data.frame(Word=NA, Likelihood=NA)
}else{
# '<-' creates a modified local copy; the following '<<-' writes it back
# to the global (which is why that seemingly redundant line exists).
AlternativeGuess$freq <- round(AlternativeGuess$freq/sum(AlternativeGuess$freq)*100, 1)
AlternativeGuess <<- AlternativeGuess
colnames(AlternativeGuess) <<- c("Word", "Likelihood")
rownames(AlternativeGuess) <<- NULL
}
# Best guess = most frequent w3; fall back to an apology message on a miss.
Guess_Output <- NgramsTable$w3[1]
if(is.na(Guess_Output)|is.null(Guess_Output)){
Guess_Output <- "We're sorry. This app could not predict the next word. You either picked a rare word or possibly mispelled it."
}
return(Guess_Output)
}
# Predict the next word from a two-word context (keyed join on w1 = "<s>",
# w2, w3); backs off to Word_Count1() with the last word on a miss.
#
# TextInputB: character vector of the two cleaned context words.
# Side effects: overwrites globals 'NgramsTable' and 'AlternativeGuess'
# via '<<-' (same pattern as Word_Count1).
Word_Count2 <- function(TextInputB){
# Keyed join on the first three key columns.
NgramsTable <<- Dataset[list("<s>", TextInputB[1], TextInputB[2])]
NgramsTable <<- NgramsTable[NgramsTable$w4!="<s>", ]
NgramsTable <<- NgramsTable[order(NgramsTable$freq, decreasing=TRUE), ]
#List Alternatives
# Top-5 distinct candidates with their relative frequency (percent).
AlternativeGuess <<- as.data.frame(NgramsTable)
AlternativeGuess <<- AlternativeGuess[1:5, c("w4", "freq")]
AlternativeGuess <<- AlternativeGuess[!is.na(AlternativeGuess$freq), ]
AlternativeGuess <<- AlternativeGuess[!duplicated(AlternativeGuess), ]
if(nrow(AlternativeGuess)==0){
AlternativeGuess <<- data.frame(Word=NA, Likelihood=NA)
}else{
# Local '<-' copy, then '<<-' to publish the modified table globally.
AlternativeGuess$freq <- round(AlternativeGuess$freq/sum(AlternativeGuess$freq)*100, 1)
AlternativeGuess <<- AlternativeGuess
colnames(AlternativeGuess) <<- c("Word", "Likelihood")
rownames(AlternativeGuess) <<- NULL
}
# Best guess = most frequent w4; back off to the unigram-context predictor.
Guess_Output <- NgramsTable$w4[1]
if(is.na(Guess_Output)|is.null(Guess_Output)){
Guess_Output <- Word_Count1(TextInputB[2])
}
return(Guess_Output)
}
# Predict the next word from a three-word context (keyed join on w1 = "<s>",
# w2, w3, w4; candidate column is w5, which is not part of the key).
# Backs off to Word_Count2() and then Word_Count1() on misses.
#
# TextInputC: character vector of the three cleaned context words.
# Side effects: overwrites globals 'NgramsTable' and 'AlternativeGuess'
# via '<<-' (same pattern as Word_Count1/Word_Count2).
Word_Count3 <- function(TextInputC){
# Keyed join on the first four key columns.
NgramsTable <<- Dataset[list("<s>", TextInputC[1], TextInputC[2], TextInputC[3])]
NgramsTable <<- NgramsTable[NgramsTable$w5!="<s>", ]
NgramsTable <<- NgramsTable[order(NgramsTable$freq, decreasing=TRUE), ]
#List Alternatives
# Top-5 distinct candidates with their relative frequency (percent).
AlternativeGuess <<- as.data.frame(NgramsTable)
AlternativeGuess <<- AlternativeGuess[1:5, c("w5", "freq")]
AlternativeGuess <<- AlternativeGuess[!is.na(AlternativeGuess$freq), ]
AlternativeGuess <<- AlternativeGuess[!duplicated(AlternativeGuess), ]
if(nrow(AlternativeGuess)==0){
AlternativeGuess <<- data.frame(Word=NA, Likelihood=NA)
}else{
# Local '<-' copy, then '<<-' to publish the modified table globally.
AlternativeGuess$freq <- round(AlternativeGuess$freq/sum(AlternativeGuess$freq)*100, 1)
AlternativeGuess <<- AlternativeGuess
colnames(AlternativeGuess) <<- c("Word", "Likelihood")
rownames(AlternativeGuess) <<- NULL
}
# Best guess = most frequent w5; back off to shorter contexts on a miss.
Guess_Output <- NgramsTable$w5[1]
if(is.na(Guess_Output)|is.null(Guess_Output)){
Shortened_Input <- c(TextInputC[2], TextInputC[3])
Guess_Output <- Word_Count2(Shortened_Input)
if(is.na(Guess_Output)|is.null(Guess_Output)){
Guess_Output <- Word_Count1(TextInputC[3])
}
}
return(Guess_Output)
} | /global.R | no_license | Cocu23/final_project | R | false | false | 4,033 | r | library(shiny)
library(data.table)
library(NLP)
library(tm)
# NOTE(review): hard-coded, machine-specific absolute path -- the app only
# runs from this one machine.  Prefer a relative path.
setwd("/Users/christiankukuk/Documents/R workspace/capstone/result")
#Read in Dataset
# n-gram frequency table; keyed on the word columns so the Word_Count*
# functions below can do fast keyed joins via Dataset[list(...)].
Dataset <- fread("ngram.txt")
setkeyv(Dataset, c('w1', 'w2', 'w3', 'w4', 'freq'))
# Normalise free text for n-gram lookup: lower-case it, collapse runs of
# whitespace (tm::stripWhitespace), and strip every character that is not
# a Unicode letter or whitespace.
#
# Text: a character string.  Returns the cleaned string.
Translate_Input <- function(Text){
  Mod_Input <- tolower(Text)
  Mod_Input <- stripWhitespace(Mod_Input)
  # \p{L} requires perl = TRUE; TRUE/FALSE spelled out (T/F are reassignable).
  Mod_Input <- gsub("[^\\p{L}\\s]+", "", Mod_Input, ignore.case = FALSE, perl = TRUE)
  return(Mod_Input)
}
# Normalise free text exactly as Translate_Input() does, then split it on
# single spaces into a character vector of words.
#
# Text: a character string.  Returns a character vector of cleaned words.
Split_Translate_Input <- function(Text){
  # Delegate to Translate_Input() instead of duplicating its three
  # cleaning steps (the original repeated them verbatim).
  Mod_Input <- Translate_Input(Text)
  Split_Trans_Input <- unlist(strsplit(Mod_Input, " "))
  return(Split_Trans_Input)
}
# Predict the next word from a single context word (lookup on key columns
# w1 = "<s>", w2 = word of the keyed data.table 'Dataset').
#
# TextInputA: character vector whose first element is the cleaned context word.
# Side effects: overwrites the globals 'NgramsTable' and 'AlternativeGuess'
# via '<<-' -- presumably read elsewhere in the app (e.g. server.R) to show
# the top-5 candidates; TODO confirm.
# Returns: the highest-frequency candidate, or an apology string on a miss.
Word_Count1 <- function(TextInputA){
# Keyed join: all rows whose (w1, w2) match ("<s>", input word).
NgramsTable <<- Dataset[list("<s>", TextInputA[1])]
# Drop sentence-boundary candidates; sort by descending frequency.
NgramsTable <<- NgramsTable[NgramsTable$w3!="<s>", ]
NgramsTable <<- NgramsTable[order(NgramsTable$freq, decreasing=TRUE), ]
#List Alternatives
# Top-5 distinct candidates with their relative frequency (percent).
AlternativeGuess <<- as.data.frame(NgramsTable)
AlternativeGuess <<- AlternativeGuess[1:5, c("w3", "freq")]
AlternativeGuess <<- AlternativeGuess[!is.na(AlternativeGuess$freq), ]
AlternativeGuess <<- AlternativeGuess[!duplicated(AlternativeGuess), ]
if(nrow(AlternativeGuess)==0){
AlternativeGuess <<- data.frame(Word=NA, Likelihood=NA)
}else{
# '<-' creates a modified local copy; the following '<<-' writes it back
# to the global (which is why that seemingly redundant line exists).
AlternativeGuess$freq <- round(AlternativeGuess$freq/sum(AlternativeGuess$freq)*100, 1)
AlternativeGuess <<- AlternativeGuess
colnames(AlternativeGuess) <<- c("Word", "Likelihood")
rownames(AlternativeGuess) <<- NULL
}
# Best guess = most frequent w3; fall back to an apology message on a miss.
Guess_Output <- NgramsTable$w3[1]
if(is.na(Guess_Output)|is.null(Guess_Output)){
Guess_Output <- "We're sorry. This app could not predict the next word. You either picked a rare word or possibly mispelled it."
}
return(Guess_Output)
}
# Predict the next word from a two-word context (keyed join on w1 = "<s>",
# w2, w3); backs off to Word_Count1() with the last word on a miss.
#
# TextInputB: character vector of the two cleaned context words.
# Side effects: overwrites globals 'NgramsTable' and 'AlternativeGuess'
# via '<<-' (same pattern as Word_Count1).
Word_Count2 <- function(TextInputB){
# Keyed join on the first three key columns.
NgramsTable <<- Dataset[list("<s>", TextInputB[1], TextInputB[2])]
NgramsTable <<- NgramsTable[NgramsTable$w4!="<s>", ]
NgramsTable <<- NgramsTable[order(NgramsTable$freq, decreasing=TRUE), ]
#List Alternatives
# Top-5 distinct candidates with their relative frequency (percent).
AlternativeGuess <<- as.data.frame(NgramsTable)
AlternativeGuess <<- AlternativeGuess[1:5, c("w4", "freq")]
AlternativeGuess <<- AlternativeGuess[!is.na(AlternativeGuess$freq), ]
AlternativeGuess <<- AlternativeGuess[!duplicated(AlternativeGuess), ]
if(nrow(AlternativeGuess)==0){
AlternativeGuess <<- data.frame(Word=NA, Likelihood=NA)
}else{
# Local '<-' copy, then '<<-' to publish the modified table globally.
AlternativeGuess$freq <- round(AlternativeGuess$freq/sum(AlternativeGuess$freq)*100, 1)
AlternativeGuess <<- AlternativeGuess
colnames(AlternativeGuess) <<- c("Word", "Likelihood")
rownames(AlternativeGuess) <<- NULL
}
# Best guess = most frequent w4; back off to the unigram-context predictor.
Guess_Output <- NgramsTable$w4[1]
if(is.na(Guess_Output)|is.null(Guess_Output)){
Guess_Output <- Word_Count1(TextInputB[2])
}
return(Guess_Output)
}
# Predict the next word from a three-word context (keyed join on w1 = "<s>",
# w2, w3, w4; candidate column is w5, which is not part of the key).
# Backs off to Word_Count2() and then Word_Count1() on misses.
#
# TextInputC: character vector of the three cleaned context words.
# Side effects: overwrites globals 'NgramsTable' and 'AlternativeGuess'
# via '<<-' (same pattern as Word_Count1/Word_Count2).
Word_Count3 <- function(TextInputC){
# Keyed join on the first four key columns.
NgramsTable <<- Dataset[list("<s>", TextInputC[1], TextInputC[2], TextInputC[3])]
NgramsTable <<- NgramsTable[NgramsTable$w5!="<s>", ]
NgramsTable <<- NgramsTable[order(NgramsTable$freq, decreasing=TRUE), ]
#List Alternatives
# Top-5 distinct candidates with their relative frequency (percent).
AlternativeGuess <<- as.data.frame(NgramsTable)
AlternativeGuess <<- AlternativeGuess[1:5, c("w5", "freq")]
AlternativeGuess <<- AlternativeGuess[!is.na(AlternativeGuess$freq), ]
AlternativeGuess <<- AlternativeGuess[!duplicated(AlternativeGuess), ]
if(nrow(AlternativeGuess)==0){
AlternativeGuess <<- data.frame(Word=NA, Likelihood=NA)
}else{
# Local '<-' copy, then '<<-' to publish the modified table globally.
AlternativeGuess$freq <- round(AlternativeGuess$freq/sum(AlternativeGuess$freq)*100, 1)
AlternativeGuess <<- AlternativeGuess
colnames(AlternativeGuess) <<- c("Word", "Likelihood")
rownames(AlternativeGuess) <<- NULL
}
# Best guess = most frequent w5; back off to shorter contexts on a miss.
Guess_Output <- NgramsTable$w5[1]
if(is.na(Guess_Output)|is.null(Guess_Output)){
Shortened_Input <- c(TextInputC[2], TextInputC[3])
Guess_Output <- Word_Count2(Shortened_Input)
if(is.na(Guess_Output)|is.null(Guess_Output)){
Guess_Output <- Word_Count1(TextInputC[3])
}
}
return(Guess_Output)
}
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# Fixed seed so the resampling (and hence the reported metrics) is reproducible.
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "backache")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
# '<-' instead of '=' for top-level assignment (consistent with the rest
# of the script).
task <- makeClassifTask(id = "task", data = dataset$data, target = "col_33")
lrn <- makeLearner("classif.ranger", par.vals = list(), predict.type = "prob")
#:# hash
#:# bb626f22af71a6bf4ece11acc43a4d4d
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation with several classification measures.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
# paste0() with a single literal was a no-op; use the string directly.
sink("sessionInfo.txt")
sessionInfo()
sink()
| /models/openml_backache/classification_col_33/bb626f22af71a6bf4ece11acc43a4d4d/code.R | no_license | pysiakk/CaseStudies2019S | R | false | false | 683 | r | #:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# Fixed seed so the resampling (and hence the reported metrics) is reproducible.
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "backache")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
# '<-' instead of '=' for top-level assignment (consistent with the rest
# of the script).
task <- makeClassifTask(id = "task", data = dataset$data, target = "col_33")
lrn <- makeLearner("classif.ranger", par.vals = list(), predict.type = "prob")
#:# hash
#:# bb626f22af71a6bf4ece11acc43a4d4d
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation with several classification measures.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
# paste0() with a single literal was a no-op; use the string directly.
sink("sessionInfo.txt")
sessionInfo()
sink()
|
# Pre-defined column classes so read.table can skip type inference.
classes <- c("character", "character", "numeric","numeric","numeric","numeric","numeric","numeric","numeric")
# Read the full data file; "?" marks missing values.
tabAll <- read.table(file="household_power_consumption.txt", header=TRUE, colClasses=classes, sep=";", na.strings="?")
# Keep only the two target dates, then free the full table.
maindata <- subset(tabAll, Date=="1/2/2007" | Date=="2/2/2007")
remove(tabAll)
# Merge Date and Time into a single POSIXlt/POSIXt timestamp column.
maindata$DateTime <- strptime(paste(maindata$Date,maindata$Time), "%d/%m/%Y %H:%M:%S")
# Open a 480x480 PNG device in the current working directory.
png("plot2.png", width=480, height=480)
# Line plot of global active power over time.
with(maindata, plot(DateTime, Global_active_power,type="l", xlab="", ylab="Global Active Power (kilowatts)"))
# Flush and close the PNG device.
dev.off()
remove(maindata) | /plot2.R | no_license | thtranvn/ExData_Plotting1 | R | false | false | 855 | r | # Pre-defined classes of columns to make reading file faster */
# Pre-defined column classes so read.table can skip type inference.
classes <- c("character", "character", "numeric","numeric","numeric","numeric","numeric","numeric","numeric")
# Read the full data file; "?" marks missing values.
tabAll <- read.table(file="household_power_consumption.txt", header=TRUE, colClasses=classes, sep=";", na.strings="?")
# Keep only the two target dates, then free the full table.
maindata <- subset(tabAll, Date=="1/2/2007" | Date=="2/2/2007")
remove(tabAll)
# Merge Date and Time into a single POSIXlt/POSIXt timestamp column.
maindata$DateTime <- strptime(paste(maindata$Date,maindata$Time), "%d/%m/%Y %H:%M:%S")
# Open a 480x480 PNG device in the current working directory.
png("plot2.png", width=480, height=480)
# Line plot of global active power over time.
with(maindata, plot(DateTime, Global_active_power,type="l", xlab="", ylab="Global Active Power (kilowatts)"))
# Flush and close the PNG device.
dev.off()
remove(maindata)
\name{twodranduphill}
\alias{twodranduphill}
\title{uphill search on matrix function}
\usage{
twodranduphill(f, x, y, s, n = 100, t = 100)
}
\arguments{
\item{f}{matrix of data set}
\item{y}{is the starting point for the search}
\item{x}{is the starting point for the search}
\item{n}{is the number of iteration for the search}
\item{s}{standard deviation for each search jump, default
at 1}
\item{t}{is the temperature, with default value 100}
}
\description{
This function performs a random uphill search on any
2-dimensional data set
}
\examples{
twodranduphill(twodfunction, 3, 2, 1, 10, 1)
}
\author{
Ernest Chan \email{faiernest418@gmail.com}
}
| /man/twodranduphill.Rd | no_license | JackStat/CompPack | R | false | false | 681 | rd | \name{twodranduphill}
\alias{twodranduphill}
\title{uphill search on matrix function}
\usage{
twodranduphill(f, x, y, s, n = 100, t = 100)
}
\arguments{
\item{f}{matrix of data set}
\item{y}{is the starting point for the search}
\item{x}{is the starting point for the search}
\item{n}{is the number of iteration for the search}
\item{s}{standard devation for each search jump, default
at 1}
\item{t}{is the tempearture, where its default at 100}
}
\description{
This function is use to do a random uphill search on any
2 demision data set
}
\examples{
twodranduphill(twodfunction, 3, 2, 1, 10, 1)
}
\author{
Ernest Chan \email{faiernest418@gmail.com}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pkgcheck-fn.R
\name{pkgcheck}
\alias{pkgcheck}
\title{Generate report on package compliance with rOpenSci Statistical Software
requirements}
\usage{
pkgcheck(
path = ".",
goodpractice = TRUE,
use_cache = TRUE,
extra_env = .GlobalEnv
)
}
\arguments{
\item{path}{Path to local repository}
\item{goodpractice}{If \code{FALSE}, skip goodpractice checks. May be useful in
development stages to more quickly check other aspects.}
\item{use_cache}{Checks are cached for rapid retrieval, and only re-run if
the git hash of the local repository changes. Setting \code{use_cache} to \code{FALSE}
will force checks to be re-run even if the git hash has not changed.}
\item{extra_env}{Additional environments from which to collate checks. Other
package names may be appended using \code{c}, as in \code{c(.GlobalEnv, "mypkg")}.}
}
\value{
A \code{pkgcheck} object detailing all package assessments automatically
applied to packages submitted for peer review.
}
\description{
Generate report on package compliance with rOpenSci Statistical Software
requirements
}
\examples{
\dontrun{
checks <- pkgcheck ("/path/to/my/package") # default full check
summary (checks)
# Or to run only checks implemented in 'pkgcheck' and not the
# additional \pkg{goodpractice} checks:
checks <- pkgcheck ("/path/to/my/package", goodpractice = FALSE)
summary (checks)
}
}
\seealso{
Other pkgcheck_fns:
\code{\link{pkgcheck_bg}()},
\code{\link{print.pkgcheck}()}
}
\concept{pkgcheck_fns}
| /man/pkgcheck.Rd | no_license | ropensci-review-tools/pkgcheck | R | false | true | 1,544 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pkgcheck-fn.R
\name{pkgcheck}
\alias{pkgcheck}
\title{Generate report on package compliance with rOpenSci Statistical Software
requirements}
\usage{
pkgcheck(
path = ".",
goodpractice = TRUE,
use_cache = TRUE,
extra_env = .GlobalEnv
)
}
\arguments{
\item{path}{Path to local repository}
\item{goodpractice}{If \code{FALSE}, skip goodpractice checks. May be useful in
development stages to more quickly check other aspects.}
\item{use_cache}{Checks are cached for rapid retrieval, and only re-run if
the git hash of the local repository changes. Setting \code{use_cache} to \code{FALSE}
will force checks to be re-run even if the git hash has not changed.}
\item{extra_env}{Additional environments from which to collate checks. Other
package names may be appended using \code{c}, as in \code{c(.GlobalEnv, "mypkg")}.}
}
\value{
A \code{pkgcheck} object detailing all package assessments automatically
applied to packages submitted for peer review.
}
\description{
Generate report on package compliance with rOpenSci Statistical Software
requirements
}
\examples{
\dontrun{
checks <- pkgcheck ("/path/to/my/package") # default full check
summary (checks)
# Or to run only checks implemented in 'pkgcheck' and not the
# additional \pkg{goodpractice} checks:
checks <- pkgcheck ("/path/to/my/package", goodpractice = FALSE)
summary (checks)
}
}
\seealso{
Other pkgcheck_fns:
\code{\link{pkgcheck_bg}()},
\code{\link{print.pkgcheck}()}
}
\concept{pkgcheck_fns}
|
library(magrittr)
# this script analyzes depth of coverage between males and females
# in fundulus grandis. it identifies two copy-number variable regions
# that seem to be associated with sex, although not absolutely
# determinative of it.
# Sliding-window sum: for each position i of x, sum x over the index window
# [i - winsize, i + winsize], clamped to the bounds of the vector.
# NAs are dropped from each window's sum.
#
# x       numeric vector
# winsize integer half-width of the smoothing window
# returns numeric vector the same length as x (numeric(0) for empty input)
smother <- function(x, winsize) {
  # seq_along() is safe for zero-length input; the previous 1:length(x)
  # produced c(1, 0) and returned spurious values for empty x
  vec <- seq_along(x)
  start <- vec - winsize
  start[start < 1] <- 1
  end <- vec + winsize
  end[end > length(x)] <- length(x)
  # vapply is type-stable (always numeric), unlike apply(win, MAR = 1, ...),
  # which relied on partial matching of MARGIN and could return a list
  # for a zero-row window matrix
  vapply(
    vec,
    function(i) sum(x[start[i]:end[i]], na.rm = TRUE),
    numeric(1)
  )
}
# Column-name vector for the merged VCF / per-site depth tables.  The first
# nine entries are the standard VCF fixed columns; the remainder are
# per-sample IDs (population prefix such as BP/ER/F/KC/NYC/SH, or for the
# "BU*" samples a population suffix after the dot).  Only the "BU*" names
# are used below, via cname[grep("BU", cname)], to label the depth tables.
cname <- c(
"CHROM",
"POS",
"ID",
"REF",
"ALT",
"QUAL",
"FILTER",
"INFO",
"FORMAT",
"BP-10",
"BP-11",
"BP-12",
"BP-13",
"BP-14",
"BP-15",
"BP-16",
"BP-17",
"BP-18",
"BP-19",
"BP-1",
"BP-20",
"BP-21",
"BP-22",
"BP-23",
"BP-24",
"BP-25",
"BP-26",
"BP-27",
"BP-28",
"BP-29",
"BP-2",
"BP-30",
"BP-31",
"BP-32",
"BP-33",
"BP-34",
"BP-38",
"BP-39",
"BP-3",
"BP-40",
"BP-41",
"BP-42",
"BP-43",
"BP-44",
"BP-45",
"BP-46",
"BP-47",
"BP-48",
"BP-49",
"BP-4",
"BP-50",
"BP-51",
"BP-52",
"BP-53",
"BP-5",
"BP-6",
"BP-7",
"BP-8",
"BP-9",
"ER-11",
"ER-12",
"ER-13",
"ER-14",
"ER-15",
"ER-16",
"ER-17",
"ER-18",
"ER-19",
"ER-20",
"ER-21",
"ER-22",
"ER-23",
"ER-24",
"ER-25",
"ER-26",
"ER-27",
"ER-28",
"ER-29",
"ER-30",
"ER-31",
"ER-32",
"ER-33",
"ER-34",
"ER-35",
"ER-36",
"ER-38",
"ER-39",
"ER-40",
"ER-41",
"ER-42",
"ER-43",
"ER-44",
"ER-45",
"ER-46",
"ER-47",
"ER-48",
"ER-49",
"ER-50",
"ER-51",
"ER-52",
"ER-53",
"ER-54",
"ER-55",
"ER-56",
"ER-57",
"ER-58",
"ER-59",
"ER-60",
"F-10",
"F-11",
"F-12",
"F-13",
"F-14",
"F-15",
"F-16",
"F-17",
"F-18",
"F-19",
"F-1",
"F-20",
"F-21",
"F-22",
"F-23",
"F-24",
"F-25",
"F-26",
"F-27",
"F-28",
"F-29",
"F-2",
"F-30",
"F-31",
"F-32",
"F-33",
"F-34",
"F-35",
"F-39",
"F-40",
"F-41",
"F-42",
"F-43",
"F-44",
"F-45",
"F-46",
"F-47",
"F-49",
"F-4",
"F-50",
"F-51",
"F-52",
"F-53",
"F-54",
"F-5",
"F-6",
"F-7",
"F-8",
"F-9",
"KC-11",
"KC-13",
"KC-14",
"KC-15",
"KC-16",
"KC-18",
"KC-19",
"KC-1",
"KC-20",
"KC-22",
"KC-23",
"KC-26",
"KC-27",
"KC-28",
"KC-29",
"KC-2",
"KC-30",
"KC-32",
"KC-33",
"KC-34",
"KC-35",
"KC-36",
"KC-37",
"KC-38",
"KC-39",
"KC-3",
"KC-40",
"KC-41",
"KC-42",
"KC-43",
"KC-44",
"KC-45",
"KC-46",
"KC-47",
"KC-48",
"KC-49",
"KC-4",
"KC-50",
"KC-51",
"KC-52",
"KC-54",
"KC-55",
"KC-56",
"KC-5",
"KC-6",
"KC-7",
"KC-9",
"NYC-10",
"NYC-11",
"NYC-12",
"NYC-13",
"NYC-14",
"NYC-15",
"NYC-16",
"NYC-17",
"NYC-18",
"NYC-19",
"NYC-20",
"NYC-21",
"NYC-22",
"NYC-23",
"NYC-24",
"NYC-25",
"NYC-26",
"NYC-27",
"NYC-28",
"NYC-29",
"NYC-30",
"NYC-31",
"NYC-32",
"NYC-33",
"NYC-34",
"NYC-40",
"NYC-41",
"NYC-42",
"NYC-43",
"NYC-44",
"NYC-45",
"NYC-46",
"NYC-47",
"NYC-48",
"NYC-49",
"NYC-50",
"NYC-51",
"NYC-52",
"NYC-53",
"NYC-54",
"NYC-55",
"NYC-8",
"NYC-9",
"SH-14",
"SH-15",
"SH-16",
"SH-17",
"SH-18",
"SH-19",
"SH-1",
"SH-201",
"SH-202",
"SH-203",
"SH-204",
"SH-205",
"SH-206",
"SH-207",
"SH-208",
"SH-209",
"SH-20",
"SH-210",
"SH-211",
"SH-212",
"SH-213",
"SH-21",
"SH-22",
"SH-23",
"SH-24",
"SH-25",
"SH-26",
"SH-27",
"SH-28",
"SH-29",
"SH-2",
"SH-30",
"SH-31",
"SH-32",
"SH-33",
"SH-34",
"SH-35",
"SH-36",
"SH-37",
"SH-38",
"SH-39",
"SH-3",
"SH-40",
"SH-41",
"SH-42",
"SH-4",
"SH-5",
"SH-6",
"SH-7",
"SH-8",
"BU000004.VB_B",
"BU000005.VB_B",
"BU000006.VB_B",
"BU000007.VB_B",
"BU000008.VB_B",
"BU000012.SP",
"BU000014.SP",
"BU000017.SP",
"BU000018.SP",
"BU000023.SP",
"BU000024.SP",
"BU000025.SP",
"BU000031.SP",
"BU000032.SP",
"BU000033.SP",
"BU000035.SP",
"BU000036.SP",
"BU000037.SP",
"BU000039.SP",
"BU000041.SP",
"BU000046.SP",
"BU000048.SP",
"BU000049.SP",
"BU000052.SP",
"BU000053.SP",
"BU000054.SP",
"BU000055.SP",
"BU000056.SP",
"BU000057.SP",
"BU000062.GB",
"BU000063.GB",
"BU000064.GB",
"BU000065.GB",
"BU000066.GB",
"BU000067.GB",
"BU000068.GB",
"BU000069.GB",
"BU000070.GB",
"BU000071.GB",
"BU000072.GB",
"BU000073.GB",
"BU000074.GB",
"BU000075.GB",
"BU000076.GB",
"BU000077.GB",
"BU000078.GB",
"BU000081.GB",
"BU000082.GB",
"BU000083.GB",
"BU000084.GB",
"BU000085.GB",
"BU000086.GB",
"BU000087.GB",
"BU000088.GB",
"BU000089.GB",
"BU000090.GB",
"BU000092.GB",
"BU000093.GB",
"BU000094.GB",
"BU000095.GB",
"BU000097.GB",
"BU000100.GB",
"BU000101.GB",
"BU000102.GB",
"BU000103.GB",
"BU000104.GB",
"BU000105.GB",
"BU000106.GB",
"BU000110.GB",
"BU000116.GB",
"BU000120.GB",
"BU000121.GB",
"BU000123.GB",
"BU000124.GB",
"BU000125.GB",
"BU000126.GB",
"BU000127.GB",
"BU000129.VB_A",
"BU000130.VB_A",
"BU000131.VB_A",
"BU000132.VB_A",
"BU000133.VB_A",
"BU000134.VB_A",
"BU000135.VB_A",
"BU000136.VB_A",
"BU000137.VB_A",
"BU000138.VB_A",
"BU000139.VB_A",
"BU000140.VB_A",
"BU000141.VB_A",
"BU000142.VB_A",
"BU000144.VB_A",
"BU000145.VB_A",
"BU000148.VB_A",
"BU000149.VB_A",
"BU000150.VB_A",
"BU000151.VB_A",
"BU000152.VB_A",
"BU000153.VB_A",
"BU000155.VB_A",
"BU000157.VB_A",
"BU000158.VB_A",
"BU000160.VB_A",
"BU000161.VB_A",
"BU000164.VB_A",
"BU000165.VB_A",
"BU000166.VB_A",
"BU000167.VB_A",
"BU000168.VB_B",
"BU000169.VB_B",
"BU000170.VB_B",
"BU000171.VB_B",
"BU000172.VB_B",
"BU000173.VB_B",
"BU000174.VB_B",
"BU000175.VB_B",
"BU000176.VB_B",
"BU000177.VB_B",
"BU000178.VB_B",
"BU000179.VB_B",
"BU000180.VB_B",
"BU000182.PB_A",
"BU000183.PB_A",
"BU000184.PB_A",
"BU000185.PB_A",
"BU000186.PB_A",
"BU000187.PB_A",
"BU000188.PB_A",
"BU000190.PB_A",
"BU000191.PB_A",
"BU000192.PB_A",
"BU000193.PB_A",
"BU000194.PB_A",
"BU000195.PB_A",
"BU000196.PB_A",
"BU000197.PB_A",
"BU000198.PB_A",
"BU000199.PB_A",
"BU000200.PB_A",
"BU000201.PB_A",
"BU000202.PB_A",
"BU000203.PB_A",
"BU000204.PB_A",
"BU000205.PB_A",
"BU000206.PB_A",
"BU000207.PB_B",
"BU000209.PB_B",
"BU000210.PB_B",
"BU000211.PB_B",
"BU000212.PB_B",
"BU000213.PB_B",
"BU000214.PB_B",
"BU000215.PB_B",
"BU000217.PB_B",
"BU000219.PB_B",
"BU000223.PB_B",
"BU000225.PB_B",
"BU000226.PB_B",
"BU000227.PB_B",
"BU000228.PB_B",
"BU000229.PB_B",
"BU000230.PB_B",
"BU000231.PB_B",
"BU000233.PB_B",
"BU000234.PB_B",
"BU000235.PB_B",
"BU000237.PB_B",
"BU000242.PB_B",
"BU000244.SP",
"BU000245.SP",
"BU000246.SP",
"BU000248.SP",
"BU000249.SP",
"BU000250.SP",
"BU000252.SP",
"BU000253.SP",
"BU000254.SP",
"BU000255.SP",
"BU000256.SP",
"BU000257.SP",
"BU000259.SP",
"BU000260.SP",
"BU000261.SP",
"BU000262.SP",
"BU000263.SP",
"BU000264.SP",
"BU000265.SP",
"BU000266.SP",
"BU000269.SP",
"BU000270.SP",
"BU000271.SP",
"BU000272.SP",
"BU000318.BNP",
"BU000319.BNP",
"BU000320.BNP",
"BU000321.BNP",
"BU000322.BNP",
"BU000323.BNP",
"BU000324.BNP",
"BU000325.BNP",
"BU000326.BNP",
"BU000327.BNP",
"BU000328.BNP",
"BU000329.BNP",
"BU000330.BNP",
"BU000331.BNP",
"BU000332.BNP",
"BU000333.BNP",
"BU000334.BNP",
"BU000335.BNP",
"BU000336.BNP",
"BU000337.BNP",
"BU000338.BNP",
"BU000339.BNP",
"BU000340.BNP",
"BU000341.BNP",
"BU000343.BNP",
"BU000344.BNP",
"BU000345.BNP",
"BU000346.BNP",
"BU000347.BNP",
"BU000348.BNP",
"BU000349.BNP",
"BU000350.BNP",
"BU000351.BNP",
"BU000352.BNP",
"BU000354.BNP",
"BU000355.BNP",
"BU000356.BNP",
"BU000357.BNP",
"BU000358.BNP",
"BU000359.BNP",
"BU000361.BNP",
"BU000362.BNP",
"BU000364.BNP",
"BU000366.BNP",
"BU000367.BNP",
"BU000372.BNP",
"BU000375.BNP",
"BU000382.BB",
"BU000383.BB",
"BU000384.BB",
"BU000386.BB",
"BU000390.BB",
"BU000391.BB",
"BU000392.BB",
"BU000393.BNP",
"BU000397.BB",
"BU000398.BB",
"BU000400.BB",
"BU000402.BB",
"BU000403.BB",
"BU000405.BB",
"BU000406.BB",
"BU000407.BB",
"BU000408.BB",
"BU000409.BB",
"BU000410.BB",
"BU000411.BB",
"BU000413.BB",
"BU000414.BB",
"BU000415.BB",
"BU000416.BB",
"BU000417.BB",
"BU000418.SJSP",
"BU000419.SJSP",
"BU000420.SJSP",
"BU000421.SJSP",
"BU000424.SJSP",
"BU000425.SJSP",
"BU000426.SJSP",
"BU000427.SJSP",
"BU000431.SJSP",
"BU000432.SJSP",
"BU000433.SJSP",
"BU000439.SJSP",
"BU000441.SJSP",
"BU000442.SJSP",
"BU000443.SJSP",
"BU000444.SJSP",
"BU000446.SJSP",
"BU000449.SJSP",
"BU000451.SJSP",
"BU000454.SJSP",
"BU000458.SJSP",
"BU000460.SJSP",
"BU000463.SJSP",
"BU000464.SJSP")
# Liftover coordinate table (col 1 = scaffold, col 2 = position), used to
# order the coverage windows and to color points by scaffold below.
lift <- read.table("fst_dxy_allpops_liftover.txt",stringsAsFactors=FALSE)
lord <- order(lift[,1],lift[,2])
lift <- lift[lord,]
# coverage in 1kb windows
# cols 4/6 appear to be callable-site counts and cols 5/7 summed depth for
# the two sexes (5 = male, 7 = female, judging from the medm/medf names
# below) -- TODO confirm against how the bed file was generated
gsex <- read.table("grand.sex.coverage.bed",stringsAsFactors=FALSE)
gsex[,5] <- as.numeric(gsex[,5])
gsex[,7] <- as.numeric(gsex[,7])
# change to per site coverage
gsex[,5] <- gsex[,5]/gsex[,4]
gsex[,7] <- gsex[,7]/gsex[,6]
# reorder windows to match the sorted liftover table
gsex <- gsex[lord,]
# window subset
# NOTE(review): this first subw assignment is immediately overwritten by the
# next line and has no effect -- dead code?
subw <- (gsex[,5]+gsex[,7] < 1000 & gsex[,5]+gsex[,7] > 8)
# keep windows with enough callable sites in either sex and decent coverage
subw <- (gsex[,4] > 400 | gsex[,6] > 400) & gsex[,5]+gsex[,7] > 30
# average windows
# smooth per-sex coverage over +/-2 neighboring retained windows
gsex2 <- gsex
gsex2[which(subw),5] <- smother(gsex2[which(subw),5],2)
gsex2[which(subw),7] <- smother(gsex2[which(subw),7],2)
# get median coverages
medrat <- median(log(gsex[subw,7]/gsex[subw,5],base=2),na.rm=TRUE)
medf <- median(log(gsex[subw,7],base=2),na.rm=TRUE)
medm <- median(log(gsex[subw,5],base=2),na.rm=TRUE)
medrat2 <- median(log(gsex2[subw,7]/gsex2[subw,5],base=2),na.rm=TRUE)
# plot F/M coverage ratios, adding median coverage to each in the second plot to tamp down noise
par(mfrow=c(2,1),mar=c(0,0,0,0),oma=c(3,3,1,1))
plot(log(gsex[subw,7]/gsex[subw,5],base=2)-medrat,pch=20,cex=.2,col=factor(lift[subw,1]),ylim=c(-3,3))
# 58.7 / 52.8 are presumably the median per-sex coverages (~2^medf, 2^medm)
# added as pseudo-counts to damp ratio noise in low-coverage windows -- verify
plot(log((gsex[subw,7]+58.7)/(gsex[subw,5]+52.8),base=2)-medrat,pch=20,cex=.2,col=factor(lift[subw,1]),ylim=c(-3,3))
# tabulate which scaffolds carry the most strongly sex-biased windows
ratvec <- log(gsex[,7]/gsex[,5],base=2)-medrat
gsex[ratvec < -0.5 & subw,1] %>% table() %>% sort()
gsex[ratvec > 0.5 & subw,1] %>% table() %>% sort()
ratvec <- log((gsex2[,7]+58.7)/(gsex2[,5]+52.8),base=2)-medrat2
gsex2[ratvec < -0.25 & subw,1] %>% table() %>% sort()
# "NW_012234400.1" promising scaffold.
# both sexes have higher coverage
# male has higher coverage than female. around 50%
# both have higher coverage than expected.
# contains a BTB/POZ domain containing protein
# fhet linkage group 21 (old scaffold 10000)
# For each candidate scaffold: raw coverage (male black, female red),
# M/F log2 ratio, median-centered log2 coverage, and fold-over-median.
subc <- gsex[,1]=="NW_012234400.1"
par(mfrow=c(2,1),mar=c(0,0,0,0),oma=c(3,3,1,1))
plot(gsex[subc,2],gsex[subc,5],pch=20,xlim=c(500000,800000))
points(gsex[subc,2],gsex[subc,7],pch=20,col="red")
plot(gsex[subc,2],log(gsex[subc,7]/gsex[subc,5],base=2)-medrat,pch=20,ylim=c(-1,1),xlim=c(500000,800000),ylab="male/female log2 fold coverage")
plot(gsex[subc,2],log(gsex[subc,5],base=2)-medm,pch=20,xlim=c(500000,800000),ylim=c(-0.5,2.5))
points(gsex[subc,2],log(gsex[subc,7],base=2)-medf,pch=20,col="red")
plot(gsex[subc,2],2^(log(gsex[subc,5],base=2)-medm),pch=20,xlim=c(500000,800000),ylim=c(0,4))
points(gsex[subc,2],2^(log(gsex[subc,7],base=2)-medf),pch=20,col="red")
# second candidate scaffold, same panel layout
subc <- gsex[,1]=="NW_012224610.1"
par(mfrow=c(2,1),mar=c(0,0,0,0),oma=c(3,3,1,1))
plot(gsex[subc,2],gsex[subc,5],pch=20,ylim=c(0,200))
points(gsex[subc,2],gsex[subc,7],pch=20,col="red")
plot(gsex[subc,2],log(gsex[subc,7]/gsex[subc,5],base=2)-medrat,pch=20,ylim=c(-1,1))
plot(gsex[subc,2],log(gsex[subc,5],base=2)-medm,pch=20,ylim=c(-0.5,2.5))
points(gsex[subc,2],log(gsex[subc,7],base=2)-medf,pch=20,col="red")
plot(gsex[subc,2],2^(log(gsex[subc,5],base=2)-medm),pch=20,ylim=c(0,4))
points(gsex[subc,2],2^(log(gsex[subc,7],base=2)-medf),pch=20,col="red")
# AMH
# scaffold presumably carrying the anti-Mullerian hormone gene -- confirm
subc <- gsex[,1]=="NW_012234285.1"
par(mfrow=c(2,1),mar=c(0,0,0,0),oma=c(3,3,1,1))
plot(gsex[subc,2],gsex[subc,5],pch=20,xlim=c(100000,200000))
points(gsex[subc,2],gsex[subc,7],pch=20,col="red")
plot(gsex[subc,2],log(gsex[subc,7]/gsex[subc,5],base=2)-medrat,pch=20,ylim=c(-1,1),xlim=c(100000,200000),ylab="male/female log2 fold coverage")
plot(gsex[subc,2],log(gsex[subc,5],base=2)-medm,pch=20,xlim=c(100000,200000),ylim=c(-0.5,2.5))
points(gsex[subc,2],log(gsex[subc,7],base=2)-medf,pch=20,col="red")
plot(gsex[subc,2],2^(log(gsex[subc,5],base=2)-medm),pch=20,xlim=c(100000,200000),ylim=c(0,4))
points(gsex[subc,2],2^(log(gsex[subc,7],base=2)-medf),pch=20,col="red")
# read in a table of sex-sample associations
# rownames become "<ID>.<population>", matching the BU* sample names in cname
sex <- read.table("../het_grand/sexes.txt",stringsAsFactors=FALSE)
rownames(sex) <- paste(sex[,1],sex[,3],sep=".")
# read in a table of depth per site per sample
dep <- read.table("NW_012234400.1.depth.gz",stringsAsFactors=FALSE)
colnames(dep) <- c("scaffold","position",cname[grep("BU",cname)])
# throw out individuals with very low coverage
dep2 <- dep[,-c(1,2)]
dep2 <- dep2[,colSums(dep2)/dim(dep2)[1] > 0.3]
# get male and female column IDs
male <- sex[colnames(dep2),4] == "M"
female <- sex[colnames(dep2),4] == "F"
# total and per-sex summed depth across the candidate region
plot(dep[,2],rowSums(dep2),pch=20,cex=.2,ylim=c(0,350),xlim=c(640000,720000))
plot(dep[,2],rowSums(dep2[,male]),pch=20,cex=.2,ylim=c(0,350),xlim=c(640000,720000))
points(dep[,2],rowSums(dep2[,female]),pch=20,cex=.2,ylim=c(0,350),xlim=c(640000,720000),col="red")
# subin = sites inside the putative CNV; subout = flanking reference sites
subin <- (dep[,2] > 644000 & dep[,2] < 646000) | (dep[,2] > 665000 & dep[,2] < 667000) | (dep[,2] > 659000 & dep[,2] < 661000) | (dep[,2] > 652000 & dep[,2] < 653000) | (dep[,2] > 655000 & dep[,2] < 656000) | (dep[,2] > 669000 & dep[,2] < 672000)
# NOTE(review): this OR of "outside each range" conditions is TRUE for nearly
# every site; if the intent was to exclude those three ranges it should be an
# AND of the complements -- confirm intent
subout <- ((dep[,2] < 638000 | dep[,2] > 640000) | (dep[,2] < 632000 | dep[,2] > 636000) | (dep[,2] < 570000 | dep[,2] > 574000))
# per-sample mean depth inside vs outside; me1 = fold change per sample
mein <- apply(dep2[subin,],MAR=2,FUN=mean)
meout <- apply(dep2[subout,],MAR=2,FUN=mean)
me1 <- mein/meout
names(me1) <- colnames(dep2)
plot(mein/meout,col=(sex[colnames(dep2),4]=="F")+1,pch=20)
boxplot(mein/meout ~ sex[colnames(dep2),4],ylab="Fold change coverage over adjacent region",notch=TRUE)
points(jitter(rep(1,sum(sex[colnames(dep2),4]=="F")),amount=.05),(mein/meout)[sex[colnames(dep2),4]=="F"],pch=20,cex=1,col=rgb(0,0,0,.3))
points(jitter(rep(2,sum(sex[colnames(dep2),4]=="M")),amount=.05),(mein/meout)[sex[colnames(dep2),4]=="M"],pch=20,cex=1,col=rgb(0,0,0,.3))
# test whether fold change differs between sexes
wilcox.test(mein/meout ~ sex[colnames(dep2),4])
# start again with new scaffold...
# it shows the same pattern as above! whoah!
# 74kb to 103kb
# don's annotation says there's a D1B receptor in here. not NCBI. otherwise, riboflavin transporters
# fhet linkage group 6, old scaffold209
# read in a table of depth per site per sample
dep <- read.table("NW_012224610.1.depth.gz",stringsAsFactors=FALSE)
colnames(dep) <- c("scaffold","position",cname[grep("BU",cname)])
# throw out individuals with very low coverage
dep2 <- dep[,-c(1,2)]
dep2 <- dep2[,colSums(dep2)/dim(dep2)[1] > 0.3]
# get male and female column IDs
male <- sex[colnames(dep2),4] == "M"
female <- sex[colnames(dep2),4] == "F"
plot(dep[,2],rowSums(dep2),pch=20,cex=.2,ylim=c(0,350),xlim=c(70000,110000))
plot(dep[,2],rowSums(dep2[,male]),pch=20,cex=.2,ylim=c(0,350),xlim=c(50000,150000))
points(dep[,2],rowSums(dep2[,female]),pch=20,cex=.2,ylim=c(0,350),col=rgb(1,0,0,.2))
# subin = sites inside the putative CNV; subout = flanking sites
subin <- (dep[,2] > 98000 & dep[,2] < 102000) | (dep[,2] > 78000 & dep[,2] < 80000)
# NOTE(review): like the previous scaffold's subout, this is TRUE for every
# site except 110-118kb's complement intersection -- confirm intent
subout <- ((dep[,2] < 110000 | dep[,2] > 118000))
# per-sample fold change in coverage inside vs outside the region
mein <- apply(dep2[subin,],MAR=2,FUN=mean)
meout <- apply(dep2[subout,],MAR=2,FUN=mean)
me2 <- mein/meout
names(me2) <- colnames(dep2)
plot(mein/meout,col=(sex[colnames(dep2),4]=="M")+1,pch=20)
boxplot(mein/meout ~ sex[colnames(dep2),4],ylab="Fold change coverage over adjacent region",notch=TRUE)
points(jitter(rep(1,sum(sex[colnames(dep2),4]=="F")),amount=.05),(mein/meout)[sex[colnames(dep2),4]=="F"],pch=20,cex=1,col=rgb(0,0,0,.3))
points(jitter(rep(2,sum(sex[colnames(dep2),4]=="M")),amount=.05),(mein/meout)[sex[colnames(dep2),4]=="M"],pch=20,cex=1,col=rgb(0,0,0,.3))
wilcox.test(mein/meout ~ sex[colnames(dep2),4])
# read in a table of depth per site per sample. AMH!!!
dep <- read.table("NW_012234285.1:140000-180000.depth.gz",stringsAsFactors=FALSE)
colnames(dep) <- c("scaffold","position",cname[grep("BU",cname)])
# throw out individuals with very low coverage
dep2 <- dep[,-c(1,2)]
dep2 <- dep2[,colSums(dep2)/dim(dep2)[1] > 0.3]
# get male and female column IDs
male <- sex[colnames(dep2),4] == "M"
female <- sex[colnames(dep2),4] == "F"
plot(dep[,2],rowSums(dep2),pch=20,cex=.2)
plot(dep[,2],rowSums(dep2[,male]),pch=20,cex=.2,xlim=c(155000,170000))
points(dep[,2],rowSums(dep2[,female]),pch=20,cex=.2,col=rgb(1,0,0,.2))
plot(dep[,2],rowSums(dep2[,male])-rowSums(dep2[,female]),pch=20,cex=.2,xlim=c(155000,170000))
# NOTE(review): the 78-80kb clause looks copied from the NW_012224610 block;
# this file only spans 140-180kb, so that clause can never match
subin <- (dep[,2] > 160000 & dep[,2] < 161400) | (dep[,2] > 78000 & dep[,2] < 80000)
subout <- ((dep[,2] < 150000 | dep[,2] > 159000))
# per-sample fold change in coverage inside vs outside the region
mein <- apply(dep2[subin,],MAR=2,FUN=mean)
meout <- apply(dep2[subout,],MAR=2,FUN=mean)
me3 <- mein/meout
names(me3) <- colnames(dep2)
plot(mein/meout,col=(sex[colnames(dep2),4]=="M")+1,pch=20)
boxplot(mein/meout ~ sex[colnames(dep2),4],ylab="Fold change coverage over adjacent region",notch=TRUE)
points(jitter(rep(1,sum(sex[colnames(dep2),4]=="F")),amount=.05),(mein/meout)[sex[colnames(dep2),4]=="F"],pch=20,cex=1,col=rgb(0,0,0,.3))
points(jitter(rep(2,sum(sex[colnames(dep2),4]=="M")),amount=.05),(mein/meout)[sex[colnames(dep2),4]=="M"],pch=20,cex=1,col=rgb(0,0,0,.3))
wilcox.test(mein/meout ~ sex[colnames(dep2),4])
# Combine the three per-sample fold-change vectors on their shared samples.
u <- intersect(names(me1),names(me2)) %>% intersect(.,names(me3))
me1 <- me1[u]
me2 <- me2[u]
me3 <- me3[u]
plot(me1,me2,col=(sex[names(me1),4]=="F")+1,pch=20,ylab="fold coverage over expected, NW_012234400",xlab="fold coverage over expected, NW_012224610")
abline(0,1)
# NOTE(review): axis labels below look copied from the previous plot; the
# x axis here is me3 (AMH scaffold), not NW_012224610
plot(me1,me3,col=(sex[names(me1),4]=="F")+1,pch=20,ylab="fold coverage over expected, NW_012234400",xlab="fold coverage over expected, NW_012224610")
abline(0,1)
# both scaffolds are on different heteroclitus linkage groups:
boxplot(me1 ~ sex[names(me1),4],ylab="Fold change coverage over adjacent region",notch=TRUE,ylim=c(0,5))
points(jitter(rep(1,sum(sex[names(me1),4]=="F")),amount=.05),(me1)[sex[names(me1),4]=="F"],pch=20,cex=1,col=rgb(0,0,0,.3))
points(jitter(rep(2,sum(sex[names(me1),4]=="M")),amount=.05),(me1)[sex[names(me1),4]=="M"],pch=20,cex=1,col=rgb(0,0,0,.3))
boxplot(me2 ~ sex[names(me2),4],ylab="Fold change coverage over adjacent region",notch=TRUE,ylim=c(0,5))
points(jitter(rep(1,sum(sex[names(me2),4]=="F")),amount=.05),(me2)[sex[names(me2),4]=="F"],pch=20,cex=1,col=rgb(0,0,0,.3))
points(jitter(rep(2,sum(sex[names(me2),4]=="M")),amount=.05),(me2)[sex[names(me2),4]=="M"],pch=20,cex=1,col=rgb(0,0,0,.3))
boxplot(me3 ~ sex[names(me3),4],ylab="Fold change coverage over adjacent region",notch=TRUE,ylim=c(0,10))
points(jitter(rep(1,sum(sex[names(me3),4]=="F")),amount=.05),(me3)[sex[names(me3),4]=="F"],pch=20,cex=1,col=rgb(0,0,0,.3))
points(jitter(rep(2,sum(sex[names(me3),4]=="M")),amount=.05),(me3)[sex[names(me3),4]=="M"],pch=20,cex=1,col=rgb(0,0,0,.3))
# NOTE(review): me4 (combined fold change) is computed but the boxplot below
# repeats me3; the me4 version was probably intended -- confirm
me4 <- me1+me2+me3
boxplot(me3 ~ sex[names(me3),4],ylab="Fold change coverage over adjacent region",notch=TRUE,ylim=c(0,10))
points(jitter(rep(1,sum(sex[names(me3),4]=="F")),amount=.05),(me3)[sex[names(me3),4]=="F"],pch=20,cex=1,col=rgb(0,0,0,.3))
points(jitter(rep(2,sum(sex[names(me3),4]=="M")),amount=.05),(me3)[sex[names(me3),4]=="M"],pch=20,cex=1,col=rgb(0,0,0,.3))
| /genome_stats/grandsex_coverage_analysis.R | no_license | nreid/het_grand | R | false | false | 19,051 | r | library(magrittr)
# this script analyzes depth of coverage between males and females
# in fundulus grandis. it identifies two copy-number variable regions
# that seem to be associated with sex, although not absolutely
# determinative of it.
# Sliding-window sum: for each position i of x, sum x over the index window
# [i - winsize, i + winsize], clamped to the bounds of the vector.
# NAs are dropped from each window's sum.
#
# x       numeric vector
# winsize integer half-width of the smoothing window
# returns numeric vector the same length as x (numeric(0) for empty input)
smother <- function(x, winsize) {
  # seq_along() is safe for zero-length input; the previous 1:length(x)
  # produced c(1, 0) and returned spurious values for empty x
  vec <- seq_along(x)
  start <- vec - winsize
  start[start < 1] <- 1
  end <- vec + winsize
  end[end > length(x)] <- length(x)
  # vapply is type-stable (always numeric), unlike apply(win, MAR = 1, ...),
  # which relied on partial matching of MARGIN and could return a list
  # for a zero-row window matrix
  vapply(
    vec,
    function(i) sum(x[start[i]:end[i]], na.rm = TRUE),
    numeric(1)
  )
}
# Column-name vector for the merged VCF / per-site depth tables.  The first
# nine entries are the standard VCF fixed columns; the remainder are
# per-sample IDs (population prefix such as BP/ER/F/KC/NYC/SH, or for the
# "BU*" samples a population suffix after the dot).  Only the "BU*" names
# are used below, via cname[grep("BU", cname)], to label the depth tables.
cname <- c(
"CHROM",
"POS",
"ID",
"REF",
"ALT",
"QUAL",
"FILTER",
"INFO",
"FORMAT",
"BP-10",
"BP-11",
"BP-12",
"BP-13",
"BP-14",
"BP-15",
"BP-16",
"BP-17",
"BP-18",
"BP-19",
"BP-1",
"BP-20",
"BP-21",
"BP-22",
"BP-23",
"BP-24",
"BP-25",
"BP-26",
"BP-27",
"BP-28",
"BP-29",
"BP-2",
"BP-30",
"BP-31",
"BP-32",
"BP-33",
"BP-34",
"BP-38",
"BP-39",
"BP-3",
"BP-40",
"BP-41",
"BP-42",
"BP-43",
"BP-44",
"BP-45",
"BP-46",
"BP-47",
"BP-48",
"BP-49",
"BP-4",
"BP-50",
"BP-51",
"BP-52",
"BP-53",
"BP-5",
"BP-6",
"BP-7",
"BP-8",
"BP-9",
"ER-11",
"ER-12",
"ER-13",
"ER-14",
"ER-15",
"ER-16",
"ER-17",
"ER-18",
"ER-19",
"ER-20",
"ER-21",
"ER-22",
"ER-23",
"ER-24",
"ER-25",
"ER-26",
"ER-27",
"ER-28",
"ER-29",
"ER-30",
"ER-31",
"ER-32",
"ER-33",
"ER-34",
"ER-35",
"ER-36",
"ER-38",
"ER-39",
"ER-40",
"ER-41",
"ER-42",
"ER-43",
"ER-44",
"ER-45",
"ER-46",
"ER-47",
"ER-48",
"ER-49",
"ER-50",
"ER-51",
"ER-52",
"ER-53",
"ER-54",
"ER-55",
"ER-56",
"ER-57",
"ER-58",
"ER-59",
"ER-60",
"F-10",
"F-11",
"F-12",
"F-13",
"F-14",
"F-15",
"F-16",
"F-17",
"F-18",
"F-19",
"F-1",
"F-20",
"F-21",
"F-22",
"F-23",
"F-24",
"F-25",
"F-26",
"F-27",
"F-28",
"F-29",
"F-2",
"F-30",
"F-31",
"F-32",
"F-33",
"F-34",
"F-35",
"F-39",
"F-40",
"F-41",
"F-42",
"F-43",
"F-44",
"F-45",
"F-46",
"F-47",
"F-49",
"F-4",
"F-50",
"F-51",
"F-52",
"F-53",
"F-54",
"F-5",
"F-6",
"F-7",
"F-8",
"F-9",
"KC-11",
"KC-13",
"KC-14",
"KC-15",
"KC-16",
"KC-18",
"KC-19",
"KC-1",
"KC-20",
"KC-22",
"KC-23",
"KC-26",
"KC-27",
"KC-28",
"KC-29",
"KC-2",
"KC-30",
"KC-32",
"KC-33",
"KC-34",
"KC-35",
"KC-36",
"KC-37",
"KC-38",
"KC-39",
"KC-3",
"KC-40",
"KC-41",
"KC-42",
"KC-43",
"KC-44",
"KC-45",
"KC-46",
"KC-47",
"KC-48",
"KC-49",
"KC-4",
"KC-50",
"KC-51",
"KC-52",
"KC-54",
"KC-55",
"KC-56",
"KC-5",
"KC-6",
"KC-7",
"KC-9",
"NYC-10",
"NYC-11",
"NYC-12",
"NYC-13",
"NYC-14",
"NYC-15",
"NYC-16",
"NYC-17",
"NYC-18",
"NYC-19",
"NYC-20",
"NYC-21",
"NYC-22",
"NYC-23",
"NYC-24",
"NYC-25",
"NYC-26",
"NYC-27",
"NYC-28",
"NYC-29",
"NYC-30",
"NYC-31",
"NYC-32",
"NYC-33",
"NYC-34",
"NYC-40",
"NYC-41",
"NYC-42",
"NYC-43",
"NYC-44",
"NYC-45",
"NYC-46",
"NYC-47",
"NYC-48",
"NYC-49",
"NYC-50",
"NYC-51",
"NYC-52",
"NYC-53",
"NYC-54",
"NYC-55",
"NYC-8",
"NYC-9",
"SH-14",
"SH-15",
"SH-16",
"SH-17",
"SH-18",
"SH-19",
"SH-1",
"SH-201",
"SH-202",
"SH-203",
"SH-204",
"SH-205",
"SH-206",
"SH-207",
"SH-208",
"SH-209",
"SH-20",
"SH-210",
"SH-211",
"SH-212",
"SH-213",
"SH-21",
"SH-22",
"SH-23",
"SH-24",
"SH-25",
"SH-26",
"SH-27",
"SH-28",
"SH-29",
"SH-2",
"SH-30",
"SH-31",
"SH-32",
"SH-33",
"SH-34",
"SH-35",
"SH-36",
"SH-37",
"SH-38",
"SH-39",
"SH-3",
"SH-40",
"SH-41",
"SH-42",
"SH-4",
"SH-5",
"SH-6",
"SH-7",
"SH-8",
"BU000004.VB_B",
"BU000005.VB_B",
"BU000006.VB_B",
"BU000007.VB_B",
"BU000008.VB_B",
"BU000012.SP",
"BU000014.SP",
"BU000017.SP",
"BU000018.SP",
"BU000023.SP",
"BU000024.SP",
"BU000025.SP",
"BU000031.SP",
"BU000032.SP",
"BU000033.SP",
"BU000035.SP",
"BU000036.SP",
"BU000037.SP",
"BU000039.SP",
"BU000041.SP",
"BU000046.SP",
"BU000048.SP",
"BU000049.SP",
"BU000052.SP",
"BU000053.SP",
"BU000054.SP",
"BU000055.SP",
"BU000056.SP",
"BU000057.SP",
"BU000062.GB",
"BU000063.GB",
"BU000064.GB",
"BU000065.GB",
"BU000066.GB",
"BU000067.GB",
"BU000068.GB",
"BU000069.GB",
"BU000070.GB",
"BU000071.GB",
"BU000072.GB",
"BU000073.GB",
"BU000074.GB",
"BU000075.GB",
"BU000076.GB",
"BU000077.GB",
"BU000078.GB",
"BU000081.GB",
"BU000082.GB",
"BU000083.GB",
"BU000084.GB",
"BU000085.GB",
"BU000086.GB",
"BU000087.GB",
"BU000088.GB",
"BU000089.GB",
"BU000090.GB",
"BU000092.GB",
"BU000093.GB",
"BU000094.GB",
"BU000095.GB",
"BU000097.GB",
"BU000100.GB",
"BU000101.GB",
"BU000102.GB",
"BU000103.GB",
"BU000104.GB",
"BU000105.GB",
"BU000106.GB",
"BU000110.GB",
"BU000116.GB",
"BU000120.GB",
"BU000121.GB",
"BU000123.GB",
"BU000124.GB",
"BU000125.GB",
"BU000126.GB",
"BU000127.GB",
"BU000129.VB_A",
"BU000130.VB_A",
"BU000131.VB_A",
"BU000132.VB_A",
"BU000133.VB_A",
"BU000134.VB_A",
"BU000135.VB_A",
"BU000136.VB_A",
"BU000137.VB_A",
"BU000138.VB_A",
"BU000139.VB_A",
"BU000140.VB_A",
"BU000141.VB_A",
"BU000142.VB_A",
"BU000144.VB_A",
"BU000145.VB_A",
"BU000148.VB_A",
"BU000149.VB_A",
"BU000150.VB_A",
"BU000151.VB_A",
"BU000152.VB_A",
"BU000153.VB_A",
"BU000155.VB_A",
"BU000157.VB_A",
"BU000158.VB_A",
"BU000160.VB_A",
"BU000161.VB_A",
"BU000164.VB_A",
"BU000165.VB_A",
"BU000166.VB_A",
"BU000167.VB_A",
"BU000168.VB_B",
"BU000169.VB_B",
"BU000170.VB_B",
"BU000171.VB_B",
"BU000172.VB_B",
"BU000173.VB_B",
"BU000174.VB_B",
"BU000175.VB_B",
"BU000176.VB_B",
"BU000177.VB_B",
"BU000178.VB_B",
"BU000179.VB_B",
"BU000180.VB_B",
"BU000182.PB_A",
"BU000183.PB_A",
"BU000184.PB_A",
"BU000185.PB_A",
"BU000186.PB_A",
"BU000187.PB_A",
"BU000188.PB_A",
"BU000190.PB_A",
"BU000191.PB_A",
"BU000192.PB_A",
"BU000193.PB_A",
"BU000194.PB_A",
"BU000195.PB_A",
"BU000196.PB_A",
"BU000197.PB_A",
"BU000198.PB_A",
"BU000199.PB_A",
"BU000200.PB_A",
"BU000201.PB_A",
"BU000202.PB_A",
"BU000203.PB_A",
"BU000204.PB_A",
"BU000205.PB_A",
"BU000206.PB_A",
"BU000207.PB_B",
"BU000209.PB_B",
"BU000210.PB_B",
"BU000211.PB_B",
"BU000212.PB_B",
"BU000213.PB_B",
"BU000214.PB_B",
"BU000215.PB_B",
"BU000217.PB_B",
"BU000219.PB_B",
"BU000223.PB_B",
"BU000225.PB_B",
"BU000226.PB_B",
"BU000227.PB_B",
"BU000228.PB_B",
"BU000229.PB_B",
"BU000230.PB_B",
"BU000231.PB_B",
"BU000233.PB_B",
"BU000234.PB_B",
"BU000235.PB_B",
"BU000237.PB_B",
"BU000242.PB_B",
"BU000244.SP",
"BU000245.SP",
"BU000246.SP",
"BU000248.SP",
"BU000249.SP",
"BU000250.SP",
"BU000252.SP",
"BU000253.SP",
"BU000254.SP",
"BU000255.SP",
"BU000256.SP",
"BU000257.SP",
"BU000259.SP",
"BU000260.SP",
"BU000261.SP",
"BU000262.SP",
"BU000263.SP",
"BU000264.SP",
"BU000265.SP",
"BU000266.SP",
"BU000269.SP",
"BU000270.SP",
"BU000271.SP",
"BU000272.SP",
"BU000318.BNP",
"BU000319.BNP",
"BU000320.BNP",
"BU000321.BNP",
"BU000322.BNP",
"BU000323.BNP",
"BU000324.BNP",
"BU000325.BNP",
"BU000326.BNP",
"BU000327.BNP",
"BU000328.BNP",
"BU000329.BNP",
"BU000330.BNP",
"BU000331.BNP",
"BU000332.BNP",
"BU000333.BNP",
"BU000334.BNP",
"BU000335.BNP",
"BU000336.BNP",
"BU000337.BNP",
"BU000338.BNP",
"BU000339.BNP",
"BU000340.BNP",
"BU000341.BNP",
"BU000343.BNP",
"BU000344.BNP",
"BU000345.BNP",
"BU000346.BNP",
"BU000347.BNP",
"BU000348.BNP",
"BU000349.BNP",
"BU000350.BNP",
"BU000351.BNP",
"BU000352.BNP",
"BU000354.BNP",
"BU000355.BNP",
"BU000356.BNP",
"BU000357.BNP",
"BU000358.BNP",
"BU000359.BNP",
"BU000361.BNP",
"BU000362.BNP",
"BU000364.BNP",
"BU000366.BNP",
"BU000367.BNP",
"BU000372.BNP",
"BU000375.BNP",
"BU000382.BB",
"BU000383.BB",
"BU000384.BB",
"BU000386.BB",
"BU000390.BB",
"BU000391.BB",
"BU000392.BB",
"BU000393.BNP",
"BU000397.BB",
"BU000398.BB",
"BU000400.BB",
"BU000402.BB",
"BU000403.BB",
"BU000405.BB",
"BU000406.BB",
"BU000407.BB",
"BU000408.BB",
"BU000409.BB",
"BU000410.BB",
"BU000411.BB",
"BU000413.BB",
"BU000414.BB",
"BU000415.BB",
"BU000416.BB",
"BU000417.BB",
"BU000418.SJSP",
"BU000419.SJSP",
"BU000420.SJSP",
"BU000421.SJSP",
"BU000424.SJSP",
"BU000425.SJSP",
"BU000426.SJSP",
"BU000427.SJSP",
"BU000431.SJSP",
"BU000432.SJSP",
"BU000433.SJSP",
"BU000439.SJSP",
"BU000441.SJSP",
"BU000442.SJSP",
"BU000443.SJSP",
"BU000444.SJSP",
"BU000446.SJSP",
"BU000449.SJSP",
"BU000451.SJSP",
"BU000454.SJSP",
"BU000458.SJSP",
"BU000460.SJSP",
"BU000463.SJSP",
"BU000464.SJSP")
# Liftover coordinate table (col 1 = scaffold, col 2 = position), used to
# order the coverage windows and to color points by scaffold below.
lift <- read.table("fst_dxy_allpops_liftover.txt",stringsAsFactors=FALSE)
lord <- order(lift[,1],lift[,2])
lift <- lift[lord,]
# coverage in 1kb windows
# cols 4/6 appear to be callable-site counts and cols 5/7 summed depth for
# the two sexes (5 = male, 7 = female, judging from the medm/medf names
# below) -- TODO confirm against how the bed file was generated
gsex <- read.table("grand.sex.coverage.bed",stringsAsFactors=FALSE)
gsex[,5] <- as.numeric(gsex[,5])
gsex[,7] <- as.numeric(gsex[,7])
# change to per site coverage
gsex[,5] <- gsex[,5]/gsex[,4]
gsex[,7] <- gsex[,7]/gsex[,6]
# reorder windows to match the sorted liftover table
gsex <- gsex[lord,]
# window subset
# NOTE(review): this first subw assignment is immediately overwritten by the
# next line and has no effect -- dead code?
subw <- (gsex[,5]+gsex[,7] < 1000 & gsex[,5]+gsex[,7] > 8)
# keep windows with enough callable sites in either sex and decent coverage
subw <- (gsex[,4] > 400 | gsex[,6] > 400) & gsex[,5]+gsex[,7] > 30
# average windows
# smooth per-sex coverage over +/-2 neighboring retained windows
gsex2 <- gsex
gsex2[which(subw),5] <- smother(gsex2[which(subw),5],2)
gsex2[which(subw),7] <- smother(gsex2[which(subw),7],2)
# get median coverages
medrat <- median(log(gsex[subw,7]/gsex[subw,5],base=2),na.rm=TRUE)
medf <- median(log(gsex[subw,7],base=2),na.rm=TRUE)
medm <- median(log(gsex[subw,5],base=2),na.rm=TRUE)
medrat2 <- median(log(gsex2[subw,7]/gsex2[subw,5],base=2),na.rm=TRUE)
# plot F/M coverage ratios, adding median coverage to each in the second plot to tamp down noise
par(mfrow=c(2,1),mar=c(0,0,0,0),oma=c(3,3,1,1))
plot(log(gsex[subw,7]/gsex[subw,5],base=2)-medrat,pch=20,cex=.2,col=factor(lift[subw,1]),ylim=c(-3,3))
# 58.7 / 52.8 are presumably the median per-sex coverages (~2^medf, 2^medm)
# added as pseudo-counts to damp ratio noise in low-coverage windows -- verify
plot(log((gsex[subw,7]+58.7)/(gsex[subw,5]+52.8),base=2)-medrat,pch=20,cex=.2,col=factor(lift[subw,1]),ylim=c(-3,3))
# tabulate which scaffolds carry the most strongly sex-biased windows
ratvec <- log(gsex[,7]/gsex[,5],base=2)-medrat
gsex[ratvec < -0.5 & subw,1] %>% table() %>% sort()
gsex[ratvec > 0.5 & subw,1] %>% table() %>% sort()
ratvec <- log((gsex2[,7]+58.7)/(gsex2[,5]+52.8),base=2)-medrat2
gsex2[ratvec < -0.25 & subw,1] %>% table() %>% sort()
# "NW_012234400.1" promising scaffold.
# both sexes have higher coverage
# male has higher coverage than female. around 50%
# both have higher coverage than expected.
# contains a BTB/POZ domain containing protein
# fhet linkage group 21 (old scaffold 10000)
# For each candidate scaffold: raw coverage (male black, female red),
# M/F log2 ratio, median-centered log2 coverage, and fold-over-median.
subc <- gsex[,1]=="NW_012234400.1"
par(mfrow=c(2,1),mar=c(0,0,0,0),oma=c(3,3,1,1))
plot(gsex[subc,2],gsex[subc,5],pch=20,xlim=c(500000,800000))
points(gsex[subc,2],gsex[subc,7],pch=20,col="red")
plot(gsex[subc,2],log(gsex[subc,7]/gsex[subc,5],base=2)-medrat,pch=20,ylim=c(-1,1),xlim=c(500000,800000),ylab="male/female log2 fold coverage")
plot(gsex[subc,2],log(gsex[subc,5],base=2)-medm,pch=20,xlim=c(500000,800000),ylim=c(-0.5,2.5))
points(gsex[subc,2],log(gsex[subc,7],base=2)-medf,pch=20,col="red")
plot(gsex[subc,2],2^(log(gsex[subc,5],base=2)-medm),pch=20,xlim=c(500000,800000),ylim=c(0,4))
points(gsex[subc,2],2^(log(gsex[subc,7],base=2)-medf),pch=20,col="red")
# second candidate scaffold, same panel layout
subc <- gsex[,1]=="NW_012224610.1"
par(mfrow=c(2,1),mar=c(0,0,0,0),oma=c(3,3,1,1))
plot(gsex[subc,2],gsex[subc,5],pch=20,ylim=c(0,200))
points(gsex[subc,2],gsex[subc,7],pch=20,col="red")
plot(gsex[subc,2],log(gsex[subc,7]/gsex[subc,5],base=2)-medrat,pch=20,ylim=c(-1,1))
plot(gsex[subc,2],log(gsex[subc,5],base=2)-medm,pch=20,ylim=c(-0.5,2.5))
points(gsex[subc,2],log(gsex[subc,7],base=2)-medf,pch=20,col="red")
plot(gsex[subc,2],2^(log(gsex[subc,5],base=2)-medm),pch=20,ylim=c(0,4))
points(gsex[subc,2],2^(log(gsex[subc,7],base=2)-medf),pch=20,col="red")
# AMH
# scaffold presumably carrying the anti-Mullerian hormone gene -- confirm
subc <- gsex[,1]=="NW_012234285.1"
par(mfrow=c(2,1),mar=c(0,0,0,0),oma=c(3,3,1,1))
plot(gsex[subc,2],gsex[subc,5],pch=20,xlim=c(100000,200000))
points(gsex[subc,2],gsex[subc,7],pch=20,col="red")
plot(gsex[subc,2],log(gsex[subc,7]/gsex[subc,5],base=2)-medrat,pch=20,ylim=c(-1,1),xlim=c(100000,200000),ylab="male/female log2 fold coverage")
plot(gsex[subc,2],log(gsex[subc,5],base=2)-medm,pch=20,xlim=c(100000,200000),ylim=c(-0.5,2.5))
points(gsex[subc,2],log(gsex[subc,7],base=2)-medf,pch=20,col="red")
plot(gsex[subc,2],2^(log(gsex[subc,5],base=2)-medm),pch=20,xlim=c(100000,200000),ylim=c(0,4))
points(gsex[subc,2],2^(log(gsex[subc,7],base=2)-medf),pch=20,col="red")
# read in a table of sex-sample associations
# rownames become "<ID>.<population>", matching the BU* sample names in cname
sex <- read.table("../het_grand/sexes.txt",stringsAsFactors=FALSE)
rownames(sex) <- paste(sex[,1],sex[,3],sep=".")
# read in a table of depth per site per sample
dep <- read.table("NW_012234400.1.depth.gz",stringsAsFactors=FALSE)
colnames(dep) <- c("scaffold","position",cname[grep("BU",cname)])
# throw out individuals with very low coverage
dep2 <- dep[,-c(1,2)]
dep2 <- dep2[,colSums(dep2)/dim(dep2)[1] > 0.3]
# get male and female column IDs
male <- sex[colnames(dep2),4] == "M"
female <- sex[colnames(dep2),4] == "F"
# total and per-sex summed depth across the candidate region
plot(dep[,2],rowSums(dep2),pch=20,cex=.2,ylim=c(0,350),xlim=c(640000,720000))
plot(dep[,2],rowSums(dep2[,male]),pch=20,cex=.2,ylim=c(0,350),xlim=c(640000,720000))
points(dep[,2],rowSums(dep2[,female]),pch=20,cex=.2,ylim=c(0,350),xlim=c(640000,720000),col="red")
# subin = sites inside the putative CNV; subout = flanking reference sites
subin <- (dep[,2] > 644000 & dep[,2] < 646000) | (dep[,2] > 665000 & dep[,2] < 667000) | (dep[,2] > 659000 & dep[,2] < 661000) | (dep[,2] > 652000 & dep[,2] < 653000) | (dep[,2] > 655000 & dep[,2] < 656000) | (dep[,2] > 669000 & dep[,2] < 672000)
# NOTE(review): this OR of "outside each range" conditions is TRUE for nearly
# every site; if the intent was to exclude those three ranges it should be an
# AND of the complements -- confirm intent
subout <- ((dep[,2] < 638000 | dep[,2] > 640000) | (dep[,2] < 632000 | dep[,2] > 636000) | (dep[,2] < 570000 | dep[,2] > 574000))
# per-sample mean depth inside vs outside; me1 = fold change per sample
mein <- apply(dep2[subin,],MAR=2,FUN=mean)
meout <- apply(dep2[subout,],MAR=2,FUN=mean)
me1 <- mein/meout
names(me1) <- colnames(dep2)
plot(mein/meout,col=(sex[colnames(dep2),4]=="F")+1,pch=20)
boxplot(mein/meout ~ sex[colnames(dep2),4],ylab="Fold change coverage over adjacent region",notch=TRUE)
points(jitter(rep(1,sum(sex[colnames(dep2),4]=="F")),amount=.05),(mein/meout)[sex[colnames(dep2),4]=="F"],pch=20,cex=1,col=rgb(0,0,0,.3))
points(jitter(rep(2,sum(sex[colnames(dep2),4]=="M")),amount=.05),(mein/meout)[sex[colnames(dep2),4]=="M"],pch=20,cex=1,col=rgb(0,0,0,.3))
# test whether fold change differs between sexes
wilcox.test(mein/meout ~ sex[colnames(dep2),4])
# start again with new scaffold...
# it shows the same pattern as above! whoah!
# 74kb to 103kb
# don's annotation says there's a D1B receptor in here. not NCBI. otherwise, riboflavin transporters
# fhet linkage group 6, old scaffold209
# read in a table of depth per site per sample
dep <- read.table("NW_012224610.1.depth.gz",stringsAsFactors=FALSE)
# 'cname' is defined earlier in the script; the "BU" entries are sample IDs.
colnames(dep) <- c("scaffold","position",cname[grep("BU",cname)])
# throw out individuals with very low coverage (mean depth <= 0.3)
dep2 <- dep[,-c(1,2)]
dep2 <- dep2[,colSums(dep2)/dim(dep2)[1] > 0.3]
# get male and female column IDs
male <- sex[colnames(dep2),4] == "M"
female <- sex[colnames(dep2),4] == "F"
# total, then male vs female (translucent red) summed coverage
plot(dep[,2],rowSums(dep2),pch=20,cex=.2,ylim=c(0,350),xlim=c(70000,110000))
plot(dep[,2],rowSums(dep2[,male]),pch=20,cex=.2,ylim=c(0,350),xlim=c(50000,150000))
points(dep[,2],rowSums(dep2[,female]),pch=20,cex=.2,ylim=c(0,350),col=rgb(1,0,0,.2))
# candidate windows ("in") vs baseline outside the 110-118 kb window ("out")
subin <- (dep[,2] > 98000 & dep[,2] < 102000) | (dep[,2] > 78000 & dep[,2] < 80000)
subout <- ((dep[,2] < 110000 | dep[,2] > 118000))
# per-sample mean depth inside vs outside, and the fold-change ratio
mein <- apply(dep2[subin,],MAR=2,FUN=mean)
meout <- apply(dep2[subout,],MAR=2,FUN=mean)
me2 <- mein/meout
names(me2) <- colnames(dep2)
# fold change per sample, colored by sex (note: males highlighted here,
# unlike the previous scaffold where females were)
plot(mein/meout,col=(sex[colnames(dep2),4]=="M")+1,pch=20)
boxplot(mein/meout ~ sex[colnames(dep2),4],ylab="Fold change coverage over adjacent region",notch=TRUE)
# overlay jittered per-sample points (group 1 = F, group 2 = M)
points(jitter(rep(1,sum(sex[colnames(dep2),4]=="F")),amount=.05),(mein/meout)[sex[colnames(dep2),4]=="F"],pch=20,cex=1,col=rgb(0,0,0,.3))
points(jitter(rep(2,sum(sex[colnames(dep2),4]=="M")),amount=.05),(mein/meout)[sex[colnames(dep2),4]=="M"],pch=20,cex=1,col=rgb(0,0,0,.3))
# test for a sex difference in fold change
wilcox.test(mein/meout ~ sex[colnames(dep2),4])
# read in a table of depth per site per sample. AMH!!!
# This file covers only the 140-180 kb slice of scaffold NW_012234285.1.
dep <- read.table("NW_012234285.1:140000-180000.depth.gz",stringsAsFactors=FALSE)
# 'cname' is defined earlier in the script; the "BU" entries are sample IDs.
colnames(dep) <- c("scaffold","position",cname[grep("BU",cname)])
# throw out individuals with very low coverage (mean depth <= 0.3)
dep2 <- dep[,-c(1,2)]
dep2 <- dep2[,colSums(dep2)/dim(dep2)[1] > 0.3]
# get male and female column IDs
male <- sex[colnames(dep2),4] == "M"
female <- sex[colnames(dep2),4] == "F"
# total coverage, then male vs female (translucent red), then the difference
plot(dep[,2],rowSums(dep2),pch=20,cex=.2)
plot(dep[,2],rowSums(dep2[,male]),pch=20,cex=.2,xlim=c(155000,170000))
points(dep[,2],rowSums(dep2[,female]),pch=20,cex=.2,col=rgb(1,0,0,.2))
plot(dep[,2],rowSums(dep2[,male])-rowSums(dep2[,female]),pch=20,cex=.2,xlim=c(155000,170000))
# candidate window ("in") vs baseline outside 150-159 kb ("out").
# NOTE(review): the 78-80 kb clause is carried over from the previous
# scaffold's analysis; this file only spans 140-180 kb, so it never matches.
# Confirm whether it was meant to be removed.
subin <- (dep[,2] > 160000 & dep[,2] < 161400) | (dep[,2] > 78000 & dep[,2] < 80000)
subout <- ((dep[,2] < 150000 | dep[,2] > 159000))
# per-sample mean depth inside vs outside, and the fold-change ratio
mein <- apply(dep2[subin,],MAR=2,FUN=mean)
meout <- apply(dep2[subout,],MAR=2,FUN=mean)
me3 <- mein/meout
names(me3) <- colnames(dep2)
# fold change per sample, colored by sex (males highlighted)
plot(mein/meout,col=(sex[colnames(dep2),4]=="M")+1,pch=20)
boxplot(mein/meout ~ sex[colnames(dep2),4],ylab="Fold change coverage over adjacent region",notch=TRUE)
# overlay jittered per-sample points (group 1 = F, group 2 = M)
points(jitter(rep(1,sum(sex[colnames(dep2),4]=="F")),amount=.05),(mein/meout)[sex[colnames(dep2),4]=="F"],pch=20,cex=1,col=rgb(0,0,0,.3))
points(jitter(rep(2,sum(sex[colnames(dep2),4]=="M")),amount=.05),(mein/meout)[sex[colnames(dep2),4]=="M"],pch=20,cex=1,col=rgb(0,0,0,.3))
# test for a sex difference in fold change
wilcox.test(mein/meout ~ sex[colnames(dep2),4])
# Restrict to samples present in all three scaffold analyses.
# ('%>%' assumes magrittr/dplyr was loaded earlier in the script.)
u <- intersect(names(me1),names(me2)) %>% intersect(.,names(me3))
me1 <- me1[u]
me2 <- me2[u]
me3 <- me3[u]
# me1 = NW_012234400, me2 = NW_012224610, me3 = NW_012234285 (AMH region).
# BUG FIX: the axis labels on the next two scatter plots were swapped /
# copy-pasted; each label now names the scaffold actually on that axis.
plot(me1,me2,col=(sex[names(me1),4]=="F")+1,pch=20,ylab="fold coverage over expected, NW_012224610",xlab="fold coverage over expected, NW_012234400")
abline(0,1)
plot(me1,me3,col=(sex[names(me1),4]=="F")+1,pch=20,ylab="fold coverage over expected, NW_012234285",xlab="fold coverage over expected, NW_012234400")
abline(0,1)
# both scaffolds are on different heteroclitus linkage groups:
# one boxplot per scaffold ratio, with jittered per-sample overlays
boxplot(me1 ~ sex[names(me1),4],ylab="Fold change coverage over adjacent region",notch=TRUE,ylim=c(0,5))
points(jitter(rep(1,sum(sex[names(me1),4]=="F")),amount=.05),(me1)[sex[names(me1),4]=="F"],pch=20,cex=1,col=rgb(0,0,0,.3))
points(jitter(rep(2,sum(sex[names(me1),4]=="M")),amount=.05),(me1)[sex[names(me1),4]=="M"],pch=20,cex=1,col=rgb(0,0,0,.3))
boxplot(me2 ~ sex[names(me2),4],ylab="Fold change coverage over adjacent region",notch=TRUE,ylim=c(0,5))
points(jitter(rep(1,sum(sex[names(me2),4]=="F")),amount=.05),(me2)[sex[names(me2),4]=="F"],pch=20,cex=1,col=rgb(0,0,0,.3))
points(jitter(rep(2,sum(sex[names(me2),4]=="M")),amount=.05),(me2)[sex[names(me2),4]=="M"],pch=20,cex=1,col=rgb(0,0,0,.3))
boxplot(me3 ~ sex[names(me3),4],ylab="Fold change coverage over adjacent region",notch=TRUE,ylim=c(0,10))
points(jitter(rep(1,sum(sex[names(me3),4]=="F")),amount=.05),(me3)[sex[names(me3),4]=="F"],pch=20,cex=1,col=rgb(0,0,0,.3))
points(jitter(rep(2,sum(sex[names(me3),4]=="M")),amount=.05),(me3)[sex[names(me3),4]=="M"],pch=20,cex=1,col=rgb(0,0,0,.3))
# combined ratio across all three regions
me4 <- me1+me2+me3
# NOTE(review): 'me4' is computed but the boxplot below replots me3 --
# presumably me4 was intended here. Left unchanged pending confirmation.
boxplot(me3 ~ sex[names(me3),4],ylab="Fold change coverage over adjacent region",notch=TRUE,ylim=c(0,10))
points(jitter(rep(1,sum(sex[names(me3),4]=="F")),amount=.05),(me3)[sex[names(me3),4]=="F"],pch=20,cex=1,col=rgb(0,0,0,.3))
points(jitter(rep(2,sum(sex[names(me3),4]=="M")),amount=.05),(me3)[sex[names(me3),4]=="M"],pch=20,cex=1,col=rgb(0,0,0,.3))
|
#' Bump labels up, but drop some if too crammed
#'
#' Label positioning strategy similar to \code{\link{bumpup}}, but with a cap
#' on the maximum vertical displacement that can be applied to a label. If
#' the required displacement overshoots the cap, the label is dropped. This
#' creates an effect similar to the labelling strategy seen in Tableau,
#' except with better results.
#'
#' @param d the directlabels positioning dataframe.
#'
#' @param max_bump the maximum bump factor allowed before a label is dropped.
#' Expressed as a factor of the label's height.
#'
#' @param ... ignored; present for compatibility with the directlabels
#' positioning-function interface.
#'
#' @return the positioning dataframe, with overlapping labels nudged upward
#' and over-displaced labels removed.
#'
#' @export
bumpup_bounded <- function(d, max_bump = 1.1, ...) {
  d <- calc.boxes(d)[order(d$y), ]
  # keeps track of
  # 1. cumulative nudge applied so far
  cumulative_nudge <- 0
  # 2. dropped labels (logical flag per row; avoids growing a vector)
  dropped <- logical(nrow(d))
  # 3. index of lowest undropped label
  j <- 1
  # Scan from the bottom up. seq_len()[-1] (instead of 2:nrow(d)) avoids
  # iterating backwards over c(2, 1) when there is only a single label.
  # TODO this does not check if boxes overlap horizontally or not. Like bumpup,
  # we have to check for that and leave them alone in these cases.
  for (i in seq_len(nrow(d))[-1]) {
    nudge_limit <- d$h[i] * max_bump
    # Negative when box i overlaps the box below it (j); zero when clear.
    nudge <- min(d$bottom[i] - d$top[j], 0)
    # No overlap, nothing to do.
    if (nudge == 0) {
      j <- i
      next
    }
    # If the cumulative nudge gets too large, drops the current label to
    # try and make room.
    # NOTE(review): the cap compares the *cumulative* nudge against a
    # per-label limit, so earlier labels can consume a later label's budget.
    # This matches the original behavior; confirm it is intended.
    if (cumulative_nudge + abs(nudge) > nudge_limit) {
      dropped[i] <- TRUE
      next
    }
    # Otherwise, we've got room. Applies nudge (subtracting a negative value
    # moves the box upward).
    d$bottom[i] <- d$bottom[i] - nudge
    d$top[i] <- d$top[i] - nudge
    d$y[i] <- d$y[i] - nudge
    j <- i
    cumulative_nudge <- cumulative_nudge + abs(nudge)
  }
  d[!dropped, ]
}
| /R/directlabels.R | no_license | gmega/playaxr | R | false | false | 1,671 | r | #' Bump labels up, but drops some if too crammed
#'
#' Label positioning strategy similar to \code{\link{bumpup}}, but with a cap
#' on the maximum vertical displacement that can be applied to a label. If
#' the required displacement overshoots the cap, the label is dropped. This
#' creates an effect similar to the labelling strategy seen in Tableau,
#' except with better results.
#'
#' @param d the directlabels positioning dataframe.
#'
#' @param max_bump the maximum bump factor allowed before a label is dropped.
#' Expressed as a factor of the label's height.
#'
#' @export
bumpup_bounded <- function(d, max_bump = 1.1, ...) {
  d <- calc.boxes(d)[order(d$y), ]
  shift_total <- 0  # total vertical displacement handed out so far
  removed <- c()    # row indices of labels that were given up on
  base <- 1         # lowest label that has not been removed
  # Walk the labels bottom-to-top, nudging each one clear of the label below.
  # TODO this does not check if boxes overlap horizontally or not. Like bumpup,
  # we have to check for that and leave them alone in these cases.
  for (idx in 2:nrow(d)) {
    cap <- max_bump * d$h[idx]
    # Negative when box idx overlaps the box below it; zero when clear.
    delta <- min(d$bottom[idx] - d$top[base], 0)
    if (delta == 0) {
      # Clear of the label below; this one becomes the new anchor.
      base <- idx
    } else if (shift_total + abs(delta) > cap) {
      # Displacement budget exceeded: discard this label to make room.
      removed <- c(removed, idx)
    } else {
      # Room available: shift the box (and its label position) upward.
      d$bottom[idx] <- d$bottom[idx] - delta
      d$top[idx] <- d$top[idx] - delta
      d$y[idx] <- d$y[idx] - delta
      base <- idx
      shift_total <- shift_total + abs(delta)
    }
  }
  d[!(1:nrow(d) %in% removed), ]
}
|
data(iris)
library(randomForest)

# --- Hold-out cross validation --------------------------------------------
# Returns a reproducible sample of ~2/3 of the row indices for training.
partition <- function(numRow, seed) {
  set.seed(seed)
  sample(x = numRow, size = trunc(numRow * (2 / 3)), replace = FALSE)
}
trainIndex <- partition(numRow = nrow(iris), seed = 123)
# BUG FIX: these two lines indexed 'train', which is never defined in this
# script; 'iris' was clearly intended.
training <- iris[trainIndex, ]
testing <- iris[-trainIndex, ]
# BUG FIX: use the plain formula; 'training$Sepal.Length ~ .' includes the
# response among the predictors via '.'.
model <- randomForest(Sepal.Length ~ ., data = training, ntree = 100)
# Sepal.Length is column 1 of iris, so drop it from the prediction data.
temp <- predict(model, testing[, -1])

# --- K-fold cross validation ----------------------------------------------
# BUG FIX: the accumulators were preallocated with nrow(iris) zeros and then
# appended to with c(), leaving 150 spurious leading zeros in the error
# summary. Start empty instead.
predicted <- c()
actual <- c()
k <- 10
# Shuffle the rows so the folds are random.
iris <- iris[sample(x = nrow(iris), size = nrow(iris), replace = FALSE), ]
folds <- cut(seq(1, nrow(iris)), breaks = k, labels = FALSE)
for (i in 1:k) {
  testIndex <- which(folds == i)
  testing <- iris[testIndex, ]
  training <- iris[-testIndex, ]
  model <- randomForest(Sepal.Length ~ ., data = training, ntree = 100)
  # BUG FIX: predict() already returns a numeric vector; wrapping it in
  # as.data.frame() made c(predicted, temp) produce a list.
  predicted <- c(predicted, predict(model, testing[, -1]))
  actual <- c(actual, testing[, 1])
}
# Distribution of absolute prediction errors across all folds.
accuracy <- summary(abs(actual - predicted))
# leave one out validation
#k <- nrow(iris) | /crossValidation.R | no_license | jhashanti/Machine-Learning-with-R | R | false | false | 1,204 | r | data(iris)
library(randomForest)
# cross validation
# Returns a reproducible sample of ~2/3 of the row indices for training.
partition <- function(numRow,seed) {
set.seed(seed)
index <- sample(x = numRow,size = trunc(numRow*(2/3)),replace = FALSE)
}
trainIndex <- partition(numRow = nrow(iris),seed = 123)
# NOTE(review): 'train' is never defined in this script, so these two lines
# error as written; presumably 'iris' was intended.
training <- train[trainIndex,]
testing <- train[-trainIndex,]
# NOTE(review): 'training$Sepal.Length ~ .' includes the response among the
# predictors via '.'; the plain 'Sepal.Length ~ .' is likely what was meant.
model <- randomForest(training$Sepal.Length ~ ., data = training, ntree = 100 )
temp <- predict(model, testing[,-1])
# K fold cross validation
# NOTE(review): preallocating with zeros and then c()-appending below leaves
# nrow(iris) spurious leading zeros in both vectors; start from c() or
# assign by index instead.
predicted <- vector(mode = "numeric",length = nrow(iris))
actual <- vector(mode = "numeric",length = nrow(iris))
k <- 10
# Shuffle the rows so the folds are random.
iris <- iris[sample(x = nrow(iris),size = nrow(iris),replace = FALSE),]
folds <- cut(seq(1,nrow(iris)),breaks=k,labels=FALSE)
for(i in 1:k){
testIndex <- which(folds == i)
testing <- iris[testIndex,]
training <- iris[-testIndex,]
model <- randomForest(training$Sepal.Length ~ ., data = training, ntree = 100 )
# NOTE(review): as.data.frame() makes 'temp' a data.frame, so the
# c(predicted, temp) below produces a list rather than a numeric vector.
temp <- as.data.frame(predict(model, testing[,-1]))
predicted <- c(predicted, temp)
actual <- c(actual, testing[,1])
}
# Distribution of absolute prediction errors across all folds.
accuracy <- summary(abs(actual - predicted))
#leave one out validation
#k <- nrow(iris)
\name{demInterpolation}
\Rdversion{1.0}
\alias{demInterpolation}
\title{Gaussian Process Interpolation Demo}
\description{
Plots, consecutively, an increasing number of data points, followed
by an interpolated fit through the data points using a Gaussian process.
This is a noiseless system, and the data is sampled from a GP with a
known covariance function. The curve is then recovered with minimal
uncertainty after only nine data points are included.
}
\usage{
demInterpolation(path=getwd(), filename='demInterpolation', png=FALSE, gif=FALSE)
}
\arguments{
\item{path}{path where the plot images are saved.}
\item{filename}{name of saved images.}
\item{png}{save image as png.}
\item{gif}{save series of images as animated gif.}
}
\seealso{
\code{
\link{gpOptions}, \link{kernCreate}, \link{kernCompute},
\link{gaussSamp}, \link{kernDiagCompute}, \link{gpCreate}, \link{gpPlot}.
}
}
\keyword{model}
| /man/demInterpolation.Rd | no_license | SheffieldML/gptk | R | false | false | 936 | rd | \name{demInterpolation}
\Rdversion{1.0}
\alias{demInterpolation}
\title{Gaussian Process Interpolation Demo}
\description{
Plots, consecutively, an increasing number of data points, followed
by an interpolated fit through the data points using a Gaussian process.
This is a noiseless system, and the data is sampled from a GP with a
known covariance function. The curve is then recovered with minimal
uncertainty after only nine data points are included.
}
\usage{
demInterpolation(path=getwd(), filename='demInterpolation', png=FALSE, gif=FALSE)
}
\arguments{
\item{path}{path where the plot images are saved.}
\item{filename}{name of saved images.}
\item{png}{save image as png.}
\item{gif}{save series of images as animated gif.}
}
\seealso{
\code{
\link{gpOptions}, \link{kernCreate}, \link{kernCompute},
\link{gaussSamp}, \link{kernDiagCompute}, \link{gpCreate}, \link{gpPlot}.
}
}
\keyword{model}
|
# @file GetDefaultCovariates.R
#
# Copyright 2017 Observational Health Data Sciences and Informatics
#
# This file is part of FeatureExtraction
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Get default covariate information from the database
#'
#' @description
#' Constructs a large default set of covariates for one or more cohorts using data in the CDM schema.
#' Includes covariates for all drugs, drug classes, condition, condition classes, procedures,
#' observations, etc.
#'
#' @param covariateSettings An object of type \code{defaultCovariateSettings} as created using the
#' \code{\link{createCovariateSettings}} function.
#'
#' @template GetCovarParams
#'
#' @export
getDbDefaultCovariateData <- function(connection,
                                      oracleTempSchema = NULL,
                                      cdmDatabaseSchema,
                                      cdmVersion = "4",
                                      cohortTempTable = "cohort_person",
                                      rowIdField = "subject_id",
                                      covariateSettings) {
  # Normalize the cohort table name so it always refers to a temp table.
  if (substr(cohortTempTable, 1, 1) != "#") {
    cohortTempTable <- paste("#", cohortTempTable, sep = "")
  }
  # Condition grouping requires at least one classification system; switch it
  # off when neither the MedDRA nor the SNOMED roll-up was requested.
  if (!covariateSettings$useCovariateConditionGroupMeddra & !covariateSettings$useCovariateConditionGroupSnomed) {
    covariateSettings$useCovariateConditionGroup <- FALSE
  }
  # Map CDM-version-specific names: v4 uses 'cohort_concept_id' and
  # 'concept_class', and stores measurements in the observation table.
  if (cdmVersion == "4") {
    cohortDefinitionId <- "cohort_concept_id"
    conceptClassId <- "concept_class"
    measurement <- "observation"
  } else {
    cohortDefinitionId <- "cohort_definition_id"
    conceptClassId <- "concept_class_id"
    measurement <- "measurement"
  }
  # Upload the concept IDs to exclude (if any) to the server-side temp table
  # #excluded_cov, optionally expanding the set with all descendant concepts.
  if (is.null(covariateSettings$excludedCovariateConceptIds) || length(covariateSettings$excludedCovariateConceptIds) ==
    0) {
    hasExcludedCovariateConceptIds <- FALSE
  } else {
    if (!is.numeric(covariateSettings$excludedCovariateConceptIds))
      stop("excludedCovariateConceptIds must be a (vector of) numeric")
    hasExcludedCovariateConceptIds <- TRUE
    DatabaseConnector::insertTable(connection,
                                   tableName = "#excluded_cov",
                                   data = data.frame(concept_id = as.integer(covariateSettings$excludedCovariateConceptIds)),
                                   dropTableIfExists = TRUE,
                                   createTable = TRUE,
                                   tempTable = TRUE,
                                   oracleTempSchema = oracleTempSchema)
    if (!is.null(covariateSettings$addDescendantsToExclude) && covariateSettings$addDescendantsToExclude) {
      writeLines("Adding descendants to concepts to exclude")
      sql <- SqlRender::loadRenderTranslateSql(sqlFilename = "IncludeDescendants.sql",
                                               packageName = "FeatureExtraction",
                                               dbms = attr(connection, "dbms"),
                                               oracleTempSchema = oracleTempSchema,
                                               cdm_database_schema = cdmDatabaseSchema,
                                               table_name = "#excluded_cov")
      DatabaseConnector::executeSql(connection, sql, progressBar = FALSE, reportOverallTime = FALSE)
    }
  }
  # Same for concept IDs to restrict to (#included_cov).
  if (is.null(covariateSettings$includedCovariateConceptIds) || length(covariateSettings$includedCovariateConceptIds) ==
    0) {
    hasIncludedCovariateConceptIds <- FALSE
  } else {
    if (!is.numeric(covariateSettings$includedCovariateConceptIds))
      stop("includedCovariateConceptIds must be a (vector of) numeric")
    hasIncludedCovariateConceptIds <- TRUE
    DatabaseConnector::insertTable(connection,
                                   tableName = "#included_cov",
                                   data = data.frame(concept_id = as.integer(covariateSettings$includedCovariateConceptIds)),
                                   dropTableIfExists = TRUE,
                                   createTable = TRUE,
                                   tempTable = TRUE,
                                   oracleTempSchema = oracleTempSchema)
    if (!is.null(covariateSettings$addDescendantsToInclude) && covariateSettings$addDescendantsToInclude) {
      writeLines("Adding descendants to concepts to include")
      sql <- SqlRender::loadRenderTranslateSql(sqlFilename = "IncludeDescendants.sql",
                                               packageName = "FeatureExtraction",
                                               dbms = attr(connection, "dbms"),
                                               oracleTempSchema = oracleTempSchema,
                                               cdm_database_schema = cdmDatabaseSchema,
                                               table_name = "#included_cov")
      DatabaseConnector::executeSql(connection, sql, progressBar = FALSE, reportOverallTime = FALSE)
    }
  }
  # Render the main covariate-construction SQL (writes to #cov / #cov_ref);
  # every flag in the settings object is passed through as a SqlRender
  # parameter.
  writeLines("Constructing default covariates")
  renderedSql <- SqlRender::loadRenderTranslateSql("GetCovariates.sql",
                                                   packageName = "FeatureExtraction",
                                                   dbms = attr(connection, "dbms"),
                                                   oracleTempSchema = oracleTempSchema,
                                                   cdm_database_schema = cdmDatabaseSchema,
                                                   cdm_version = cdmVersion,
                                                   cohort_temp_table = cohortTempTable,
                                                   row_id_field = rowIdField,
                                                   cohort_definition_id = cohortDefinitionId,
                                                   concept_class_id = conceptClassId,
                                                   measurement = measurement,
                                                   use_covariate_demographics = covariateSettings$useCovariateDemographics,
                                                   use_covariate_demographics_gender = covariateSettings$useCovariateDemographicsGender,
                                                   use_covariate_demographics_race = covariateSettings$useCovariateDemographicsRace,
                                                   use_covariate_demographics_ethnicity = covariateSettings$useCovariateDemographicsEthnicity,
                                                   use_covariate_demographics_age = covariateSettings$useCovariateDemographicsAge,
                                                   use_covariate_demographics_year = covariateSettings$useCovariateDemographicsYear,
                                                   use_covariate_demographics_month = covariateSettings$useCovariateDemographicsMonth,
                                                   use_covariate_condition_occurrence = covariateSettings$useCovariateConditionOccurrence,
                                                   use_covariate_condition_occurrence_long_term = covariateSettings$useCovariateConditionOccurrenceLongTerm,
                                                   use_covariate_condition_occurrence_short_term = covariateSettings$useCovariateConditionOccurrenceShortTerm,
                                                   use_covariate_condition_occurrence_inpt_medium_term = covariateSettings$useCovariateConditionOccurrenceInptMediumTerm,
                                                   use_covariate_condition_era = covariateSettings$useCovariateConditionEra,
                                                   use_covariate_condition_era_ever = covariateSettings$useCovariateConditionEraEver,
                                                   use_covariate_condition_era_overlap = covariateSettings$useCovariateConditionEraOverlap,
                                                   use_covariate_condition_group = covariateSettings$useCovariateConditionGroup,
                                                   use_covariate_condition_group_meddra = covariateSettings$useCovariateConditionGroupMeddra,
                                                   use_covariate_condition_group_snomed = covariateSettings$useCovariateConditionGroupSnomed,
                                                   use_covariate_drug_exposure = covariateSettings$useCovariateDrugExposure,
                                                   use_covariate_drug_exposure_long_term = covariateSettings$useCovariateDrugExposureLongTerm,
                                                   use_covariate_drug_exposure_short_term = covariateSettings$useCovariateDrugExposureShortTerm,
                                                   use_covariate_drug_era = covariateSettings$useCovariateDrugEra,
                                                   use_covariate_drug_era_long_term = covariateSettings$useCovariateDrugEraLongTerm,
                                                   use_covariate_drug_era_short_term = covariateSettings$useCovariateDrugEraShortTerm,
                                                   use_covariate_drug_era_overlap = covariateSettings$useCovariateDrugEraOverlap,
                                                   use_covariate_drug_era_ever = covariateSettings$useCovariateDrugEraEver,
                                                   use_covariate_drug_group = covariateSettings$useCovariateDrugGroup,
                                                   use_covariate_procedure_occurrence = covariateSettings$useCovariateProcedureOccurrence,
                                                   use_covariate_procedure_occurrence_long_term = covariateSettings$useCovariateProcedureOccurrenceLongTerm,
                                                   use_covariate_procedure_occurrence_short_term = covariateSettings$useCovariateProcedureOccurrenceShortTerm,
                                                   use_covariate_procedure_group = covariateSettings$useCovariateProcedureGroup,
                                                   use_covariate_observation = covariateSettings$useCovariateObservation,
                                                   use_covariate_observation_long_term = covariateSettings$useCovariateObservationLongTerm,
                                                   use_covariate_observation_short_term = covariateSettings$useCovariateObservationShortTerm,
                                                   use_covariate_observation_count_long_term = covariateSettings$useCovariateObservationCountLongTerm,
                                                   use_covariate_measurement = covariateSettings$useCovariateMeasurement,
                                                   use_covariate_measurement_long_term = covariateSettings$useCovariateMeasurementLongTerm,
                                                   use_covariate_measurement_short_term = covariateSettings$useCovariateMeasurementShortTerm,
                                                   use_covariate_measurement_count_long_term = covariateSettings$useCovariateMeasurementCountLongTerm,
                                                   use_covariate_measurement_below = covariateSettings$useCovariateMeasurementBelow,
                                                   use_covariate_measurement_above = covariateSettings$useCovariateMeasurementAbove,
                                                   use_covariate_concept_counts = covariateSettings$useCovariateConceptCounts,
                                                   use_covariate_risk_scores = covariateSettings$useCovariateRiskScores,
                                                   use_covariate_risk_scores_Charlson = covariateSettings$useCovariateRiskScoresCharlson,
                                                   use_covariate_risk_scores_DCSI = covariateSettings$useCovariateRiskScoresDCSI,
                                                   use_covariate_risk_scores_CHADS2 = covariateSettings$useCovariateRiskScoresCHADS2,
                                                   use_covariate_risk_scores_CHADS2VASc = covariateSettings$useCovariateRiskScoresCHADS2VASc,
                                                   use_covariate_interaction_year = covariateSettings$useCovariateInteractionYear,
                                                   use_covariate_interaction_month = covariateSettings$useCovariateInteractionMonth,
                                                   has_excluded_covariate_concept_ids = hasExcludedCovariateConceptIds,
                                                   has_included_covariate_concept_ids = hasIncludedCovariateConceptIds,
                                                   delete_covariates_small_count = covariateSettings$deleteCovariatesSmallCount,
                                                   long_term_days = covariateSettings$longTermDays,
                                                   medium_term_days = covariateSettings$mediumTermDays,
                                                   short_term_days = covariateSettings$shortTermDays,
                                                   window_end_days = covariateSettings$windowEndDays)
  DatabaseConnector::executeSql(connection, renderedSql)
  writeLines("Done")
  writeLines("Fetching data from server")
  start <- Sys.time()
  # Pull the sparse covariate matrix (row_id, covariate_id, value) as an ffdf
  # so large results stay out of memory.
  covariateSql <- "SELECT row_id, covariate_id, covariate_value FROM #cov ORDER BY covariate_id, row_id"
  covariateSql <- SqlRender::translateSql(sql = covariateSql,
                                          targetDialect = attr(connection, "dbms"),
                                          oracleTempSchema = oracleTempSchema)$sql
  covariates <- DatabaseConnector::querySql.ffdf(connection, covariateSql)
  covariateRefSql <- "SELECT covariate_id, covariate_name, analysis_id, concept_id FROM #cov_ref ORDER BY covariate_id"
  covariateRefSql <- SqlRender::translateSql(sql = covariateRefSql,
                                             targetDialect = attr(connection, "dbms"),
                                             oracleTempSchema = oracleTempSchema)$sql
  covariateRef <- DatabaseConnector::querySql.ffdf(connection, covariateRefSql)
  # Cohort size; used below to detect covariates present in every row.
  sql <- "SELECT COUNT_BIG(*) FROM @cohort_temp_table"
  sql <- SqlRender::renderSql(sql, cohort_temp_table = cohortTempTable)$sql
  sql <- SqlRender::translateSql(sql = sql,
                                 targetDialect = attr(connection, "dbms"),
                                 oracleTempSchema = oracleTempSchema)$sql
  populationSize <- DatabaseConnector::querySql(connection, sql)[1, 1]
  delta <- Sys.time() - start
  writeLines(paste("Fetching data took", signif(delta, 3), attr(delta, "units")))
  # Drop the server-side temp tables created by the construction SQL.
  # NOTE(review): this reassigns 'renderedSql', so the metaData below records
  # the cleanup SQL rather than the covariate-construction SQL -- confirm
  # whether that is intended.
  renderedSql <- SqlRender::loadRenderTranslateSql("RemoveCovariateTempTables.sql",
                                                   packageName = "FeatureExtraction",
                                                   dbms = attr(connection, "dbms"),
                                                   oracleTempSchema = oracleTempSchema)
  DatabaseConnector::executeSql(connection,
                                renderedSql,
                                progressBar = FALSE,
                                reportOverallTime = FALSE)
  colnames(covariates) <- SqlRender::snakeCaseToCamelCase(colnames(covariates))
  colnames(covariateRef) <- SqlRender::snakeCaseToCamelCase(colnames(covariateRef))
  # Remove redundant covariates
  writeLines("Removing redundant covariates")
  start <- Sys.time()
  deletedCovariateIds <- c()
  if (nrow(covariates) != 0) {
    # First delete all single covariates that appear in every row with the same value
    # (bySumFf is a package-internal helper; presumably it sums its first
    # argument within the bins of the second -- confirm against the package).
    valueCounts <- bySumFf(ff::ff(1, length = nrow(covariates)), covariates$covariateId)
    nonSparseIds <- valueCounts$bins[valueCounts$sums == populationSize]
    for (covariateId in nonSparseIds) {
      selection <- covariates$covariateId == covariateId
      idx <- ffbase::ffwhich(selection, selection == TRUE)
      values <- ffbase::unique.ff(covariates$covariateValue[idx])
      if (length(values) == 1) {
        # Constant across the whole population: carries no information.
        idx <- ffbase::ffwhich(selection, selection == FALSE)
        covariates <- covariates[idx, ]
        deletedCovariateIds <- c(deletedCovariateIds, covariateId)
      }
    }
    # Next, from groups of covariates that together cover every row, remove the most prevalent one
    problematicAnalysisIds <- c(2, 3, 4, 5, 6, 7)  # Gender, race, ethnicity, age, year, month
    for (analysisId in problematicAnalysisIds) {
      t <- covariateRef$analysisId == analysisId
      if (ffbase::sum.ff(t) != 0) {
        covariateIds <- ff::as.ram(covariateRef$covariateId[ffbase::ffwhich(t, t == TRUE)])
        freq <- sapply(covariateIds, function(x) {
          ffbase::sum.ff(covariates$covariateId == x)
        })
        if (sum(freq) == populationSize) {
          # Each row belongs to one of the categories, making one redundant. Remove most prevalent one
          categoryToDelete <- covariateIds[which(freq == max(freq))[1]]
          deletedCovariateIds <- c(deletedCovariateIds, categoryToDelete)
          t <- covariates$covariateId == categoryToDelete
          covariates <- covariates[ffbase::ffwhich(t, t == FALSE), ]
        }
      }
    }
  }
  delta <- Sys.time() - start
  writeLines(paste("Removing redundant covariates took", signif(delta, 3), attr(delta, "units")))
  metaData <- list(sql = renderedSql,
                   call = match.call(),
                   deletedCovariateIds = deletedCovariateIds)
  result <- list(covariates = covariates, covariateRef = covariateRef, metaData = metaData)
  class(result) <- "covariateData"
  return(result)
}
#' Create covariate settings
#'
#' @details
#' Creates an object specifying how covariates should be constructed from data in the CDM model.
#'
#' @param useCovariateDemographics A boolean value (TRUE/FALSE) to determine if
#' demographic covariates (age in 5-yr increments,
#' gender, race, ethnicity, year of index date, month
#' of index date) will be created and included in
#' future models.
#' @param useCovariateDemographicsGender A boolean value (TRUE/FALSE) to determine if gender
#' should be included in the model.
#' @param useCovariateDemographicsRace A boolean value (TRUE/FALSE) to determine if race
#' should be included in the model.
#' @param useCovariateDemographicsEthnicity A boolean value (TRUE/FALSE) to determine if
#' ethnicity should be included in the model.
#' @param useCovariateDemographicsAge A boolean value (TRUE/FALSE) to determine if age
#' (in 5 year increments) should be included in the
#' model.
#' @param useCovariateDemographicsYear A boolean value (TRUE/FALSE) to determine if
#' calendar year should be included in the model.
#' @param useCovariateDemographicsMonth A boolean value (TRUE/FALSE) to determine if
#' calendar month should be included in the model.
#' @param useCovariateConditionOccurrence A boolean value (TRUE/FALSE) to determine if
#' covariates derived from CONDITION_OCCURRENCE table
#' will be created and included in future models.
#' @param useCovariateConditionOccurrenceLongTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of condition in the long term
#' window prior to or on cohort index date. Only
#' applicable if useCovariateConditionOccurrence =
#' TRUE.
#' @param useCovariateConditionOccurrenceShortTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of condition in the short term
#' window prior to or on cohort index date. Only
#' applicable if useCovariateConditionOccurrence =
#' TRUE.
#' @param useCovariateConditionOccurrenceInptMediumTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of condition within
#' inpatient type in medium term window prior to or on cohort
#' index date. Only applicable if
#' useCovariateConditionOccurrence = TRUE.
#' @param useCovariateConditionEra A boolean value (TRUE/FALSE) to determine if
#' covariates derived from CONDITION_ERA table will be
#' created and included in future models.
#' @param useCovariateConditionEraEver A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of condition era anytime
#' prior to or on cohort index date. Only applicable
#' if useCovariateConditionEra = TRUE.
#' @param useCovariateConditionEraOverlap A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of condition era that
#' overlaps the cohort index date. Only applicable if
#' useCovariateConditionEra = TRUE.
#' @param useCovariateConditionGroup A boolean value (TRUE/FALSE) to determine if all
#' CONDITION_OCCURRENCE and CONDITION_ERA covariates
#' should be aggregated or rolled-up to higher-level
#' concepts based on vocabulary classification.
#' @param useCovariateConditionGroupMeddra A boolean value (TRUE/FALSE) to determine if all
#' CONDITION_OCCURRENCE and CONDITION_ERA covariates
#' should be aggregated or rolled-up to higher-level
#' concepts based on the MEDDRA classification.
#' @param useCovariateConditionGroupSnomed A boolean value (TRUE/FALSE) to determine if all
#' CONDITION_OCCURRENCE and CONDITION_ERA covariates
#' should be aggregated or rolled-up to higher-level
#' concepts based on the SNOMED classification.
#' @param useCovariateDrugExposure A boolean value (TRUE/FALSE) to determine if
#' covariates derived from DRUG_EXPOSURE table will be
#' created and included in future models.
#' @param useCovariateDrugExposureLongTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of drug in the long term window
#' prior to or on cohort index date. Only applicable
#' if useCovariateDrugExposure = TRUE.
#' @param useCovariateDrugExposureShortTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of drug in the short term window
#' prior to or on cohort index date. Only applicable
#' if useCovariateDrugExposure = TRUE.
#' @param useCovariateDrugEra A boolean value (TRUE/FALSE) to determine if
#' covariates derived from DRUG_ERA table will be
#' created and included in future models.
#' @param useCovariateDrugEraLongTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of drug era in the long term
#' window prior to or on cohort index date. Only
#' applicable if useCovariateDrugEra = TRUE.
#' @param useCovariateDrugEraShortTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of drug era in the short term window
#' prior to or on cohort index date. Only applicable
#' if useCovariateDrugEra = TRUE.
#' @param useCovariateDrugEraEver A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of drug era anytime prior
#' to or on cohort index date. Only applicable if
#' useCovariateDrugEra = TRUE.
#' @param useCovariateDrugEraOverlap A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of drug era that overlaps
#' the cohort index date. Only applicable if
#' useCovariateDrugEra = TRUE.
#' @param useCovariateDrugGroup A boolean value (TRUE/FALSE) to determine if all
#' DRUG_EXPOSURE and DRUG_ERA covariates should be
#' aggregated or rolled-up to higher-level concepts of
#' drug classes based on vocabulary classification.
#' @param useCovariateProcedureOccurrence A boolean value (TRUE/FALSE) to determine if
#' covariates derived from PROCEDURE_OCCURRENCE table
#' will be created and included in future models.
#' @param useCovariateProcedureOccurrenceLongTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of procedure in the long term
#' window prior to or on cohort index date. Only
#' applicable if useCovariateProcedureOccurrence =
#' TRUE.
#' @param useCovariateProcedureOccurrenceShortTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of procedure in the short term
#' window prior to or on cohort index date. Only
#' applicable if useCovariateProcedureOccurrence =
#' TRUE.
#' @param useCovariateProcedureGroup A boolean value (TRUE/FALSE) to determine if all
#' PROCEDURE_OCCURRENCE covariates should be
#' aggregated or rolled-up to higher-level concepts
#' based on vocabulary classification.
#' @param useCovariateObservation A boolean value (TRUE/FALSE) to determine if
#' covariates derived from OBSERVATION table will be
#' created and included in future models.
#' @param useCovariateObservationLongTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of observation in the long term
#' window prior to or on cohort index date. Only
#' applicable if useCovariateObservation = TRUE.
#' @param useCovariateObservationShortTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of observation in the short term
#' window prior to or on cohort index date. Only
#' applicable if useCovariateObservation = TRUE.
#' @param useCovariateObservationCountLongTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for the count of each observation concept in
#' LongTerm window prior to or on cohort index date. Only
#' applicable if useCovariateObservation = TRUE.
#' @param useCovariateMeasurement A boolean value (TRUE/FALSE) to determine if
#' covariates derived from MEASUREMENT table (OBSERVATION in CDM v4) will be
#' created and included in future models.
#' @param useCovariateMeasurementLongTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of measurement in the long term
#' window prior to or on cohort index date. Only
#' applicable if useCovariateMeasurement = TRUE.
#' @param useCovariateMeasurementShortTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of measurement in the short term
#' window prior to or on cohort index date. Only
#' applicable if useCovariateMeasurement = TRUE.
#' @param useCovariateMeasurementCountLongTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for the count of each measurement concept in
#' LongTerm window prior to or on cohort index date. Only
#' applicable if useCovariateMeasurement = TRUE.
#' @param useCovariateMeasurementBelow A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of measurement with a
#' numeric value below normal range for latest value
#' within medium term window of cohort index. Only applicable if
#' useCovariateMeasurement = TRUE (CDM v5+) or
#' useCovariateObservation = TRUE (CDM v4).
#' @param useCovariateMeasurementAbove A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of measurement with a
#' numeric value above normal range for latest value
#' within medium term window of cohort index. Only applicable if
#' useCovariateMeasurement = TRUE (CDM v5+) or
#' useCovariateObservation = TRUE (CDM v4).
#' @param useCovariateConceptCounts A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' count the number of concepts that a person has
#' within each domain (CONDITION, DRUG, PROCEDURE,
#' OBSERVATION)
#' @param useCovariateRiskScores A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' calculate various Risk Scores, including Charlson,
#' DCSI.
#' @param useCovariateRiskScoresCharlson A boolean value (TRUE/FALSE) to determine if the
#' Charlson comorbidity index should be included in
#' the model.
#' @param useCovariateRiskScoresDCSI A boolean value (TRUE/FALSE) to determine if the
#' DCSI score should be included in the model.
#' @param useCovariateRiskScoresCHADS2 A boolean value (TRUE/FALSE) to determine if the
#' CHADS2 score should be included in the model.
#' @param useCovariateRiskScoresCHADS2VASc A boolean value (TRUE/FALSE) to determine if the
#' CHADS2VASc score should be included in the model.
#' @param useCovariateInteractionYear A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' represent interaction terms between all other
#' covariates and the year of the cohort index date.
#' @param useCovariateInteractionMonth A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' represent interaction terms between all other
#' covariates and the month of the cohort index date.
#' @param deleteCovariatesSmallCount A numeric value used to remove covariates that
#' occur in both cohorts fewer than
#' deleteCovariatesSmallCount times.
#' @param excludedCovariateConceptIds A list of concept IDs that should NOT be used to
#' construct covariates.
#' @param addDescendantsToExclude Should descendant concept IDs be added to the list
#' of concepts to exclude?
#' @param includedCovariateConceptIds A list of concept IDs that should be used to
#' construct covariates.
#' @param addDescendantsToInclude Should descendant concept IDs be added to the list
#' of concepts to include?
#' @param longTermDays What is the length (in days) of the long-term window?
#' @param mediumTermDays What is the length (in days) of the medium-term window?
#' @param shortTermDays What is the length (in days) of the short-term window?
#' @param windowEndDays What is the last day of the window? 0 means the cohort
#' start date is the last date (included), 1 means the window
#' stops the day before the cohort start date, etc.
#' @param useCovariateProcedureOccurrence365d DEPRECATED. Use the LongTerm equivalent instead
#' @param useCovariateConditionOccurrence365d DEPRECATED. Use the LongTerm equivalent instead
#' @param useCovariateDrugExposure365d DEPRECATED. Use the LongTerm equivalent instead
#' @param useCovariateMeasurementCount365d DEPRECATED. Use the LongTerm equivalent instead
#' @param useCovariateDrugEra365d DEPRECATED. Use the LongTerm equivalent instead
#' @param useCovariateObservation365d DEPRECATED. Use the LongTerm equivalent instead
#' @param useCovariateObservationCount365d DEPRECATED. Use the LongTerm equivalent instead
#' @param useCovariateMeasurement365d DEPRECATED. Use the LongTerm equivalent instead
#' @param useCovariateConditionOccurrenceInpt180d DEPRECATED. Use the ShortTerm equivalent instead
#' @param useCovariateConditionOccurrence30d DEPRECATED. Use the ShortTerm equivalent instead
#' @param useCovariateDrugExposure30d DEPRECATED. Use the ShortTerm equivalent instead
#' @param useCovariateDrugEra30d DEPRECATED. Use the ShortTerm equivalent instead
#' @param useCovariateMeasurement30d DEPRECATED. Use the ShortTerm equivalent instead
#' @param useCovariateObservation30d DEPRECATED. Use the ShortTerm equivalent instead
#' @param useCovariateProcedureOccurrence30d DEPRECATED. Use the ShortTerm equivalent instead
#'
#'
#' @return
#' An object of type \code{covariateSettings}, to be used in other functions.
#'
#' @export
createCovariateSettings <- function(useCovariateDemographics = FALSE,
useCovariateDemographicsGender = FALSE,
useCovariateDemographicsRace = FALSE,
useCovariateDemographicsEthnicity = FALSE,
useCovariateDemographicsAge = FALSE,
useCovariateDemographicsYear = FALSE,
useCovariateDemographicsMonth = FALSE,
useCovariateConditionOccurrence = FALSE,
useCovariateConditionOccurrenceLongTerm = FALSE,
useCovariateConditionOccurrenceShortTerm = FALSE,
useCovariateConditionOccurrenceInptMediumTerm = FALSE,
useCovariateConditionEra = FALSE,
useCovariateConditionEraEver = FALSE,
useCovariateConditionEraOverlap = FALSE,
useCovariateConditionGroup = FALSE,
useCovariateConditionGroupMeddra = FALSE,
useCovariateConditionGroupSnomed = FALSE,
useCovariateDrugExposure = FALSE,
useCovariateDrugExposureLongTerm = FALSE,
useCovariateDrugExposureShortTerm = FALSE,
useCovariateDrugEra = FALSE,
useCovariateDrugEraLongTerm = FALSE,
useCovariateDrugEraShortTerm = FALSE,
useCovariateDrugEraOverlap = FALSE,
useCovariateDrugEraEver = FALSE,
useCovariateDrugGroup = FALSE,
useCovariateProcedureOccurrence = FALSE,
useCovariateProcedureOccurrenceLongTerm = FALSE,
useCovariateProcedureOccurrenceShortTerm = FALSE,
useCovariateProcedureGroup = FALSE,
useCovariateObservation = FALSE,
useCovariateObservationLongTerm = FALSE,
useCovariateObservationShortTerm = FALSE,
useCovariateObservationCountLongTerm = FALSE,
useCovariateMeasurement = FALSE,
useCovariateMeasurementLongTerm = FALSE,
useCovariateMeasurementShortTerm = FALSE,
useCovariateMeasurementCountLongTerm = FALSE,
useCovariateMeasurementBelow = FALSE,
useCovariateMeasurementAbove = FALSE,
useCovariateConceptCounts = FALSE,
useCovariateRiskScores = FALSE,
useCovariateRiskScoresCharlson = FALSE,
useCovariateRiskScoresDCSI = FALSE,
useCovariateRiskScoresCHADS2 = FALSE,
useCovariateRiskScoresCHADS2VASc = FALSE,
useCovariateInteractionYear = FALSE,
useCovariateInteractionMonth = FALSE,
excludedCovariateConceptIds = c(),
addDescendantsToExclude = TRUE,
includedCovariateConceptIds = c(),
addDescendantsToInclude = TRUE,
deleteCovariatesSmallCount = 100,
longTermDays = 365,
mediumTermDays = 180,
shortTermDays = 30,
windowEndDays = 0,
# The arguments below are deprecated and intentionally have no default:
# missing() is used to detect whether the caller supplied them.
useCovariateProcedureOccurrence365d,
useCovariateConditionOccurrence365d,
useCovariateDrugExposure365d,
useCovariateMeasurementCount365d,
useCovariateDrugEra365d,
useCovariateObservation365d,
useCovariateObservationCount365d,
useCovariateMeasurement365d,
useCovariateConditionOccurrenceInpt180d,
useCovariateConditionOccurrence30d,
useCovariateDrugExposure30d,
useCovariateDrugEra30d,
useCovariateMeasurement30d,
useCovariateObservation30d,
useCovariateProcedureOccurrence30d) {
# Step 1: map each deprecated fixed-window argument (365d/180d/30d) onto its
# LongTerm/MediumTerm/ShortTerm replacement, warning the caller. These
# assignments happen before defaults are collected below, so a deprecated
# argument effectively overrides the default of its replacement.
if (!missing(useCovariateProcedureOccurrence365d)) {
warning("Argument useCovariateProcedureOccurrence365d is deprecated. Use useCovariateProcedureOccurrenceLongTerm instead")
useCovariateProcedureOccurrenceLongTerm <- useCovariateProcedureOccurrence365d
}
if (!missing(useCovariateConditionOccurrence365d)) {
warning("Argument useCovariateConditionOccurrence365d is deprecated. Use useCovariateConditionOccurrenceLongTerm instead")
useCovariateConditionOccurrenceLongTerm <- useCovariateConditionOccurrence365d
}
if (!missing(useCovariateDrugExposure365d)) {
warning("Argument useCovariateDrugExposure365d is deprecated. Use useCovariateDrugExposureLongTerm instead")
useCovariateDrugExposureLongTerm <- useCovariateDrugExposure365d
}
if (!missing(useCovariateMeasurementCount365d)) {
warning("Argument useCovariateMeasurementCount365d is deprecated. Use useCovariateObservationCountLongTerm instead")
useCovariateObservationCountLongTerm <- useCovariateMeasurementCount365d
}
if (!missing(useCovariateDrugEra365d)) {
warning("Argument useCovariateDrugEra365d is deprecated. Use useCovariateDrugEraLongTerm instead")
useCovariateDrugEraLongTerm <- useCovariateDrugEra365d
}
if (!missing(useCovariateObservation365d)) {
warning("Argument useCovariateObservation365d is deprecated. Use useCovariateObservationLongTerm instead")
useCovariateObservationLongTerm <- useCovariateObservation365d
}
if (!missing(useCovariateObservationCount365d)) {
warning("Argument useCovariateObservationCount365d is deprecated. Use useCovariateObservationCountLongTerm instead")
useCovariateObservationCountLongTerm <- useCovariateObservationCount365d
}
if (!missing(useCovariateMeasurement365d)) {
warning("Argument useCovariateMeasurement365d is deprecated. Use useCovariateMeasurementLongTerm instead")
useCovariateMeasurementLongTerm <- useCovariateMeasurement365d
}
if (!missing(useCovariateConditionOccurrenceInpt180d)) {
warning("Argument useCovariateConditionOccurrenceInpt180d is deprecated. Use useCovariateConditionOccurrenceInptMediumTerm instead")
useCovariateConditionOccurrenceInptMediumTerm <- useCovariateConditionOccurrenceInpt180d
}
if (!missing(useCovariateConditionOccurrence30d)) {
warning("Argument useCovariateConditionOccurrence30d is deprecated. Use useCovariateConditionOccurrenceShortTerm instead")
useCovariateConditionOccurrenceShortTerm <- useCovariateConditionOccurrence30d
}
if (!missing(useCovariateDrugExposure30d)) {
warning("Argument useCovariateDrugExposure30d is deprecated. Use useCovariateDrugExposureShortTerm instead")
useCovariateDrugExposureShortTerm <- useCovariateDrugExposure30d
}
if (!missing(useCovariateDrugEra30d)) {
warning("Argument useCovariateDrugEra30d is deprecated. Use useCovariateDrugEraShortTerm instead")
useCovariateDrugEraShortTerm <- useCovariateDrugEra30d
}
if (!missing(useCovariateMeasurement30d)) {
warning("Argument useCovariateMeasurement30d is deprecated. Use useCovariateMeasurementShortTerm instead")
useCovariateMeasurementShortTerm <- useCovariateMeasurement30d
}
if (!missing(useCovariateObservation30d)) {
warning("Argument useCovariateObservation30d is deprecated. Use useCovariateObservationShortTerm instead")
useCovariateObservationShortTerm <- useCovariateObservation30d
}
if (!missing(useCovariateProcedureOccurrence30d)) {
warning("Argument useCovariateProcedureOccurrence30d is deprecated. Use useCovariateProcedureOccurrenceShortTerm instead")
useCovariateProcedureOccurrenceShortTerm <- useCovariateProcedureOccurrence30d
}
# Step 2: collect the current value of every non-deprecated formal (the
# default, unless the deprecated-argument mapping above already rebound it).
# Deprecated formals are recognized by the 365/180/30 digits in their names.
covariateSettings <- list()
formalNames <- names(formals(createCovariateSettings))
formalNames <- formalNames[!grepl("(365)|(180)|(30)", formalNames)]
for (name in formalNames) {
covariateSettings[[name]] <- get(name)
}
# Next: overwrite defaults with actual values if specified:
# NOTE(review): eval() in sys.frame(-3) assumes a fixed call-stack depth
# between this point and the caller's frame; verify the arguments still
# resolve correctly when this function is invoked indirectly (e.g. do.call).
values <- lapply(as.list(match.call())[-1], function(x) eval(x, envir = sys.frame(-3)))
for (name in names(values)) {
if (name %in% names(covariateSettings))
covariateSettings[[name]] <- values[[name]]
}
# The "fun" attribute names the function that downstream code should call
# to execute these settings against a database.
attr(covariateSettings, "fun") <- "getDbDefaultCovariateData"
class(covariateSettings) <- "covariateSettings"
return(covariateSettings)
}
# @file GetDefaultCovariates.R
#
# Copyright 2017 Observational Health Data Sciences and Informatics
#
# This file is part of FeatureExtraction
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Get default covariate information from the database
#'
#' @description
#' Constructs a large default set of covariates for one or more cohorts using data in the CDM schema.
#' Includes covariates for all drugs, drug classes, condition, condition classes, procedures,
#' observations, etc.
#'
#' @param covariateSettings An object of type \code{covariateSettings} as created using the
#' \code{\link{createCovariateSettings}} function.
#'
#' @template GetCovarParams
#'
#' @export
getDbDefaultCovariateData <- function(connection,
oracleTempSchema = NULL,
cdmDatabaseSchema,
cdmVersion = "4",
cohortTempTable = "cohort_person",
rowIdField = "subject_id",
covariateSettings) {
# Temp table names must carry the '#' prefix of SQL Server-style temp
# tables; SqlRender translates the prefix for other dialects.
if (substr(cohortTempTable, 1, 1) != "#") {
cohortTempTable <- paste("#", cohortTempTable, sep = "")
}
# Condition grouping is only meaningful when at least one classification
# (MedDRA or SNOMED) has been selected.
if (!covariateSettings$useCovariateConditionGroupMeddra & !covariateSettings$useCovariateConditionGroupSnomed) {
covariateSettings$useCovariateConditionGroup <- FALSE
}
# CDM v4 and v5+ differ in column and table names; in v4 measurements are
# stored in the OBSERVATION table.
if (cdmVersion == "4") {
cohortDefinitionId <- "cohort_concept_id"
conceptClassId <- "concept_class"
measurement <- "observation"
} else {
cohortDefinitionId <- "cohort_definition_id"
conceptClassId <- "concept_class_id"
measurement <- "measurement"
}
# Upload excluded concept IDs (if any) to the #excluded_cov temp table,
# optionally expanding the set with all descendant concepts.
if (is.null(covariateSettings$excludedCovariateConceptIds) || length(covariateSettings$excludedCovariateConceptIds) ==
0) {
hasExcludedCovariateConceptIds <- FALSE
} else {
if (!is.numeric(covariateSettings$excludedCovariateConceptIds))
stop("excludedCovariateConceptIds must be a (vector of) numeric")
hasExcludedCovariateConceptIds <- TRUE
DatabaseConnector::insertTable(connection,
tableName = "#excluded_cov",
data = data.frame(concept_id = as.integer(covariateSettings$excludedCovariateConceptIds)),
dropTableIfExists = TRUE,
createTable = TRUE,
tempTable = TRUE,
oracleTempSchema = oracleTempSchema)
if (!is.null(covariateSettings$addDescendantsToExclude) && covariateSettings$addDescendantsToExclude) {
writeLines("Adding descendants to concepts to exclude")
sql <- SqlRender::loadRenderTranslateSql(sqlFilename = "IncludeDescendants.sql",
packageName = "FeatureExtraction",
dbms = attr(connection, "dbms"),
oracleTempSchema = oracleTempSchema,
cdm_database_schema = cdmDatabaseSchema,
table_name = "#excluded_cov")
DatabaseConnector::executeSql(connection, sql, progressBar = FALSE, reportOverallTime = FALSE)
}
}
# Same for included concept IDs, uploaded to #included_cov.
if (is.null(covariateSettings$includedCovariateConceptIds) || length(covariateSettings$includedCovariateConceptIds) ==
0) {
hasIncludedCovariateConceptIds <- FALSE
} else {
if (!is.numeric(covariateSettings$includedCovariateConceptIds))
stop("includedCovariateConceptIds must be a (vector of) numeric")
hasIncludedCovariateConceptIds <- TRUE
DatabaseConnector::insertTable(connection,
tableName = "#included_cov",
data = data.frame(concept_id = as.integer(covariateSettings$includedCovariateConceptIds)),
dropTableIfExists = TRUE,
createTable = TRUE,
tempTable = TRUE,
oracleTempSchema = oracleTempSchema)
if (!is.null(covariateSettings$addDescendantsToInclude) && covariateSettings$addDescendantsToInclude) {
writeLines("Adding descendants to concepts to include")
sql <- SqlRender::loadRenderTranslateSql(sqlFilename = "IncludeDescendants.sql",
packageName = "FeatureExtraction",
dbms = attr(connection, "dbms"),
oracleTempSchema = oracleTempSchema,
cdm_database_schema = cdmDatabaseSchema,
table_name = "#included_cov")
DatabaseConnector::executeSql(connection, sql, progressBar = FALSE, reportOverallTime = FALSE)
}
}
writeLines("Constructing default covariates")
# Render and execute the main covariate-construction SQL, passing one SQL
# parameter per covariate setting. The script writes its results to the
# #cov and #cov_ref temp tables, which are queried below.
renderedSql <- SqlRender::loadRenderTranslateSql("GetCovariates.sql",
packageName = "FeatureExtraction",
dbms = attr(connection, "dbms"),
oracleTempSchema = oracleTempSchema,
cdm_database_schema = cdmDatabaseSchema,
cdm_version = cdmVersion,
cohort_temp_table = cohortTempTable,
row_id_field = rowIdField,
cohort_definition_id = cohortDefinitionId,
concept_class_id = conceptClassId,
measurement = measurement,
use_covariate_demographics = covariateSettings$useCovariateDemographics,
use_covariate_demographics_gender = covariateSettings$useCovariateDemographicsGender,
use_covariate_demographics_race = covariateSettings$useCovariateDemographicsRace,
use_covariate_demographics_ethnicity = covariateSettings$useCovariateDemographicsEthnicity,
use_covariate_demographics_age = covariateSettings$useCovariateDemographicsAge,
use_covariate_demographics_year = covariateSettings$useCovariateDemographicsYear,
use_covariate_demographics_month = covariateSettings$useCovariateDemographicsMonth,
use_covariate_condition_occurrence = covariateSettings$useCovariateConditionOccurrence,
use_covariate_condition_occurrence_long_term = covariateSettings$useCovariateConditionOccurrenceLongTerm,
use_covariate_condition_occurrence_short_term = covariateSettings$useCovariateConditionOccurrenceShortTerm,
use_covariate_condition_occurrence_inpt_medium_term = covariateSettings$useCovariateConditionOccurrenceInptMediumTerm,
use_covariate_condition_era = covariateSettings$useCovariateConditionEra,
use_covariate_condition_era_ever = covariateSettings$useCovariateConditionEraEver,
use_covariate_condition_era_overlap = covariateSettings$useCovariateConditionEraOverlap,
use_covariate_condition_group = covariateSettings$useCovariateConditionGroup,
use_covariate_condition_group_meddra = covariateSettings$useCovariateConditionGroupMeddra,
use_covariate_condition_group_snomed = covariateSettings$useCovariateConditionGroupSnomed,
use_covariate_drug_exposure = covariateSettings$useCovariateDrugExposure,
use_covariate_drug_exposure_long_term = covariateSettings$useCovariateDrugExposureLongTerm,
use_covariate_drug_exposure_short_term = covariateSettings$useCovariateDrugExposureShortTerm,
use_covariate_drug_era = covariateSettings$useCovariateDrugEra,
use_covariate_drug_era_long_term = covariateSettings$useCovariateDrugEraLongTerm,
use_covariate_drug_era_short_term = covariateSettings$useCovariateDrugEraShortTerm,
use_covariate_drug_era_overlap = covariateSettings$useCovariateDrugEraOverlap,
use_covariate_drug_era_ever = covariateSettings$useCovariateDrugEraEver,
use_covariate_drug_group = covariateSettings$useCovariateDrugGroup,
use_covariate_procedure_occurrence = covariateSettings$useCovariateProcedureOccurrence,
use_covariate_procedure_occurrence_long_term = covariateSettings$useCovariateProcedureOccurrenceLongTerm,
use_covariate_procedure_occurrence_short_term = covariateSettings$useCovariateProcedureOccurrenceShortTerm,
use_covariate_procedure_group = covariateSettings$useCovariateProcedureGroup,
use_covariate_observation = covariateSettings$useCovariateObservation,
use_covariate_observation_long_term = covariateSettings$useCovariateObservationLongTerm,
use_covariate_observation_short_term = covariateSettings$useCovariateObservationShortTerm,
use_covariate_observation_count_long_term = covariateSettings$useCovariateObservationCountLongTerm,
use_covariate_measurement = covariateSettings$useCovariateMeasurement,
use_covariate_measurement_long_term = covariateSettings$useCovariateMeasurementLongTerm,
use_covariate_measurement_short_term = covariateSettings$useCovariateMeasurementShortTerm,
use_covariate_measurement_count_long_term = covariateSettings$useCovariateMeasurementCountLongTerm,
use_covariate_measurement_below = covariateSettings$useCovariateMeasurementBelow,
use_covariate_measurement_above = covariateSettings$useCovariateMeasurementAbove,
use_covariate_concept_counts = covariateSettings$useCovariateConceptCounts,
use_covariate_risk_scores = covariateSettings$useCovariateRiskScores,
use_covariate_risk_scores_Charlson = covariateSettings$useCovariateRiskScoresCharlson,
use_covariate_risk_scores_DCSI = covariateSettings$useCovariateRiskScoresDCSI,
use_covariate_risk_scores_CHADS2 = covariateSettings$useCovariateRiskScoresCHADS2,
use_covariate_risk_scores_CHADS2VASc = covariateSettings$useCovariateRiskScoresCHADS2VASc,
use_covariate_interaction_year = covariateSettings$useCovariateInteractionYear,
use_covariate_interaction_month = covariateSettings$useCovariateInteractionMonth,
has_excluded_covariate_concept_ids = hasExcludedCovariateConceptIds,
has_included_covariate_concept_ids = hasIncludedCovariateConceptIds,
delete_covariates_small_count = covariateSettings$deleteCovariatesSmallCount,
long_term_days = covariateSettings$longTermDays,
medium_term_days = covariateSettings$mediumTermDays,
short_term_days = covariateSettings$shortTermDays,
window_end_days = covariateSettings$windowEndDays)
DatabaseConnector::executeSql(connection, renderedSql)
writeLines("Done")
writeLines("Fetching data from server")
start <- Sys.time()
# Fetch covariate values and the covariate reference into ff data frames so
# that large result sets do not need to fit in memory.
covariateSql <- "SELECT row_id, covariate_id, covariate_value FROM #cov ORDER BY covariate_id, row_id"
covariateSql <- SqlRender::translateSql(sql = covariateSql,
targetDialect = attr(connection, "dbms"),
oracleTempSchema = oracleTempSchema)$sql
covariates <- DatabaseConnector::querySql.ffdf(connection, covariateSql)
covariateRefSql <- "SELECT covariate_id, covariate_name, analysis_id, concept_id FROM #cov_ref ORDER BY covariate_id"
covariateRefSql <- SqlRender::translateSql(sql = covariateRefSql,
targetDialect = attr(connection, "dbms"),
oracleTempSchema = oracleTempSchema)$sql
covariateRef <- DatabaseConnector::querySql.ffdf(connection, covariateRefSql)
# Cohort size is needed below to detect covariates present in every row.
sql <- "SELECT COUNT_BIG(*) FROM @cohort_temp_table"
sql <- SqlRender::renderSql(sql, cohort_temp_table = cohortTempTable)$sql
sql <- SqlRender::translateSql(sql = sql,
targetDialect = attr(connection, "dbms"),
oracleTempSchema = oracleTempSchema)$sql
populationSize <- DatabaseConnector::querySql(connection, sql)[1, 1]
delta <- Sys.time() - start
writeLines(paste("Fetching data took", signif(delta, 3), attr(delta, "units")))
# Drop the temp tables created by the covariate-construction SQL.
renderedSql <- SqlRender::loadRenderTranslateSql("RemoveCovariateTempTables.sql",
packageName = "FeatureExtraction",
dbms = attr(connection, "dbms"),
oracleTempSchema = oracleTempSchema)
DatabaseConnector::executeSql(connection,
renderedSql,
progressBar = FALSE,
reportOverallTime = FALSE)
colnames(covariates) <- SqlRender::snakeCaseToCamelCase(colnames(covariates))
colnames(covariateRef) <- SqlRender::snakeCaseToCamelCase(colnames(covariateRef))
# Remove redundant covariates (those carrying no information because they
# are constant across the population, or are one level of an exhaustive
# categorical set such as gender/race/age group).
writeLines("Removing redundant covariates")
start <- Sys.time()
deletedCovariateIds <- c()
if (nrow(covariates) != 0) {
# First delete all single covariates that appear in every row with the same value.
# bySumFf (package helper) here sums a vector of ones per covariateId,
# i.e. counts how many rows each covariate appears in.
valueCounts <- bySumFf(ff::ff(1, length = nrow(covariates)), covariates$covariateId)
nonSparseIds <- valueCounts$bins[valueCounts$sums == populationSize]
for (covariateId in nonSparseIds) {
selection <- covariates$covariateId == covariateId
idx <- ffbase::ffwhich(selection, selection == TRUE)
values <- ffbase::unique.ff(covariates$covariateValue[idx])
if (length(values) == 1) {
# Constant across all rows: drop the covariate and record its ID.
idx <- ffbase::ffwhich(selection, selection == FALSE)
covariates <- covariates[idx, ]
deletedCovariateIds <- c(deletedCovariateIds, covariateId)
}
}
# Next, from groups of covariates that together cover every row, remove the most prevalent one
problematicAnalysisIds <- c(2, 3, 4, 5, 6, 7) # Gender, race, ethnicity, age, year, month
for (analysisId in problematicAnalysisIds) {
t <- covariateRef$analysisId == analysisId
if (ffbase::sum.ff(t) != 0) {
covariateIds <- ff::as.ram(covariateRef$covariateId[ffbase::ffwhich(t, t == TRUE)])
freq <- sapply(covariateIds, function(x) {
ffbase::sum.ff(covariates$covariateId == x)
})
if (sum(freq) == populationSize) {
# Each row belongs to one of the categories, making one redundant. Remove most prevalent one
categoryToDelete <- covariateIds[which(freq == max(freq))[1]]
deletedCovariateIds <- c(deletedCovariateIds, categoryToDelete)
t <- covariates$covariateId == categoryToDelete
covariates <- covariates[ffbase::ffwhich(t, t == FALSE), ]
}
}
}
}
delta <- Sys.time() - start
writeLines(paste("Removing redundant covariates took", signif(delta, 3), attr(delta, "units")))
# Record provenance (SQL, call, dropped covariates) for auditing downstream.
metaData <- list(sql = renderedSql,
call = match.call(),
deletedCovariateIds = deletedCovariateIds)
result <- list(covariates = covariates, covariateRef = covariateRef, metaData = metaData)
class(result) <- "covariateData"
return(result)
}
#' Create covariate settings
#'
#' @details
#' creates an object specifying how covariates should be contructed from data in the CDM model.
#'
#' @param useCovariateDemographics A boolean value (TRUE/FALSE) to determine if
#' demographic covariates (age in 5-yr increments,
#' gender, race, ethnicity, year of index date, month
#' of index date) will be created and included in
#' future models.
#' @param useCovariateDemographicsGender A boolean value (TRUE/FALSE) to determine if gender
#' should be included in the model.
#' @param useCovariateDemographicsRace A boolean value (TRUE/FALSE) to determine if race
#' should be included in the model.
#' @param useCovariateDemographicsEthnicity A boolean value (TRUE/FALSE) to determine if
#' ethnicity should be included in the model.
#' @param useCovariateDemographicsAge A boolean value (TRUE/FALSE) to determine if age
#' (in 5 year increments) should be included in the
#' model.
#' @param useCovariateDemographicsYear A boolean value (TRUE/FALSE) to determine if
#' calendar year should be included in the model.
#' @param useCovariateDemographicsMonth A boolean value (TRUE/FALSE) to determine if
#' calendar month should be included in the model.
#' @param useCovariateConditionOccurrence A boolean value (TRUE/FALSE) to determine if
#' covariates derived from CONDITION_OCCURRENCE table
#' will be created and included in future models.
#' @param useCovariateConditionOccurrenceLongTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of condition in the long term
#' window prior to or on cohort index date. Only
#' applicable if useCovariateConditionOccurrence =
#' TRUE.
#' @param useCovariateConditionOccurrenceShortTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of condition in the short term
#' window prior to or on cohort index date. Only
#' applicable if useCovariateConditionOccurrence =
#' TRUE.
#' @param useCovariateConditionOccurrenceInptMediumTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of condition within
#' inpatient type in medium term window prior to or on cohort
#' index date. Only applicable if
#' useCovariateConditionOccurrence = TRUE.
#' @param useCovariateConditionEra A boolean value (TRUE/FALSE) to determine if
#' covariates derived from CONDITION_ERA table will be
#' created and included in future models.
#' @param useCovariateConditionEraEver A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of condition era anytime
#' prior to or on cohort index date. Only applicable
#' if useCovariateConditionEra = TRUE.
#' @param useCovariateConditionEraOverlap A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of condition era that
#' overlaps the cohort index date. Only applicable if
#' useCovariateConditionEra = TRUE.
#' @param useCovariateConditionGroup A boolean value (TRUE/FALSE) to determine if all
#' CONDITION_OCCURRENCE and CONDITION_ERA covariates
#' should be aggregated or rolled-up to higher-level
#' concepts based on vocabulary classification.
#' @param useCovariateConditionGroupMeddra A boolean value (TRUE/FALSE) to determine if all
#' CONDITION_OCCURRENCE and CONDITION_ERA covariates
#' should be aggregated or rolled-up to higher-level
#' concepts based on the MEDDRA classification.
#' @param useCovariateConditionGroupSnomed A boolean value (TRUE/FALSE) to determine if all
#' CONDITION_OCCURRENCE and CONDITION_ERA covariates
#' should be aggregated or rolled-up to higher-level
#' concepts based on the SNOMED classification.
#' @param useCovariateDrugExposure A boolean value (TRUE/FALSE) to determine if
#' covariates derived from DRUG_EXPOSURE table will be
#' created and included in future models.
#' @param useCovariateDrugExposureLongTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of drug in the long term window
#' prior to or on cohort index date. Only applicable
#' if useCovariateDrugExposure = TRUE.
#' @param useCovariateDrugExposureShortTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of drug in the short term window
#' prior to or on cohort index date. Only applicable
#' if useCovariateDrugExposure = TRUE.
#' @param useCovariateDrugEra A boolean value (TRUE/FALSE) to determine if
#' covariates derived from DRUG_ERA table will be
#' created and included in future models.
#' @param useCovariateDrugEraLongTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of drug era in the long term
#' window prior to or on cohort index date. Only
#' applicable if useCovariateDrugEra = TRUE.
#' @param useCovariateDrugEraShortTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of drug era in the short term window
#' prior to or on cohort index date. Only applicable
#' if useCovariateDrugEra = TRUE.
#' @param useCovariateDrugEraEver A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of drug era anytime prior
#' to or on cohort index date. Only applicable if
#' useCovariateDrugEra = TRUE.
#' @param useCovariateDrugEraOverlap A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of drug era that overlaps
#' the cohort index date. Only applicable if
#' useCovariateDrugEra = TRUE.
#' @param useCovariateDrugGroup A boolean value (TRUE/FALSE) to determine if all
#' DRUG_EXPOSURE and DRUG_ERA covariates should be
#' aggregated or rolled-up to higher-level concepts of
#' drug classes based on vocabulary classification.
#' @param useCovariateProcedureOccurrence A boolean value (TRUE/FALSE) to determine if
#' covariates derived from PROCEDURE_OCCURRENCE table
#' will be created and included in future models.
#' @param useCovariateProcedureOccurrenceLongTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of procedure in the long term
#' window prior to or on cohort index date. Only
#' applicable if useCovariateProcedureOccurrence =
#' TRUE.
#' @param useCovariateProcedureOccurrenceShortTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of procedure in the short term
#' window prior to or on cohort index date. Only
#' applicable if useCovariateProcedureOccurrence =
#' TRUE.
#' @param useCovariateProcedureGroup A boolean value (TRUE/FALSE) to determine if all
#' PROCEDURE_OCCURRENCE covariates should be
#' aggregated or rolled-up to higher-level concepts
#' based on vocabulary classification.
#' @param useCovariateObservation A boolean value (TRUE/FALSE) to determine if
#' covariates derived from OBSERVATION table will be
#' created and included in future models.
#' @param useCovariateObservationLongTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of observation in the long term
#' window prior to or on cohort index date. Only
#' applicable if useCovariateObservation = TRUE.
#' @param useCovariateObservationShortTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of observation in the short term
#' window prior to or on cohort index date. Only
#' applicable if useCovariateObservation = TRUE.
#' @param useCovariateObservationCountLongTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for the count of each observation concept in
#' LongTerm window prior to or on cohort index date. Only
#' applicable if useCovariateObservation = TRUE.
#' @param useCovariateMeasurement A boolean value (TRUE/FALSE) to determine if
#' covariates derived from MEASUREMENT table (OBSERVATION table in CDM v4) will be
#' created and included in future models.
#' @param useCovariateMeasurementLongTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of measurement in the long term
#' window prior to or on cohort index date. Only
#' applicable if useCovariateMeasurement = TRUE.
#' @param useCovariateMeasurementShortTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of measurement in the short term
#' window prior to or on cohort index date. Only
#' applicable if useCovariateMeasurement = TRUE.
#' @param useCovariateMeasurementCountLongTerm A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for the count of each measurement concept in
#' LongTerm window prior to or on cohort index date. Only
#' applicable if useCovariateMeasurement = TRUE.
#' @param useCovariateMeasurementBelow A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of measurement with a
#' numeric value below normal range for latest value
#' within medium term window of cohort index. Only applicable if
#' useCovariateMeasurement = TRUE (CDM v5+) or
#' useCovariateObservation = TRUE (CDM v4).
#' @param useCovariateMeasurementAbove A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' look for presence/absence of measurement with a
#' numeric value above normal range for latest value
#' within medium term window of cohort index. Only applicable if
#' useCovariateMeasurement = TRUE (CDM v5+) or
#' useCovariateObservation = TRUE (CDM v4).
#' @param useCovariateConceptCounts A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' count the number of concepts that a person has
#' within each domain (CONDITION, DRUG, PROCEDURE,
#' OBSERVATION)
#' @param useCovariateRiskScores A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' calculate various Risk Scores, including Charlson,
#' DCSI.
#' @param useCovariateRiskScoresCharlson A boolean value (TRUE/FALSE) to determine if the
#' Charlson comorbidity index should be included in
#' the model.
#' @param useCovariateRiskScoresDCSI A boolean value (TRUE/FALSE) to determine if the
#' DCSI score should be included in the model.
#' @param useCovariateRiskScoresCHADS2 A boolean value (TRUE/FALSE) to determine if the
#' CHADS2 score should be included in the model.
#' @param useCovariateRiskScoresCHADS2VASc A boolean value (TRUE/FALSE) to determine if the
#' CHADS2VASc score should be included in the model.
#' @param useCovariateInteractionYear A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' represent interaction terms between all other
#' covariates and the year of the cohort index date.
#' @param useCovariateInteractionMonth A boolean value (TRUE/FALSE) to determine if
#' covariates will be created and used in models that
#' represent interaction terms between all other
#' covariates and the month of the cohort index date.
#' @param deleteCovariatesSmallCount A numeric value used to remove covariates that
#' occur in both cohorts fewer than
#' deleteCovariatesSmallCount times.
#' @param excludedCovariateConceptIds A list of concept IDs that should NOT be used to
#' construct covariates.
#' @param addDescendantsToExclude Should descendant concept IDs be added to the list
#' of concepts to exclude?
#' @param includedCovariateConceptIds A list of concept IDs that should be used to
#' construct covariates.
#' @param addDescendantsToInclude Should descendant concept IDs be added to the list
#' of concepts to include?
#' @param longTermDays What is the length (in days) of the long-term window?
#' @param mediumTermDays What is the length (in days) of the medium-term window?
#' @param shortTermDays What is the length (in days) of the short-term window?
#' @param windowEndDays What is the last day of the window? 0 means the cohort
#' start date is the last date (included), 1 means the window
#' stops the day before the cohort start date, etc.
#' @param useCovariateProcedureOccurrence365d DEPRECATED. Use the LongTerm equivalent instead
#' @param useCovariateConditionOccurrence365d DEPRECATED. Use the LongTerm equivalent instead
#' @param useCovariateDrugExposure365d DEPRECATED. Use the LongTerm equivalent instead
#' @param useCovariateMeasurementCount365d DEPRECATED. Use the LongTerm equivalent instead
#' @param useCovariateDrugEra365d DEPRECATED. Use the LongTerm equivalent instead
#' @param useCovariateObservation365d DEPRECATED. Use the LongTerm equivalent instead
#' @param useCovariateObservationCount365d DEPRECATED. Use the LongTerm equivalent instead
#' @param useCovariateMeasurement365d DEPRECATED. Use the LongTerm equivalent instead
#' @param useCovariateConditionOccurrenceInpt180d DEPRECATED. Use the ShortTerm equivalent instead
#' @param useCovariateConditionOccurrence30d DEPRECATED. Use the ShortTerm equivalent instead
#' @param useCovariateDrugExposure30d DEPRECATED. Use the ShortTerm equivalent instead
#' @param useCovariateDrugEra30d DEPRECATED. Use the ShortTerm equivalent instead
#' @param useCovariateMeasurement30d DEPRECATED. Use the ShortTerm equivalent instead
#' @param useCovariateObservation30d DEPRECATED. Use the ShortTerm equivalent instead
#' @param useCovariateProcedureOccurrence30d DEPRECATED. Use the ShortTerm equivalent instead
#'
#'
#' @return
#' An object of type \code{defaultCovariateSettings}, to be used in other functions.
#'
#' @export
createCovariateSettings <- function(useCovariateDemographics = FALSE,
useCovariateDemographicsGender = FALSE,
useCovariateDemographicsRace = FALSE,
useCovariateDemographicsEthnicity = FALSE,
useCovariateDemographicsAge = FALSE,
useCovariateDemographicsYear = FALSE,
useCovariateDemographicsMonth = FALSE,
useCovariateConditionOccurrence = FALSE,
useCovariateConditionOccurrenceLongTerm = FALSE,
useCovariateConditionOccurrenceShortTerm = FALSE,
useCovariateConditionOccurrenceInptMediumTerm = FALSE,
useCovariateConditionEra = FALSE,
useCovariateConditionEraEver = FALSE,
useCovariateConditionEraOverlap = FALSE,
useCovariateConditionGroup = FALSE,
useCovariateConditionGroupMeddra = FALSE,
useCovariateConditionGroupSnomed = FALSE,
useCovariateDrugExposure = FALSE,
useCovariateDrugExposureLongTerm = FALSE,
useCovariateDrugExposureShortTerm = FALSE,
useCovariateDrugEra = FALSE,
useCovariateDrugEraLongTerm = FALSE,
useCovariateDrugEraShortTerm = FALSE,
useCovariateDrugEraOverlap = FALSE,
useCovariateDrugEraEver = FALSE,
useCovariateDrugGroup = FALSE,
useCovariateProcedureOccurrence = FALSE,
useCovariateProcedureOccurrenceLongTerm = FALSE,
useCovariateProcedureOccurrenceShortTerm = FALSE,
useCovariateProcedureGroup = FALSE,
useCovariateObservation = FALSE,
useCovariateObservationLongTerm = FALSE,
useCovariateObservationShortTerm = FALSE,
useCovariateObservationCountLongTerm = FALSE,
useCovariateMeasurement = FALSE,
useCovariateMeasurementLongTerm = FALSE,
useCovariateMeasurementShortTerm = FALSE,
useCovariateMeasurementCountLongTerm = FALSE,
useCovariateMeasurementBelow = FALSE,
useCovariateMeasurementAbove = FALSE,
useCovariateConceptCounts = FALSE,
useCovariateRiskScores = FALSE,
useCovariateRiskScoresCharlson = FALSE,
useCovariateRiskScoresDCSI = FALSE,
useCovariateRiskScoresCHADS2 = FALSE,
useCovariateRiskScoresCHADS2VASc = FALSE,
useCovariateInteractionYear = FALSE,
useCovariateInteractionMonth = FALSE,
excludedCovariateConceptIds = c(),
addDescendantsToExclude = TRUE,
includedCovariateConceptIds = c(),
addDescendantsToInclude = TRUE,
deleteCovariatesSmallCount = 100,
longTermDays = 365,
mediumTermDays = 180,
shortTermDays = 30,
windowEndDays = 0,
useCovariateProcedureOccurrence365d,
useCovariateConditionOccurrence365d,
useCovariateDrugExposure365d,
useCovariateMeasurementCount365d,
useCovariateDrugEra365d,
useCovariateObservation365d,
useCovariateObservationCount365d,
useCovariateMeasurement365d,
useCovariateConditionOccurrenceInpt180d,
useCovariateConditionOccurrence30d,
useCovariateDrugExposure30d,
useCovariateDrugEra30d,
useCovariateMeasurement30d,
useCovariateObservation30d,
useCovariateProcedureOccurrence30d) {
# Deprecation shims: each legacy *365d/*180d/*30d argument has no default, so
# missing() is TRUE unless the caller explicitly supplied it. When supplied,
# emit a deprecation warning and copy the value into the replacement
# LongTerm/MediumTerm/ShortTerm argument's local binding.
if (!missing(useCovariateProcedureOccurrence365d)) {
warning("Argument useCovariateProcedureOccurrence365d is deprecated. Use useCovariateProcedureOccurrenceLongTerm instead")
useCovariateProcedureOccurrenceLongTerm <- useCovariateProcedureOccurrence365d
}
if (!missing(useCovariateConditionOccurrence365d)) {
warning("Argument useCovariateConditionOccurrence365d is deprecated. Use useCovariateConditionOccurrenceLongTerm instead")
useCovariateConditionOccurrenceLongTerm <- useCovariateConditionOccurrence365d
}
if (!missing(useCovariateDrugExposure365d)) {
warning("Argument useCovariateDrugExposure365d is deprecated. Use useCovariateDrugExposureLongTerm instead")
useCovariateDrugExposureLongTerm <- useCovariateDrugExposure365d
}
# NOTE(review): the deprecated MeasurementCount argument is mapped to the
# *Observation*CountLongTerm setting (the warning text agrees), even though a
# useCovariateMeasurementCountLongTerm formal exists. This looks intentional
# (CDM v4 stored measurements in the OBSERVATION table) — confirm upstream.
if (!missing(useCovariateMeasurementCount365d)) {
warning("Argument useCovariateMeasurementCount365d is deprecated. Use useCovariateObservationCountLongTerm instead")
useCovariateObservationCountLongTerm <- useCovariateMeasurementCount365d
}
if (!missing(useCovariateDrugEra365d)) {
warning("Argument useCovariateDrugEra365d is deprecated. Use useCovariateDrugEraLongTerm instead")
useCovariateDrugEraLongTerm <- useCovariateDrugEra365d
}
if (!missing(useCovariateObservation365d)) {
warning("Argument useCovariateObservation365d is deprecated. Use useCovariateObservationLongTerm instead")
useCovariateObservationLongTerm <- useCovariateObservation365d
}
if (!missing(useCovariateObservationCount365d)) {
warning("Argument useCovariateObservationCount365d is deprecated. Use useCovariateObservationCountLongTerm instead")
useCovariateObservationCountLongTerm <- useCovariateObservationCount365d
}
if (!missing(useCovariateMeasurement365d)) {
warning("Argument useCovariateMeasurement365d is deprecated. Use useCovariateMeasurementLongTerm instead")
useCovariateMeasurementLongTerm <- useCovariateMeasurement365d
}
if (!missing(useCovariateConditionOccurrenceInpt180d)) {
warning("Argument useCovariateConditionOccurrenceInpt180d is deprecated. Use useCovariateConditionOccurrenceInptMediumTerm instead")
useCovariateConditionOccurrenceInptMediumTerm <- useCovariateConditionOccurrenceInpt180d
}
if (!missing(useCovariateConditionOccurrence30d)) {
warning("Argument useCovariateConditionOccurrence30d is deprecated. Use useCovariateConditionOccurrenceShortTerm instead")
useCovariateConditionOccurrenceShortTerm <- useCovariateConditionOccurrence30d
}
if (!missing(useCovariateDrugExposure30d)) {
warning("Argument useCovariateDrugExposure30d is deprecated. Use useCovariateDrugExposureShortTerm instead")
useCovariateDrugExposureShortTerm <- useCovariateDrugExposure30d
}
if (!missing(useCovariateDrugEra30d)) {
warning("Argument useCovariateDrugEra30d is deprecated. Use useCovariateDrugEraShortTerm instead")
useCovariateDrugEraShortTerm <- useCovariateDrugEra30d
}
if (!missing(useCovariateMeasurement30d)) {
warning("Argument useCovariateMeasurement30d is deprecated. Use useCovariateMeasurementShortTerm instead")
useCovariateMeasurementShortTerm <- useCovariateMeasurement30d
}
if (!missing(useCovariateObservation30d)) {
warning("Argument useCovariateObservation30d is deprecated. Use useCovariateObservationShortTerm instead")
useCovariateObservationShortTerm <- useCovariateObservation30d
}
if (!missing(useCovariateProcedureOccurrence30d)) {
warning("Argument useCovariateProcedureOccurrence30d is deprecated. Use useCovariateProcedureOccurrenceShortTerm instead")
useCovariateProcedureOccurrenceShortTerm <- useCovariateProcedureOccurrence30d
}
# First: collect the current local value of every non-deprecated formal
# (defaults, possibly overwritten by the deprecation shims above).
covariateSettings <- list()
formalNames <- names(formals(createCovariateSettings))
# Drop the deprecated formals: they are the only argument names containing
# the digit groups 365/180/30, so this regex filter removes exactly them.
formalNames <- formalNames[!grepl("(365)|(180)|(30)", formalNames)]
for (name in formalNames) {
covariateSettings[[name]] <- get(name)
}
# Next: overwrite defaults with the arguments the caller actually supplied.
# NOTE(review): sys.frame(-3) assumes a fixed call depth (lapply -> FUN ->
# this function) so the promise is evaluated in the caller's frame; this is
# fragile if createCovariateSettings is itself called via do.call/lapply —
# confirm before refactoring.
values <- lapply(as.list(match.call())[-1], function(x) eval(x, envir = sys.frame(-3)))
for (name in names(values)) {
if (name %in% names(covariateSettings))
covariateSettings[[name]] <- values[[name]]
}
# Tag the settings with the extraction function consumers will dispatch to.
attr(covariateSettings, "fun") <- "getDbDefaultCovariateData"
class(covariateSettings) <- "covariateSettings"
return(covariateSettings)
}
|
#' givemeSTRESS
#'
#' This function takes a sites (rows) x species(columns) matrix,
#' uses the package 'vegan' to perform distance calculations and non-metric
#' dimensional scaling analysis, and returns a stress plot showing how well
#' your data fit the analysis. Should be used with 'givemeNMDS' to confirm
#' that your data is appropriate. May return a warning that says stress is
#' nearly zero -- this usually occurs with datasets that have lots of
#' 0 values, which is typically expected in large community datasets.
#'
#' @export givemeSTRESS
givemeSTRESS <- function(community) {
library(vegan)
comm.rel <- decostand(community, method = 'total')
comm.distmat <- vegdist(comm.rel, method = 'bray')
comm.distmat <- as.matrix(comm.distmat, labels = T)
commNMDS <- metaMDS(comm.distmat, k=3, maxit = 999, trymax = 500)
stressplot(commNMDS)
} | /R/givemeSTRESS.R | no_license | henrycstevens/ccomm | R | false | false | 891 | r | #' givemeSTRESS
#'
#' This function takes a sites (rows) x species(columns) matrix,
#' uses the package 'vegan' to perform distance calculations and non-metric
#' dimensional scaling analysis, and returns a stress plot showing how well
#' your data fit the analysis. Should be used with 'givemeNMDS' to confirm
#' that your data is appropriate. May return a warning that says stress is
#' nearly zero -- this usually occurs with datasets that have lots of
#' 0 values, which is typically expected in large community datasets.
#'
#' @export givemeSTRESS
givemeSTRESS <- function(community) {
  # Draw an NMDS stress plot for a sites (rows) x species (columns) matrix.
  #
  # community: numeric matrix or data frame of species abundances; sites are
  #   rows and species are columns. Raw counts are fine — they are
  #   relativized by site totals below.
  #
  # Pipeline: relativize abundances, compute Bray-Curtis dissimilarities,
  # fit a 3-dimensional non-metric multidimensional scaling ordination, and
  # draw the Shepard stress plot for the fit.
  library(vegan)  # attach vegan (decostand/vegdist/metaMDS/stressplot)
  comm.rel <- decostand(community, method = 'total')
  comm.distmat <- vegdist(comm.rel, method = 'bray')
  # was `labels = T`: T is a reassignable binding, TRUE is not
  comm.distmat <- as.matrix(comm.distmat, labels = TRUE)
  commNMDS <- metaMDS(comm.distmat, k = 3, maxit = 999, trymax = 500)
  stressplot(commNMDS)
} |
# .onAttach <- function(...){
# packageStartupMessage("Full description, Bug report, Suggestion and the latest codes:")
# packageStartupMessage("https://github.com/YinLiLin/R-KAML")
# }
.onLoad <- function(libname, pkgname) {
  # Package load hook. On macOS (MRO builds linked against Apple's vecLib
  # BLAS), cap the BLAS thread pool at one thread via the environment.
  running_on_macos <- identical(unname(Sys.info()["sysname"]), "Darwin")
  if (running_on_macos) {
    Sys.setenv(VECLIB_MAXIMUM_THREADS = "1")
  }
  # Load hooks should return nothing visibly.
  invisible()
}
| /R/zzz.R | permissive | kant/KAML | R | false | false | 415 | r | # .onAttach <- function(...){
# packageStartupMessage("Full description, Bug report, Suggestion and the latest codes:")
# packageStartupMessage("https://github.com/YinLiLin/R-KAML")
# }
.onLoad <- function(libname, pkgname) {
# Limit number of threads in veclib (MacOS MRO)
if (Sys.info()["sysname"] == "Darwin") {
Sys.setenv("VECLIB_MAXIMUM_THREADS" = "1")
}
return(invisible())
}
|
\name{OmegaExcessReturn}
\alias{OmegaExcessReturn}
\alias{OmegaExessReturn}
\title{Omega excess return of the return distribution}
\usage{
OmegaExcessReturn(Ra, Rb, MAR = 0, ...)
}
\arguments{
\item{Ra}{an xts, vector, matrix, data frame, timeSeries
or zoo object of asset returns}
\item{Rb}{return vector of the benchmark asset}
\item{MAR}{the minimum acceptable return}
\item{\dots}{any other passthru parameters}
}
\description{
Omega excess return is another form of downside
risk-adjusted return. It is calculated by multiplying the
downside variance of the style benchmark by 3 times the
style beta.
}
\details{
\deqn{\omega = r_P - 3*\beta_S*\sigma_{MD}^2}{
OmegaExcessReturn = Portfolio return - 3*style beta*style
benchmark variance squared}
where \eqn{\omega} is omega excess return, \eqn{\beta_S} is
style beta, \eqn{\sigma_D} is the portfolio annualised
downside risk and \eqn{\sigma_{MD}} is the benchmark
annualised downside risk.
}
\examples{
data(portfolio_bacon)
MAR = 0.005
print(OmegaExcessReturn(portfolio_bacon[,1], portfolio_bacon[,2], MAR)) #expected 0.0805
data(managers)
MAR = 0
print(OmegaExcessReturn(managers['1996',1], managers['1996',8], MAR))
print(OmegaExcessReturn(managers['1996',1:5], managers['1996',8], MAR))
}
\author{
Matthieu Lestel
}
\references{
Carl Bacon, \emph{Practical portfolio performance
measurement and attribution}, second edition 2008 p.103
}
\keyword{distribution}
\keyword{models}
\keyword{multivariate}
\keyword{ts}
| /man/OmegaExcessReturn.Rd | no_license | guillermozbta/portafolio-master | R | false | false | 1,488 | rd | \name{OmegaExcessReturn}
\alias{OmegaExcessReturn}
\alias{OmegaExessReturn}
\title{Omega excess return of the return distribution}
\usage{
OmegaExcessReturn(Ra, Rb, MAR = 0, ...)
}
\arguments{
\item{Ra}{an xts, vector, matrix, data frame, timeSeries
or zoo object of asset returns}
\item{Rb}{return vector of the benchmark asset}
\item{MAR}{the minimum acceptable return}
\item{\dots}{any other passthru parameters}
}
\description{
Omega excess return is another form of downside
risk-adjusted return. It is calculated by multiplying the
downside variance of the style benchmark by 3 times the
style beta.
}
\details{
\deqn{\omega = r_P - 3*\beta_S*\sigma_{MD}^2}{
OmegaExcessReturn = Portfolio return - 3*style beta*style
benchmark variance squared}
where \eqn{\omega} is omega excess return, \eqn{\beta_S} is
style beta, \eqn{\sigma_D} is the portfolio annualised
downside risk and \eqn{\sigma_{MD}} is the benchmark
annualised downside risk.
}
\examples{
data(portfolio_bacon)
MAR = 0.005
print(OmegaExcessReturn(portfolio_bacon[,1], portfolio_bacon[,2], MAR)) #expected 0.0805
data(managers)
MAR = 0
print(OmegaExcessReturn(managers['1996',1], managers['1996',8], MAR))
print(OmegaExcessReturn(managers['1996',1:5], managers['1996',8], MAR))
}
\author{
Matthieu Lestel
}
\references{
Carl Bacon, \emph{Practical portfolio performance
measurement and attribution}, second edition 2008 p.103
}
\keyword{distribution}
\keyword{models}
\keyword{multivariate}
\keyword{ts}
|
################################################################################
## Implement general problem with balancer package
################################################################################
fit_balancer_formatted <- function(X, trt,
                                   link = c("logit", "linear", "pos-linear"),
                                   regularizer = c(NULL, "l1", "l2", "ridge", "linf"),
                                   hyperparam, normalized = TRUE,
                                   opts = list()) {
    #' Find balancing weights by solving the dual optimization problem
    #' @param X n x d matrix of covariates
    #' @param trt Vector of treatment status indicators (1 = treated, 0 = control)
    #' @param link Link function for weights
    #' @param regularizer Dual of balance criterion. Note: `c(NULL, ...)` drops
    #'        the NULL, so the formal default is the length-4 character vector
    #'        c("l1", "l2", "ridge", "linf"); only the first element is used.
    #' @param hyperparam Regularization hyperparameter
    #' @param normalized Whether to normalize the weights
    #' @param opts Optimization options
    #' \itemize{
    #'  \item{MAX_ITERS }{Maximum number of iterations to run}
    #'  \item{EPS }{Error tolerance}}
    #'
    #' @return \itemize{
    #'  \item{dual }{Estimated dual propensity score parameters}
    #'  \item{weights }{Estimated primal weights for the control units}
    #'  \item{primal_obj, l1_error, l2_error, linf_error }{Imbalance measures}
    #'  \item{pscores, eta }{Implied propensity scores and linear predictor}
    #'  \item{feasible }{Whether the primal objective meets the hyperparameter}}
    ## fit ATT weights with the balancer package
    out <- balancer::balancer(X, trt, type = "att",
                              link = link, regularizer = regularizer,
                              hyperparam = hyperparam, normalized = normalized,
                              opts = opts)
    ## keep control-unit weights; compare reweighted controls to the
    ## treated-group covariate means
    weights <- out$weights[trt == 0]
    x <- X[trt == 0, , drop = FALSE]
    y <- colMeans(X[trt == 1, , drop = FALSE])
    ## covariate imbalance, measured in several norms
    imbalance <- t(x) %*% weights - y
    l2_error <- sqrt(sum(imbalance^2))
    l1_error <- sum(abs(imbalance))
    linf_error <- max(abs(imbalance))
    ## the primal objective is the norm dual to the regularizer
    ## (l1 <-> linf, linf <-> l1, everything else measured in l2).
    ## Take only the first element: the evaluated default is a length-4
    ## character vector, and a length > 1 condition inside if() is an error
    ## on R >= 4.2 (the original `regularizer == "l2"` chain had this bug).
    reg <- if (is.null(regularizer)) "" else regularizer[[1]]
    primal_obj <- switch(reg,
                         "l1" = linf_error,
                         "linf" = l1_error,
                         l2_error)
    ## primal objective scaled by what uniform control weights would achieve
    n_ctrl <- nrow(x)
    unif_primal_obj <- sqrt(sum((t(x) %*% rep(1 / n_ctrl, n_ctrl) - y)^2))
    scaled_primal_obj <- primal_obj / unif_primal_obj
    ## linear predictor and implied (logistic-link) propensity scores
    eta <- x %*% out$theta
    pscores <- 1 / (1 + exp(-eta))
    ## feasibility: primal objective within 10% relative tolerance of the
    ## hyperparameter (absolute tolerance tol^3 when hyperparam <= 0), or
    ## strictly below it. (The original comment claimed a 10^-3 * magnitude
    ## check and computed an unused `mag`; both removed.)
    tol <- 1e-1
    if (hyperparam > 0) {
        equalfeasible <- abs(hyperparam - primal_obj) / hyperparam < tol
    } else {
        equalfeasible <- abs(hyperparam - primal_obj) < tol^3
    }
    lessfeasible <- primal_obj < hyperparam
    feasible <- equalfeasible || lessfeasible
    return(list(weights = weights,
                dual = out$theta,
                controls = t(x),
                primal_obj = primal_obj,
                l1_error = l1_error,
                l2_error = l2_error,
                linf_error = linf_error,
                scaled_primal_obj = scaled_primal_obj,
                pscores = pscores,
                eta = eta,
                feasible = feasible))
}
get_balancer <- function(outcomes, metadata, trt_unit=1, hyperparam,
link=c("logit", "linear", "pos-linear"),
regularizer=c(NULL, "l1", "l2", "ridge", "linf"),
normalized=TRUE,
outcome_col=NULL,
cols=list(unit="unit", time="time",
outcome="outcome", treated="treated"),
opts=list()) {
    #' Find Balancing weights by solving the dual optimization problem
    #' @param outcomes Tidy dataframe with the outcomes and meta data
    #' @param metadata Dataframe of metadata
    #' @param trt_unit Unit that is treated (target for regression), default: 0
    #' @param hyperparam Regularization hyperparameter
    #' @param link Link function for weights
    #' @param regularizer Dual of balance criterion
    #' @param normalized Whether to normalize the weights
    #' @param outcome_col Column name which identifies outcomes, if NULL then
    #'                    assume only one outcome
    #' @param cols Column names corresponding to the units,
    #'             time variable, outcome, and treated indicator
    #' @param opts Optimization options
    #' \itemize{
    #'  \item{MAX_ITERS }{Maximum number of iterations to run}
    #'  \item{EPS }{Error tolerance}}
    #'
    #' @return outcomes with additional synthetic control added and weights
    #' @export
    ## get the synthetic controls weights
    ## (format_ipw is a package-internal helper that reshapes the tidy
    ## outcomes into an X matrix and treatment vector)
    data_out <- format_ipw(outcomes, metadata, outcome_col, cols)
    out <- fit_balancer_formatted(data_out$X, data_out$trt, link, regularizer,
                                  hyperparam, normalized, opts)
    ## match outcome types to synthetic controls
    if(!is.null(outcome_col)) {
        ## order outcomes by the factor levels of the fitted groups
        ## NOTE(review): dplyr::arrange_ is deprecated (removed in dplyr >= 1.x);
        ## the modern equivalent is arrange(.data[[outcome_col]]). Also assumes
        ## %>% is available via the package imports — confirm.
        data_out$outcomes[[outcome_col]] <- factor(outcomes[[outcome_col]],
                                                   levels = names(out$groups))
        data_out$outcomes <- data_out$outcomes %>% dplyr::arrange_(outcome_col)
    }
    ## reshape for imputation and overwrite the fitted controls with the
    ## plotting-format control outcomes expected by impute_controls
    syndat <- format_data(outcomes, metadata, trt_unit, outcome_col, cols)
    out$controls <- syndat$synth_data$Y0plot
    ctrls <- impute_controls(syndat$outcomes, out, syndat$trt_unit)
    ## copy the diagnostics from the fit onto the returned object
    ctrls$dual <- out$dual
    ctrls$primal_obj <- out$primal_obj
    ctrls$l1_error <- out$l1_error
    ctrls$l2_error <- out$l2_error
    ctrls$linf_error <- out$linf_error
    ctrls$pscores <- out$pscores
    ctrls$eta <- out$eta
    ctrls$feasible <- out$feasible
    ctrls$scaled_primal_obj <- out$scaled_primal_obj
    ctrls$controls <- out$controls
    return(ctrls)
}
| /R/balancer.R | no_license | ebenmichael/ents | R | false | false | 5,942 | r | ################################################################################
## Implement general problem with balancer package
################################################################################
fit_balancer_formatted <- function(X, trt,
link=c("logit", "linear", "pos-linear"),
regularizer=c(NULL, "l1", "l2", "ridge", "linf"),
hyperparam, normalized=TRUE,
opts=list()) {
#' Find Balancing weights by solving the dual optimization problem
#' @param X n x d matrix of covariates
#' @param trt Vector of treatment status indicators
#' @param link Link function for weights
#' @param regularizer Dual of balance criterion
#' @param hyperparam Regularization hyperparameter
#' @param normalized Whether to normalize the weights
#' @param opts Optimization options
#' \itemize{
#' \item{MAX_ITERS }{Maximum number of iterations to run}
#' \item{EPS }{Error rolerance}}
#'
#' @return \itemize{
#' \item{theta }{Estimated dual propensity score parameters}
#' \item{weights }{Estimated primal weights}
#' \item{imbalance }{Imbalance in covariates}}
## fit weights with balancer
out <- balancer::balancer(X, trt, type="att",
link=link, regularizer=regularizer,
hyperparam=hyperparam, normalized=normalized,
opts=opts)
## drop treated weights
weights <- out$weights[trt == 0]
x <- X[trt == 0,,drop=FALSE]
y <- colMeans(X[trt == 1,,drop=FALSE])
## compute l1, l2, and linf error
l2_error <- sqrt(sum((t(x) %*% weights - y)^2))
l1_error <- sum(abs(t(x) %*% weights - y))
linf_error <- max(abs(t(x) %*% weights - y))
if(is.null(regularizer)) {
primal_obj <- l2_error
} else if(regularizer=="l2") {
primal_obj <- l2_error
} else if(regularizer=="l1") {
primal_obj <- linf_error
} else if(regularizer == "linf") {
primal_obj <- l1_error
} else {
primal_obj <- l2_error
}
## primal objective value scaled by least squares difference for mean
unif_primal_obj <- sqrt(sum((t(x) %*% rep(1/dim(x)[1], dim(x)[1]) - y)^2))
scaled_primal_obj <- primal_obj / unif_primal_obj
eta <- x %*% out$theta
## compute propensity scores
pscores <- 1 / (1 + exp(-eta))
## get magnitude of vectors
mag <- sqrt(sum(y^2))
## check for equality within 10^-3 * magnitude
tol <- 1e-1
if(hyperparam > 0) {
equalfeasible <- abs(hyperparam - primal_obj) / hyperparam < tol
} else {
equalfeasible <- abs(hyperparam - primal_obj) < tol^3
}
lessfeasible <- primal_obj < hyperparam
feasible <- equalfeasible || lessfeasible
return(list(weights=weights,
dual=out$theta,
controls=t(x),
primal_obj=primal_obj,
l1_error=l1_error,
l2_error=l2_error,
linf_error=linf_error,
scaled_primal_obj=scaled_primal_obj,
pscores=pscores,
eta=eta,
feasible=feasible))
}
get_balancer <- function(outcomes, metadata, trt_unit=1, hyperparam,
link=c("logit", "linear", "pos-linear"),
regularizer=c(NULL, "l1", "l2", "ridge", "linf"),
normalized=TRUE,
outcome_col=NULL,
cols=list(unit="unit", time="time",
outcome="outcome", treated="treated"),
opts=list()) {
#' Find Balancing weights by solving the dual optimization problem
#' @param outcomes Tidy dataframe with the outcomes and meta data
#' @param metadata Dataframe of metadata
#' @param trt_unit Unit that is treated (target for regression), default: 0
#' @param hyperparam Regularization hyperparameter
#' @param link Link function for weights
#' @param regularizer Dual of balance criterion
#' @param normalized Whether to normalize the weights
#' @param outcome_col Column name which identifies outcomes, if NULL then
#' assume only one outcome
#' @param cols Column names corresponding to the units,
#' time variable, outcome, and treated indicator
#' @param opts Optimization options
#' \itemize{
#' \item{MAX_ITERS }{Maximum number of iterations to run}
#' \item{EPS }{Error tolerance}}
#'
#' @return outcomes with additional synthetic control added and weights
#' @export
## get the synthetic controls weights
data_out <- format_ipw(outcomes, metadata, outcome_col, cols)
out <- fit_balancer_formatted(data_out$X, data_out$trt, link, regularizer,
hyperparam, normalized, opts)
## match outcome types to synthetic controls
if(!is.null(outcome_col)) {
data_out$outcomes[[outcome_col]] <- factor(outcomes[[outcome_col]],
levels = names(out$groups))
data_out$outcomes <- data_out$outcomes %>% dplyr::arrange_(outcome_col)
}
syndat <- format_data(outcomes, metadata, trt_unit, outcome_col, cols)
out$controls <- syndat$synth_data$Y0plot
ctrls <- impute_controls(syndat$outcomes, out, syndat$trt_unit)
ctrls$dual <- out$dual
ctrls$primal_obj <- out$primal_obj
ctrls$l1_error <- out$l1_error
ctrls$l2_error <- out$l2_error
ctrls$linf_error <- out$linf_error
ctrls$pscores <- out$pscores
ctrls$eta <- out$eta
ctrls$feasible <- out$feasible
ctrls$scaled_primal_obj <- out$scaled_primal_obj
ctrls$controls <- out$controls
return(ctrls)
}
|
#' @template dbispec-sub-wip
#' @format NULL
#' @section Meta:
#' \subsection{`dbGetRowsAffected("DBIResult")`}{
# DBI backend-spec fragment: tests that dbGetRowsAffected() reports the
# correct count for data-modifying statements. The #' lines are roxygen text
# assembled into the DBItest specification document; with_connection/ctx/
# get_iris are supplied by the surrounding test harness.
spec_meta_get_rows_affected <- list(
#' Information on affected rows is correct.
rows_affected = function(ctx) {
with_connection({
# the table must not exist yet; querying it should error
expect_error(dbGetQuery(con, "SELECT * FROM iris"))
# schedule cleanup so the table is dropped even if an expectation fails
on.exit(expect_error(dbExecute(con, "DROP TABLE iris"), NA),
add = TRUE)
iris <- get_iris(ctx)
dbWriteTable(con, "iris", iris)
local({
# delete a known subset of rows, quoting identifier and literal
# through the driver so the test works across SQL dialects
query <- paste0(
"DELETE FROM iris WHERE (",
dbQuoteIdentifier(con, "Species"),
" = ", dbQuoteString(con, "versicolor"),
")")
res <- dbSendStatement(con, query)
on.exit(expect_error(dbClearResult(res), NA), add = TRUE)
ra <- dbGetRowsAffected(res)
# sum() over a logical vector yields an integer, matching the
# integer contract of dbGetRowsAffected()
expect_identical(ra, sum(iris$Species == "versicolor"))
})
local({
# a DELETE matching nothing must report exactly 0L affected rows
query <- "DELETE FROM iris WHERE (0 = 1)"
res <- dbSendStatement(con, query)
on.exit(expect_error(dbClearResult(res), NA), add = TRUE)
ra <- dbGetRowsAffected(res)
expect_identical(ra, 0L)
})
})
},
#' }
NULL
)
| /R/spec-meta-get-rows-affected.R | no_license | thrasibule/DBItest | R | false | false | 1,162 | r | #' @template dbispec-sub-wip
#' @format NULL
#' @section Meta:
#' \subsection{`dbGetRowsAffected("DBIResult")`}{
spec_meta_get_rows_affected <- list(
#' Information on affected rows is correct.
rows_affected = function(ctx) {
with_connection({
expect_error(dbGetQuery(con, "SELECT * FROM iris"))
on.exit(expect_error(dbExecute(con, "DROP TABLE iris"), NA),
add = TRUE)
iris <- get_iris(ctx)
dbWriteTable(con, "iris", iris)
local({
query <- paste0(
"DELETE FROM iris WHERE (",
dbQuoteIdentifier(con, "Species"),
" = ", dbQuoteString(con, "versicolor"),
")")
res <- dbSendStatement(con, query)
on.exit(expect_error(dbClearResult(res), NA), add = TRUE)
ra <- dbGetRowsAffected(res)
expect_identical(ra, sum(iris$Species == "versicolor"))
})
local({
query <- "DELETE FROM iris WHERE (0 = 1)"
res <- dbSendStatement(con, query)
on.exit(expect_error(dbClearResult(res), NA), add = TRUE)
ra <- dbGetRowsAffected(res)
expect_identical(ra, 0L)
})
})
},
#' }
NULL
)
|
# Do p-values from sampling out of a standard normal distribution follow a uniform distribution?
#Refs:
#https://stats.stackexchange.com/questions/10613/why-are-p-values-uniformly-distributed-under-the-null-hypothesis
#https://www.cyclismo.org/tutorial/R/pValues.html
#Simulation
# Draw `run_times` null samples, compute each two-sided z-test p-value, and
# plot their histogram (a flat histogram indicates the Uniform(0, 1) law).
run_times <- 10000
p_val <- numeric(run_times)  # preallocate instead of growing with c() in the loop
set.seed(101)
for (i in seq_len(run_times)) {
  # Mean of n = 100 iid N(0, 1) draws.  Base R only: the original piped
  # through %>% without attaching magrittr, which errors in a fresh session.
  x_bar <- mean(rnorm(100))
  # z statistic for H0: mu = 0 with known sd = 1, so SE = 1 / sqrt(100).
  z <- (x_bar - 0) / (1 / sqrt(100))
  p_val[i] <- 2 * pnorm(-abs(z))  # two-sided p-value
}
hist(p_val)
#Refs:
#https://stats.stackexchange.com/questions/10613/why-are-p-values-uniformly-distributed-under-the-null-hypothesis
#https://www.cyclismo.org/tutorial/R/pValues.html
#Simulation
# Draw `run_times` null samples, compute each two-sided z-test p-value, and
# plot their histogram (a flat histogram indicates the Uniform(0, 1) law).
run_times <- 10000
p_val <- numeric(run_times)  # preallocate instead of growing with c() in the loop
set.seed(101)
for (i in seq_len(run_times)) {
  # Mean of n = 100 iid N(0, 1) draws.  Base R only: the original piped
  # through %>% without attaching magrittr, which errors in a fresh session.
  x_bar <- mean(rnorm(100))
  # z statistic for H0: mu = 0 with known sd = 1, so SE = 1 / sqrt(100).
  z <- (x_bar - 0) / (1 / sqrt(100))
  p_val[i] <- 2 * pnorm(-abs(z))  # two-sided p-value
}
hist(p_val)
#------------------------------------------------------------------------------------
# Functions for calculation of stratification-based case fatality rate
# Byungwon Kim
# Last update: July 28, 2020
#------------------------------------------------------------------------------------
## 1. Data manipulation
SCFR_data <- function(raw_data, time.var, group.var){
  # This function translates the raw data into an appropriate form for our proposed CFR calculation
  # Input: raw_data - data frame that contains time variable, group variable, several count variables
  #        time.var - the name of time variable in the raw data
  #        group.var - the name of group variable in the raw data
  #
  # Output: data.out - a list of data frames transformed from the raw data
  #         each data frame has the form of (time) x (group) matrix
  #         whose elements correspond to their counts
  #
  # NOTE(review): assumes each (time, group) pair occurs exactly once in
  # raw_data and that every column other than time.var/group.var is a numeric
  # count -- TODO confirm against the data sources that feed this function.
  Time.index <- sort(unique(raw_data[,time.var]))
  Group.index <- sort(unique(raw_data[,group.var]))
  Num.count.var <- ncol(raw_data) - 2 # number of variables except the time and group variables
  # 3-d staging array indexed as (count variable, time, group)
  data.out.array <- array(0,
                          dim = c(Num.count.var, length(Time.index), length(Group.index)))
  for (j in 1:length(Time.index)){
    # rows observed at time j, with the time column dropped
    temp <- data.frame(raw_data[raw_data[,time.var] == Time.index[j], colnames(raw_data) != time.var])
    for (k in 1:length(Group.index)){
      # the (single) row for group k at time j, with the group column dropped
      temp2 <- data.frame(temp[temp[,group.var] == Group.index[k], colnames(temp) != group.var])
      for (i in 1:Num.count.var){
        # temp2[i] is a one-cell data frame; as.numeric() unwraps it to a scalar
        data.out.array[i, j, k] <- as.numeric(temp2[i])
      }
    }
  }
  # repackage the array into one (time) x (group) data frame per count variable
  data.out <- list()
  for (i in 1:Num.count.var){
    data.out[[i]] <- data.frame(data.out.array[i,,])
    rownames(data.out[[i]]) <- Time.index
    colnames(data.out[[i]]) <- Group.index
  }
  return(data.out)
}
## 1-(1). Partial grouping
SCFR_partial_group <- function(data.table, group.ind){
  # Merge existing groups of a count table into user-defined super-groups.
  # Input: data.table - a (time) x (group) count table produced by SCFR_data();
  #                     each column of the table is one existing group
  #        group.ind - a (preferably named) list; each element holds the column
  #                    names (or positions) of data.table that make up one
  #                    sub-group, e.g. for age groups ("0s", "10s", ..., "80s"):
  #                      group.ind = list(young = c("0s", "10s", "20s"),
  #                                       middle = c("30s", "40s", "50s"),
  #                                       old = c("60s", "70s", "80s"))
  #                    Not all existing groups need to be covered, but names or
  #                    locations of columns need to be clearly stated.
  # Output: data.out - a (time) x (sub group) count table
  Name.subgroup <- names(group.ind)
  if (length(Name.subgroup) == 0){
    # if names of subgroups are not given, fall back to positional indices
    Name.subgroup <- 1:length(group.ind)
  }
  data.out <- data.frame(matrix(0, nrow = nrow(data.table), ncol = length(Name.subgroup)))
  for (i in 1:length(Name.subgroup)){
    # drop = FALSE keeps `temp` a data frame even when the sub-group consists
    # of a single column; without it the selection collapses to a plain vector,
    # ncol(temp) returns NULL, and the condition below errors out.
    temp <- data.table[, group.ind[[Name.subgroup[i]]], drop = FALSE]
    if (ncol(temp) > 1){
      data.out[,i] <- apply(temp, 1, sum)
    }else{
      data.out[,i] <- temp[, 1]
    }
  }
  rownames(data.out) <- rownames(data.table)
  colnames(data.out) <- Name.subgroup
  return(data.out)
}
## 1-(2). Estimation of cured counts in each group
SCFR_Cured <- function(N, cured_count, Matrix = TRUE){
  # This function estimates the number of cured patients in each group
  # based on the proportion of each group among total confirmed patients.
  # Input: N - a vector or matrix of confirmed patients
  #            (matrix form is (time) x (group))
  #        cured_count - total count of cured patients;
  #            a vector (one entry per time) if N is a matrix, a scalar if N
  #            is a vector
  #        Matrix - TRUE (default) = N is a matrix, FALSE = N is a vector
  # Output: C - a vector or matrix of estimated counts of cured patients
  #             (floor() keeps the estimates integer-valued)
  #
  # ** This function is required for "SCFR calculation - stratified by age
  # ** group - for the COVID-19 outbreak in South Korea" because KCDC (Korea
  # ** Center for Disease Control and prevention) does not report the
  # ** age-grouped number of cured patients.
  if (Matrix){
    # row totals of confirmed patients, replicated across columns so the
    # per-group proportions can be computed element-wise
    Total <- apply(N, 1, sum)
    Prop_group <- N / matrix(rep(Total, ncol(N)), ncol = ncol(N), byrow = FALSE)
    C <- floor(Prop_group * matrix(rep(cured_count, ncol(Prop_group)), ncol = ncol(Prop_group), byrow = FALSE))
  }else{
    Total <- sum(N)
    Prop_group <- N / Total
    C <- floor(Prop_group * cured_count)
  }
  # Explicit return: the original ended on the if/else assignment, which made
  # the result come back invisibly even though the value itself was correct.
  return(C)
}
## 2. Calculation of SCFR
SCFR_estimation <- function(D, U, C, N, model.option = 1){
  # This function calculates our proposed (stratification-based) case fatality rate (CFR)
  # Input: D - (time) x (group) formed count matrix of deaths
  #        U - (time) x (group) formed count matrix of quarantined patients (confirmed but not recovered nor dead)
  #        C - (time) x (group) formed count matrix of cured patients
  #        N - (time) x (group) formed count matrix of total confirmed patients
  #        model.option - 1 (default): our proposed CFR
  #                       2: 1st model of WHO (number of deaths / number of total confirmed patients)
  #                       3: 2nd model of WHO (number of deaths / (number of cured patients + number of deaths))
  #                       4: group-wise 1st model of WHO + weighted mean for gross CFR
  #                       5: group-wise 2nd model of WHO + weighted mean for gross CFR
  # Output: CFR - a list of CFRs
  #         CFR[["info"]] - information of used option
  #         CFR[["gross"]] - a (time) x 1 vector of gross CFRs
  #                          provided for all options
  #         CFR[["group"]] - a (time) x (group) matrix of group-wise CFRs
  #                          provided for options 1, 4, and 5
  #         Additionally for option 1 only,
  #         CFR[["gross.variance"]] - estimated variances of gross CFRs
  #         CFR[["gross.lower"]] - lower bounds of 95% confidence band for gross CFRs
  #         CFR[["gross.upper"]] - upper bounds of 95% confidence band for gross CFRs
  #         CFR[["group.variance"]] - estimated variances of group-wise CFRs
  #         CFR[["group.lower"]] - lower bounds of 95% confidence band for group-wise CFRs
  #         CFR[["group.upper"]] - upper bounds of 95% confidence band for group-wise CFRs
  if (model.option == 1){
    # Option 1: proposed stratified estimator with variance / 95% bands.
    CFR <- list(info = "stratification-based CFR",
                gross = matrix(0, nrow = nrow(D), ncol = 1),
                group = matrix(0, nrow = nrow(D), ncol = ncol(D)),
                gross.variance = matrix(0, nrow = nrow(D), ncol = 1),
                gross.lower = matrix(0, nrow = nrow(D), ncol = 1),
                gross.upper = matrix(0, nrow = nrow(D), ncol = 1),
                group.variance = matrix(0, nrow = nrow(D), ncol = ncol(D)),
                group.lower = matrix(0, nrow = nrow(D), ncol = ncol(D)),
                group.upper = matrix(0, nrow = nrow(D), ncol = ncol(D)))
    # Point estimate: observed deaths plus expected deaths among the U
    # unresolved cases, assuming they die at the observed group rate D / N.
    CFR[["group"]] <- (D + U / N * D) / N
    p.d <- D / N
    p.u <- U / N
    # NOTE(review): eta and zeta implement the variance of the estimator
    # above; confirm the algebra against the accompanying paper (Kim et al.).
    eta <- p.d * (1 - p.d) + p.d * p.u * (2 - 3 * p.d + p.u - 4 * p.d * p.u) * (N - 1) / N
    zeta <- p.d * p.u * (1 - 2 * p.d + 6 * p.d * p.u - 2 * p.u) * (N - 1) / N
    CFR[["group.variance"]] <- eta / N + zeta / N^2
    # Groups with N = 0 yield 0 / 0 = NaN; define those entries as 0.
    for (i in 1:nrow(D)){
      CFR$group[i, is.na(CFR$group[i,])] <- 0
      CFR$group.variance[i, is.na(CFR$group.variance[i,])] <- 0
    }
    # Gross CFR: average of group CFRs weighted by group sizes N.
    CFR[["gross"]] <- (apply(CFR[["group"]] * N, 1, sum)) / apply(N, 1, sum)
    CFR[["gross.variance"]] <- apply(CFR[["group.variance"]] * N^2, 1, sum) / (apply(N, 1, sum)^2)
    # Normal-approximation 95% bands; lower bounds truncated at 0 because
    # fatality rates cannot be negative.
    CFR[["gross.lower"]] <- CFR[["gross"]] - 1.96 * sqrt(CFR[["gross.variance"]])
    CFR$gross.lower <- ifelse(CFR$gross.lower < 0, 0, CFR$gross.lower)
    CFR[["gross.upper"]] <- CFR[["gross"]] + 1.96 * sqrt(CFR[["gross.variance"]])
    temp <- CFR[["group"]] - 1.96 * sqrt(CFR[["group.variance"]])
    for (j in 1:ncol(temp)){
      temp[,j] <- ifelse(temp[,j] < 0, 0, temp[,j])
    }
    CFR[["group.lower"]] <- temp
    CFR[["group.upper"]] <- CFR[["group"]] + 1.96 * sqrt(CFR[["group.variance"]])
  }else if (model.option == 2){
    # Option 2: WHO model 1, gross only.
    CFR <- list(info = "WHO: D / N",
                gross = apply(D, 1, sum) / apply(N, 1, sum))
  }else if (model.option == 3){
    # Option 3: WHO model 2, gross only (NaN from 0 / 0 mapped to 0).
    CFR <- list(info = "WHO: D / (C + D)",
                gross = apply(D, 1, sum) / (apply(C, 1, sum) + apply(D, 1, sum)))
    CFR$gross[is.na(CFR$gross)] <- 0
  }else if (model.option == 4){
    # Option 4: group-wise WHO model 1; the N-weighted mean of D / N reduces
    # to the same gross value as option 2.
    CFR <- list(info = "group-wise WHO: D / N",
                gross = apply(D, 1, sum) / apply(N, 1, sum), # Same as option 2
                group = D / N)
  }else{
    # Option 5: group-wise WHO model 2 with N-weighted gross mean.
    CFR <- list(info = "group-wise WHO: D / (C + D)",
                gross = apply(D / (C + D) * N, 1, sum) / apply(N, 1, sum),
                group = D / (C + D))
    CFR$gross[is.na(CFR$gross)] <- 0
    for (i in 1:nrow(D)){
      CFR$group[i, is.na(CFR$group[i,])] <- 0
    }
  }
  return(CFR)
}
# 3. Prediction of case fatality rate (Model 2)
SCFR_prediction <- function(D, U, C, N, option = 1){
  # This function calculates the predicted values of CFRs based on our 2nd model
  # Input: D - (time) x (group) formed count matrix of deaths
  #        U - (time) x (group) formed count matrix of quarantined patients (confirmed but not recovered nor dead)
  #        C - (time) x (group) formed count matrix of cured patients
  #        N - (time) x (group) formed count matrix of total confirmed patients
  #        option - two options for q hat (estimated fatality rate for uncertain patients)
  #                 1 (default): number of deaths / number of confirmed patients
  #                 2: number of deaths / (number of cured patients + number of deaths)
  # Output: CFR.pred - a list of predicted CFRs
  #         CFR.pred[["gross"]] - a (time) x 1 vector of gross predicted CFRs
  #         CFR.pred[["group"]] - a (time) x (group) matrix of group-wise predicted CFRs
  #         CFR.pred[["gross.variance"]] - estimated variances of gross predicted CFRs
  #         CFR.pred[["gross.lower"]] - lower bounds of 95% prediction band for gross predicted CFRs
  #         CFR.pred[["gross.upper"]] - upper bounds of 95% prediction band for gross predicted CFRs
  #         CFR.pred[["group.variance"]] - estimated variances of group-wise CFRs
  #         CFR.pred[["group.lower"]] - lower bounds of 95% prediction band for group-wise predicted CFRs
  #         CFR.pred[["group.upper"]] - upper bounds of 95% prediction band for group-wise predicted CFRs
  # q.hat: fatality rate assumed for the unresolved patients (NaN from
  # 0 / 0 mapped to 0 in both variants).
  if (option == 1){
    q.hat <- D / N
    q.hat[is.na(q.hat)] <- 0
  }else if (option == 2){
    q.hat <- D / (C + D)
    q.hat[is.na(q.hat)] <- 0
  }
  CFR.pred <- list(gross = matrix(0, nrow = nrow(D), ncol = 1),
                   group = matrix(0, nrow = nrow(D), ncol = ncol(D)),
                   gross.variance = matrix(0, nrow = nrow(D), ncol = 1),
                   gross.lower = matrix(0, nrow = nrow(D), ncol = 1),
                   gross.upper = matrix(0, nrow = nrow(D), ncol = 1),
                   group.variance = matrix(0, nrow = nrow(D), ncol = ncol(D)),
                   group.lower = matrix(0, nrow = nrow(D), ncol = ncol(D)),
                   group.upper = matrix(0, nrow = nrow(D), ncol = ncol(D)))
  # Predicted group CFR: observed deaths plus U * q.hat expected future deaths.
  CFR.pred[["group"]] <- (D + U * q.hat) / N
  # Binomial-style variance of the U * q.hat term, scaled by N^2.
  CFR.pred[["group.variance"]] <- U * q.hat * (1 - q.hat) / N^2
  # Groups with N = 0 produce NaN; define those entries as 0.
  for (i in 1:nrow(D)){
    CFR.pred$group[i, is.na(CFR.pred$group[i,])] <- 0
    CFR.pred$group.variance[i, is.na(CFR.pred$group.variance[i,])] <- 0
  }
  CFR.pred[["gross"]] <- (apply(D, 1, sum) + apply((U * q.hat), 1, sum)) / apply(N, 1, sum)
  CFR.pred[["gross.variance"]] <- apply(CFR.pred[["group.variance"]] * N^2, 1, sum) / apply(N, 1, sum)^2
  # Normal-approximation 95% prediction bands; lower bounds truncated at 0
  # because fatality rates cannot be negative.
  CFR.pred[["gross.lower"]] <- CFR.pred[["gross"]] - 1.96 * sqrt(CFR.pred[["gross.variance"]])
  CFR.pred$gross.lower <- ifelse(CFR.pred$gross.lower < 0, 0, CFR.pred$gross.lower)
  CFR.pred[["gross.upper"]] <- CFR.pred[["gross"]] + 1.96 * sqrt(CFR.pred[["gross.variance"]])
  temp <- CFR.pred[["group"]] - 1.96 * sqrt(CFR.pred[["group.variance"]])
  for (j in 1:ncol(temp)){
    temp[,j] <- ifelse(temp[,j] < 0, 0, temp[,j])
  }
  CFR.pred[["group.lower"]] <- temp
  CFR.pred[["group.upper"]] <- CFR.pred[["group"]] + 1.96 * sqrt(CFR.pred[["group.variance"]])
  return(CFR.pred)
}
t_col <- function(color, alpha = 0.5) {
  # Return `color` with transparency applied.
  # color = base color name; alpha = opacity in [0, 1] (0 = transparent).
  # The result is returned invisibly, like the original.
  ch <- col2rgb(color)[, 1]
  invisible(rgb(ch[["red"]], ch[["green"]], ch[["blue"]],
                max = 255, alpha = 255 * alpha))
}
| /Code_functions_SCFR.R | permissive | sungkyujung/covid19cfr | R | false | false | 13,217 | r | #------------------------------------------------------------------------------------
# Functions for calculation of stratification-based case fatality rate
# Byungwon Kim
# Last update: July 28, 2020
#------------------------------------------------------------------------------------
## 1. Data manipulation
SCFR_data <- function(raw_data, time.var, group.var){
  # This function translates the raw data into an appropriate form for our proposed CFR calculation
  # Input: raw_data - data frame that contains time variable, group variable, several count variables
  #        time.var - the name of time variable in the raw data
  #        group.var - the name of group variable in the raw data
  #
  # Output: data.out - a list of data frames transformed from the raw data
  #         each data frame has the form of (time) x (group) matrix
  #         whose elements correspond to their counts
  #
  # NOTE(review): assumes each (time, group) pair occurs exactly once in
  # raw_data and that every column other than time.var/group.var is a numeric
  # count -- TODO confirm against the data sources that feed this function.
  Time.index <- sort(unique(raw_data[,time.var]))
  Group.index <- sort(unique(raw_data[,group.var]))
  Num.count.var <- ncol(raw_data) - 2 # number of variables except the time and group variables
  # 3-d staging array indexed as (count variable, time, group)
  data.out.array <- array(0,
                          dim = c(Num.count.var, length(Time.index), length(Group.index)))
  for (j in 1:length(Time.index)){
    # rows observed at time j, with the time column dropped
    temp <- data.frame(raw_data[raw_data[,time.var] == Time.index[j], colnames(raw_data) != time.var])
    for (k in 1:length(Group.index)){
      # the (single) row for group k at time j, with the group column dropped
      temp2 <- data.frame(temp[temp[,group.var] == Group.index[k], colnames(temp) != group.var])
      for (i in 1:Num.count.var){
        # temp2[i] is a one-cell data frame; as.numeric() unwraps it to a scalar
        data.out.array[i, j, k] <- as.numeric(temp2[i])
      }
    }
  }
  # repackage the array into one (time) x (group) data frame per count variable
  data.out <- list()
  for (i in 1:Num.count.var){
    data.out[[i]] <- data.frame(data.out.array[i,,])
    rownames(data.out[[i]]) <- Time.index
    colnames(data.out[[i]]) <- Group.index
  }
  return(data.out)
}
## 1-(1). Partial grouping
SCFR_partial_group <- function(data.table, group.ind){
  # Merge existing groups of a count table into user-defined super-groups.
  # Input: data.table - a (time) x (group) count table produced by SCFR_data();
  #                     each column of the table is one existing group
  #        group.ind - a (preferably named) list; each element holds the column
  #                    names (or positions) of data.table that make up one
  #                    sub-group, e.g. for age groups ("0s", "10s", ..., "80s"):
  #                      group.ind = list(young = c("0s", "10s", "20s"),
  #                                       middle = c("30s", "40s", "50s"),
  #                                       old = c("60s", "70s", "80s"))
  #                    Not all existing groups need to be covered, but names or
  #                    locations of columns need to be clearly stated.
  # Output: data.out - a (time) x (sub group) count table
  Name.subgroup <- names(group.ind)
  if (length(Name.subgroup) == 0){
    # if names of subgroups are not given, fall back to positional indices
    Name.subgroup <- 1:length(group.ind)
  }
  data.out <- data.frame(matrix(0, nrow = nrow(data.table), ncol = length(Name.subgroup)))
  for (i in 1:length(Name.subgroup)){
    # drop = FALSE keeps `temp` a data frame even when the sub-group consists
    # of a single column; without it the selection collapses to a plain vector,
    # ncol(temp) returns NULL, and the condition below errors out.
    temp <- data.table[, group.ind[[Name.subgroup[i]]], drop = FALSE]
    if (ncol(temp) > 1){
      data.out[,i] <- apply(temp, 1, sum)
    }else{
      data.out[,i] <- temp[, 1]
    }
  }
  rownames(data.out) <- rownames(data.table)
  colnames(data.out) <- Name.subgroup
  return(data.out)
}
## 1-(2). Estimation of cured counts in each group
SCFR_Cured <- function(N, cured_count, Matrix = TRUE){
  # This function estimates the number of cured patients in each group
  # based on the proportion of each group among total confirmed patients.
  # Input: N - a vector or matrix of confirmed patients
  #            (matrix form is (time) x (group))
  #        cured_count - total count of cured patients;
  #            a vector (one entry per time) if N is a matrix, a scalar if N
  #            is a vector
  #        Matrix - TRUE (default) = N is a matrix, FALSE = N is a vector
  # Output: C - a vector or matrix of estimated counts of cured patients
  #             (floor() keeps the estimates integer-valued)
  #
  # ** This function is required for "SCFR calculation - stratified by age
  # ** group - for the COVID-19 outbreak in South Korea" because KCDC (Korea
  # ** Center for Disease Control and prevention) does not report the
  # ** age-grouped number of cured patients.
  if (Matrix){
    # row totals of confirmed patients, replicated across columns so the
    # per-group proportions can be computed element-wise
    Total <- apply(N, 1, sum)
    Prop_group <- N / matrix(rep(Total, ncol(N)), ncol = ncol(N), byrow = FALSE)
    C <- floor(Prop_group * matrix(rep(cured_count, ncol(Prop_group)), ncol = ncol(Prop_group), byrow = FALSE))
  }else{
    Total <- sum(N)
    Prop_group <- N / Total
    C <- floor(Prop_group * cured_count)
  }
  # Explicit return: the original ended on the if/else assignment, which made
  # the result come back invisibly even though the value itself was correct.
  return(C)
}
## 2. Calculation of SCFR
SCFR_estimation <- function(D, U, C, N, model.option = 1){
  # This function calculates our proposed (stratification-based) case fatality rate (CFR)
  # Input: D - (time) x (group) formed count matrix of deaths
  #        U - (time) x (group) formed count matrix of quarantined patients (confirmed but not recovered nor dead)
  #        C - (time) x (group) formed count matrix of cured patients
  #        N - (time) x (group) formed count matrix of total confirmed patients
  #        model.option - 1 (default): our proposed CFR
  #                       2: 1st model of WHO (number of deaths / number of total confirmed patients)
  #                       3: 2nd model of WHO (number of deaths / (number of cured patients + number of deaths))
  #                       4: group-wise 1st model of WHO + weighted mean for gross CFR
  #                       5: group-wise 2nd model of WHO + weighted mean for gross CFR
  # Output: CFR - a list of CFRs
  #         CFR[["info"]] - information of used option
  #         CFR[["gross"]] - a (time) x 1 vector of gross CFRs
  #                          provided for all options
  #         CFR[["group"]] - a (time) x (group) matrix of group-wise CFRs
  #                          provided for options 1, 4, and 5
  #         Additionally for option 1 only,
  #         CFR[["gross.variance"]] - estimated variances of gross CFRs
  #         CFR[["gross.lower"]] - lower bounds of 95% confidence band for gross CFRs
  #         CFR[["gross.upper"]] - upper bounds of 95% confidence band for gross CFRs
  #         CFR[["group.variance"]] - estimated variances of group-wise CFRs
  #         CFR[["group.lower"]] - lower bounds of 95% confidence band for group-wise CFRs
  #         CFR[["group.upper"]] - upper bounds of 95% confidence band for group-wise CFRs
  if (model.option == 1){
    # Option 1: proposed stratified estimator with variance / 95% bands.
    CFR <- list(info = "stratification-based CFR",
                gross = matrix(0, nrow = nrow(D), ncol = 1),
                group = matrix(0, nrow = nrow(D), ncol = ncol(D)),
                gross.variance = matrix(0, nrow = nrow(D), ncol = 1),
                gross.lower = matrix(0, nrow = nrow(D), ncol = 1),
                gross.upper = matrix(0, nrow = nrow(D), ncol = 1),
                group.variance = matrix(0, nrow = nrow(D), ncol = ncol(D)),
                group.lower = matrix(0, nrow = nrow(D), ncol = ncol(D)),
                group.upper = matrix(0, nrow = nrow(D), ncol = ncol(D)))
    # Point estimate: observed deaths plus expected deaths among the U
    # unresolved cases, assuming they die at the observed group rate D / N.
    CFR[["group"]] <- (D + U / N * D) / N
    p.d <- D / N
    p.u <- U / N
    # NOTE(review): eta and zeta implement the variance of the estimator
    # above; confirm the algebra against the accompanying paper (Kim et al.).
    eta <- p.d * (1 - p.d) + p.d * p.u * (2 - 3 * p.d + p.u - 4 * p.d * p.u) * (N - 1) / N
    zeta <- p.d * p.u * (1 - 2 * p.d + 6 * p.d * p.u - 2 * p.u) * (N - 1) / N
    CFR[["group.variance"]] <- eta / N + zeta / N^2
    # Groups with N = 0 yield 0 / 0 = NaN; define those entries as 0.
    for (i in 1:nrow(D)){
      CFR$group[i, is.na(CFR$group[i,])] <- 0
      CFR$group.variance[i, is.na(CFR$group.variance[i,])] <- 0
    }
    # Gross CFR: average of group CFRs weighted by group sizes N.
    CFR[["gross"]] <- (apply(CFR[["group"]] * N, 1, sum)) / apply(N, 1, sum)
    CFR[["gross.variance"]] <- apply(CFR[["group.variance"]] * N^2, 1, sum) / (apply(N, 1, sum)^2)
    # Normal-approximation 95% bands; lower bounds truncated at 0 because
    # fatality rates cannot be negative.
    CFR[["gross.lower"]] <- CFR[["gross"]] - 1.96 * sqrt(CFR[["gross.variance"]])
    CFR$gross.lower <- ifelse(CFR$gross.lower < 0, 0, CFR$gross.lower)
    CFR[["gross.upper"]] <- CFR[["gross"]] + 1.96 * sqrt(CFR[["gross.variance"]])
    temp <- CFR[["group"]] - 1.96 * sqrt(CFR[["group.variance"]])
    for (j in 1:ncol(temp)){
      temp[,j] <- ifelse(temp[,j] < 0, 0, temp[,j])
    }
    CFR[["group.lower"]] <- temp
    CFR[["group.upper"]] <- CFR[["group"]] + 1.96 * sqrt(CFR[["group.variance"]])
  }else if (model.option == 2){
    # Option 2: WHO model 1, gross only.
    CFR <- list(info = "WHO: D / N",
                gross = apply(D, 1, sum) / apply(N, 1, sum))
  }else if (model.option == 3){
    # Option 3: WHO model 2, gross only (NaN from 0 / 0 mapped to 0).
    CFR <- list(info = "WHO: D / (C + D)",
                gross = apply(D, 1, sum) / (apply(C, 1, sum) + apply(D, 1, sum)))
    CFR$gross[is.na(CFR$gross)] <- 0
  }else if (model.option == 4){
    # Option 4: group-wise WHO model 1; the N-weighted mean of D / N reduces
    # to the same gross value as option 2.
    CFR <- list(info = "group-wise WHO: D / N",
                gross = apply(D, 1, sum) / apply(N, 1, sum), # Same as option 2
                group = D / N)
  }else{
    # Option 5: group-wise WHO model 2 with N-weighted gross mean.
    CFR <- list(info = "group-wise WHO: D / (C + D)",
                gross = apply(D / (C + D) * N, 1, sum) / apply(N, 1, sum),
                group = D / (C + D))
    CFR$gross[is.na(CFR$gross)] <- 0
    for (i in 1:nrow(D)){
      CFR$group[i, is.na(CFR$group[i,])] <- 0
    }
  }
  return(CFR)
}
# 3. Prediction of case fatality rate (Model 2)
SCFR_prediction <- function(D, U, C, N, option = 1){
  # This function calculates the predicted values of CFRs based on our 2nd model
  # Input: D - (time) x (group) formed count matrix of deaths
  #        U - (time) x (group) formed count matrix of quarantined patients (confirmed but not recovered nor dead)
  #        C - (time) x (group) formed count matrix of cured patients
  #        N - (time) x (group) formed count matrix of total confirmed patients
  #        option - two options for q hat (estimated fatality rate for uncertain patients)
  #                 1 (default): number of deaths / number of confirmed patients
  #                 2: number of deaths / (number of cured patients + number of deaths)
  # Output: CFR.pred - a list of predicted CFRs
  #         CFR.pred[["gross"]] - a (time) x 1 vector of gross predicted CFRs
  #         CFR.pred[["group"]] - a (time) x (group) matrix of group-wise predicted CFRs
  #         CFR.pred[["gross.variance"]] - estimated variances of gross predicted CFRs
  #         CFR.pred[["gross.lower"]] - lower bounds of 95% prediction band for gross predicted CFRs
  #         CFR.pred[["gross.upper"]] - upper bounds of 95% prediction band for gross predicted CFRs
  #         CFR.pred[["group.variance"]] - estimated variances of group-wise CFRs
  #         CFR.pred[["group.lower"]] - lower bounds of 95% prediction band for group-wise predicted CFRs
  #         CFR.pred[["group.upper"]] - upper bounds of 95% prediction band for group-wise predicted CFRs
  # q.hat: fatality rate assumed for the unresolved patients (NaN from
  # 0 / 0 mapped to 0 in both variants).
  if (option == 1){
    q.hat <- D / N
    q.hat[is.na(q.hat)] <- 0
  }else if (option == 2){
    q.hat <- D / (C + D)
    q.hat[is.na(q.hat)] <- 0
  }
  CFR.pred <- list(gross = matrix(0, nrow = nrow(D), ncol = 1),
                   group = matrix(0, nrow = nrow(D), ncol = ncol(D)),
                   gross.variance = matrix(0, nrow = nrow(D), ncol = 1),
                   gross.lower = matrix(0, nrow = nrow(D), ncol = 1),
                   gross.upper = matrix(0, nrow = nrow(D), ncol = 1),
                   group.variance = matrix(0, nrow = nrow(D), ncol = ncol(D)),
                   group.lower = matrix(0, nrow = nrow(D), ncol = ncol(D)),
                   group.upper = matrix(0, nrow = nrow(D), ncol = ncol(D)))
  # Predicted group CFR: observed deaths plus U * q.hat expected future deaths.
  CFR.pred[["group"]] <- (D + U * q.hat) / N
  # Binomial-style variance of the U * q.hat term, scaled by N^2.
  CFR.pred[["group.variance"]] <- U * q.hat * (1 - q.hat) / N^2
  # Groups with N = 0 produce NaN; define those entries as 0.
  for (i in 1:nrow(D)){
    CFR.pred$group[i, is.na(CFR.pred$group[i,])] <- 0
    CFR.pred$group.variance[i, is.na(CFR.pred$group.variance[i,])] <- 0
  }
  CFR.pred[["gross"]] <- (apply(D, 1, sum) + apply((U * q.hat), 1, sum)) / apply(N, 1, sum)
  CFR.pred[["gross.variance"]] <- apply(CFR.pred[["group.variance"]] * N^2, 1, sum) / apply(N, 1, sum)^2
  # Normal-approximation 95% prediction bands; lower bounds truncated at 0
  # because fatality rates cannot be negative.
  CFR.pred[["gross.lower"]] <- CFR.pred[["gross"]] - 1.96 * sqrt(CFR.pred[["gross.variance"]])
  CFR.pred$gross.lower <- ifelse(CFR.pred$gross.lower < 0, 0, CFR.pred$gross.lower)
  CFR.pred[["gross.upper"]] <- CFR.pred[["gross"]] + 1.96 * sqrt(CFR.pred[["gross.variance"]])
  temp <- CFR.pred[["group"]] - 1.96 * sqrt(CFR.pred[["group.variance"]])
  for (j in 1:ncol(temp)){
    temp[,j] <- ifelse(temp[,j] < 0, 0, temp[,j])
  }
  CFR.pred[["group.lower"]] <- temp
  CFR.pred[["group.upper"]] <- CFR.pred[["group"]] + 1.96 * sqrt(CFR.pred[["group.variance"]])
  return(CFR.pred)
}
t_col <- function(color, alpha = 0.5) {
  # Return `color` with transparency applied.
  # color = base color name; alpha = opacity in [0, 1] (0 = transparent).
  # The result is returned invisibly, like the original.
  ch <- col2rgb(color)[, 1]
  invisible(rgb(ch[["red"]], ch[["green"]], ch[["blue"]],
                max = 255, alpha = 255 * alpha))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scg.R
\name{scg-method}
\alias{scg-method}
\title{Spectral Coarse Graining}
\description{
Functions to perform the Spectral Coarse Graining (SCG) of matrices and
graphs.
}
\section{Introduction}{
The SCG functions provide a framework, called
Spectral Coarse Graining (SCG), for reducing large graphs while preserving
their \emph{spectral-related features}, that is, features closely related
to the eigenvalues and eigenvectors of a graph matrix (which for now can
be the adjacency, the stochastic, or the Laplacian matrix).
Common examples of such features comprise the first-passage-time of random
walkers on Markovian graphs, thermodynamic properties of lattice models in
statistical physics (e.g. Ising model), and the epidemic threshold of
epidemic network models (SIR and SIS models).
SCG differs from traditional clustering schemes by producing a
\emph{coarse-grained graph} (not just a partition of the vertices),
representative of the original one. As shown in [1], Principal Component
Analysis can be viewed as a particular SCG, called \emph{exact SCG}, where
the matrix to be coarse-grained is the covariance matrix of some data set.
SCG should be of interest to practitioners of various fields dealing with
problems where matrix eigenpairs play an important role, as for instance is
the case of dynamical processes on networks.
}
\author{
David Morton de Lachapelle,
\url{http://people.epfl.ch/david.morton}.
}
\references{
D. Morton de Lachapelle, D. Gfeller, and P. De Los Rios,
Shrinking Matrices while Preserving their Eigenpairs with Application to the
Spectral Coarse Graining of Graphs. Submitted to \emph{SIAM Journal on
Matrix Analysis and Applications}, 2008.
\url{http://people.epfl.ch/david.morton}
D. Gfeller, and P. De Los Rios, Spectral Coarse Graining and Synchronization
in Oscillator Networks. \emph{Physical Review Letters}, \bold{100}(17),
2008. \url{http://arxiv.org/abs/0708.2055}
D. Gfeller, and P. De Los Rios, Spectral Coarse Graining of Complex
Networks, \emph{Physical Review Letters}, \bold{99}(3), 2007.
\url{http://arxiv.org/abs/0706.0812}
}
\keyword{graphs}
| /man/scg-method.Rd | no_license | Ruchika8/Dgraph | R | false | true | 2,187 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scg.R
\name{scg-method}
\alias{scg-method}
\title{Spectral Coarse Graining}
\description{
Functions to perform the Spectral Coarse Graining (SCG) of matrices and
graphs.
}
\section{Introduction}{
The SCG functions provide a framework, called
Spectral Coarse Graining (SCG), for reducing large graphs while preserving
their \emph{spectral-related features}, that is, features closely related
to the eigenvalues and eigenvectors of a graph matrix (which for now can
be the adjacency, the stochastic, or the Laplacian matrix).
Common examples of such features comprise the first-passage-time of random
walkers on Markovian graphs, thermodynamic properties of lattice models in
statistical physics (e.g. Ising model), and the epidemic threshold of
epidemic network models (SIR and SIS models).
SCG differs from traditional clustering schemes by producing a
\emph{coarse-grained graph} (not just a partition of the vertices),
representative of the original one. As shown in [1], Principal Component
Analysis can be viewed as a particular SCG, called \emph{exact SCG}, where
the matrix to be coarse-grained is the covariance matrix of some data set.
SCG should be of interest to practitioners of various fields dealing with
problems where matrix eigenpairs play an important role, as for instance is
the case of dynamical processes on networks.
}
\author{
David Morton de Lachapelle,
\url{http://people.epfl.ch/david.morton}.
}
\references{
D. Morton de Lachapelle, D. Gfeller, and P. De Los Rios,
Shrinking Matrices while Preserving their Eigenpairs with Application to the
Spectral Coarse Graining of Graphs. Submitted to \emph{SIAM Journal on
Matrix Analysis and Applications}, 2008.
\url{http://people.epfl.ch/david.morton}
D. Gfeller, and P. De Los Rios, Spectral Coarse Graining and Synchronization
in Oscillator Networks. \emph{Physical Review Letters}, \bold{100}(17),
2008. \url{http://arxiv.org/abs/0708.2055}
D. Gfeller, and P. De Los Rios, Spectral Coarse Graining of Complex
Networks, \emph{Physical Review Letters}, \bold{99}(3), 2007.
\url{http://arxiv.org/abs/0706.0812}
}
\keyword{graphs}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 2758
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 2594
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 2594
c
c Input Parameter (command line, file):
c input filename QBFLIB/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-MIF02-c01.blif-biu.inv.prop.bb-bmc.conf05.01X-QBF.BB1-Zi.BB2-01X.BB3-Zi.with-IOC.unfold-002.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 2484
c no.of clauses 2758
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 2594
c
c QBFLIB/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-MIF02-c01.blif-biu.inv.prop.bb-bmc.conf05.01X-QBF.BB1-Zi.BB2-01X.BB3-Zi.with-IOC.unfold-002.qdimacs 2484 2758 E1 [995 997 999 1001 1003 1005 1007 1009 1011 1013 1015 1017 1019 1021 1023 1025 1036 1037 1038 1039 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1064 1065 1068 1069 1072 1073 1076 1077 1081 1083 1085 1087 1089 1091 1093 1095 1097 1529 1531 1533 1535 1537 1539 1541 1543 1545 1547 1549 1551 1553 1555 1557 1559 1570 1571 1572 1573 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1598 1599 1602 1603 1606 1607 1610 1611 1615 1617 1619 1621 1623 1625 1627 1629 1631] 0 20 1059 2594 RED
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-MIF02-c01.blif-biu.inv.prop.bb-bmc.conf05.01X-QBF.BB1-Zi.BB2-01X.BB3-Zi.with-IOC.unfold-002/biu.mv.xl_ao.bb-b003-p020-MIF02-c01.blif-biu.inv.prop.bb-bmc.conf05.01X-QBF.BB1-Zi.BB2-01X.BB3-Zi.with-IOC.unfold-002.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 1,503 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 2758
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 2594
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 2594
c
c Input Parameter (command line, file):
c input filename QBFLIB/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-MIF02-c01.blif-biu.inv.prop.bb-bmc.conf05.01X-QBF.BB1-Zi.BB2-01X.BB3-Zi.with-IOC.unfold-002.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 2484
c no.of clauses 2758
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 2594
c
c QBFLIB/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-MIF02-c01.blif-biu.inv.prop.bb-bmc.conf05.01X-QBF.BB1-Zi.BB2-01X.BB3-Zi.with-IOC.unfold-002.qdimacs 2484 2758 E1 [995 997 999 1001 1003 1005 1007 1009 1011 1013 1015 1017 1019 1021 1023 1025 1036 1037 1038 1039 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1064 1065 1068 1069 1072 1073 1076 1077 1081 1083 1085 1087 1089 1091 1093 1095 1097 1529 1531 1533 1535 1537 1539 1541 1543 1545 1547 1549 1551 1553 1555 1557 1559 1570 1571 1572 1573 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1598 1599 1602 1603 1606 1607 1610 1611 1615 1617 1619 1621 1623 1625 1627 1629 1631] 0 20 1059 2594 RED
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/help_cost_analysis_functions.R
\name{generate_wt_vol_units}
\alias{generate_wt_vol_units}
\title{Function to get the weight and volume units}
\usage{
generate_wt_vol_units()
}
\value{
weight and volume units
}
\description{
Function to get the weight and volume units
}
\examples{
ans <- generate_wt_vol_units()
}
| /man/generate_wt_vol_units.Rd | no_license | sheejamk/packDAMipd | R | false | true | 389 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/help_cost_analysis_functions.R
\name{generate_wt_vol_units}
\alias{generate_wt_vol_units}
\title{Function to get the weight and volume units}
\usage{
generate_wt_vol_units()
}
\value{
weight and vol units
}
\description{
Function to get the weight and volume units
}
\examples{
ans <- generate_wt_vol_units()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotFit.R
\name{plotFit}
\alias{plotFit}
\title{Plot the fit for standards and the samples}
\usage{
plotFit(std, xvar, yvar, dilvar, fitpar = NULL, FUNmod = NULL,
iout = NULL, bg = NULL, vsmp = NULL, smpflag = NULL,
trimval = NULL, trimext = NULL, stdcol = c("firebrick3",
"darkslategray"), rugcol = c("cadetblue", "purple", "firebrick2"), ...)
}
\arguments{
\item{std}{matrix or data frame with standards for fitting.}
\item{xvar}{character string naming the variable used as \code{x} when
fitting a standard curve. If \code{NULL}, the first column of \code{std} is
assumed to be the \code{x} variable.}
\item{yvar}{character string naming the variable used as \code{y} when
fitting a standard curve. If \code{NULL}, the second column of \code{std} is
assumed to be the \code{y} variable.}
\item{dilvar}{character string to check if there is a dilution variable in
\code{std}. If found, used for x-axis labels only.}
\item{fitpar}{values of function parameters.}
\item{FUNmod}{model function.}
\item{iout}{indices of removed standard points.}
\item{bg}{background values.}
\item{vsmp}{sample values.}
\item{smpflag}{character vector, flags for each sample.}
\item{trimval}{for final results, the values at which the samples are
trimmed.}
\item{trimext}{integer vector of length two indicating if the values are
trimmed at the extremum (lower and upper).}
\item{stdcol}{vector of two colors for the standard points and the fit on the
plot.}
\item{rugcol}{vector of three colors for the rugplot, which indicates sample
values (inside the bounds, between the bounds and extrema, and beyond
extrema).}
\item{...}{further graphical parameters.}
}
\description{
Produces a plot that includes points for standards, proposed fit, removed
outliers, bounds for "flat" portions of the curve, and values for samples and
for the background.
}
\details{
to be added
}
| /man/plotFit.Rd | no_license | EPPIcenter/flexfitTemp | R | false | true | 1,910 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotFit.R
\name{plotFit}
\alias{plotFit}
\title{Plot the fit for standards and the samples}
\usage{
plotFit(std, xvar, yvar, dilvar, fitpar = NULL, FUNmod = NULL,
iout = NULL, bg = NULL, vsmp = NULL, smpflag = NULL,
trimval = NULL, trimext = NULL, stdcol = c("firebrick3",
"darkslategray"), rugcol = c("cadetblue", "purple", "firebrick2"), ...)
}
\arguments{
\item{std}{matrix or data frame with standards for fitting.}
\item{xvar}{character string naming the variable used as \code{x} when
fitting a standard curve. If \code{NULL}, the first column of \code{std} is
assumed to be the \code{x} variable.}
\item{yvar}{character string naming the variable used as \code{y} when
fitting a standard curve. If \code{NULL}, the second column of \code{std} is
assumed to be the \code{y} variable.}
\item{dilvar}{character string to check if there is a dilution variable in
\code{std}. If found, used for x-axis labels only.}
\item{fitpar}{values of function parameters.}
\item{FUNmod}{model function.}
\item{iout}{indices of removed standard points.}
\item{bg}{background values.}
\item{vsmp}{sample values.}
\item{smpflag}{character vector, flags for each sample.}
\item{trimval}{for final results, the values at which the samples are
trimmed.}
\item{trimext}{integer vector of length two indicating if the values are
trimmed at the extremum (lower and upper).}
\item{stdcol}{vector of two colors for the standard points and the fit on the
plot.}
\item{rugcol}{vector of three colors for the rugplot, which indicates sample
values (inside the bounds, between the bounds and extrema, and beyond
extrema).}
\item{...}{further graphical parameters.}
}
\description{
Produces a plot that includes points for standards, proposed fit, removed
outliers, bounds for "flat" portions of the curve, and values for samples and
for the background.
}
\details{
to be added
}
|
## Figure-generation script for the SCEPTIC clock-task manuscript: plots the
## Gaussian temporal basis, learned value functions, task contingencies, and
## Bayesian model comparison (BMC) results. This section loads the plotting
## dependencies and registers system fonts so custom typefaces render in the
## PDF devices opened below.
##just a quick plot of the basis
library(ggplot2)
library(dplyr)
library(cowplot)
library(extrafont)
#font_import()  # one-time font import; only needed on a fresh machine
fonts()     # lists fonts known to extrafont (console side effect only)
loadfonts() # registers imported fonts with the pdf() device used below
#extrafont::choose_font()
# Construct a Gaussian temporal basis over the trial interval (0-4000 ms).
# Centers are spaced evenly from just below t = 0 to just above t = 4000 so
# the edge functions are not clipped; each basis function is rescaled to
# peak at exactly 1.
nbasis <- 24           # number of Gaussian basis functions
tvec <- 0:4000         # time grid in milliseconds
margin_offset <- .10   # proportion of the time range to pad on each side
# convert margin_offset from a proportion into the time scale of tvec
margin_offset <- (max(tvec) - min(tvec)) * margin_offset
basis_overlap <- 1.52  # divisor relating center spacing to SD (#50% overlap)
tmin <- min(tvec) - margin_offset
tmax <- max(tvec) + margin_offset
# evenly spaced basis centers spanning [tmin, tmax]
centers <- seq(tmin, tmax, by = (tmax - tmin) / (nbasis - 1))
# SD of the basis functions themselves, derived from the center spacing
sig <- (centers[2] - centers[1]) / basis_overlap
# columns of gaussmat are the basis functions evaluated on tvec, each
# renormalized so its maximum equals 1
gaussmat <- vapply(centers, function(mu) {
  dvec <- dnorm(x = tvec, mean = mu, sd = sig)
  dvec / max(dvec)
}, numeric(length(tvec)))
# Initial (early-learning) weights on the basis: random low values with the
# edge elements zeroed so the value function vanishes outside the interval.
# pdf("gauss_basis_24basis.pdf", width=6, height=5)
# matplot(gaussmat, type="l", lty=1, lwd=5, col=colorRampPalette(c("blue", "red"))(13), ann=FALSE, xaxt='n', yaxt='n', bty='n')
# dev.off()
set.seed(1001)  # reproducible random weights
weights <- runif(nbasis, min=0, max=8)
weights[1] <- 0   # zero the first edge element
weights[24] <- 0  # zero the last edge element
weights[23] <- 1
# initial values for basis elements: scale each basis function by its weight;
# result is time x basis (seq_len avoids the 1:nrow() zero-length trap)
v <- t(sapply(seq_len(nrow(gaussmat)), function(r) {
gaussmat[r,]*weights
}))
# weight bars for plotting, restricted to the visible 0-4 s window
weights_df <- data.frame(time=centers/1000, weight=weights) %>% filter(time > 0 & time <= 4)
# long format for ggplot: one row per (time, basis) pair, time in seconds
vm <- reshape2::melt(v, varnames=c("time", "basis")) %>% mutate(basis=factor(basis), time=time/1000) #seconds
# integrated value function: sum over basis elements at each time point
vfunc <- rowSums(v) %>% as.data.frame() %>% setNames("value") %>% mutate(time=seq_along(value)/1000)
# Panel 1 ("Early in learning"): basis elements (red), weight bars (grey),
# and the integrated expected-value curve (black), annotated with a reward
# event and a "High entropy" label. g1 is reused later in the multi-panel
# figures, so the object must remain in the workspace.
plot_font <- "Tahoma"
pdf("g1.pdf", width=10, height=7)
g1 <- ggplot(vm, aes(x=time, y=value, group=basis)) +
geom_rect(data=weights_df, aes(xmin=time-.04, xmax=time+.04, ymin=0, ymax=weight, y=NULL, group=NULL, color=NULL),
fill="grey70", show.legend = FALSE) + #weights
geom_line(show.legend = FALSE, size=1.2, color="red") + #basis elements
geom_line(data=vfunc, aes(group=NULL), color="black", size=1.5) + #integrated value
cowplot::theme_cowplot(font_size = 24) + ylab("Expected Value (points)") +
xlab("Time (seconds)") +
annotate(geom="text", x=1.21, y=38.4, label="Reward", size=9, hjust=1, family=plot_font) +
annotate(geom="text", x=1.25, y=22, label="RPE+", size=9, hjust=1, family=plot_font) +
annotate(geom="text", x=4, y=45, label="High\nentropy", size=12, hjust=1, family=plot_font, lineheight = 0.8) +
annotate(geom="point", x=1.4, y=38, size=9, color="darkblue") +
annotate(geom="segment", x=1.4, xend=1.4, y=8.5, yend=35.0, size=1.5, color="gray90", lineend="butt") +
theme(text=element_text(family=plot_font), axis.title.x = element_text(margin=margin(t=15)),
axis.title.y = element_text(margin=margin(r=15))) + ylim(c(0,60)) +
theme(plot.title = element_text(face = "plain", size=32))
plot(g1)
dev.off()
# First outcome: bump the weights of basis elements near the chosen response
# time (indices 8-10), then recompute basis values and the integrated curve.
#first choice
weights[9] <- 22
weights[8] <- 11
weights[10] <- 12
# updated values for basis elements (seq_len is safe where 1:nrow() would
# misbehave on a zero-row matrix)
v <- t(sapply(seq_len(nrow(gaussmat)), function(r) {
gaussmat[r,]*weights
}))
weights_df <- data.frame(time=centers/1000, weight=weights) %>% filter(time > 0 & time <= 4)
vm <- reshape2::melt(v, varnames=c("time", "basis")) %>% mutate(basis=factor(basis), time=time/1000) #seconds
# integrated value function (sum over basis elements at each time point)
vfunc <- rowSums(v) %>% as.data.frame() %>% setNames("value") %>% mutate(time=seq_along(value)/1000)
#dev.new()
#plot(vfunc, type="l")
# Second outcome: shift weight toward later basis elements; the *_next
# objects hold the post-update state so g2 can overlay old and new curves.
#second choice
weights[16] <- 21
weights[17] <- 31
weights[18] <- 11
#updated values for basis elements
v_next <- t(sapply(seq_len(nrow(gaussmat)), function(r) {
gaussmat[r,]*weights
}))
weights_df_next <- data.frame(time=centers/1000, weight=weights) %>% filter(time > 0 & time <= 4)
# BUG FIX: this previously melted the stale pre-update matrix `v` (copy-paste
# slip); it now melts `v_next` as the _next naming implies. (vm_next is not
# referenced downstream in this file, so the fix is inert for the figures.)
vm_next <- reshape2::melt(v_next, varnames=c("time", "basis")) %>% mutate(basis=factor(basis), time=time/1000) #seconds
vfunc_next <- rowSums(v_next) %>% as.data.frame() %>% setNames("value") %>% mutate(time=seq_along(value)/1000)
# Panel 2 ("Change in RT Vmax"): pre-update weights/basis (vm, weights_df)
# with the post-update integrated curve (vfunc_next, dashed grey) overlaid on
# the pre-update curve (black); annotations mark the reward and the shift in
# the response time of maximal value.
pdf("g2.pdf", width=10, height=6)
g2 <- ggplot(vm, aes(x=time, y=value, group=basis)) +
geom_rect(data=weights_df, aes(xmin=time-.04, xmax=time+.04, ymin=0, ymax=weight, y=NULL, group=NULL, color=NULL),
fill="grey70", show.legend = FALSE) + #weights
annotate(geom="segment", x=2.9, xend=2.9, y=5, yend=55, size=1.5, color="gray90", lineend="butt") +
geom_line(show.legend = FALSE, size=1.2, color="red") + #basis elements
geom_line(data=vfunc_next, aes(group=NULL), color="grey50", size=1.5, lty=6) + #integrated after update
geom_line(data=vfunc, aes(group=NULL), color="black", size=1.5) + #integrated value
cowplot::theme_cowplot(font_size = 24) + ylab("Expected Value (points)") +
xlab("Time (seconds)") + annotate(geom="point", x=2.9, y=58, size=9, color="darkblue") +
annotate(geom="text", x=2.7, y=58.5, label="Reward", size=9, hjust=1, family=plot_font) +
#annotate(geom="text", x=3, y=30, label="PE+", size=9, hjust=1, family=plot_font) +
annotate(geom="text", x=1.3, y=35, label=expression(RT[Vmax]), size=9, hjust=0.5, family=plot_font, parse=TRUE) +
annotate(geom="segment", x=1.24, xend=2.75, y=41.5, yend=41.5, size=1.5, color="gray50", lineend="round",
arrow = arrow(length = unit(0.5,"cm"))) +
annotate(geom="text", x=1.9, y=45.5, label=expression(Delta*RT[Vmax]), size=9, hjust=0.5, family=plot_font, color="gray50", parse=TRUE) +
theme(text=element_text(family=plot_font), axis.title.x = element_text(margin=margin(t=15)),
axis.title.y = element_text(margin=margin(r=15))) + ylim(c(0,60)) +
theme(plot.title = element_text(face = "plain", size=32))
plot(g2)
dev.off()
# two-panel composite of panels 1 and 2
pdf("two_panel.pdf", width=12, height=5)
plot_grid(g1 + ggtitle("Early in learning"), g2 + ggtitle("Change in RT Vmax") + ylab(""), nrow=1)
dev.off()
# Late in learning: weights concentrate around a single response time
# (indices 13-17), producing a sharply peaked, low-entropy value function.
#nth choice
weights <- runif(nbasis, min=0, max=4)
weights[1:3] <- 0    # zero the leading edge elements
weights[22:24] <- 0  # zero the trailing edge elements
weights[13] <- 12
weights[14] <- 35
weights[15] <- 26
weights[16] <- 9
weights[17] <- 6
# updated values for basis elements (seq_len avoids the 1:nrow() trap)
v <- t(sapply(seq_len(nrow(gaussmat)), function(r) {
gaussmat[r,]*weights
}))
weights_df <- data.frame(time=centers/1000, weight=weights) %>% filter(time > 0 & time <= 4)
vm <- reshape2::melt(v, varnames=c("time", "basis")) %>% mutate(basis=factor(basis), time=time/1000) #seconds
# integrated value function (sum over basis elements at each time point)
vfunc <- rowSums(v) %>% as.data.frame() %>% setNames("value") %>% mutate(time=seq_along(value)/1000)
# Panel 3 ("Late in learning"): sharply peaked value function with a
# "Low entropy" label; several unused annotations are retained (commented)
# from an earlier draft.
pdf("g3.pdf", width=10, height=6)
g3 <- ggplot(vm, aes(x=time, y=value, group=basis)) +
geom_rect(data=weights_df, aes(xmin=time-.04, xmax=time+.04, ymin=0, ymax=weight, y=NULL, group=NULL, color=NULL),
fill="grey70", show.legend = FALSE) + #weights
geom_line(show.legend = FALSE, size=1.2, color="red") + #basis elements
geom_line(data=vfunc, aes(group=NULL), color="black", size=1.5) + #integrated value
cowplot::theme_cowplot(font_size = 24) + ylab("Expected Value (points)") +
xlab("Time (seconds)") +
#annotate(geom="point", x=3.10, y=58, size=9, color="darkblue") +
#annotate(geom="text", x=2.95, y=58.5, label="Reward", size=9, hjust=1, family=plot_font) +
#annotate(geom="text", x=3, y=30, label="PE+", size=9, hjust=1, family=plot_font) +
annotate(geom="text", x=2.5, y=54, label=expression(RT[Vmax]), size=9, hjust=0.5, family=plot_font, parse=TRUE) +
annotate(geom="text", x=0.1, y=45, label="Low\nentropy", size=12, hjust=0, family=plot_font, lineheight = 0.8) +
#annotate(geom="segment", x=3.10, xend=3.10, y=5, yend=55, size=1.5, color="gray60", lineend="butt") +
#annotate(geom="segment", x=1.24, xend=2.9, y=42, yend=42, size=1.5, color="gray60", lineend="round",
# arrow = arrow(length = unit(0.5,"cm"))) +
theme(text=element_text(family=plot_font), axis.title.x = element_text(margin=margin(t=15)),
axis.title.y = element_text(margin=margin(r=15))) + ylim(c(0,60)) +
theme(plot.title = element_text(face = "plain", size=32))
plot(g3)
dev.off()
# Assemble the three panels into one figure. ggsave writes the PDF directly
# (useDingbats=FALSE keeps points as real glyphs) and embed_fonts embeds the
# Tahoma glyphs so the file renders on machines without the font.
#pdf("three_panel.pdf", width=18, height=6)
gp <- plot_grid(g1 + ggtitle("Early in learning"), g2 + ggtitle("Change in RT Vmax") + ylab(""),
g3 + ggtitle("Late in learning") + ylab(""), nrow=1)
# plot_grid(g1, g2 + ylab(""), g3 + ylab(""), label_size = 18, vjust=1, hjust=0.1,
# nrow=1, labels=c("a) Early in learning", "b) Change in RT Vmax", "c) Late in learning"))
#dev.off()
ggsave("three_panel.pdf", gp, width=18, height=6, useDingbats=FALSE)
embed_fonts('three_panel.pdf', outfile='three_panel_embed.pdf')
#dev.new()
#plot(vfunc, type="l")
# Superseded commented-out draft of the g2/g3 panel, retained for reference.
# pdf("g3.pdf", width=10, height=6)
#
# g2 <- ggplot(vm, aes(x=time, y=value, group=basis)) +
# geom_rect(data=weights_df, aes(xmin=time-.04, xmax=time+.04, ymin=0, ymax=weight, y=NULL, group=NULL, color=NULL),
# fill="grey70", show.legend = FALSE) + #weights
# geom_line(show.legend = FALSE, size=1.2, color="red") + #basis elements
# geom_line(data=vfunc, aes(group=NULL), color="black", size=1.5) + #integrated value
# cowplot::theme_cowplot(font_size = 24) + ylab("Expected Value (points)") +
# xlab("Time (seconds)") + annotate(geom="point", x=3.1, y=58, size=9, color="darkblue") +
# annotate(geom="text", x=2.90, y=58.5, label="Reward", size=9, hjust=1, family=plot_font) +
# #annotate(geom="text", x=2.95, y=30, label="PE+", size=9, hjust=1, family=plot_font) +
# annotate(geom="text", x=1.3, y=35, label="RT Vmax", size=9, hjust=0.5, family=plot_font) +
# annotate(geom="segment", x=3.1, xend=3.1, y=5, yend=55, size=1.5, color="gray60", lineend="butt") +
# theme(text=element_text(family=plot_font), axis.title.x = element_text(margin=margin(t=15)),
# axis.title.y = element_text(margin=margin(r=15))) + ylim(c(0,60))
# plot(g2)
# dev.off()
###OLD CODE: earlier 13-element basis (fixed SD = 300 ms). Retained because
### the weights/centers computed here feed the learned-EV, uncertainty, and
### weights figures below. Note this overwrites centers/gaussmat from above.
ntimesteps <- 13
maxt <- 4000
# length.out spelled out to avoid partial matching of the `length` argument
centers <- seq(0, 4000, length.out = ntimesteps)
# basis matrix: (maxt+1) x ntimesteps; the inner parameter is named `mu`
# (previously `v`, which shadowed the global matrix `v` defined earlier)
gaussmat <- sapply(centers, function(mu) {
dnorm(x=0:maxt, mean=mu, sd=300)
})
#matplot(gaussmat, type="l", lty=1, lwd=5, col=colors(distinct=TRUE))
pdf("gauss_basis.pdf", width=6, height=5)
matplot(gaussmat, type="l", lty=1, lwd=5, col=colorRampPalette(c("blue", "red"))(13), ann=FALSE, xaxt='n', yaxt='n', bty='n')
dev.off()
##test out weights and centers to show example
weights <- 100*c(0.0776, 7.4801, 2.0792, 1.4008, 2.0000, 1.0000, 3.0000, 9.0000, 25.0000, 22.0024, 30.2286, 12.0326, 30.2332)
# learned EV at each time point: weighted sum over basis functions
# (seq_len is safe where 1:nrow() would misbehave on empty input)
v <- 5* apply( sapply(seq_len(nrow(gaussmat)), function(r) {
gaussmat[r,]*weights
}), 2, sum)
pdf("learned_ev.pdf", width=6, height=5)
df <- data.frame(time=seq(0, 4, length.out=4001), ev=v)
# print() is required here: a bare ggplot call only auto-prints at the
# interactive top level, so under source()/Rscript this PDF came out blank.
print(ggplot(df, aes(x=time, y=ev)) + geom_line(size=1.5) + ylab("Expected value (learned)\n") + xlab("\nTime (s)") + theme_bw(base_size=24))
dev.off()
## Uncertainty weights: negative weights yield a curve of experienced
## uncertainty over time, mirroring the learned-EV construction above.
uweights <- -10*c(0.3776, 1.4801, 0.9792, 1.4008, 2.0000, 1.0000, 3.0000, 5.0000, 7.0000, 8.0024, 10.2286, 3.0326, 1.2332)
# weighted sum over basis functions at each time point (seq_len, not 1:nrow())
u <- 5* apply( sapply(seq_len(nrow(gaussmat)), function(r) {
gaussmat[r,]*uweights
}), 2, sum)
library(ggplot2)
pdf("learned_uncertainty.pdf", width=6, height=5)
df <- data.frame(time=seq(0, 4, length.out=4001), u=u)
# print() is required: a bare ggplot call only auto-prints interactively, so
# this PDF was blank when the script was sourced.
print(ggplot(df, aes(x=time, y=u)) + geom_line(size=1.5) + ylab("Uncertainty (experienced)\n") + xlab("\nTime (s)") + theme_bw(base_size=24))
dev.off()
# bar plot of the raw basis weights using base graphics
pdf("weights.pdf", width=6, height=3.5)
par(mar=c(1, 5, 2, 1) + 0.1)
plot(c(0, 4000), c(0, max(weights) + 50), type = "n", xlab = "", ylab = "Basis weight (AU)", yaxs="i",
cex.lab=2, cex.axis=2, cex.main=2, cex.sub=2, bty='n', xaxt='n')
rect(centers-50, 0, centers+50, weights, col="gray")
#axis(1, at=c(0,4), labels=c("0s", "4s"))#, pos=, lty=, col=, las=, tck=, ...)
dev.off()
## True underlying IEV contingency: getMagFreq() (defined in the sourced
## getrew.R) returns magnitude and frequency over 0-4000 ms.
# NOTE(review): setwd() inside a script is fragile; prefer absolute paths or
# a project-root helper. Also, fm is indexed positionally here ([1:4001] and
# [4002:8002]) but accessed as fm$Mag/fm$Freq further below -- confirm
# getMagFreq's return type against getrew.R.
##true underlying IEV contingency.
setwd("~/Data_Analysis/clock_analysis/td_model")
source("~/Data_Analysis/clock_analysis/td_model/getrew.R")
fm <- getMagFreq(0:4000, "IEV")
f <- fm[4002:8002]  # second half of fm (presumably frequency) -- see NOTE above
m <- fm[1:4001]     # first half of fm (presumably magnitude)
ev <- f*m           # expected value = frequency x magnitude
library(ggplot2)
pdf("true_ev.pdf", width=6, height=5)
df <- data.frame(time=seq(0,4,length=4001), ev=ev)
# NOTE(review): bare ggplot calls only auto-print interactively; wrap in
# print() if this script is run via source()/Rscript, else the PDF is blank.
ggplot(df, aes(x=time, y=ev)) + geom_line(size=1.5) + ylab("Expected value\n") + xlab("\nTime (s)") + theme_bw(base_size=24)
dev.off()
##real curve (same data, smaller device, ms axis label)
pdf("iev_func.pdf", width=5, height=4)
ggplot(df, aes(x=time, y=ev)) + geom_line(size=1.5) + ylab("Expected value\n") + xlab("\nTime (ms)") + theme_bw(base_size=24)
dev.off()
# Build a long data frame of magnitude, frequency, and EV for each task
# contingency, then plot them. rbind-in-a-loop is acceptable here (4 rows of
# growth only).
df <- c()
#take CEVR out since CEV and CEVR are identical wrt EV (and we are not showing prob + freq)
# NOTE(review): despite the comment above, CEVR *is* included in the loop
# below (and EV-offset for plotting further down); the exclusion was
# evidently reverted.
for (cont in c("IEV", "DEV", "CEV", "CEVR")) { #, "CEVR"
fm <- getMagFreq(0:4000, cont)
#if (cont=="CEV") { cont="CEV/CEVR" } #for plot name
df <- rbind(df, data.frame(contingency=cont, time=0:4000, mag=fm$Mag, freq=fm$Freq, ev=fm$Mag*fm$Freq))
}
#Figure: plot of EV in clock task
# NOTE(review): bare ggplot call -- auto-prints only interactively (see note
# in the IEV section).
pdf("Clock contingencies.pdf", width=5, height=3.4)
ggplot(df, aes(x=time/1000, y=ev, color=contingency)) + geom_line(size=2) + ylab("Expected value (points)") + xlab("Time (seconds)") + scale_color_brewer("Contingency", palette="Dark2") +
theme_bw(base_size=18) + theme(axis.title.x=element_text(margin = margin(t = 10)), axis.title.y=element_text(margin = margin(r = 10)), legend.margin = unit(0.15, "cm"),
plot.margin=margin(r=3, l=3, t=10, b=10))
dev.off()
#freq, prob, and ev
library(cowplot)
# shared layers/theme for the three small panels
gcommon <- list(geom_line(size=2), xlab("Time (seconds)"), scale_color_brewer("Contingency", palette="Dark2"), theme_bw(base_size=18),
theme(axis.title.x=element_text(margin = margin(t = 10)), axis.title.y=element_text(margin = margin(r = 8)),
legend.margin = unit(0.15, "cm"),
plot.margin=margin(r=10, l=10, t=10, b=5)))
# NOTE(review): g1/g2/g3 are reassigned here, clobbering the basis-figure
# panels defined earlier in the script.
g1 <- ggplot(df, aes(x=time/1000, y=mag, color=contingency)) + gcommon + ylab("Reward magnitude (points)") +
theme(legend.position="none", plot.margin=margin(r=10, l=0, t=10, b=5))
g2 <- ggplot(df, aes(x=time/1000, y=freq, color=contingency)) + gcommon + ylab("Reward probability") + theme(legend.position="none")
# cosmetic vertical offsets so the overlapping CEV/CEVR EV curves are visible
df$ev[df$contingency=="CEVR"] <- df$ev[df$contingency=="CEVR"] + 0.5 #offset for plotting
df$ev[df$contingency=="CEV"] <- df$ev[df$contingency=="CEV"] - 0.5 #offset for plotting
g3 <- ggplot(df, aes(x=time/1000, y=ev, color=contingency)) + gcommon + ylab("Expected value (points)") + theme(legend.position="none")
pdf("Clock contingencies with freq mag.pdf", width=8.5, height=4)
pg <- plot_grid(g1, g2, g3, nrow=1)
plot(pg)
dev.off()
# standalone legend extracted from g1
pdf("Clock contingencies legend.pdf", width=2, height=2)
legend_b <- get_legend(g1 + theme(legend.position="right") + theme_bw(base_size=25) + guides(color = guide_legend(keywidth=2, keyheight=2)))
p <- plot_grid(legend_b, nrow=1)
plot(p)
dev.off()
# add the legend underneath the row we made earlier. Give it 10% of the height
# of one plot (via rel_heights).
# p <- plot_grid(pg, legend_b, nrow=1, rel_widths = c(.9, .15))
# plot(p)
# Behavioral figures 1c/1d: load trial-level data (bdf), compute a
# between-subjects median split on total earnings, and plot RT and RT-swing
# trajectories by contingency and earnings group.
setwd("~/Data_Analysis/temporal_instrumental_agent/clock_task/vba_fmri")
#load(file="dataframe_for_entropy_analysis_Oct2016.RData")
#this contains data with 24 basis functions and post-Niv learning rule
#load(file="dataframe_for_entropy_analysis_Nov2016.RData")
load(file="dataframe_for_entropy_analysis_Mar2017.RData") #has the random priors entropy
# per-subject totals, then median split across subjects
bdf = bdf %>% rename(subject=rowID) %>% group_by(subject) %>% arrange(subject, run, trial) %>% mutate(totreward=sum(score), cumreward=cumsum(score)) %>% ungroup() %>%
mutate(medreward=median(totreward), #between subjects
msplit=factor(as.numeric(totreward > medreward), levels=c(0,1), labels=c("< Median", ">= Median")))
# relabel with plotmath strings so facet strips render via label_parsed below
bdf <- bdf %>% mutate(msplit=recode(msplit, "< Median" = "Total~earnings<median", ">= Median"="Total~earnings>=median"))
bdf$rewFunc <- factor(bdf$rewFunc, levels=c("IEV", "DEV", "CEV", "CEVR")) #to match contingency plot
# Figure 1c
# NOTE(review): bare ggplot calls below only auto-print interactively; wrap
# in print() when running via source()/Rscript.
pdf("Fig_1c.pdf", width = 8.5, height = 3.75)
ggplot(bdf, aes(x=trial, y=rt/1000, color = rewFunc )) + stat_smooth(method="loess", size = 2) +
scale_color_brewer("Contingency", palette="Dark2") +
theme_bw(base_size=18) + facet_wrap(~msplit, labeller=label_parsed) + xlab("Trial") +
ylab("Response time (seconds)") + scale_y_continuous(breaks=c(1.25, 1.5, 1.75, 2, 2.25)) +
theme(axis.title.x=element_text(margin = margin(t = 12)), axis.title.y=element_text(margin = margin(r = 12)),
legend.margin = margin(t=0, r=2, b=0, l=5), plot.margin=margin(r=10, l=10, t=10, b=5)) + theme(legend.position="none")
dev.off()
# 1d
# pdf("Fig_1d.pdf", width = 10, height = 4)
# ggplot(subset(bdf), aes(x=trial, y=abstschange*100, color = rewFunc)) + stat_smooth(method="loess", size = 2) + theme_bw(base_size=25) + facet_wrap(~msplit) + ylab("RT swings, ms") + labs(colour = "Contingency") #facet_wrap(~msplit) #geom_jitter(alpha=0.2) +
# dev.off()
pdf("Fig_1d.pdf", width = 8.5, height = 3.75)
ggplot(bdf, aes(x=trial, y=abstschange/10, color = rewFunc )) + stat_smooth(method="loess", size = 2) +
scale_color_brewer("Contingency", palette="Dark2") +
theme_bw(base_size=18) + facet_wrap(~msplit, labeller=label_parsed) + xlab("Trial") +
ylab("Change in RT (seconds)") +
theme(axis.title.x=element_text(margin = margin(t = 12)), axis.title.y=element_text(margin = margin(r = 12)),
legend.margin = margin(t=0, r=2, b=0, l=5), plot.margin=margin(r=10, l=10, t=10, b=5)) + theme(legend.position="none")
dev.off()
# Supplementary figure: a sinusoidal contingency composed of summed
# sinusoids. EV is shifted positive; probability is rescaled into
# [prb_min, prb_max]; allshift stores every circular shift of the curves
# (slice 1 = EV, 2 = probability, 3 = magnitude = EV/probability).
#Supplementary figure: Sinusoidal contingency
ntimesteps=500
ev = 10*sin(2*pi*(1:ntimesteps)*1/ntimesteps) + 2.5*sin(2*pi*(1:ntimesteps)*2/ntimesteps) + 2.0*cos(2*pi*(1:ntimesteps)*4/ntimesteps)
ev = ev + abs(min(ev)) + 10;
prb = 25*cos(2*pi*(1:ntimesteps)*1/ntimesteps) + 10*cos(2*pi*(1:ntimesteps)*3/ntimesteps) + 6*sin(2*pi*(1:ntimesteps)*5/ntimesteps)
prb_max=0.7
prb_min=0.3
# min-max rescale of prb into [prb_min, prb_max]
prb = (prb - min(prb))*(prb_max-prb_min)/(max(prb)-min(prb)) + prb_min
allshift = array(NA_real_, dim=c(ntimesteps, ntimesteps, 3))
for (i in 1:ntimesteps) {
if (i > 1) {
# circular (wrap-around) shift of the index vector by i-1 positions
shift = c(i:ntimesteps, 1:(i-1))
} else { shift <- 1:ntimesteps }
evi = ev[shift]
prbi = prb[shift]
allshift[i,,1] = evi
allshift[i,,2] = prbi
allshift[i,,3] = evi/prbi
}
# three representative shifts, time expressed in seconds (500 steps / 100)
shift3 <- rbind(data.frame(time=1:ntimesteps/100, EV=allshift[,1,1], Probability=allshift[,1,2], Magnitude=allshift[,1,3], name="shift = 0"),
data.frame(time=1:ntimesteps/100, EV=allshift[,100,1], Probability=allshift[,100,2], Magnitude=allshift[,100,3], name="shift = 100"),
data.frame(time=1:ntimesteps/100, EV=allshift[,200,1], Probability=allshift[,200,2], Magnitude=allshift[,200,3], name="shift = 200"))
library(reshape2); library(ggplot2)
m3 <- melt(shift3, id.vars=c("time", "name"))
# NOTE(review): bare ggplot call -- auto-prints only interactively.
pdf("Sinusoid contingency.pdf", width=10, height=8)
ggplot(m3, aes(x=time, y=value)) + geom_line(size=1.5) + facet_grid(variable ~ name, scales="free_y") + theme_bw(base_size=24) + xlab("Time (seconds)") + ylab("") +
theme(panel.margin = unit(20, "pt"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
strip.background = element_rect(fill = "grey90", colour = "grey50", size = 0.2))
dev.off()
# Bayesian model comparison (BMC) figure: read estimated model frequencies
# (Ef) and their covariance (Vf) exported from MATLAB/VBA, and plot
# frequency +/- SE per model (SE = sqrt of the diagonal of Vf).
##BMC Figure
library(R.matlab)
setwd(file.path(getMainDir(), "temporal_instrumental_agent", "clock_task", "figures"))
scepticbmc <- readMat("finalicissimo_BMC_for_eLife_fig.mat")
#out.Ef contains estimated frequencies
#out.Vf contains the variance-covariance matrix of frequencies
#inside plotUncertainTimeSeries, which is called from VBA_groupBMC, it appears the SEs are derived by the sqrt of the diagonal of out.Vf
#somehow got mangled -- went into MATLAB and just saved these in a simpler .mat
Ef <- scepticbmc$out[,,1]$Ef
scepticbmc$out[,,1]$Vf
#bmcef <- readMat("elife_bmc_frequencies.mat")
bmcef <- readMat("~/Data_Analysis/temporal_instrumental_agent/clock_task/vba_fmri/figures/ploscompbio_bmc_frequencies.mat")
df <- data.frame(model=unlist(bmcef$modelnames), freq=bmcef$Ef, se=sqrt(diag(bmcef$Vf)))
# earlier 9-model ordering retained (commented) for reference
#df$m_ordered <- ordered(df$model, levels=c("fixed", "fixed_uv", "fixed_decay", "kalman_softmax",
# "kalman_uv_sum", "kalman_logistic", "kalman_processnoise", "kalman_sigmavolatility", "Qstep"),
# labels=c("Fixed LR V", "Fixed LR U + V", "Fixed LR V Decay", "KF V", "KF U + V",
# "KF U -> V", "KF Process Noise", "KF Volatility", "TD"))
# reduced 6-model set with display labels
df$m_ordered <- ordered(df$model, levels=c("fixed", "fixed_uv", "fixed_decay", "kalman_softmax",
"kalman_uv_sum", "Qstep"),
labels=c("Fixed LR V", "Fixed LR U + V", "Fixed LR V Sel. Maint.", "KF V", "KF U + V", "TD"))
#for ggplot with coord_flip, need to reverse
df$m_ordered <- factor(df$m_ordered, levels=rev(levels(df$m_ordered)))
library(ggplot2)
#updated version with smaller model set for PLoS Comp Bio
# NOTE(review): bare ggplot call -- auto-prints only interactively.
pdf("SCEPTIC Main BMC v5 May2017.pdf", width=6, height=4)
ggplot(df, aes(x=m_ordered, y=freq, ymin=freq-se, ymax=freq+se)) + geom_pointrange(stat="identity", size=1.3, fatten=2.3) +
annotate("text", x=4, y=0.65, label="EP = 1.0", hjust=0, vjust=0.5, size=4.5) + # ylim(-0.05,1.1) +
geom_label(mapping=aes(x=x,y=y,label=label, ymin=NULL, ymax=NULL),
data=data.frame(x=0.65, y=0.48, label=as.character(expression(paste("BOR < ",10^{-51})))),
hjust=0, vjust=0, parse=TRUE, size=6, label.padding = unit(0.4, "lines")) +
ylab("Estimated Model Frequency") + xlab("") + coord_flip() +
theme_bw(base_size=20) + theme(axis.title.x=element_text(margin = margin(t = 15)),
panel.grid.major.y=element_blank(), panel.grid.minor.y=element_blank(), plot.margin=margin(t=5, r=10, b=5, l=0)) +
scale_y_continuous(breaks=c(0,0.25, 0.5, 0.75), labels=c("0", ".25", ".5", ".75"))
dev.off()
# Earlier drafts (v1-v4) of the main BMC figure, retained for reference, plus
# the final pointrange version (v5).
# NOTE(review): annotations at x=7 in these drafts predate the reduced
# 6-model set defined above and now fall outside the discrete axis; the bare
# ggplot calls also only auto-print interactively.
pdf("SCEPTIC Main BMC.pdf", width=6, height=4)
ggplot(df, aes(x=m_ordered, y=freq, ymin=freq-se, ymax=freq+se)) + geom_pointrange(stat="identity") + geom_errorbar(width=0.5) +
annotate("text", x=7, y=0.54, label="EP = 1.0", hjust=0, vjust=0.5, size=4.5) + # ylim(-0.05,1.1) +
annotate("text", x=1.2, y=0.30, label=as.character(expression(paste("BOR = ",8.03," x ",10^{-49}))), hjust=0, vjust=0, parse=TRUE, size=6) +
ylab("Estimated Model Frequency") + xlab("") + coord_flip() +
theme_bw(base_size=20) + theme(axis.title.x=element_text(margin = margin(t = 15)))
dev.off()
library(ggplot2)
pdf("SCEPTIC Main BMC v2.pdf", width=6, height=4)
ggplot(df, aes(x=m_ordered, y=freq, ymin=freq-se, ymax=freq+se)) + geom_bar(stat="identity", fill="grey92", color="black") + geom_errorbar(width=0.5) +
annotate("text", x=7, y=0.54, label="EP = 1.0", hjust=0, vjust=0.5, size=4.5) + # ylim(-0.05,1.1) +
annotate("text", x=1.2, y=0.30, label=as.character(expression(paste("BOR = ",8.03," x ",10^{-49}))), hjust=0, vjust=0, parse=TRUE, size=6) +
ylab("Estimated Model Frequency") + xlab("") + coord_flip() +
theme_bw(base_size=20) + theme(axis.title.x=element_text(margin = margin(t = 15)))
dev.off()
library(ggplot2)
pdf("SCEPTIC Main BMC v3.pdf", width=6, height=4)
ggplot(df, aes(x=m_ordered, y=freq, ymin=freq-se, ymax=freq+se)) + geom_bar(stat="identity", fill="grey92", color="black") + geom_errorbar(width=0.5) +
annotate("text", x=7, y=0.54, label="EP = 1.0", hjust=0, vjust=0.5, size=4.5) + # ylim(-0.05,1.1) +
geom_label(mapping=aes(x=x,y=y,label=label, ymin=NULL, ymax=NULL),
data=data.frame(x=0.8, y=0.47, label=as.character(expression(paste("BOR < ",10^{-49})))),
hjust=0, vjust=0, parse=TRUE, size=6) +
ylab("Estimated Model Frequency") + xlab("") + coord_flip() +
theme_bw(base_size=20) + theme(axis.title.x=element_text(margin = margin(t = 15)))
dev.off()
pdf("SCEPTIC Main BMC v4.pdf", width=6, height=4)
ggplot(df, aes(x=m_ordered, y=freq, ymin=freq-se, ymax=freq+se)) + geom_bar(stat="identity", fill="grey92", color="black") + geom_errorbar(width=0.5) +
annotate("text", x=7, y=0.54, label="EP = 1.0", hjust=0, vjust=0.5, size=4.5) + # ylim(-0.05,1.1) +
geom_label(mapping=aes(x=x,y=y,label=label, ymin=NULL, ymax=NULL),
data=data.frame(x=0.8, y=0.47, label=as.character(expression(paste("BOR < ",10^{-49})))),
hjust=0, vjust=0, parse=TRUE, size=6) +
ylab("Estimated Model Frequency") + xlab("") + coord_flip() +
theme_bw(base_size=20) + theme(axis.title.x=element_text(margin = margin(t = 15)),
panel.grid.major.y=element_blank(), panel.grid.minor.y=element_blank())
dev.off()
pdf("SCEPTIC Main BMC v5.pdf", width=6, height=4)
ggplot(df, aes(x=m_ordered, y=freq, ymin=freq-se, ymax=freq+se)) + geom_pointrange(stat="identity", size=1.3, fatten=2.5) +
annotate("text", x=7, y=0.54, label="EP = 1.0", hjust=0, vjust=0.5, size=4.5) + # ylim(-0.05,1.1) +
geom_label(mapping=aes(x=x,y=y,label=label, ymin=NULL, ymax=NULL),
data=data.frame(x=0.8, y=0.47, label=as.character(expression(paste("BOR < ",10^{-43})))),
hjust=0, vjust=0, parse=TRUE, size=6, label.padding = unit(0.4, "lines")) +
ylab("Estimated Model Frequency") + xlab("") + coord_flip() +
theme_bw(base_size=20) + theme(axis.title.x=element_text(margin = margin(t = 15)),
panel.grid.major.y=element_blank(), panel.grid.minor.y=element_blank()) +
scale_y_continuous(breaks=c(0,0.25, 0.5, 0.75), labels=c("0", ".25", ".5", ".75"))
dev.off()
# Supplementary BMC figures for the AR(1)-choice and Schoenberg-choice model
# variants; frequencies/covariances come from a single .mat file and model
# names were entered manually.
# NOTE(review): bare ggplot calls -- auto-print only interactively.
#ar1 and schoenberg results
arfreqs <- readMat("ar_modelfreqs_Sep2016.mat")
#manual entry from Jon email 19Sep2016
mnames <- c("Fixed LR V", "Fixed LR U + V", "Fixed LR V Sel. Maint.", "KF V", "KF Process Noise", "KF U + V", "KF Volatility")
df <- data.frame(model=ordered(mnames), freq=arfreqs$ar1Ef, se=sqrt(diag(arfreqs$ar1Vf)))
#for ggplot with coord_flip, need to reverse
df$m_ordered <- factor(df$model, levels=rev(levels(df$model)))
pdf("Ar1 Main BMC v5.pdf", width=6, height=4.3)
ggplot(df, aes(x=m_ordered, y=freq, ymin=freq-se, ymax=freq+se)) + geom_pointrange(stat="identity", size=1.3, fatten=2.5) +
annotate("text", x=5, y=0.45, label="EP = 1.0", hjust=0, vjust=0.5, size=4.5) + # ylim(-0.05,1.1) +
geom_label(mapping=aes(x=x,y=y,label=label, ymin=NULL, ymax=NULL),
data=data.frame(x=0.8, y=0.40, label=as.character(expression(paste("BOR < ",10^{-32})))),
hjust=0, vjust=0, parse=TRUE, size=6, label.padding = unit(0.4, "lines")) +
ylab("Estimated Model Frequency") + xlab("Includes AR(1) choice") + coord_flip() +
theme_bw(base_size=20) + theme(axis.title.x=element_text(margin = margin(t = 15)),
panel.grid.major.y=element_blank(), panel.grid.minor.y=element_blank()) +
scale_y_continuous(breaks=c(0,0.25, 0.5, 0.75), labels=c("0", ".25", ".5", ".75"))
dev.off()
# same plot for the Schoenberg-choice variant
#manual entry from Jon email 19Sep2016
df <- data.frame(model=ordered(mnames), freq=arfreqs$schEf, se=sqrt(diag(arfreqs$schVf)))
#for ggplot with coord_flip, need to reverse
df$m_ordered <- factor(df$model, levels=rev(levels(df$model)))
pdf("Scho Main BMC v5.pdf", width=6, height=4.3)
ggplot(df, aes(x=m_ordered, y=freq, ymin=freq-se, ymax=freq+se)) + geom_pointrange(stat="identity", size=1.3, fatten=2.5) +
annotate("text", x=5, y=0.45, label="EP = 1.0", hjust=0, vjust=0.5, size=4.5) + # ylim(-0.05,1.1) +
geom_label(mapping=aes(x=x,y=y,label=label, ymin=NULL, ymax=NULL),
data=data.frame(x=0.8, y=0.43, label=as.character(expression(paste("BOR < ",10^{-37})))),
hjust=0, vjust=0, parse=TRUE, size=6, label.padding = unit(0.4, "lines")) +
ylab("Estimated Model Frequency") + xlab("Includes Schoenberg choice") + coord_flip() +
theme_bw(base_size=20) + theme(axis.title.x=element_text(margin = margin(t = 15)),
panel.grid.major.y=element_blank(), panel.grid.minor.y=element_blank()) +
scale_y_continuous(breaks=c(0,0.25, 0.5, 0.75), labels=c("0", ".25", ".5", ".75"))
dev.off()
# BMC for the Frank TC model family: each successive model adds one parameter
# (K, lambda, nu, alphaG, alphaN, rho, epsilon); axis labels use plotmath via
# scale_x_discrete. NOTE(review): bare ggplot call -- auto-prints only
# interactively.
#frank TC (replicate v5 above)
frank_bmcef <- readMat("elife_bmc_franktc_frequencies.mat")
df <- data.frame(model=unlist(frank_bmcef$models), freq=frank_bmcef$Ef, se=sqrt(diag(frank_bmcef$Vf)))
df$modelmath <- ordered(df$model, levels=c("K", "K_Lambda", "K_Lambda_Nu", "K_Lambda_Nu_AlphaG",
"K_Lambda_Nu_AlphaG_AlphaN", "K_Lambda_Nu_AlphaG_AlphaN_Rho", "K_Lambda_Nu_AlphaG_AlphaN_Rho_Epsilon"))
#for ggplot with coord_flip, need to reverse
df$modelmath <- factor(df$modelmath, levels=rev(levels(df$modelmath)))
pdf("Frank TC BMC v5.pdf", width=6, height=4)
ggplot(df, aes(x=modelmath, y=freq, ymin=freq-se, ymax=freq+se)) + geom_pointrange(stat="identity", size=1.3, fatten=2.5) +
annotate("text", x=5, y=0.63, label="EP = 1.0", hjust=0, vjust=0.5, size=4.5) + # ylim(-0.05,1.1) +
geom_label(mapping=aes(x=x,y=y,label=label, ymin=NULL, ymax=NULL),
data=data.frame(x=0.8, y=0.60, label=as.character(expression(paste("BOR < ",10^{-35})))),
hjust=0, vjust=0, parse=TRUE, size=6, label.padding = unit(0.4, "lines")) +
ylab("Estimated Model Frequency") + xlab("") + coord_flip() +
theme_bw(base_size=20) + theme(axis.title.x=element_text(margin = margin(t = 15)),
axis.title.y=element_text(margin = margin(r = 15)),
panel.grid.major.y=element_blank(), panel.grid.minor.y=element_blank()) +
scale_y_continuous(breaks=c(0,0.25, 0.5, 0.75), labels=c("0", ".25", ".5", ".75")) +
scale_x_discrete("Parameter added to TC", labels=rev(expression(K, lambda, nu, alpha[G], alpha[N], rho, epsilon)))
#scale_x_discrete("test", labels=c(expression(alpha), expression(beta)))
dev.off()
| /clock_task/figures/sceptic_fmri_paper_figures.R | no_license | DecisionNeurosciencePsychopathology/temporal_instrumental_agent | R | false | false | 28,670 | r | ##just a quick plot of the basis
# Figure-generation dependencies: ggplot2/cowplot for plotting, dplyr for data
# wrangling, extrafont so PDFs can use system fonts (Tahoma below).
library(ggplot2)
library(dplyr)
library(cowplot)
library(extrafont)
#font_import()
# Register previously imported system fonts with the PDF device.
fonts()
loadfonts()
#extrafont::choose_font()
# Gaussian temporal basis setup for the entropy illustration figures below:
# nbasis radial basis functions tiled evenly across the trial interval.
# (Fixes: the original mixed `=` and `<-` for top-level assignment and left
# stray semicolons; the sapply lambda returned via an assignment expression.)
nbasis <- 24
tvec <- 0:4000  # time grid in ms (0-4 s trial)

# Fraction of the time range by which basis centers extend beyond tvec,
# then converted into the time scale of tvec (ms).
margin_offset <- .10
margin_offset <- (max(tvec) - min(tvec)) * margin_offset #convert margin_offset into time scale of tvec

basis_overlap <- 1.52 # center-spacing / SD ratio; yields ~50% overlap between neighbors
tmin <- min(tvec) - margin_offset
tmax <- max(tvec) + margin_offset
#new basis plot for showing entropy etc.
# Evenly spaced basis centers spanning [tmin, tmax].
centers <- seq(tmin, tmax, by = (tmax-tmin)/(nbasis-1))
sig <- (centers[2] - centers[1])/basis_overlap #SD of the basis functions themselves

# length(tvec) x nbasis matrix; each column is one Gaussian bump renormalized
# to a maximum of 1 so every basis element peaks at the same height.
gaussmat <- sapply(centers, function(ctr) {
  dvec <- dnorm(x=tvec, mean=ctr, sd=sig)
  dvec/max(dvec) #renormalize to max=1
})
# pdf("gauss_basis_24basis.pdf", width=6, height=5)
# matplot(gaussmat, type="l", lty=1, lwd=5, col=colorRampPalette(c("blue", "red"))(13), ann=FALSE, xaxt='n', yaxt='n', bty='n')
# dev.off()
# Fixed seed so the illustrative random basis weights are reproducible.
set.seed(1001)
# Random weights for the "early in learning / high entropy" illustration;
# edge elements are pinned so the value function stays flat at the margins.
weights <- runif(nbasis, min=0, max=8)
weights[1] <- 0
weights[24] <- 0
weights[23] <- 1
#initial values for basis elements
# v: length(tvec) x nbasis matrix of weighted basis elements (row = timepoint).
v <- t(sapply(1:nrow(gaussmat), function(r) {
gaussmat[r,]*weights
}))
# Bars for the weights, restricted to the on-screen 0-4 s window (centers are in seconds here).
weights_df <- data.frame(time=centers/1000, weight=weights) %>% filter(time > 0 & time <= 4)
# Long format for plotting one line per basis element.
vm <- reshape2::melt(v, varnames=c("time", "basis")) %>% mutate(basis=factor(basis), time=time/1000) #seconds
# Integrated value function: row sums over basis elements.
vfunc <- apply(v, 1, sum) %>% as.data.frame() %>% setNames("value") %>% mutate(time=1:length(value)/1000)
plot_font <- "Tahoma"
pdf("g1.pdf", width=10, height=7)
# Panel 1: high-entropy value function early in learning, with weight bars (grey),
# individual basis elements (red), integrated value (black), and reward/RPE annotations.
g1 <- ggplot(vm, aes(x=time, y=value, group=basis)) +
geom_rect(data=weights_df, aes(xmin=time-.04, xmax=time+.04, ymin=0, ymax=weight, y=NULL, group=NULL, color=NULL),
fill="grey70", show.legend = FALSE) + #weights
geom_line(show.legend = FALSE, size=1.2, color="red") + #basis elements
geom_line(data=vfunc, aes(group=NULL), color="black", size=1.5) + #integrated value
cowplot::theme_cowplot(font_size = 24) + ylab("Expected Value (points)") +
xlab("Time (seconds)") +
annotate(geom="text", x=1.21, y=38.4, label="Reward", size=9, hjust=1, family=plot_font) +
annotate(geom="text", x=1.25, y=22, label="RPE+", size=9, hjust=1, family=plot_font) +
annotate(geom="text", x=4, y=45, label="High\nentropy", size=12, hjust=1, family=plot_font, lineheight = 0.8) +
annotate(geom="point", x=1.4, y=38, size=9, color="darkblue") +
annotate(geom="segment", x=1.4, xend=1.4, y=8.5, yend=35.0, size=1.5, color="gray90", lineend="butt") +
theme(text=element_text(family=plot_font), axis.title.x = element_text(margin=margin(t=15)),
axis.title.y = element_text(margin=margin(r=15))) + ylim(c(0,60)) +
theme(plot.title = element_text(face = "plain", size=32))
plot(g1)
dev.off()
#first choice
# Bump up weights around basis elements 8-10 to simulate an update after a
# first rewarded choice (~1.4 s).
weights[9] <- 22
weights[8] <- 11
weights[10] <- 12
#updated values for basis elements
v <- t(sapply(1:nrow(gaussmat), function(r) {
gaussmat[r,]*weights
}))
weights_df <- data.frame(time=centers/1000, weight=weights) %>% filter(time > 0 & time <= 4)
vm <- reshape2::melt(v, varnames=c("time", "basis")) %>% mutate(basis=factor(basis), time=time/1000) #seconds
vfunc <- apply(v, 1, sum) %>% as.data.frame() %>% setNames("value") %>% mutate(time=1:length(value)/1000)
#dev.new()
#plot(vfunc, type="l")
#second choice
# Second update: bump weights around elements 16-18 (~2.9 s), shifting RT_Vmax later.
weights[16] <- 21
weights[17] <- 31
weights[18] <- 11
#updated values for basis elements
v_next <- t(sapply(1:nrow(gaussmat), function(r) {
gaussmat[r,]*weights
}))
weights_df_next <- data.frame(time=centers/1000, weight=weights) %>% filter(time > 0 & time <= 4)
# NOTE(review): this melts `v`, not `v_next` — looks like a copy-paste slip, but
# vm_next is never used below (g2 plots vm and vfunc_next), so output is unaffected.
vm_next <- reshape2::melt(v, varnames=c("time", "basis")) %>% mutate(basis=factor(basis), time=time/1000) #seconds
vfunc_next <- apply(v_next, 1, sum) %>% as.data.frame() %>% setNames("value") %>% mutate(time=1:length(value)/1000)
pdf("g2.pdf", width=10, height=6)
# Panel 2: pre-update value (black), post-update value (dashed grey), and an
# arrow marking the shift in RT_Vmax toward the newly rewarded time (~2.9 s).
g2 <- ggplot(vm, aes(x=time, y=value, group=basis)) +
geom_rect(data=weights_df, aes(xmin=time-.04, xmax=time+.04, ymin=0, ymax=weight, y=NULL, group=NULL, color=NULL),
fill="grey70", show.legend = FALSE) + #weights
annotate(geom="segment", x=2.9, xend=2.9, y=5, yend=55, size=1.5, color="gray90", lineend="butt") +
geom_line(show.legend = FALSE, size=1.2, color="red") + #basis elements
geom_line(data=vfunc_next, aes(group=NULL), color="grey50", size=1.5, lty=6) + #integrated after update
geom_line(data=vfunc, aes(group=NULL), color="black", size=1.5) + #integrated value
cowplot::theme_cowplot(font_size = 24) + ylab("Expected Value (points)") +
xlab("Time (seconds)") + annotate(geom="point", x=2.9, y=58, size=9, color="darkblue") +
annotate(geom="text", x=2.7, y=58.5, label="Reward", size=9, hjust=1, family=plot_font) +
#annotate(geom="text", x=3, y=30, label="PE+", size=9, hjust=1, family=plot_font) +
annotate(geom="text", x=1.3, y=35, label=expression(RT[Vmax]), size=9, hjust=0.5, family=plot_font, parse=TRUE) +
annotate(geom="segment", x=1.24, xend=2.75, y=41.5, yend=41.5, size=1.5, color="gray50", lineend="round",
arrow = arrow(length = unit(0.5,"cm"))) +
annotate(geom="text", x=1.9, y=45.5, label=expression(Delta*RT[Vmax]), size=9, hjust=0.5, family=plot_font, color="gray50", parse=TRUE) +
theme(text=element_text(family=plot_font), axis.title.x = element_text(margin=margin(t=15)),
axis.title.y = element_text(margin=margin(r=15))) + ylim(c(0,60)) +
theme(plot.title = element_text(face = "plain", size=32))
plot(g2)
dev.off()
# Two-panel composite of the early-learning and RT_Vmax-change panels.
pdf("two_panel.pdf", width=12, height=5)
plot_grid(g1 + ggtitle("Early in learning"), g2 + ggtitle("Change in RT Vmax") + ylab(""), nrow=1)
dev.off()
#nth choice
# Late-in-learning weights: small random background with a single dominant peak
# around elements 13-17, i.e. a low-entropy value function.
weights <- runif(nbasis, min=0, max=4)
weights[1:3] <- 0
weights[22:24] <- 0
weights[13] <- 12
weights[14] <- 35
weights[15] <- 26
weights[16] <- 9
weights[17] <- 6
#updated values for basis elements
v <- t(sapply(1:nrow(gaussmat), function(r) {
gaussmat[r,]*weights
}))
weights_df <- data.frame(time=centers/1000, weight=weights) %>% filter(time > 0 & time <= 4)
vm <- reshape2::melt(v, varnames=c("time", "basis")) %>% mutate(basis=factor(basis), time=time/1000) #seconds
vfunc <- apply(v, 1, sum) %>% as.data.frame() %>% setNames("value") %>% mutate(time=1:length(value)/1000)
pdf("g3.pdf", width=10, height=6)
# Panel 3: sharply peaked (low entropy) value function late in learning.
g3 <- ggplot(vm, aes(x=time, y=value, group=basis)) +
geom_rect(data=weights_df, aes(xmin=time-.04, xmax=time+.04, ymin=0, ymax=weight, y=NULL, group=NULL, color=NULL),
fill="grey70", show.legend = FALSE) + #weights
geom_line(show.legend = FALSE, size=1.2, color="red") + #basis elements
geom_line(data=vfunc, aes(group=NULL), color="black", size=1.5) + #integrated value
cowplot::theme_cowplot(font_size = 24) + ylab("Expected Value (points)") +
xlab("Time (seconds)") +
#annotate(geom="point", x=3.10, y=58, size=9, color="darkblue") +
#annotate(geom="text", x=2.95, y=58.5, label="Reward", size=9, hjust=1, family=plot_font) +
#annotate(geom="text", x=3, y=30, label="PE+", size=9, hjust=1, family=plot_font) +
annotate(geom="text", x=2.5, y=54, label=expression(RT[Vmax]), size=9, hjust=0.5, family=plot_font, parse=TRUE) +
annotate(geom="text", x=0.1, y=45, label="Low\nentropy", size=12, hjust=0, family=plot_font, lineheight = 0.8) +
#annotate(geom="segment", x=3.10, xend=3.10, y=5, yend=55, size=1.5, color="gray60", lineend="butt") +
#annotate(geom="segment", x=1.24, xend=2.9, y=42, yend=42, size=1.5, color="gray60", lineend="round",
# arrow = arrow(length = unit(0.5,"cm"))) +
theme(text=element_text(family=plot_font), axis.title.x = element_text(margin=margin(t=15)),
axis.title.y = element_text(margin=margin(r=15))) + ylim(c(0,60)) +
theme(plot.title = element_text(face = "plain", size=32))
plot(g3)
dev.off()
#pdf("three_panel.pdf", width=18, height=6)
# Three-panel composite; saved via ggsave and then re-embedded so the Tahoma
# font travels with the PDF.
gp <- plot_grid(g1 + ggtitle("Early in learning"), g2 + ggtitle("Change in RT Vmax") + ylab(""),
g3 + ggtitle("Late in learning") + ylab(""), nrow=1)
# plot_grid(g1, g2 + ylab(""), g3 + ylab(""), label_size = 18, vjust=1, hjust=0.1,
# nrow=1, labels=c("a) Early in learning", "b) Change in RT Vmax", "c) Late in learning"))
#dev.off()
ggsave("three_panel.pdf", gp, width=18, height=6, useDingbats=FALSE)
embed_fonts('three_panel.pdf', outfile='three_panel_embed.pdf')
#dev.new()
#plot(vfunc, type="l")
# pdf("g3.pdf", width=10, height=6)
#
# g2 <- ggplot(vm, aes(x=time, y=value, group=basis)) +
# geom_rect(data=weights_df, aes(xmin=time-.04, xmax=time+.04, ymin=0, ymax=weight, y=NULL, group=NULL, color=NULL),
# fill="grey70", show.legend = FALSE) + #weights
# geom_line(show.legend = FALSE, size=1.2, color="red") + #basis elements
# geom_line(data=vfunc, aes(group=NULL), color="black", size=1.5) + #integrated value
# cowplot::theme_cowplot(font_size = 24) + ylab("Expected Value (points)") +
# xlab("Time (seconds)") + annotate(geom="point", x=3.1, y=58, size=9, color="darkblue") +
# annotate(geom="text", x=2.90, y=58.5, label="Reward", size=9, hjust=1, family=plot_font) +
# #annotate(geom="text", x=2.95, y=30, label="PE+", size=9, hjust=1, family=plot_font) +
# annotate(geom="text", x=1.3, y=35, label="RT Vmax", size=9, hjust=0.5, family=plot_font) +
# annotate(geom="segment", x=3.1, xend=3.1, y=5, yend=55, size=1.5, color="gray60", lineend="butt") +
# theme(text=element_text(family=plot_font), axis.title.x = element_text(margin=margin(t=15)),
# axis.title.y = element_text(margin=margin(r=15))) + ylim(c(0,60))
# plot(g2)
# dev.off()
###OLD CODE
# Older 13-element basis used for earlier versions of the basis figures.
# NOTE: this redefines ntimesteps/centers/gaussmat/weights from the 24-basis
# section above.
ntimesteps=13
maxt=4000
centers <- seq(0,4000, length=ntimesteps)
# 4001 x 13 matrix; columns are unnormalized Gaussians (SD fixed at 300 ms).
gaussmat <- sapply(centers, function(v) {
dnorm(x=0:maxt, mean=v, sd=300)
})
#matplot(gaussmat, type="l", lty=1, lwd=5, col=colors(distinct=TRUE))
pdf("gauss_basis.pdf", width=6, height=5)
matplot(gaussmat, type="l", lty=1, lwd=5, col=colorRampPalette(c("blue", "red"))(13), ann=FALSE, xaxt='n', yaxt='n', bty='n')
dev.off()
##test out weights and centers to show example
weights<- 100*c(0.0776, 7.4801, 2.0792, 1.4008, 2.0000, 1.0000, 3.0000, 9.0000, 25.0000, 22.0024, 30.2286, 12.0326, 30.2332)
# Weighted sum over basis elements at each timepoint (columns of the inner
# sapply are timepoints), scaled by 5 for display.
v <- 5* apply( sapply(1:nrow(gaussmat), function(r) {
gaussmat[r,]*weights
}), 2, sum)
pdf("learned_ev.pdf", width=6, height=5)
df <- data.frame(time=seq(0,4,length=4001), ev=v)
ggplot(df, aes(x=time, y=ev)) + geom_line(size=1.5) + ylab("Expected value (learned)\n") + xlab("\nTime (s)") + theme_bw(base_size=24)
dev.off()
##uncertainty weights
# Negative weights: uncertainty declines where the agent has sampled.
uweights<- -10*c(0.3776, 1.4801, 0.9792, 1.4008, 2.0000, 1.0000, 3.0000, 5.0000, 7.0000, 8.0024, 10.2286, 3.0326, 1.2332)
u <- 5* apply( sapply(1:nrow(gaussmat), function(r) {
gaussmat[r,]*uweights
}), 2, sum)
library(ggplot2)
pdf("learned_uncertainty.pdf", width=6, height=5)
df <- data.frame(time=seq(0,4,length=4001), u=u)
ggplot(df, aes(x=time, y=u)) + geom_line(size=1.5) + ylab("Uncertainty (experienced)\n") + xlab("\nTime (s)") + theme_bw(base_size=24)
dev.off()
# Base-graphics bar plot of the raw basis weights (arbitrary units).
pdf("weights.pdf", width=6, height=3.5)
par(mar=c(1, 5, 2, 1) + 0.1)
plot(c(0, 4000), c(0, max(weights) + 50), type = "n", xlab = "", ylab = "Basis weight (AU)", yaxs="i",
cex.lab=2, cex.axis=2, cex.main=2, cex.sub=2, bty='n', xaxt='n')
rect(centers-50, 0, centers+50, weights, col="gray")
#axis(1, at=c(0,4), labels=c("0s", "4s"))#, pos=, lty=, col=, las=, tck=, ...)
dev.off()
##true underlying IEV contingency.
setwd("~/Data_Analysis/clock_analysis/td_model")
source("~/Data_Analysis/clock_analysis/td_model/getrew.R")
# getMagFreq returns magnitude and frequency over the 0-4000 ms grid.
fm <- getMagFreq(0:4000, "IEV")
# Here fm is indexed as a flat vector: entries 1:4001 appear to be magnitude and
# 4002:8002 frequency — TODO confirm against getrew.R (the loop below uses
# fm$Mag/fm$Freq instead).
f <- fm[4002:8002]
m <- fm[1:4001]
# Expected value = frequency * magnitude.
ev <- f*m
library(ggplot2)
pdf("true_ev.pdf", width=6, height=5)
df <- data.frame(time=seq(0,4,length=4001), ev=ev)
ggplot(df, aes(x=time, y=ev)) + geom_line(size=1.5) + ylab("Expected value\n") + xlab("\nTime (s)") + theme_bw(base_size=24)
dev.off()
##real curve
pdf("iev_func.pdf", width=5, height=4)
ggplot(df, aes(x=time, y=ev)) + geom_line(size=1.5) + ylab("Expected value\n") + xlab("\nTime (ms)") + theme_bw(base_size=24)
dev.off()
# Build a long data frame with mag/freq/ev per contingency (only 4 iterations,
# so growing via rbind is acceptable here).
df <- c()
#take CEVR out since CEV and CEVR are identical wrt EV (and we are not showing prob + freq)
for (cont in c("IEV", "DEV", "CEV", "CEVR")) { #, "CEVR"
fm <- getMagFreq(0:4000, cont)
#if (cont=="CEV") { cont="CEV/CEVR" } #for plot name
df <- rbind(df, data.frame(contingency=cont, time=0:4000, mag=fm$Mag, freq=fm$Freq, ev=fm$Mag*fm$Freq))
}
#Figure: plot of EV in clock task
pdf("Clock contingencies.pdf", width=5, height=3.4)
ggplot(df, aes(x=time/1000, y=ev, color=contingency)) + geom_line(size=2) + ylab("Expected value (points)") + xlab("Time (seconds)") + scale_color_brewer("Contingency", palette="Dark2") +
theme_bw(base_size=18) + theme(axis.title.x=element_text(margin = margin(t = 10)), axis.title.y=element_text(margin = margin(r = 10)), legend.margin = unit(0.15, "cm"),
plot.margin=margin(r=3, l=3, t=10, b=10))
dev.off()
#freq, prob, and ev
library(cowplot)
# Shared layer list reused across the magnitude/probability/EV panels.
gcommon <- list(geom_line(size=2), xlab("Time (seconds)"), scale_color_brewer("Contingency", palette="Dark2"), theme_bw(base_size=18),
theme(axis.title.x=element_text(margin = margin(t = 10)), axis.title.y=element_text(margin = margin(r = 8)),
legend.margin = unit(0.15, "cm"),
plot.margin=margin(r=10, l=10, t=10, b=5)))
g1 <- ggplot(df, aes(x=time/1000, y=mag, color=contingency)) + gcommon + ylab("Reward magnitude (points)") +
theme(legend.position="none", plot.margin=margin(r=10, l=0, t=10, b=5))
g2 <- ggplot(df, aes(x=time/1000, y=freq, color=contingency)) + gcommon + ylab("Reward probability") + theme(legend.position="none")
# Nudge CEV/CEVR apart vertically so the overlapping flat EV lines are both visible.
df$ev[df$contingency=="CEVR"] <- df$ev[df$contingency=="CEVR"] + 0.5 #offset for plotting
df$ev[df$contingency=="CEV"] <- df$ev[df$contingency=="CEV"] - 0.5 #offset for plotting
g3 <- ggplot(df, aes(x=time/1000, y=ev, color=contingency)) + gcommon + ylab("Expected value (points)") + theme(legend.position="none")
pdf("Clock contingencies with freq mag.pdf", width=8.5, height=4)
pg <- plot_grid(g1, g2, g3, nrow=1)
plot(pg)
dev.off()
# Legend rendered separately so the three panels stay the same width.
pdf("Clock contingencies legend.pdf", width=2, height=2)
legend_b <- get_legend(g1 + theme(legend.position="right") + theme_bw(base_size=25) + guides(color = guide_legend(keywidth=2, keyheight=2)))
p <- plot_grid(legend_b, nrow=1)
plot(p)
dev.off()
# add the legend underneath the row we made earlier. Give it 10% of the height
# of one plot (via rel_heights).
# p <- plot_grid(pg, legend_b, nrow=1, rel_widths = c(.9, .15))
# plot(p)
setwd("~/Data_Analysis/temporal_instrumental_agent/clock_task/vba_fmri")
#load(file="dataframe_for_entropy_analysis_Oct2016.RData")
#this contains data with 24 basis functions and post-Niv learning rule
#load(file="dataframe_for_entropy_analysis_Nov2016.RData")
load(file="dataframe_for_entropy_analysis_Mar2017.RData") #has the random priors entropy
# Per-subject totals and a between-subjects median split on total earnings.
bdf = bdf %>% rename(subject=rowID) %>% group_by(subject) %>% arrange(subject, run, trial) %>% mutate(totreward=sum(score), cumreward=cumsum(score)) %>% ungroup() %>%
mutate(medreward=median(totreward), #between subjects
msplit=factor(as.numeric(totreward > medreward), levels=c(0,1), labels=c("< Median", ">= Median")))
# Relabel with plotmath-parseable strings for label_parsed facet strips below.
bdf <- bdf %>% mutate(msplit=recode(msplit, "< Median" = "Total~earnings<median", ">= Median"="Total~earnings>=median"))
bdf$rewFunc <- factor(bdf$rewFunc, levels=c("IEV", "DEV", "CEV", "CEVR")) #to match contingency plot
# Figure 1c
# Loess-smoothed response time by trial, faceted by earnings split.
pdf("Fig_1c.pdf", width = 8.5, height = 3.75)
ggplot(bdf, aes(x=trial, y=rt/1000, color = rewFunc )) + stat_smooth(method="loess", size = 2) +
scale_color_brewer("Contingency", palette="Dark2") +
theme_bw(base_size=18) + facet_wrap(~msplit, labeller=label_parsed) + xlab("Trial") +
ylab("Response time (seconds)") + scale_y_continuous(breaks=c(1.25, 1.5, 1.75, 2, 2.25)) +
theme(axis.title.x=element_text(margin = margin(t = 12)), axis.title.y=element_text(margin = margin(r = 12)),
legend.margin = margin(t=0, r=2, b=0, l=5), plot.margin=margin(r=10, l=10, t=10, b=5)) + theme(legend.position="none")
dev.off()
# 1d
# pdf("Fig_1d.pdf", width = 10, height = 4)
# ggplot(subset(bdf), aes(x=trial, y=abstschange*100, color = rewFunc)) + stat_smooth(method="loess", size = 2) + theme_bw(base_size=25) + facet_wrap(~msplit) + ylab("RT swings, ms") + labs(colour = "Contingency") #facet_wrap(~msplit) #geom_jitter(alpha=0.2) +
# dev.off()
pdf("Fig_1d.pdf", width = 8.5, height = 3.75)
# NOTE(review): abstschange/10 is labeled "seconds" while the commented variant
# above used *100 for "ms" — confirm the native units of abstschange.
ggplot(bdf, aes(x=trial, y=abstschange/10, color = rewFunc )) + stat_smooth(method="loess", size = 2) +
scale_color_brewer("Contingency", palette="Dark2") +
theme_bw(base_size=18) + facet_wrap(~msplit, labeller=label_parsed) + xlab("Trial") +
ylab("Change in RT (seconds)") +
theme(axis.title.x=element_text(margin = margin(t = 12)), axis.title.y=element_text(margin = margin(r = 12)),
legend.margin = margin(t=0, r=2, b=0, l=5), plot.margin=margin(r=10, l=10, t=10, b=5)) + theme(legend.position="none")
dev.off()
#Supplementary figure: Sinusoidal contingency
# Construct multi-harmonic expected-value (ev) and probability (prb) curves
# over ntimesteps, then tabulate every circular (phase) shift of both curves.
ntimesteps <- 500
tgrid <- seq_len(ntimesteps)
# k-th harmonic of the base angular frequency on this grid.
harmonic <- function(k) 2*pi*tgrid*k/ntimesteps

# Expected value: three harmonics, then shifted upward so the minimum is 10
# (the raw minimum is negative, so min + abs(min) cancels exactly).
ev <- 10*sin(harmonic(1)) + 2.5*sin(harmonic(2)) + 2.0*cos(harmonic(4))
ev <- ev + abs(min(ev)) + 10

# Probability: three harmonics rescaled linearly into [prb_min, prb_max].
prb <- 25*cos(harmonic(1)) + 10*cos(harmonic(3)) + 6*sin(harmonic(5))
prb_max <- 0.7
prb_min <- 0.3
prb <- (prb - min(prb))*(prb_max-prb_min)/(max(prb)-min(prb)) + prb_min

# allshift[i, , k]: both curves rotated so position i comes first
# (k = 1: ev, k = 2: prb, k = 3: implied magnitude = ev/prb).
allshift <- array(NA_real_, dim = c(ntimesteps, ntimesteps, 3))
for (i in seq_len(ntimesteps)) {
  rotated <- c(i:ntimesteps, seq_len(i - 1)) # circular shift; seq_len(0) is empty when i == 1
  allshift[i, , 1] <- ev[rotated]
  allshift[i, , 2] <- prb[rotated]
  allshift[i, , 3] <- ev[rotated] / prb[rotated]
}
# Pull three representative phase shifts (0, 100, 200 timesteps) out of allshift
# for a 3x3 faceted figure; time axis is in seconds (100 timesteps per second).
shift3 <- rbind(data.frame(time=1:ntimesteps/100, EV=allshift[,1,1], Probability=allshift[,1,2], Magnitude=allshift[,1,3], name="shift = 0"),
data.frame(time=1:ntimesteps/100, EV=allshift[,100,1], Probability=allshift[,100,2], Magnitude=allshift[,100,3], name="shift = 100"),
data.frame(time=1:ntimesteps/100, EV=allshift[,200,1], Probability=allshift[,200,2], Magnitude=allshift[,200,3], name="shift = 200"))
library(reshape2); library(ggplot2)
m3 <- melt(shift3, id.vars=c("time", "name"))
pdf("Sinusoid contingency.pdf", width=10, height=8)
# Rows: EV/Probability/Magnitude; columns: the three phase shifts.
ggplot(m3, aes(x=time, y=value)) + geom_line(size=1.5) + facet_grid(variable ~ name, scales="free_y") + theme_bw(base_size=24) + xlab("Time (seconds)") + ylab("") +
theme(panel.margin = unit(20, "pt"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
strip.background = element_rect(fill = "grey90", colour = "grey50", size = 0.2))
dev.off()
##BMC Figure
library(R.matlab)
setwd(file.path(getMainDir(), "temporal_instrumental_agent", "clock_task", "figures"))
scepticbmc <- readMat("finalicissimo_BMC_for_eLife_fig.mat")
#out.Ef contains estimated frequencies
#out.Vf contains the variance-covariance matrix of frequencies
#inside plotUncertainTimeSeries, which is called from VBA_groupBMC, it appears the SEs are derived by the sqrt of the diagonal of out.Vf
#somehow got mangled -- went into MATLAB and just saved these in a simpler .mat
# Ef is extracted for inspection; the Vf line just prints the matrix — the
# actual figure data come from the simpler .mat loaded next.
Ef <- scepticbmc$out[,,1]$Ef
scepticbmc$out[,,1]$Vf
#bmcef <- readMat("elife_bmc_frequencies.mat")
bmcef <- readMat("~/Data_Analysis/temporal_instrumental_agent/clock_task/vba_fmri/figures/ploscompbio_bmc_frequencies.mat")
df <- data.frame(model=unlist(bmcef$modelnames), freq=bmcef$Ef, se=sqrt(diag(bmcef$Vf)))
#df$m_ordered <- ordered(df$model, levels=c("fixed", "fixed_uv", "fixed_decay", "kalman_softmax",
# "kalman_uv_sum", "kalman_logistic", "kalman_processnoise", "kalman_sigmavolatility", "Qstep"),
# labels=c("Fixed LR V", "Fixed LR U + V", "Fixed LR V Decay", "KF V", "KF U + V",
# "KF U -> V", "KF Process Noise", "KF Volatility", "TD"))
# Reduced six-model set with display labels for the PLoS Comp Bio version.
df$m_ordered <- ordered(df$model, levels=c("fixed", "fixed_uv", "fixed_decay", "kalman_softmax",
"kalman_uv_sum", "Qstep"),
labels=c("Fixed LR V", "Fixed LR U + V", "Fixed LR V Sel. Maint.", "KF V", "KF U + V", "TD"))
#for ggplot with coord_flip, need to reverse
df$m_ordered <- factor(df$m_ordered, levels=rev(levels(df$m_ordered)))
library(ggplot2)
#updated version with smaller model set for PLoS Comp Bio
pdf("SCEPTIC Main BMC v5 May2017.pdf", width=6, height=4)
# Point-range of model frequency +/- SE with manually entered EP/BOR annotations.
ggplot(df, aes(x=m_ordered, y=freq, ymin=freq-se, ymax=freq+se)) + geom_pointrange(stat="identity", size=1.3, fatten=2.3) +
annotate("text", x=4, y=0.65, label="EP = 1.0", hjust=0, vjust=0.5, size=4.5) + # ylim(-0.05,1.1) +
geom_label(mapping=aes(x=x,y=y,label=label, ymin=NULL, ymax=NULL),
data=data.frame(x=0.65, y=0.48, label=as.character(expression(paste("BOR < ",10^{-51})))),
hjust=0, vjust=0, parse=TRUE, size=6, label.padding = unit(0.4, "lines")) +
ylab("Estimated Model Frequency") + xlab("") + coord_flip() +
theme_bw(base_size=20) + theme(axis.title.x=element_text(margin = margin(t = 15)),
panel.grid.major.y=element_blank(), panel.grid.minor.y=element_blank(), plot.margin=margin(t=5, r=10, b=5, l=0)) +
scale_y_continuous(breaks=c(0,0.25, 0.5, 0.75), labels=c("0", ".25", ".5", ".75"))
dev.off()
# Earlier stylistic iterations (v1-v5) of the same BMC figure, kept for the
# record: v1 point-range + errorbar, v2/v3/v4 bar variants, v5 final point-range.
pdf("SCEPTIC Main BMC.pdf", width=6, height=4)
ggplot(df, aes(x=m_ordered, y=freq, ymin=freq-se, ymax=freq+se)) + geom_pointrange(stat="identity") + geom_errorbar(width=0.5) +
annotate("text", x=7, y=0.54, label="EP = 1.0", hjust=0, vjust=0.5, size=4.5) + # ylim(-0.05,1.1) +
annotate("text", x=1.2, y=0.30, label=as.character(expression(paste("BOR = ",8.03," x ",10^{-49}))), hjust=0, vjust=0, parse=TRUE, size=6) +
ylab("Estimated Model Frequency") + xlab("") + coord_flip() +
theme_bw(base_size=20) + theme(axis.title.x=element_text(margin = margin(t = 15)))
dev.off()
library(ggplot2)
# v2: bars instead of point ranges.
pdf("SCEPTIC Main BMC v2.pdf", width=6, height=4)
ggplot(df, aes(x=m_ordered, y=freq, ymin=freq-se, ymax=freq+se)) + geom_bar(stat="identity", fill="grey92", color="black") + geom_errorbar(width=0.5) +
annotate("text", x=7, y=0.54, label="EP = 1.0", hjust=0, vjust=0.5, size=4.5) + # ylim(-0.05,1.1) +
annotate("text", x=1.2, y=0.30, label=as.character(expression(paste("BOR = ",8.03," x ",10^{-49}))), hjust=0, vjust=0, parse=TRUE, size=6) +
ylab("Estimated Model Frequency") + xlab("") + coord_flip() +
theme_bw(base_size=20) + theme(axis.title.x=element_text(margin = margin(t = 15)))
dev.off()
library(ggplot2)
# v3: bars with the BOR shown in a boxed geom_label.
pdf("SCEPTIC Main BMC v3.pdf", width=6, height=4)
ggplot(df, aes(x=m_ordered, y=freq, ymin=freq-se, ymax=freq+se)) + geom_bar(stat="identity", fill="grey92", color="black") + geom_errorbar(width=0.5) +
annotate("text", x=7, y=0.54, label="EP = 1.0", hjust=0, vjust=0.5, size=4.5) + # ylim(-0.05,1.1) +
geom_label(mapping=aes(x=x,y=y,label=label, ymin=NULL, ymax=NULL),
data=data.frame(x=0.8, y=0.47, label=as.character(expression(paste("BOR < ",10^{-49})))),
hjust=0, vjust=0, parse=TRUE, size=6) +
ylab("Estimated Model Frequency") + xlab("") + coord_flip() +
theme_bw(base_size=20) + theme(axis.title.x=element_text(margin = margin(t = 15)))
dev.off()
# v4: as v3 but with horizontal gridlines removed.
pdf("SCEPTIC Main BMC v4.pdf", width=6, height=4)
ggplot(df, aes(x=m_ordered, y=freq, ymin=freq-se, ymax=freq+se)) + geom_bar(stat="identity", fill="grey92", color="black") + geom_errorbar(width=0.5) +
annotate("text", x=7, y=0.54, label="EP = 1.0", hjust=0, vjust=0.5, size=4.5) + # ylim(-0.05,1.1) +
geom_label(mapping=aes(x=x,y=y,label=label, ymin=NULL, ymax=NULL),
data=data.frame(x=0.8, y=0.47, label=as.character(expression(paste("BOR < ",10^{-49})))),
hjust=0, vjust=0, parse=TRUE, size=6) +
ylab("Estimated Model Frequency") + xlab("") + coord_flip() +
theme_bw(base_size=20) + theme(axis.title.x=element_text(margin = margin(t = 15)),
panel.grid.major.y=element_blank(), panel.grid.minor.y=element_blank())
dev.off()
# v5: final point-range style (note the BOR exponent differs from the May2017
# version above; these were produced from different model sets).
pdf("SCEPTIC Main BMC v5.pdf", width=6, height=4)
ggplot(df, aes(x=m_ordered, y=freq, ymin=freq-se, ymax=freq+se)) + geom_pointrange(stat="identity", size=1.3, fatten=2.5) +
annotate("text", x=7, y=0.54, label="EP = 1.0", hjust=0, vjust=0.5, size=4.5) + # ylim(-0.05,1.1) +
geom_label(mapping=aes(x=x,y=y,label=label, ymin=NULL, ymax=NULL),
data=data.frame(x=0.8, y=0.47, label=as.character(expression(paste("BOR < ",10^{-43})))),
hjust=0, vjust=0, parse=TRUE, size=6, label.padding = unit(0.4, "lines")) +
ylab("Estimated Model Frequency") + xlab("") + coord_flip() +
theme_bw(base_size=20) + theme(axis.title.x=element_text(margin = margin(t = 15)),
panel.grid.major.y=element_blank(), panel.grid.minor.y=element_blank()) +
scale_y_continuous(breaks=c(0,0.25, 0.5, 0.75), labels=c("0", ".25", ".5", ".75"))
dev.off()
#ar1 and schoenberg results
# BMC results for model variants that add an AR(1) or Schoenberg choice rule.
arfreqs <- readMat("ar_modelfreqs_Sep2016.mat")
#manual entry from Jon email 19Sep2016
mnames <- c("Fixed LR V", "Fixed LR U + V", "Fixed LR V Sel. Maint.", "KF V", "KF Process Noise", "KF U + V", "KF Volatility")
# NOTE(review): ordered() without `levels=` sorts levels alphabetically, so the
# axis order here is alphabetical rather than the entry order of mnames — confirm
# that is intended.
df <- data.frame(model=ordered(mnames), freq=arfreqs$ar1Ef, se=sqrt(diag(arfreqs$ar1Vf)))
#for ggplot with coord_flip, need to reverse
df$m_ordered <- factor(df$model, levels=rev(levels(df$model)))
pdf("Ar1 Main BMC v5.pdf", width=6, height=4.3)
ggplot(df, aes(x=m_ordered, y=freq, ymin=freq-se, ymax=freq+se)) + geom_pointrange(stat="identity", size=1.3, fatten=2.5) +
annotate("text", x=5, y=0.45, label="EP = 1.0", hjust=0, vjust=0.5, size=4.5) + # ylim(-0.05,1.1) +
geom_label(mapping=aes(x=x,y=y,label=label, ymin=NULL, ymax=NULL),
data=data.frame(x=0.8, y=0.40, label=as.character(expression(paste("BOR < ",10^{-32})))),
hjust=0, vjust=0, parse=TRUE, size=6, label.padding = unit(0.4, "lines")) +
ylab("Estimated Model Frequency") + xlab("Includes AR(1) choice") + coord_flip() +
theme_bw(base_size=20) + theme(axis.title.x=element_text(margin = margin(t = 15)),
panel.grid.major.y=element_blank(), panel.grid.minor.y=element_blank()) +
scale_y_continuous(breaks=c(0,0.25, 0.5, 0.75), labels=c("0", ".25", ".5", ".75"))
dev.off()
#manual entry from Jon email 19Sep2016
# Same figure for the Schoenberg choice-rule variant (schEf/schVf).
df <- data.frame(model=ordered(mnames), freq=arfreqs$schEf, se=sqrt(diag(arfreqs$schVf)))
#for ggplot with coord_flip, need to reverse
df$m_ordered <- factor(df$model, levels=rev(levels(df$model)))
pdf("Scho Main BMC v5.pdf", width=6, height=4.3)
ggplot(df, aes(x=m_ordered, y=freq, ymin=freq-se, ymax=freq+se)) + geom_pointrange(stat="identity", size=1.3, fatten=2.5) +
annotate("text", x=5, y=0.45, label="EP = 1.0", hjust=0, vjust=0.5, size=4.5) + # ylim(-0.05,1.1) +
geom_label(mapping=aes(x=x,y=y,label=label, ymin=NULL, ymax=NULL),
data=data.frame(x=0.8, y=0.43, label=as.character(expression(paste("BOR < ",10^{-37})))),
hjust=0, vjust=0, parse=TRUE, size=6, label.padding = unit(0.4, "lines")) +
ylab("Estimated Model Frequency") + xlab("Includes Schoenberg choice") + coord_flip() +
theme_bw(base_size=20) + theme(axis.title.x=element_text(margin = margin(t = 15)),
panel.grid.major.y=element_blank(), panel.grid.minor.y=element_blank()) +
scale_y_continuous(breaks=c(0,0.25, 0.5, 0.75), labels=c("0", ".25", ".5", ".75"))
dev.off()
#frank TC (replicate v5 above)
# BMC for the Frank TC nested model family (see the identical figure code near
# the top of this file).
frank_bmcef <- readMat("elife_bmc_franktc_frequencies.mat")
df <- data.frame(model=unlist(frank_bmcef$models), freq=frank_bmcef$Ef, se=sqrt(diag(frank_bmcef$Vf)))
df$modelmath <- ordered(df$model, levels=c("K", "K_Lambda", "K_Lambda_Nu", "K_Lambda_Nu_AlphaG",
"K_Lambda_Nu_AlphaG_AlphaN", "K_Lambda_Nu_AlphaG_AlphaN_Rho", "K_Lambda_Nu_AlphaG_AlphaN_Rho_Epsilon"))
#for ggplot with coord_flip, need to reverse
df$modelmath <- factor(df$modelmath, levels=rev(levels(df$modelmath)))
pdf("Frank TC BMC v5.pdf", width=6, height=4)
ggplot(df, aes(x=modelmath, y=freq, ymin=freq-se, ymax=freq+se)) + geom_pointrange(stat="identity", size=1.3, fatten=2.5) +
annotate("text", x=5, y=0.63, label="EP = 1.0", hjust=0, vjust=0.5, size=4.5) + # ylim(-0.05,1.1) +
geom_label(mapping=aes(x=x,y=y,label=label, ymin=NULL, ymax=NULL),
data=data.frame(x=0.8, y=0.60, label=as.character(expression(paste("BOR < ",10^{-35})))),
hjust=0, vjust=0, parse=TRUE, size=6, label.padding = unit(0.4, "lines")) +
ylab("Estimated Model Frequency") + xlab("") + coord_flip() +
theme_bw(base_size=20) + theme(axis.title.x=element_text(margin = margin(t = 15)),
axis.title.y=element_text(margin = margin(r = 15)),
panel.grid.major.y=element_blank(), panel.grid.minor.y=element_blank()) +
scale_y_continuous(breaks=c(0,0.25, 0.5, 0.75), labels=c("0", ".25", ".5", ".75")) +
scale_x_discrete("Parameter added to TC", labels=rev(expression(K, lambda, nu, alpha[G], alpha[N], rho, epsilon)))
#scale_x_discrete("test", labels=c(expression(alpha), expression(beta)))
dev.off()
|
invisible('
v2 uses linear interpolation function for transmission rates
')
# Model 3 is the same as Model 2. However, self-reported gpm was removed from
# the tree. This was done to see whether this could be influencing the parameter
# estimation. As some self-reported gpm could be in fact msm.
library(BayesianTools)
library(akima)
library(phydynR)
library(senegalHIVmodel)
# print R session Info. List R version and loaded packages,
# and information on the operating system (OS)
sessionInfo()
# Choose and set a seed for all analysis.
# Helpful for reproducing results
# (the seed itself is drawn randomly, then logged via message so a run can be repeated)
seed <- as.integer(runif(n = 1, min = 1, max = 10000))
message(seed)
set.seed(seed)
# gpm = general population males
# gpf = general population females
# msm = men who have sex with men
# src = source (sequences closely related to the population being studied but that are from other countries)
demes <- c('gpm', 'gpf', 'msm', 'src')
# Sets the equations of the model (birth, death and migration rates are pre-filled with zeros)
eqns <- setup.model.equations(demes)
# attach() exposes eqns$births/deaths/migs by name for the rate definitions below.
attach(eqns)
# These are the values used for the simulations
# Initial time for simulations
T0 <- 1978
# Final time for simulations
T1 <- 2014
# Duration of infection. In our model we assumed 1 stage of infection
GAMMA <- 1/10
# parameter template:
# gpsp0, gpsp1, gpsp2, and gpsploc are the necessary parameters to estimate the
# spline function (gpspline) for the general population (gp)
# msmsp0, msmsp1, msmsp2, msmsploc are the necessary parameters to estimate the
# spline function (msmspline) for the msm risk group
# maleX is the ratio of infectiouness of males to females
# import is the importation rate of HIV from other countries to Senegal
# srcNe is the effective population size (Ne) of the src deme, used in the
#   coalescent rate f = (1/2)*(Y^2)/Ne defined below
# pmsm2msm is the probability of msm to infect another msm
# pgpf2gpm is the probability of a female from the gp to infect a male from the gp
# initmsm is the initial size of infected msm which is 1
# initgp is the initial size of infected gp which is 1
THETA <- list(
gpsp0 = 6/10,
gpsp1 = 4/10,
gpsp2 = 1/10,
gpsploc = 1987,
msmsp0 = 4/10,
msmsp1 = 4/10,
msmsp2 = 2/10,
msmsploc = 1995,
maleX = 1.02,
import = 1/20,
srcNe = 20,
# Piecewise-linear transmission rate for the general population: interpolates
# through (T0, gpsp0), (gpsploc, gpsp1), (T1, gpsp2), floored at 0.025.
gpspline = function( t, parms ){
if (t < T0 ) return( parms$gpsp0 )
if (t > T1) return (parms$gpsp2)
with(parms, pmax(0.025, approx( x = c(T0, gpsploc, T1), y=c(gpsp0, gpsp1, gpsp2) , xout = t, rule = 2)$y) )
},
# Same interpolation scheme for the msm transmission rate.
msmspline = function( t, parms){
if (t < T0 ) return( parms$msmsp0 )
if (t > T1) return ( parms$msmsp2 )
with(parms, pmax(0.025, approx( x = c(T0, msmsploc, T1), y=c(msmsp0, msmsp1, msmsp2) , xout = t, rule = 2)$y) )
},
pmsm2msm = 0.85,
pgpf2gpm = 0.85,
initmsm = 1,
initgp = 1
)
# arbitrary large number > A(t) forall t
# (<<- writes SRCSIZE to the global environment so the rate strings below can see it)
SRCSIZE <<- 1e5
# X0 is the initial conditions for the 4 demes (gpf, gpm, msm, src)
# The initial infected gp population is split evenly between males and females.
X0 <- c(gpm = unname(THETA$initgp/2),
gpf = unname(THETA$initgp/2),
msm = unname(THETA$initmsm),
src = SRCSIZE)
# Because there are 4 demes in this model, the birth matrix is a 4 x 4 matrix
# Each element in the matrix is a string that will be passed as R code
# births[i, j]: rate at which deme i generates new infections in deme j.
births['msm', 'msm'] <- 'parms$msmspline(t, parms) * msm * parms$pmsm2msm'
births['msm', 'gpf'] <- 'parms$msmspline(t, parms) * msm * (1-parms$pmsm2msm)'
births['gpm', 'gpf'] <- 'parms$gpspline(t, parms) * gpm * parms$maleX'
births['gpf', 'gpm'] <- 'parms$gpspline(t, parms) * gpf * parms$pgpf2gpm'
births['gpf', 'msm'] <- 'parms$gpspline(t, parms) * gpf * (1-parms$pgpf2gpm)'
# f = (1/2)*(Y^2)/Ne
births['src', 'src'] <- '0.5 * SRCSIZE^2 / parms$srcNe'
# Migrations is also a 4 x 4 matrix because we have 4 demes
# Symmetric importation between src and each Senegalese deme, scaled by deme size.
migs['src', 'gpm'] <- 'parms$import * gpm'
migs['src', 'gpf'] <- 'parms$import * gpf'
migs['src', 'msm'] <- 'parms$import * msm'
migs['gpm', 'src'] <- 'parms$import * gpm'
migs['gpf', 'src'] <- 'parms$import * gpf'
migs['msm', 'src'] <- 'parms$import * msm'
# Deaths is a vector giving the rate at which a lineage in each deme dies
deaths['msm'] <- 'GAMMA * msm'
deaths['gpf'] <- 'GAMMA * gpf'
deaths['gpm'] <- 'GAMMA * gpm'
deaths['src'] <- '0.5 * SRCSIZE^2 / parms$srcNe'
#sde = FALSE means that an ordinary differential equation model will be constructed
# build the demographic process to be used in the coalescent analysis
dm <- build.demographic.process(births = births,
deaths = deaths,
migrations = migs,
parameterNames = names(THETA),
rcpp = FALSE,
sde = FALSE)
#show.demographic.process( dm, x0 = X0, t0 = 1980, t1 = 2014, theta = THETA )
#o <- dm( x0 = X0, t0 = 1980, t1 = 2014, theta = THETA )[[5]]
#print(o)
# mean of the prevalence statistics
MEAN_PREV_STAT <- NA
# standard deviation of the prevalence statistics
SD_PREV_STAT <- NA
#run the function to set the mean and sd for prevalence
# (set_prev_state_parameters comes from the senegalHIVmodel package and is
# expected to populate the two globals above)
set_prev_state_parameters()
| /analyses/scripts/R-scripts/Models/Subtypes_Combined/Model6/1.model.v6.R | no_license | thednainus/senegalHIVmodel | R | false | false | 4,918 | r | invisible('
v2 uses linear interpolation function for transmission rates
')
# Model 3 is the same as Model 2. However, self-reported gpm was removed from
# the tree. This was done to see whether this could be influencing the parameter
# estimation, as some self-reported gpm could in fact be msm.
library(BayesianTools)
library(akima)
library(phydynR)
library(senegalHIVmodel)
# Print R session info: R version, loaded packages,
# and information on the operating system (OS).
sessionInfo()
# Choose and record a random seed for the whole analysis
# (printed via message() so the run can be reproduced later).
seed <- as.integer(runif(n = 1, min = 1, max = 10000))
message(seed)
set.seed(seed)
# Deme labels:
# gpm = general population males
# gpf = general population females
# msm = men who have sex with men
# src = source (sequences closely related to the population being studied,
#       but sampled from other countries)
demes <- c('gpm', 'gpf', 'msm', 'src')
# Set up the model equations (birth, death and migration rate
# matrices/vectors pre-filled with zeros, to be overwritten below).
eqns <- setup.model.equations(demes)
# NOTE(review): attach() is generally discouraged; it is used here to expose
# `births`, `migs` and `deaths` from eqns directly in the global environment.
attach(eqns)
# These are the values used for the simulations.
# Initial time for simulations:
T0 <- 1978
# Final time for simulations:
T1 <- 2014
# Reciprocal duration of infection. The model assumes 1 stage of infection.
GAMMA <- 1/10
# Parameter template:
# gpsp0, gpsp1, gpsp2, and gpsploc are the parameters of the piecewise-linear
#   transmission-rate function (gpspline) for the general population (gp)
# msmsp0, msmsp1, msmsp2, msmsploc are the parameters of the corresponding
#   function (msmspline) for the msm risk group
# maleX is the ratio of infectiousness of males to females
# import is the importation rate of HIV from other countries to Senegal
# srcNe scales the coalescent rate of the src deme (used in the
#   births['src','src'] term 0.5 * SRCSIZE^2 / srcNe); effectively the
#   effective population size of the source -- TODO confirm
# pmsm2msm is the probability of an msm infecting another msm
# pgpf2gpm is the probability of a gp female infecting a gp male
# initmsm is the initial number of infected msm, which is 1
# initgp is the initial number of infected gp, which is 1
THETA <- list(
gpsp0 = 6/10,
gpsp1 = 4/10,
gpsp2 = 1/10,
gpsploc = 1987,
msmsp0 = 4/10,
msmsp1 = 4/10,
msmsp2 = 2/10,
msmsploc = 1995,
maleX = 1.02,
import = 1/20,
srcNe = 20,
gpspline = function( t, parms ){
if (t < T0 ) return( parms$gpsp0 )
if (t > T1) return (parms$gpsp2)
with(parms, pmax(0.025, approx( x = c(T0, gpsploc, T1), y=c(gpsp0, gpsp1, gpsp2) , xout = t, rule = 2)$y) )
},
msmspline = function( t, parms){
if (t < T0 ) return( parms$msmsp0 )
if (t > T1) return ( parms$msmsp2 )
with(parms, pmax(0.025, approx( x = c(T0, msmsploc, T1), y=c(msmsp0, msmsp1, msmsp2) , xout = t, rule = 2)$y) )
},
pmsm2msm = 0.85,
pgpf2gpm = 0.85,
initmsm = 1,
initgp = 1
)
# Arbitrary large number > A(t) for all t (size of the source deme).
SRCSIZE <<- 1e5
# X0 holds the initial conditions for the 4 demes (gpm, gpf, msm, src):
# the initial gp infections are split evenly between males and females.
X0 <- c(gpm = unname(THETA$initgp/2),
gpf = unname(THETA$initgp/2),
msm = unname(THETA$initmsm),
src = SRCSIZE)
# Because there are 4 demes in this model, the birth matrix is a 4 x 4 matrix.
# Each element is a string that phydynR evaluates as R code at solve time.
births['msm', 'msm'] <- 'parms$msmspline(t, parms) * msm * parms$pmsm2msm'
births['msm', 'gpf'] <- 'parms$msmspline(t, parms) * msm * (1-parms$pmsm2msm)'
births['gpm', 'gpf'] <- 'parms$gpspline(t, parms) * gpm * parms$maleX'
births['gpf', 'gpm'] <- 'parms$gpspline(t, parms) * gpf * parms$pgpf2gpm'
births['gpf', 'msm'] <- 'parms$gpspline(t, parms) * gpf * (1-parms$pgpf2gpm)'
# f = (1/2)*(Y^2)/Ne  (coalescent rate within the source deme)
births['src', 'src'] <- '0.5 * SRCSIZE^2 / parms$srcNe'
# Migrations is also a 4 x 4 matrix because we have 4 demes; flows between
# src and each in-country deme are symmetric at rate `import`.
migs['src', 'gpm'] <- 'parms$import * gpm'
migs['src', 'gpf'] <- 'parms$import * gpf'
migs['src', 'msm'] <- 'parms$import * msm'
migs['gpm', 'src'] <- 'parms$import * gpm'
migs['gpf', 'src'] <- 'parms$import * gpf'
migs['msm', 'src'] <- 'parms$import * msm'
# Deaths is a vector giving the rate at which a lineage in each deme dies.
deaths['msm'] <- 'GAMMA * msm'
deaths['gpf'] <- 'GAMMA * gpf'
deaths['gpm'] <- 'GAMMA * gpm'
deaths['src'] <- '0.5 * SRCSIZE^2 / parms$srcNe'
# sde = FALSE means an ordinary differential equation model is constructed.
# Build the demographic process to be used in the coalescent analysis.
dm <- build.demographic.process(births = births,
deaths = deaths,
migrations = migs,
parameterNames = names(THETA),
rcpp = FALSE,
sde = FALSE)
#show.demographic.process( dm, x0 = X0, t0 = 1980, t1 = 2014, theta = THETA )
#o <- dm( x0 = X0, t0 = 1980, t1 = 2014, theta = THETA )[[5]]
#print(o)
# Mean of the prevalence statistics (filled in by set_prev_state_parameters()).
MEAN_PREV_STAT <- NA
# Standard deviation of the prevalence statistics.
SD_PREV_STAT <- NA
# Run the function that sets the mean and sd for prevalence.
set_prev_state_parameters()
|
source("common_functions.R")
source("set_config.R")
# Load the data frame only if it is not already in the workspace.
# exists(..., inherits = FALSE) is the idiomatic replacement for the original
# any(ls() == "powerConsumptionData"), which rescanned the full environment
# listing; inherits = FALSE restricts the check to the current (global)
# environment, matching the original ls() semantics.
# `datafile` and `analDates` are expected to come from set_config.R.
if (!exists("powerConsumptionData", inherits = FALSE)) {
  if (file.exists(datafile)) {
    powerConsumptionData <- LoadPowerConsumptionData(datafile, analDates)
  }
}
# If the power consumption data is loaded, generate the corresponding plot.
if (exists("powerConsumptionData", inherits = FALSE)) {
  # Refer to common_functions.R for the source code of DisplayPlot4
  DisplayPlot4(powerConsumptionData, "plot4.png")
}
| /plot4.R | no_license | rupendrab/ExData_Plotting1 | R | false | false | 545 | r | source("common_functions.R")
source("set_config.R")
# Load the data frame if it is not loaded already
if (! any(ls() == "powerConsumptionData")) {
if (file.exists(datafile)) {
powerConsumptionData <- LoadPowerConsumptionData(datafile, analDates)
}
}
# If the power consumption data is loaded, generate the corresponding plot
if (any(ls() == "powerConsumptionData")) {
# Refer to common_functions.R for the source code of DisplayPlot4 function
DisplayPlot4(powerConsumptionData, "plot4.png")
}
|
# Visualize the power of a one-sample z test.
# Produces a three-panel figure: a text summary of the inputs, the sampling
# distribution under H0 with the alpha rejection region shaded, and the
# sampling distribution under HA with the power (1 - beta) region shaded.
#
# Arguments:
#   alpha  - type I error rate
#   sigma  - population standard deviation
#   n      - sample size
#   effect - effect size (its absolute value is taken; the sign is handled
#            per tail of the test)
#   test   - tail of the test: "lower", "upper", or "two"
#   xlim   - x-axis limits shared by the two distribution plots
#   strict - two-tailed tests only: passed to power.z.test to request the
#            "strict" interpretation of power -- see ?power.z.test
# Relies on power.z.test() and shade.norm() from the same package.
see.power<-function(alpha=NULL,sigma=NULL,n=NULL,effect=NULL,test="lower",xlim=c(-3,3),strict=FALSE){
# Plot titles for each tail variant (upper.* = H0 panel, lower.* = HA panel;
# suffix l/u/b = lower-, upper-, both-tailed).
upper.titlel<-bquote(paste("Distribution assuming ",H[0],": ",mu >= 0))
upper.titleu<-bquote(paste("Distribution assuming ",H[0],": ",mu <= 0))
upper.titleb<-bquote(paste("Distribution assuming ",H[0],": ",mu," = 0"))
lower.titlel<-bquote(paste("Distribution assuming ",H[A],": ",mu," < 0"))
lower.titleu<-bquote(paste("Distribution assuming ",H[A],": ",mu," > 0"))
lower.titleb<-bquote(paste("Distribution assuming ",H[A],": ",mu != 0))
effect=abs(effect)
# Three panels stacked vertically: 1 row for the text summary,
# 2 rows each for the H0 and HA distributions.
layout(matrix(c(1,rep(2,2),rep(3,2)), 5, 1, byrow = TRUE))
par(mar=c(4, 4, 2, 1))
# ---- lower-tailed test ----
if(test == "lower"){
dev.hold()
# Power for the requested alpha/sigma/effect/n (one-tailed).
powerp<-power.z.test(alpha=alpha,sigma=sigma,effect=effect,power=NULL,n=n,test="one.tail")$power
power1<-round(powerp,3)
# Critical value under H0 and the matching cutoff under HA.
qalpha<-qnorm(alpha,mean=0,sd=sigma/sqrt(n))
qpower<-qnorm(powerp,mean=-1*effect,sd=sigma/sqrt(n))
# Panel 1: blank plot used purely as a canvas for the text summary.
plot(seq(0,1),seq(0,1),type="n",xaxt="n",yaxt="n",bty="n",xlab="",ylab="")
text(.5,0.8,bquote(paste(alpha," = ",.(alpha),", 1 - ",beta," = ", .(power1),", ",italic(n)," = ",.(n),", ",sigma," = ",.(sigma))),cex=2)
text(.5,0.14,bquote(paste("effect size = ",.(effect),", 'tail' of test = '", .(test),"'")),cex=2)
# Panel 2: H0 distribution with the alpha region shaded.
shade.norm(x=qalpha,sigma=sigma/sqrt(n),mu=0,tail="lower",show.p=FALSE,show.d=FALSE,show.dist=TRUE,shade.col="lightyellow2",main=upper.titlel,xlim=xlim)
abline(v=qalpha,col=1)
legend("topright",pch=22,pt.bg="lightyellow2",pt.cex=2,legend=expression(alpha))
text(qalpha,0.5*dnorm(0,mean=0,sd=sigma/sqrt(n)),"rejection region \u2190", adj = 1)
# Panel 3: HA distribution with the power region shaded.
shade.norm(x=qpower,sigma=sigma/sqrt(n),mu=-1*effect,tail="lower",show.p=FALSE,show.d=FALSE,show.dist=TRUE,shade.col="lightcoral" ,main=lower.titlel,xlim=xlim)
abline(v=qpower,col=1)
legend("topright",pch=22,pt.bg="lightcoral",pt.cex=2,legend=expression(paste("1-",beta)))
text(qpower,0.5*dnorm(effect,mean=effect,sd=sigma/sqrt(n)),"rejection region \u2190", adj = 1)
dev.flush()
}
# ---- upper-tailed test (mirror image of the lower-tailed case) ----
if(test == "upper"){
dev.hold()
powerp<-power.z.test(alpha=alpha,sigma=sigma,effect=effect,power=NULL,n=n,test="one.tail")$power
power1<-round(powerp,3)
qalpha<-qnorm(1-alpha,mean=0,sd=sigma/sqrt(n))
qpower<-qnorm(1-powerp,mean=effect,sd=sigma/sqrt(n))
plot(seq(0,1),seq(0,1),type="n",xaxt="n",yaxt="n",bty="n",xlab="",ylab="")
text(.5,0.8,bquote(paste(alpha," = ",.(alpha),", 1 - ",beta," = ", .(power1),", ",italic(n)," = ",.(n),", ",sigma," = ",.(sigma))),cex=2)
text(.5,0.14,bquote(paste("effect size = ",.(effect),", 'tail' of test = '", .(test),"'")),cex=2)
shade.norm(x=qalpha,sigma=sigma/sqrt(n),mu=0,tail="upper",show.p=FALSE,show.d=FALSE,show.dist=TRUE,shade.col="lightyellow2",main=upper.titleu,xlim=xlim)
abline(v=qalpha,col=1)
legend("topright",pch=22,pt.bg="lightyellow2",pt.cex=2,legend=expression(alpha))
text(qalpha,0.5*dnorm(0,mean=0,sd=sigma/sqrt(n)),"\u2192 rejection region", adj = 0)
shade.norm(x=qpower,sigma=sigma/sqrt(n),mu=effect,tail="upper",show.p=FALSE,show.d=FALSE,show.dist=TRUE,shade.col="lightcoral" ,main=lower.titleu,xlim=xlim)
abline(v=qpower,col=1)
legend("topright",pch=22,pt.bg="lightcoral",pt.cex=2,legend=expression(paste("1-",beta)))
text(qpower,0.5*dnorm(effect,mean=effect,sd=sigma/sqrt(n)),"\u2192 rejection region", adj = 0)
dev.flush()
}
# ---- two-tailed test ----
if(test == "two"){
dev.hold()
powerp<-power.z.test(alpha=alpha,sigma=sigma,effect=effect,power=NULL,n=n,test="two.tail")$power
if(strict == TRUE)powerp<-power.z.test(alpha=alpha,sigma=sigma,effect=effect,power=NULL,n=n,test="two.tail",strict=TRUE)$power
power1<-round(powerp,3)
qalpha<-qnorm(1-(alpha/2),mean=0,sd=sigma/sqrt(n))
# NOTE(review): qpower is always computed from the NON-strict power
# (power.z.test is re-invoked here without strict), even when strict = TRUE
# above; only the displayed power1 reflects strict. Confirm this is intended.
qpower<-qnorm(1-power.z.test(alpha=alpha,sigma=sigma,effect=effect,power=NULL,n=n,test="two.tail")$power,mean=effect,sd=sigma/sqrt(n))
plot(seq(0,1),seq(0,1),type="n",xaxt="n",yaxt="n",bty="n",xlab="",ylab="")
text(.5,0.8,bquote(paste(alpha," = ",.(alpha),", 1 - ",beta," = ", .(power1),", ",italic(n)," = ",.(n),", ",sigma," = ",.(sigma))),cex=2)
text(.5,0.14,bquote(paste("effect size = ",.(effect),", 'tail' of test = '", .(test),"'")),cex=2)
shade.norm(x=qalpha,sigma=sigma/sqrt(n),mu=0,tail="two",show.p=FALSE,show.d=FALSE,show.dist=TRUE,shade.col="lightyellow2",main=upper.titleb,xlim=xlim)
abline(v=qalpha,col=1)
abline(v=-qalpha,col=1)
legend("topright",pch=22,pt.bg="lightyellow2",pt.cex=2,legend=expression(alpha))
text(qalpha,0.5*dnorm(0,mean=0,sd=sigma/sqrt(n)),"\u2192 rejection region", adj = 0)
text(-qalpha,0.5*dnorm(0,mean=0,sd=sigma/sqrt(n)),"rejection region \u2190", adj = 1)
shade.norm(x=qpower,sigma=sigma/sqrt(n),mu=effect,tail="two",show.p=FALSE,show.d=FALSE,show.dist=TRUE,shade.col="lightcoral" ,main=lower.titleb,xlim=xlim)
abline(v=qpower,col=1)
abline(v=-qpower,col=1)
legend("topright",pch=22,pt.bg="lightcoral",pt.cex=2,legend=expression(paste("1-",beta)))
text(qpower,0.5*dnorm(effect,mean=effect,sd=sigma/sqrt(n)),"\u2192 rejection region", adj = 0)
text(-qpower,0.5*dnorm(0,mean=0,sd=sigma/sqrt(n)),"rejection region \u2190", adj = 1)
dev.flush()
}
}
# Tcl/Tk GUI wrapper around see.power().
# Builds a window with radio buttons (tail of test, strict), sliders
# (alpha, sigma, n, effect size) and entry boxes (x-axis limits); every
# widget change re-draws the power figure via see.power().
see.power.tck<-function ()
{
# Recreate the shared environment used to hold the Tk control variables.
# NOTE(review): the unbraced `if` guards only `slider.env <- NULL`; the
# rm()/new.env() statements after the semicolons run unconditionally.
if (!exists("slider.env"))
slider.env <- NULL; suppressWarnings(rm(slider.env)); slider.env <<- new.env()# Dummy to trick R CMD check
# Initial values for the controls.
alpha <- 0.05
sigma <- 1.5
effect <-1
n <- 5
test <-tclVar("lower")
strict <- tclVar("FALSE")
# Register the slider-backed values as Tk variables inside slider.env.
assign("alpha", tclVar(alpha),envir= slider.env)
assign("sigma", tclVar(sigma),envir= slider.env)
assign("effect", tclVar(effect),envir= slider.env)
assign("n", tclVar(n),envir= slider.env)
xmin <- -3
assign("xmin", tclVar(xmin),envir= slider.env)
xmax <- 3
assign("xmax", tclVar(xmax),envir= slider.env)
# Callback: read every control out of slider.env and redraw the plot.
norm.refresh <- function(...) {
alpha <- as.numeric(evalq(tclvalue(alpha),envir= slider.env))
sigma <- as.numeric(evalq(tclvalue(sigma),envir= slider.env))
n <- as.numeric(evalq(tclvalue(n),envir= slider.env))
effect <- as.numeric(evalq(tclvalue(effect),envir= slider.env))
xmin <- as.numeric(evalq(tclvalue(xmin),envir= slider.env))
xmax <- as.numeric(evalq(tclvalue(xmax),envir= slider.env))
xixa <- c(xmin, xmax)
test <- tclvalue(test)
strict <- tclvalue(strict)
see.power(alpha=alpha,sigma=sigma,n=n,effect=effect,xlim=xixa,test=test, strict=strict)
}
# Top-level window and title.
m <- tktoplevel()
tkwm.title(m, "Visualizing Power")
tkpack(tklabel(m,text=" Visualizing Power "))
tkwm.geometry(m, "+0+0")
tkpack(tklabel(m,text=" "))
# Radio-button groups: tail of test (left) and strict flag (right).
tkpack(fr <- tkframe(m), side = "top", anchor = "w")
tkpack(fr1 <- tkframe(fr), side = "left",anchor = "w")
tkpack(fr2 <- tkframe(fr), side = "right", anchor = "e")
tkpack(tklabel(fr1, text = " Test: "), side = "left")
for ( i in c("lower", "upper", "two")){
tmp <- tkradiobutton(fr1, text=i, variable=test, value=i)
tkpack(tmp,anchor="w")
}
tkpack(tklabel(fr2, text = " Strict: "), side = "left")
for ( i in c("TRUE", "FALSE")){
tmp <- tkradiobutton(fr2, text=i, variable=strict, value=i)
tkpack(tmp,anchor="w")
}
# Slider for alpha (label is the Greek letter, "\u03b1").
tkpack(fr <- tkframe(m), side = "top")
tkpack(tklabel(fr, text = '\u03b1',font=c("Helvetica","10","italic"), width = "20"),
side = "right")
tkpack(sc <- tkscale(fr, command = norm.refresh, from = 0.01,
to = 1, orient = "horiz", resolution = 0.01, showvalue = TRUE),
side = "left")
assign("sc", sc,envir= slider.env)
evalq(tkconfigure(sc, variable = alpha),envir= slider.env)
# Slider for sigma ("\u03c3").
tkpack(fr <- tkframe(m), side = "top")
tkpack(tklabel(fr, text = '\u03c3',font=c("Helvetica","10","italic"), width = "20"), side = "right")
tkpack(sc <- tkscale(fr, command = norm.refresh, from = 0.5,
to = 3, orient = "horiz", resolution = 0.1, showvalue = TRUE),
side = "left")
assign("sc", sc,envir= slider.env)
evalq(tkconfigure(sc, variable = sigma),envir= slider.env)
# Slider for the sample size n.
tkpack(fr <- tkframe(m), side = "top")
tkpack(tklabel(fr, text = "n",font=c("Helvetica","10","italic"), width = "20"), side = "right")
tkpack(sc <- tkscale(fr, command = norm.refresh, from = 1,
to = 20, orient = "horiz", resolution = 1, showvalue = TRUE),
side = "left")
assign("sc", sc,envir= slider.env)
evalq(tkconfigure(sc, variable = n),envir= slider.env)
# Slider for the effect size.
tkpack(fr <- tkframe(m), side = "top")
tkpack(tklabel(fr, text = "Effect size", font=c("Helvetica","10"),width = "20"), side = "right")
tkpack(sc <- tkscale(fr, command = norm.refresh, from = 0,
to = 3, orient = "horiz", resolution = .1, showvalue = TRUE),
side = "left")
assign("sc", sc,envir= slider.env)
evalq(tkconfigure(sc, variable = effect),envir= slider.env)
# Entry boxes for the x-axis limits.
tkpack(tklabel(m,text=" "))
tkpack(fr <- tkframe(m), side = "top")
tkpack(tklabel(fr, text = "Xmin:", width = 6), side = "left")
tkpack(e <- tkentry(fr, width = 8), side = "left")
assign("e", e,envir= slider.env)
evalq(tkconfigure(e, textvariable = xmin),envir= slider.env)
tkpack(tklabel(fr, text = "Xmax:", width = 6), side = "left")
tkpack(e <- tkentry(fr, width = 8), side = "left")
assign("e", e,envir= slider.env)
evalq(tkconfigure(e, textvariable = xmax),envir= slider.env)
# Refresh / Exit buttons.
tkpack(tkbutton(m, text = "Refresh", command = norm.refresh),
side = "left")
tkpack(tkbutton(m, text = "Exit", command = function() tkdestroy(m)),
side = "right")
}
| /R/see.power.r | no_license | neiljun/asbio | R | false | false | 10,092 | r | see.power<-function(alpha=NULL,sigma=NULL,n=NULL,effect=NULL,test="lower",xlim=c(-3,3),strict=FALSE){
upper.titlel<-bquote(paste("Distribution assuming ",H[0],": ",mu >= 0))
upper.titleu<-bquote(paste("Distribution assuming ",H[0],": ",mu <= 0))
upper.titleb<-bquote(paste("Distribution assuming ",H[0],": ",mu," = 0"))
lower.titlel<-bquote(paste("Distribution assuming ",H[A],": ",mu," < 0"))
lower.titleu<-bquote(paste("Distribution assuming ",H[A],": ",mu," > 0"))
lower.titleb<-bquote(paste("Distribution assuming ",H[A],": ",mu != 0))
effect=abs(effect)
layout(matrix(c(1,rep(2,2),rep(3,2)), 5, 1, byrow = TRUE))
par(mar=c(4, 4, 2, 1))
if(test == "lower"){
dev.hold()
powerp<-power.z.test(alpha=alpha,sigma=sigma,effect=effect,power=NULL,n=n,test="one.tail")$power
power1<-round(powerp,3)
qalpha<-qnorm(alpha,mean=0,sd=sigma/sqrt(n))
qpower<-qnorm(powerp,mean=-1*effect,sd=sigma/sqrt(n))
plot(seq(0,1),seq(0,1),type="n",xaxt="n",yaxt="n",bty="n",xlab="",ylab="")
text(.5,0.8,bquote(paste(alpha," = ",.(alpha),", 1 - ",beta," = ", .(power1),", ",italic(n)," = ",.(n),", ",sigma," = ",.(sigma))),cex=2)
text(.5,0.14,bquote(paste("effect size = ",.(effect),", 'tail' of test = '", .(test),"'")),cex=2)
shade.norm(x=qalpha,sigma=sigma/sqrt(n),mu=0,tail="lower",show.p=FALSE,show.d=FALSE,show.dist=TRUE,shade.col="lightyellow2",main=upper.titlel,xlim=xlim)
abline(v=qalpha,col=1)
legend("topright",pch=22,pt.bg="lightyellow2",pt.cex=2,legend=expression(alpha))
text(qalpha,0.5*dnorm(0,mean=0,sd=sigma/sqrt(n)),"rejection region \u2190", adj = 1)
shade.norm(x=qpower,sigma=sigma/sqrt(n),mu=-1*effect,tail="lower",show.p=FALSE,show.d=FALSE,show.dist=TRUE,shade.col="lightcoral" ,main=lower.titlel,xlim=xlim)
abline(v=qpower,col=1)
legend("topright",pch=22,pt.bg="lightcoral",pt.cex=2,legend=expression(paste("1-",beta)))
text(qpower,0.5*dnorm(effect,mean=effect,sd=sigma/sqrt(n)),"rejection region \u2190", adj = 1)
dev.flush()
}
if(test == "upper"){
dev.hold()
powerp<-power.z.test(alpha=alpha,sigma=sigma,effect=effect,power=NULL,n=n,test="one.tail")$power
power1<-round(powerp,3)
qalpha<-qnorm(1-alpha,mean=0,sd=sigma/sqrt(n))
qpower<-qnorm(1-powerp,mean=effect,sd=sigma/sqrt(n))
plot(seq(0,1),seq(0,1),type="n",xaxt="n",yaxt="n",bty="n",xlab="",ylab="")
text(.5,0.8,bquote(paste(alpha," = ",.(alpha),", 1 - ",beta," = ", .(power1),", ",italic(n)," = ",.(n),", ",sigma," = ",.(sigma))),cex=2)
text(.5,0.14,bquote(paste("effect size = ",.(effect),", 'tail' of test = '", .(test),"'")),cex=2)
shade.norm(x=qalpha,sigma=sigma/sqrt(n),mu=0,tail="upper",show.p=FALSE,show.d=FALSE,show.dist=TRUE,shade.col="lightyellow2",main=upper.titleu,xlim=xlim)
abline(v=qalpha,col=1)
legend("topright",pch=22,pt.bg="lightyellow2",pt.cex=2,legend=expression(alpha))
text(qalpha,0.5*dnorm(0,mean=0,sd=sigma/sqrt(n)),"\u2192 rejection region", adj = 0)
shade.norm(x=qpower,sigma=sigma/sqrt(n),mu=effect,tail="upper",show.p=FALSE,show.d=FALSE,show.dist=TRUE,shade.col="lightcoral" ,main=lower.titleu,xlim=xlim)
abline(v=qpower,col=1)
legend("topright",pch=22,pt.bg="lightcoral",pt.cex=2,legend=expression(paste("1-",beta)))
text(qpower,0.5*dnorm(effect,mean=effect,sd=sigma/sqrt(n)),"\u2192 rejection region", adj = 0)
dev.flush()
}
if(test == "two"){
dev.hold()
powerp<-power.z.test(alpha=alpha,sigma=sigma,effect=effect,power=NULL,n=n,test="two.tail")$power
if(strict == TRUE)powerp<-power.z.test(alpha=alpha,sigma=sigma,effect=effect,power=NULL,n=n,test="two.tail",strict=TRUE)$power
power1<-round(powerp,3)
qalpha<-qnorm(1-(alpha/2),mean=0,sd=sigma/sqrt(n))
qpower<-qnorm(1-power.z.test(alpha=alpha,sigma=sigma,effect=effect,power=NULL,n=n,test="two.tail")$power,mean=effect,sd=sigma/sqrt(n))
plot(seq(0,1),seq(0,1),type="n",xaxt="n",yaxt="n",bty="n",xlab="",ylab="")
text(.5,0.8,bquote(paste(alpha," = ",.(alpha),", 1 - ",beta," = ", .(power1),", ",italic(n)," = ",.(n),", ",sigma," = ",.(sigma))),cex=2)
text(.5,0.14,bquote(paste("effect size = ",.(effect),", 'tail' of test = '", .(test),"'")),cex=2)
shade.norm(x=qalpha,sigma=sigma/sqrt(n),mu=0,tail="two",show.p=FALSE,show.d=FALSE,show.dist=TRUE,shade.col="lightyellow2",main=upper.titleb,xlim=xlim)
abline(v=qalpha,col=1)
abline(v=-qalpha,col=1)
legend("topright",pch=22,pt.bg="lightyellow2",pt.cex=2,legend=expression(alpha))
text(qalpha,0.5*dnorm(0,mean=0,sd=sigma/sqrt(n)),"\u2192 rejection region", adj = 0)
text(-qalpha,0.5*dnorm(0,mean=0,sd=sigma/sqrt(n)),"rejection region \u2190", adj = 1)
shade.norm(x=qpower,sigma=sigma/sqrt(n),mu=effect,tail="two",show.p=FALSE,show.d=FALSE,show.dist=TRUE,shade.col="lightcoral" ,main=lower.titleb,xlim=xlim)
abline(v=qpower,col=1)
abline(v=-qpower,col=1)
legend("topright",pch=22,pt.bg="lightcoral",pt.cex=2,legend=expression(paste("1-",beta)))
text(qpower,0.5*dnorm(effect,mean=effect,sd=sigma/sqrt(n)),"\u2192 rejection region", adj = 0)
text(-qpower,0.5*dnorm(0,mean=0,sd=sigma/sqrt(n)),"rejection region \u2190", adj = 1)
dev.flush()
}
}
see.power.tck<-function ()
{
if (!exists("slider.env"))
slider.env <- NULL; suppressWarnings(rm(slider.env)); slider.env <<- new.env()# Dummy to trick R CMD check
alpha <- 0.05
sigma <- 1.5
effect <-1
n <- 5
test <-tclVar("lower")
strict <- tclVar("FALSE")
assign("alpha", tclVar(alpha),envir= slider.env)
assign("sigma", tclVar(sigma),envir= slider.env)
assign("effect", tclVar(effect),envir= slider.env)
assign("n", tclVar(n),envir= slider.env)
xmin <- -3
assign("xmin", tclVar(xmin),envir= slider.env)
xmax <- 3
assign("xmax", tclVar(xmax),envir= slider.env)
norm.refresh <- function(...) {
alpha <- as.numeric(evalq(tclvalue(alpha),envir= slider.env))
sigma <- as.numeric(evalq(tclvalue(sigma),envir= slider.env))
n <- as.numeric(evalq(tclvalue(n),envir= slider.env))
effect <- as.numeric(evalq(tclvalue(effect),envir= slider.env))
xmin <- as.numeric(evalq(tclvalue(xmin),envir= slider.env))
xmax <- as.numeric(evalq(tclvalue(xmax),envir= slider.env))
xixa <- c(xmin, xmax)
test <- tclvalue(test)
strict <- tclvalue(strict)
see.power(alpha=alpha,sigma=sigma,n=n,effect=effect,xlim=xixa,test=test, strict=strict)
}
m <- tktoplevel()
tkwm.title(m, "Visualizing Power")
tkpack(tklabel(m,text=" Visualizing Power "))
tkwm.geometry(m, "+0+0")
tkpack(tklabel(m,text=" "))
tkpack(fr <- tkframe(m), side = "top", anchor = "w")
tkpack(fr1 <- tkframe(fr), side = "left",anchor = "w")
tkpack(fr2 <- tkframe(fr), side = "right", anchor = "e")
tkpack(tklabel(fr1, text = " Test: "), side = "left")
for ( i in c("lower", "upper", "two")){
tmp <- tkradiobutton(fr1, text=i, variable=test, value=i)
tkpack(tmp,anchor="w")
}
tkpack(tklabel(fr2, text = " Strict: "), side = "left")
for ( i in c("TRUE", "FALSE")){
tmp <- tkradiobutton(fr2, text=i, variable=strict, value=i)
tkpack(tmp,anchor="w")
}
tkpack(fr <- tkframe(m), side = "top")
tkpack(tklabel(fr, text = '\u03b1',font=c("Helvetica","10","italic"), width = "20"),
side = "right")
tkpack(sc <- tkscale(fr, command = norm.refresh, from = 0.01,
to = 1, orient = "horiz", resolution = 0.01, showvalue = TRUE),
side = "left")
assign("sc", sc,envir= slider.env)
evalq(tkconfigure(sc, variable = alpha),envir= slider.env)
tkpack(fr <- tkframe(m), side = "top")
tkpack(tklabel(fr, text = '\u03c3',font=c("Helvetica","10","italic"), width = "20"), side = "right")
tkpack(sc <- tkscale(fr, command = norm.refresh, from = 0.5,
to = 3, orient = "horiz", resolution = 0.1, showvalue = TRUE),
side = "left")
assign("sc", sc,envir= slider.env)
evalq(tkconfigure(sc, variable = sigma),envir= slider.env)
tkpack(fr <- tkframe(m), side = "top")
tkpack(tklabel(fr, text = "n",font=c("Helvetica","10","italic"), width = "20"), side = "right")
tkpack(sc <- tkscale(fr, command = norm.refresh, from = 1,
to = 20, orient = "horiz", resolution = 1, showvalue = TRUE),
side = "left")
assign("sc", sc,envir= slider.env)
evalq(tkconfigure(sc, variable = n),envir= slider.env)
tkpack(fr <- tkframe(m), side = "top")
tkpack(tklabel(fr, text = "Effect size", font=c("Helvetica","10"),width = "20"), side = "right")
tkpack(sc <- tkscale(fr, command = norm.refresh, from = 0,
to = 3, orient = "horiz", resolution = .1, showvalue = TRUE),
side = "left")
assign("sc", sc,envir= slider.env)
evalq(tkconfigure(sc, variable = effect),envir= slider.env)
tkpack(tklabel(m,text=" "))
tkpack(fr <- tkframe(m), side = "top")
tkpack(tklabel(fr, text = "Xmin:", width = 6), side = "left")
tkpack(e <- tkentry(fr, width = 8), side = "left")
assign("e", e,envir= slider.env)
evalq(tkconfigure(e, textvariable = xmin),envir= slider.env)
tkpack(tklabel(fr, text = "Xmax:", width = 6), side = "left")
tkpack(e <- tkentry(fr, width = 8), side = "left")
assign("e", e,envir= slider.env)
evalq(tkconfigure(e, textvariable = xmax),envir= slider.env)
tkpack(tkbutton(m, text = "Refresh", command = norm.refresh),
side = "left")
tkpack(tkbutton(m, text = "Exit", command = function() tkdestroy(m)),
side = "right")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/psrwe_survlrk.R
\name{rwe_lrk}
\alias{rwe_lrk}
\title{Log-rank Estimation}
\usage{
rwe_lrk(
dta_cur,
dta_ext,
dta_cur_trt,
n_borrow = 0,
pred_tps = NULL,
stderr_method = "naive"
)
}
\arguments{
\item{dta_cur}{Matrix of time and event from a PS stratum in current study
(control arm only)}
\item{dta_ext}{Matrix of time and event from a PS stratum in external data
source (control arm only)}
\item{dta_cur_trt}{Matrix of time and event from a PS stratum in current
study (treatment arm only)}
\item{n_borrow}{Number of subjects to be borrowed}
\item{pred_tps}{All time points of events (unique and sorted)}
\item{stderr_method}{Method for computing StdErr (available for naive only)}
}
\value{
Estimation of log-rank estimates at time \code{pred_tps}
}
\description{
Estimate log-rank estimates for a single PS
stratum
}
| /man/rwe_lrk.Rd | no_license | olssol/psrwe | R | false | true | 915 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/psrwe_survlrk.R
\name{rwe_lrk}
\alias{rwe_lrk}
\title{Log-rank Estimation}
\usage{
rwe_lrk(
dta_cur,
dta_ext,
dta_cur_trt,
n_borrow = 0,
pred_tps = NULL,
stderr_method = "naive"
)
}
\arguments{
\item{dta_cur}{Matrix of time and event from a PS stratum in current study
(control arm only)}
\item{dta_ext}{Matrix of time and event from a PS stratum in external data
source (control arm only)}
\item{dta_cur_trt}{Matrix of time and event from a PS stratum in current
study (treatment arm only)}
\item{n_borrow}{Number of subjects to be borrowed}
\item{pred_tps}{All time points of events (unique and sorted)}
\item{stderr_method}{Method for computing StdErr (available for naive only)}
}
\value{
Estimation of log-rank estimates at time \code{pred_tps}
}
\description{
Estimate log-rank estimates for a single PS
stratum
}
|
cfaed8b7bbb3ec10b047c865add7c3e1 fpu-10Xh-correct04-nonuniform-depth-1.qdimacs 51573 135333 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Miller-Marin/fpu/fpu-10Xh-correct04-nonuniform-depth-1/fpu-10Xh-correct04-nonuniform-depth-1.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 91 | r | cfaed8b7bbb3ec10b047c865add7c3e1 fpu-10Xh-correct04-nonuniform-depth-1.qdimacs 51573 135333 |
## Fit an AdaBoost-flavoured gradient boosting machine (gbm) on the balanced
## training data and evaluate it (AUC, ROC curve, confusion matrix) on a
## 25% hold-out split, then score the unlabeled test file.
## NOTE(review): file paths are hard-coded absolute Windows paths; consider
## parameterizing them if this script is shared.
trn.bal <- read.table("D:/Acads/8th sem/ime672/final/final_train.csv", header=TRUE, sep=",", na.strings="NA", dec=".", strip.white=TRUE)
tst <- read.table("D:/Acads/8th sem/ime672/final/final_test.csv", header=TRUE, sep=",", na.strings="NA", dec=".", strip.white=TRUE)
# 75/25 train/validation split, seeded for reproducibility.
smp_size <- floor(0.75 * nrow(trn.bal))
set.seed(1)
train_ind <- sample(seq_len(nrow(trn.bal)), size = smp_size)
train <- trn.bal[train_ind, ]
test <- trn.bal[-train_ind, ]
# library() instead of require(): require() only warns when a package is
# missing, letting the script fail later with a confusing error.
library(gbm)
# BUG FIX: the original formula was `train$TARGET ~ .`; combined with
# data = train, the `.` then still includes the TARGET column among the
# predictors (target leakage). `TARGET ~ .` excludes it correctly.
gbmFit.ada <- gbm(formula = TARGET ~ .,
                  distribution = 'adaboost',
                  data = train,
                  n.trees = 5,           # the number of trees in the model
                  interaction.depth = 4, # each tree will evaluate 4 decisions
                  n.minobsinnode = 2,    # higher means more conservative fit
                  shrinkage = .01,       # the learning rate
                  bag.fraction = 1,      # subsampling fraction
                  train.fraction = 1,    # fraction of data for training
                  cv.folds = 5)          # five-fold cross-validation
# Predicted probabilities on the hold-out set.
pred <- predict(gbmFit.ada, test, n.trees = 5, type = 'response')
library(ROCR)
pred2 <- prediction(pred, test$TARGET)
(auc <- performance(pred2, 'auc'))
roc <- performance(pred2, measure = "tpr", x.measure = "fpr")
plot(roc)
abline(a = 0, b = 1) # chance diagonal
library(caret)
# confusionMatrix() in current caret requires factor inputs with matching
# levels; wrap the thresholded predictions and targets accordingly.
confusionMatrix(factor(as.numeric(pred > 0.5), levels = c(0, 1)),
                factor(test$TARGET, levels = c(0, 1)))
# BUG FIX: predict.gbm requires n.trees; the original call omitted it, which
# errors (or warns and guesses) depending on the gbm version. Use the same
# 5 trees as above.
predtest <- predict(gbmFit.ada, tst, n.trees = 5, type = "response")
tst <- read.table("D:/Acads/8th sem/ime672/final/final_test.csv", header=TRUE, sep=",", na.strings="NA", dec=".", strip.white=TRUE)
smp_size <- floor(0.75 * nrow(trn.bal))
set.seed(1)
train_ind <- sample(seq_len(nrow(trn.bal)), size = smp_size)
train <- trn.bal[train_ind, ]
test <- trn.bal[-train_ind, ]
#install.packages("gbm")
require(gbm)
gbmFit.ada = gbm(formula = train$TARGET ~.,
distribution = 'adaboost',
data = train,
n.trees = 5, #the number of trees in the model
interaction.depth = 4, #each tree will evaluate 4 decisions
n.minobsinnode = 2, #higher means more conservative fit
shrinkage = .01, #the learning rate
bag.fraction = 1, #subsampling fraction
train.fraction = 1, #fraction of data for training
cv.folds = 5) #running five-fold cross-validation
pred <- predict(gbmFit.ada, test, n.trees = 5, type = 'response')
require(ROCR)
pred2 <- prediction(pred, test$TARGET)
(auc <- performance(pred2, 'auc'))
roc = performance(pred2, measure = "tpr", x.measure = "fpr")
plot(roc)
abline(a=0,b=1)
require(caret)
confusionMatrix(as.numeric(pred>0.5), test$TARGET)
predtest <- predict(gbmFit.ada, tst, type = "response")
|
# Render a d3 visualization to a standalone HTML file inside the project.
# `d3`: an r2d3 visualization object; `f_path`: forwarded to d3_html_file()
# (defined elsewhere in this package -- presumably it derives the output
# path; confirm against R/d3_html_file) and resolved against the project
# root via here::here(). `...` is passed on to r2d3::save_d3_html().
# JS/CSS dependencies are written to a local "lib" directory.
save_d3_html <- function(d3,f_path,...){
r2d3::save_d3_html(d3,here::here(d3_html_file(f_path)),libdir = "lib",...)
}
| /R/d3_write.R | no_license | zac-garland/d3.learn | R | false | false | 125 | r | save_d3_html <- function(d3,f_path,...){
r2d3::save_d3_html(d3,here::here(d3_html_file(f_path)),libdir = "lib",...)
}
|
rankhospital <- function(state,outcome,num ="best"){
## Return the name of the hospital in `state` at rank `num` when hospitals
## are ordered by ascending 30-day mortality for `outcome` ("heart attack",
## "heart failure", or "pneumonia"), ties broken alphabetically by name.
## `num` may be an integer rank, "best" (rank 1) or "worst" (last rank).
## Returns NA when `num` exceeds the number of hospitals in the state.
## Relevant columns of outcome-of-care-measures.csv:
## [2] "Hospital.Name"
## [7] "State"
## [11] "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack"
## [17] "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure"
## [23] "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia"
## Map the outcome keyword to its column index (yields NA when unknown,
## which the validity check below turns into an error).
outcomeVector <- c('heart attack'=11,'heart failure'=17,'pneumonia'=23)
outcome <- outcomeVector[outcome]
## read data (all columns as character; the rate column is converted below)
data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
## Check that state is valid: it must appear among the distinct values of
## the State column (factor levels drop NA automatically).
if( !( state %in% levels(factor(data$State))))
{
stop("invalid state")
}
## Check that outcome is valid (NA %in% ... is FALSE, so an unknown
## keyword also lands here)
if (! (outcome %in% outcomeVector))
{
stop("invalid outcome")
}
## When num is a string it must be exactly "best" or "worst"
## NOTE(review): class(num) == "character" would be more idiomatic as
## is.character(num)
if (class(num) == "character"){
if (!(num=="best" || num=="worst"))
stop("invalid num value")
}
## Convert the chosen mortality column to numeric ("Not Available" becomes
## NA with a coercion warning) and drop incomplete rows.
data[,outcome] <- as.numeric(as.character(data[,outcome]))
data<-na.omit(data)
##write.table(data,file='result.txt',sep='\t')
## retrieve data for the input state
filteredPerState <- subset(data, State == state)
##write.table(filteredPerState,file='result2.txt',sep='\t')
## Order by mortality rate, then hospital name, and keep only the names.
filteredPerState <- filteredPerState[order(filteredPerState[,outcome],filteredPerState[,'Hospital.Name'],na.last=TRUE),'Hospital.Name']
##write.table(na.omit(filteredPerState),file='result3.txt',sep='\t')
## Resolve the requested rank.
if (num == 'best')
return(filteredPerState[1])
else if (num == 'worst')
return(filteredPerState[length(filteredPerState)])
else if (length(filteredPerState) < num)
return(NA)
else
return (filteredPerState[num])
} | /Week 4/rankhospital.R | no_license | rentze/R-programming | R | false | false | 2,133 | r | rankhospital <- function(state,outcome,num ="best"){
## Read outcome data
## need to read columns related to 30-day mortality for the specified outcome (โheart attackโ, โheart failureโ, or โpneumoniaโ)
## [2] "Hospital.Name"
## [7] "State"
## [11] "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack"
## [17] "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure"
## [23] "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia"
outcomeVector <- c('heart attack'=11,'heart failure'=17,'pneumonia'=23)
outcome <- outcomeVector[outcome]
## read data
data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
## Check that state is valid
## a NA in value causes that level to be removed from the levels and the elements formerly with that level to be replaced by NA.
## Test if shorter vectors are in longer vectors
if( !( state %in% levels(factor(data$State))))
{
stop("invalid state")
}
## Check that outcome is valid
if (! (outcome %in% outcomeVector))
{
stop("invalid outcome")
}
## Check whether num is a number or a valid string
if (class(num) == "character"){
if (!(num=="best" || num=="worst"))
stop("invalid num value")
}
## Return hospital name in that state with lowest 30-day death
## transform data for the input outcome
data[,outcome] <- as.numeric(as.character(data[,outcome]))
data<-na.omit(data)
##write.table(data,file='result.txt',sep='\t')
## retrieve data for the input state
filteredPerState <- subset(data, State == state)
##write.table(filteredPerState,file='result2.txt',sep='\t')
filteredPerState <- filteredPerState[order(filteredPerState[,outcome],filteredPerState[,'Hospital.Name'],na.last=TRUE),'Hospital.Name']
##write.table(na.omit(filteredPerState),file='result3.txt',sep='\t')
if (num == 'best')
return(filteredPerState[1])
else if (num == 'worst')
return(filteredPerState[length(filteredPerState)])
else if (length(filteredPerState) < num)
return(NA)
else
return (filteredPerState[num])
} |
# config_http.R -- generate test/httpd.conf from test/httpd.conf.in and make
# sure the Apache modules needed by the test suite (dir, log_config, mime)
# are loadable. Invoked as: Rscript config_http.R <path-to-apxs> <path-to-httpd>
APXS <- commandArgs(trailingOnly=TRUE)[1]
HTTPD <- commandArgs(trailingOnly=TRUE)[2]
options(warn=-1)

# Return the first TCP port >= 8181 that nothing is listening on,
# or 0 if every port up to 65535 is taken.
NextAvailablePort <- function(){
	start <- 8181
	while(TRUE){
		if (start >= 65536) return(0)
		# socketConnection() errors when nothing accepts the connection,
		# which is exactly the "port is free" case we want.
		con <- try(socketConnection(port=start),silent=TRUE)
		if (inherits(con,'try-error')){
			return(start)
		}
		close(con)
		start <- start + 1
	}
}

#
# Variables that will get replaced in httpd.conf.in
#
DOCROOT <- paste(getwd(),'/test',sep='')
PORT <- NextAvailablePort()
BREWINSTALLED <- 'brew' %in% .packages(all.available=TRUE)

# Regenerate test/httpd.conf from the template, optionally adding a brew
# handler section when the brew package is installed.
unlink('test/httpd.conf')
con <- file('test/httpd.conf',open='w+')
lines <- readLines('test/httpd.conf.in')
if (BREWINSTALLED){
	lines <- append(lines,c(
		'<Directory @DOCROOT@/brew>\n',
		'   SetHandler r-script\n',
		'   RHandler brew::brew\n',
		'</Directory>\n') )
}
lines <- gsub('@PORT@',PORT,lines)
lines <- gsub('@DOCROOT@',DOCROOT,lines)
writeLines(lines,con)
close(con)

unlink('test/confs/load_modules')
# If `module` is not compiled into httpd (checked via `httpd -l`), append a
# LoadModule directive for `so_name` to test/confs/load_modules; otherwise
# just report that it is built in. Replaces three copy-pasted blocks.
ensure_module <- function(module, so_name){
	built_in <- length(grep(module, readLines(pipe(paste(HTTPD,'-l'))))) > 0
	if (!built_in){
		# Not built in: locate the shared object via apxs and add a directive.
		con <- file('test/confs/load_modules',open='a')
		libexecdir <- readLines(pipe(paste(APXS,'-q LIBEXECDIR')))[1]
		cat('LoadModule ',module,'_module ',libexecdir,'/',so_name,'\n',sep='',file=con)
		close(con)
	} else {
		print(paste('mod_',module,' built in',sep=''))
	}
}
ensure_module('dir','mod_dir.so')
ensure_module('log_config','mod_log_config.so')
ensure_module('mime','mod_mime.so')
| /tools/config_http.R | permissive | jeffreyhorner/rapache | R | false | false | 2,144 | r | APXS <- commandArgs(trailingOnly=TRUE)[1]
HTTPD <- commandArgs(trailingOnly=TRUE)[2]
options(warn=-1)
NextAvailablePort <- function(){
start <- 8181
while(TRUE){
if (start >= 65536) return(0)
con <- try(socketConnection(port=start),silent=TRUE)
if (inherits(con,'try-error')){
return(start)
}
close(con)
start <- start + 1
}
}
#
# Variables that will get replaced in httpd.conf.in
#
DOCROOT <- paste(getwd(),'/test',sep='')
PORT <- NextAvailablePort()
BREWINSTALLED <- 'brew' %in% .packages(all.available=TRUE)
unlink('test/httpd.conf')
con <- file('test/httpd.conf',open='w+')
lines <- readLines('test/httpd.conf.in')
if (BREWINSTALLED){
lines <- append(lines,c(
'<Directory @DOCROOT@/brew>\n',
' SetHandler r-script\n',
' RHandler brew::brew\n',
'</Directory>\n') )
}
lines <- gsub('@PORT@',PORT,lines)
lines <- gsub('@DOCROOT@',DOCROOT,lines)
writeLines(lines,con)
close(con)
unlink('test/confs/load_modules')
# Test if dir module is compiled into httpd
if (length(grep('dir',readLines(pipe(paste(HTTPD,'-l')))))==0){
# No, we need to add it. grab LIBEXECDIR
con <- file('test/confs/load_modules',open='a')
libexecdir <- readLines(pipe(paste(APXS,'-q LIBEXECDIR')))[1]
cat('LoadModule dir_module ',libexecdir,'/mod_dir.so\n',sep='',file=con)
close(con)
} else {
print('mod_dir built in')
}
# Test if log_config module is compiled into httpd
if (length(grep('log_config',readLines(pipe(paste(HTTPD,'-l')))))==0){
# No, we need to add it. grab LIBEXECDIR
con <- file('test/confs/load_modules',open='a')
libexecdir <- readLines(pipe(paste(APXS,'-q LIBEXECDIR')))[1]
cat('LoadModule log_config_module ',libexecdir,'/mod_log_config.so\n',sep='',file=con)
close(con)
} else {
print('mod_log_config built in')
}
# Test if mime module is compiled into httpd
if (length(grep('mime',readLines(pipe(paste(HTTPD,'-l')))))==0){
# No, we need to add it. grab LIBEXECDIR
con <- file('test/confs/load_modules',open='a')
libexecdir <- readLines(pipe(paste(APXS,'-q LIBEXECDIR')))[1]
cat('LoadModule mime_module ',libexecdir,'/mod_mime.so\n',sep='',file=con)
close(con)
} else {
print('mod_mime built in')
}
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% ParameterCelSet.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{ParameterCelSet}
\docType{class}
\alias{ParameterCelSet}
\title{The ParameterCelSet class}
\description{
Package: aroma.affymetrix \cr
\bold{Class ParameterCelSet}\cr
\code{\link[R.oo]{Object}}\cr
\code{~~|}\cr
\code{~~+--}\code{\link[R.filesets]{FullNameInterface}}\cr
\code{~~~~~~~|}\cr
\code{~~~~~~~+--}\code{\link[R.filesets]{GenericDataFileSet}}\cr
\code{~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~+--}\code{\link[aroma.core]{AromaMicroarrayDataSet}}\cr
\code{~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.core]{AromaPlatformInterface}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{AffymetrixFileSet}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{AffymetrixCelSet}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.core]{ParametersInterface}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+--}\code{ParameterCelSet}\cr
\bold{Directly known subclasses:}\cr
\link[aroma.affymetrix]{ChipEffectSet}, \link[aroma.affymetrix]{CnChipEffectSet}, \link[aroma.affymetrix]{ExonChipEffectSet}, \link[aroma.affymetrix]{FirmaSet}, \link[aroma.affymetrix]{SnpChipEffectSet}\cr
public static class \bold{ParameterCelSet}\cr
extends \link[aroma.core]{ParametersInterface}\cr
A ParameterCelSet object represents a set of \code{\link{ParameterCelFile}}:s.
}
\usage{
ParameterCelSet(...)
}
\arguments{
\item{...}{Arguments passed to \code{\link{AffymetrixCelSet}}.}
}
\section{Fields and Methods}{
\bold{Methods:}\cr
\tabular{rll}{
\tab \code{extractDataFrame} \tab -\cr
\tab \code{extractMatrix} \tab -\cr
}
\bold{Methods inherited from ParametersInterface}:\cr
getParameterSets, getParameters, getParametersAsString
\bold{Methods inherited from AffymetrixCelSet}:\cr
append, as, as.AffymetrixCelSet, as.character, averageQuantile, byName, byPath, clone, convertToUnique, doCRMAv1, doCRMAv2, doFIRMA, doGCRMA, doRMA, extractAffyBatch, extractFeatureSet, extractMatrix, extractSnpFeatureSet, findByName, getAverage, getAverageAsinh, getAverageFile, getAverageLog, getCdf, getChipType, getData, getIntensities, getPlatform, getTimestamps, getUnitGroupCellMap, getUnitIntensities, getUnitNamesFile, getUnitTypesFile, isDuplicated, justRMA, justSNPRMA, nbrOfArrays, normalizeQuantile, plotDensity, range, readUnits, setCdf, update2, writeSgr
\bold{Methods inherited from AffymetrixFileSet}:\cr
as, as.AffymetrixFileSet, byPath, getDefaultFullName
\bold{Methods inherited from AromaPlatformInterface}:\cr
getAromaPlatform, getAromaUflFile, getAromaUgpFile, getChipType, getPlatform, getUnitAnnotationDataFile, getUnitNamesFile, getUnitTypesFile, isCompatibleWith
\bold{Methods inherited from AromaMicroarrayDataSet}:\cr
as.AromaMicroarrayDataSetList, as.AromaMicroarrayDataSetTuple, getAromaFullNameTranslatorSet, getAverageFile, getChipType, getDefaultFullName, getPlatform, setAttributesBy, setAttributesBySampleAnnotationFile, setAttributesBySampleAnnotationSet, validate
\bold{Methods inherited from GenericDataFileSet}:\cr
[, [[, anyDuplicated, anyNA, append, appendFiles, appendFullNamesTranslator, appendFullNamesTranslatorByNULL, appendFullNamesTranslatorByTabularTextFile, appendFullNamesTranslatorByTabularTextFileSet, appendFullNamesTranslatorBydata.frame, appendFullNamesTranslatorByfunction, appendFullNamesTranslatorBylist, as.character, as.list, byName, byPath, c, clearCache, clearFullNamesTranslator, clone, copyTo, dsApply, dsApplyInPairs, duplicated, equals, extract, findByName, findDuplicated, getChecksum, getChecksumFileSet, getChecksumObjects, getDefaultFullName, getFile, getFileClass, getFileSize, getFiles, getFullNames, getNames, getOneFile, getPath, getPathnames, getSubdirs, gunzip, gzip, hasFile, indexOf, is.na, names, nbrOfFiles, rep, resetFullNames, setFullNamesTranslator, sortBy, unique, update2, updateFullName, updateFullNames, validate, getFullNameTranslatorSet, getParentName
\bold{Methods inherited from FullNameInterface}:\cr
appendFullNameTranslator, appendFullNameTranslatorByNULL, appendFullNameTranslatorByTabularTextFile, appendFullNameTranslatorByTabularTextFileSet, appendFullNameTranslatorBycharacter, appendFullNameTranslatorBydata.frame, appendFullNameTranslatorByfunction, appendFullNameTranslatorBylist, clearFullNameTranslator, clearListOfFullNameTranslators, getDefaultFullName, getFullName, getFullNameTranslator, getListOfFullNameTranslators, getName, getTags, hasTag, hasTags, resetFullName, setFullName, setFullNameTranslator, setListOfFullNameTranslators, setName, setTags, updateFullName
\bold{Methods inherited from Object}:\cr
$, $<-, [[, [[<-, as.character, attach, attachLocally, clearCache, clearLookupCache, clone, detach, equals, extend, finalize, getEnvironment, getFieldModifier, getFieldModifiers, getFields, getInstantiationTime, getStaticInstance, hasField, hashCode, ll, load, names, objectSize, print, save, asThis
}
\author{Henrik Bengtsson}
\keyword{classes}
\keyword{IO}
| /man/ParameterCelSet.Rd | no_license | MimoriK/aroma.affymetrix | R | false | false | 5,417 | rd | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% ParameterCelSet.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{ParameterCelSet}
\docType{class}
\alias{ParameterCelSet}
\title{The ParameterCelSet class}
\description{
Package: aroma.affymetrix \cr
\bold{Class ParameterCelSet}\cr
\code{\link[R.oo]{Object}}\cr
\code{~~|}\cr
\code{~~+--}\code{\link[R.filesets]{FullNameInterface}}\cr
\code{~~~~~~~|}\cr
\code{~~~~~~~+--}\code{\link[R.filesets]{GenericDataFileSet}}\cr
\code{~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~+--}\code{\link[aroma.core]{AromaMicroarrayDataSet}}\cr
\code{~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.core]{AromaPlatformInterface}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{AffymetrixFileSet}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{AffymetrixCelSet}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.core]{ParametersInterface}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+--}\code{ParameterCelSet}\cr
\bold{Directly known subclasses:}\cr
\link[aroma.affymetrix]{ChipEffectSet}, \link[aroma.affymetrix]{CnChipEffectSet}, \link[aroma.affymetrix]{ExonChipEffectSet}, \link[aroma.affymetrix]{FirmaSet}, \link[aroma.affymetrix]{SnpChipEffectSet}\cr
public static class \bold{ParameterCelSet}\cr
extends \link[aroma.core]{ParametersInterface}\cr
A ParameterCelSet object represents a set of \code{\link{ParameterCelFile}}:s.
}
\usage{
ParameterCelSet(...)
}
\arguments{
\item{...}{Arguments passed to \code{\link{AffymetrixCelSet}}.}
}
\section{Fields and Methods}{
\bold{Methods:}\cr
\tabular{rll}{
\tab \code{extractDataFrame} \tab -\cr
\tab \code{extractMatrix} \tab -\cr
}
\bold{Methods inherited from ParametersInterface}:\cr
getParameterSets, getParameters, getParametersAsString
\bold{Methods inherited from AffymetrixCelSet}:\cr
append, as, as.AffymetrixCelSet, as.character, averageQuantile, byName, byPath, clone, convertToUnique, doCRMAv1, doCRMAv2, doFIRMA, doGCRMA, doRMA, extractAffyBatch, extractFeatureSet, extractMatrix, extractSnpFeatureSet, findByName, getAverage, getAverageAsinh, getAverageFile, getAverageLog, getCdf, getChipType, getData, getIntensities, getPlatform, getTimestamps, getUnitGroupCellMap, getUnitIntensities, getUnitNamesFile, getUnitTypesFile, isDuplicated, justRMA, justSNPRMA, nbrOfArrays, normalizeQuantile, plotDensity, range, readUnits, setCdf, update2, writeSgr
\bold{Methods inherited from AffymetrixFileSet}:\cr
as, as.AffymetrixFileSet, byPath, getDefaultFullName
\bold{Methods inherited from AromaPlatformInterface}:\cr
getAromaPlatform, getAromaUflFile, getAromaUgpFile, getChipType, getPlatform, getUnitAnnotationDataFile, getUnitNamesFile, getUnitTypesFile, isCompatibleWith
\bold{Methods inherited from AromaMicroarrayDataSet}:\cr
as.AromaMicroarrayDataSetList, as.AromaMicroarrayDataSetTuple, getAromaFullNameTranslatorSet, getAverageFile, getChipType, getDefaultFullName, getPlatform, setAttributesBy, setAttributesBySampleAnnotationFile, setAttributesBySampleAnnotationSet, validate
\bold{Methods inherited from GenericDataFileSet}:\cr
[, [[, anyDuplicated, anyNA, append, appendFiles, appendFullNamesTranslator, appendFullNamesTranslatorByNULL, appendFullNamesTranslatorByTabularTextFile, appendFullNamesTranslatorByTabularTextFileSet, appendFullNamesTranslatorBydata.frame, appendFullNamesTranslatorByfunction, appendFullNamesTranslatorBylist, as.character, as.list, byName, byPath, c, clearCache, clearFullNamesTranslator, clone, copyTo, dsApply, dsApplyInPairs, duplicated, equals, extract, findByName, findDuplicated, getChecksum, getChecksumFileSet, getChecksumObjects, getDefaultFullName, getFile, getFileClass, getFileSize, getFiles, getFullNames, getNames, getOneFile, getPath, getPathnames, getSubdirs, gunzip, gzip, hasFile, indexOf, is.na, names, nbrOfFiles, rep, resetFullNames, setFullNamesTranslator, sortBy, unique, update2, updateFullName, updateFullNames, validate, getFullNameTranslatorSet, getParentName
\bold{Methods inherited from FullNameInterface}:\cr
appendFullNameTranslator, appendFullNameTranslatorByNULL, appendFullNameTranslatorByTabularTextFile, appendFullNameTranslatorByTabularTextFileSet, appendFullNameTranslatorBycharacter, appendFullNameTranslatorBydata.frame, appendFullNameTranslatorByfunction, appendFullNameTranslatorBylist, clearFullNameTranslator, clearListOfFullNameTranslators, getDefaultFullName, getFullName, getFullNameTranslator, getListOfFullNameTranslators, getName, getTags, hasTag, hasTags, resetFullName, setFullName, setFullNameTranslator, setListOfFullNameTranslators, setName, setTags, updateFullName
\bold{Methods inherited from Object}:\cr
$, $<-, [[, [[<-, as.character, attach, attachLocally, clearCache, clearLookupCache, clone, detach, equals, extend, finalize, getEnvironment, getFieldModifier, getFieldModifiers, getFields, getInstantiationTime, getStaticInstance, hasField, hashCode, ll, load, names, objectSize, print, save, asThis
}
\author{Henrik Bengtsson}
\keyword{classes}
\keyword{IO}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GridData.R
\name{Grid}
\alias{Grid}
\title{Create a new \code{Grid} object}
\usage{
Grid(levels, nlevels = NULL, dimNames = NULL, index = NULL)
}
\arguments{
\item{levels}{A list with the levels of the variables. The length
of the list gives the spatial dimension. The \eqn{i}-th element
must be a numeric vector containing the distinct values of the
corresponding variable.}
\item{nlevels}{Integer vector giving the number of levels by
dimension. This formal argument is used only when \code{levels} is
missing. In this case, the nodes for dimension \eqn{i} will be
regularly spaced between \code{0.0} and \code{1.0}.}
\item{dimNames}{If the list \code{levels} does not have names,
then the character vector \code{dimNames} will be used.}
\item{index}{An array with integer values and dimension
corresponding to the spatial dimensions. Each value represent the
number of the corresponding node in a "flat" representation
(data.frame or matrix), and will be used in tasks such as
interpolation.}
}
\value{
An object with S4 class \code{"Grid"}. If needed, this
object can be coerced to a data frame or to a matrix by using the
methods \code{as.data.frame} or \code{as.matrix}.
}
\description{
Create a Grid object.
}
\note{
If \code{index} is not provided, the vectors in the
\code{levels} list can be unsorted, and they will be
sorted. However, when a non-default \code{index} is provided, the
levels must be sorted. This rule is intended to reduce the risk of
error.
}
\examples{
myGD1 <- Grid(levels = list("X" = c(0, 1), "Y" = c(0.0, 0.5, 1.0)))
## the same with less effort
myGD2 <- Grid(nlevels = c("X" = 2, "Y" = 3))
nlevels(myGD2)
levels(myGD2)
}
\author{
Yves Deville
}
| /man/Grid.Rd | no_license | changhw/smint | R | false | true | 1,762 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GridData.R
\name{Grid}
\alias{Grid}
\title{Create a new \code{Grid} object}
\usage{
Grid(levels, nlevels = NULL, dimNames = NULL, index = NULL)
}
\arguments{
\item{levels}{A list with the levels of the variables. The length
of the list gives the spatial dimension. The \eqn{i}-th element
must be a numeric vector containing the distinct values of the
corresponding variable.}
\item{nlevels}{Integer vector giving the number of levels by
dimension. This formal argument is used only when \code{levels} is
missing. In this case, the nodes for dimension \eqn{i} will be
regularly spaced between \code{0.0} and \code{1.0}.}
\item{dimNames}{If the list \code{levels} does not have names,
then the character vector \code{dimNames} will be used.}
\item{index}{An array with integer values and dimension
corresponding to the spatial dimensions. Each value represent the
number of the corresponding node in a "flat" representation
(data.frame or matrix), and will be used in tasks such as
interpolation.}
}
\value{
An object with S4 class \code{"Grid"}. If needed, this
object can be coerced to a data frame or to a matrix by using the
methods \code{as.data.frame} or \code{as.matrix}.
}
\description{
Create a Grid object.
}
\note{
If \code{index} is not provided, the vectors in the
\code{levels} list can be unsorted, and they will be
sorted. However, when a non-default \code{index} is provided, the
levels must be sorted. This rule is intended to reduce the risk of
error.
}
\examples{
myGD1 <- Grid(levels = list("X" = c(0, 1), "Y" = c(0.0, 0.5, 1.0)))
## the same with less effort
myGD2 <- Grid(nlevels = c("X" = 2, "Y" = 3))
nlevels(myGD2)
levels(myGD2)
}
\author{
Yves Deville
}
|
# source("http://bioconductor.org/biocLite.R");
# biocLite("Heatplus");
library(Heatplus) # load Heatplus package into R (heatmap_2, RGBColVec)
library(adimpro)
library(pixmap)

# Read the logo bitmap and turn its grey channel into a numeric matrix.
logo <- read.pnm("spelman.pbm")
m <- matrix(logo@grey, logo@size[1], logo@size[2])
image(m, col=gray(0:10/10), main="original") # this is the original image

# Shuffle the rows, then show that hierarchical clustering recovers much of
# the original ordering.
# BUG FIX: the original indexed `g@size[1]`, but no object `g` exists in
# this script -- the pixmap object is `logo`.
m2 <- m[sample(1:logo@size[1], logo@size[1]), ]
image(m2, col=gray(0:10/10), main="shuffled") # this is the shuffled image
h <- hclust(dist(m2))
m3 <- m2[h$order, ]
image(m3, col=gray(0:10/10), main="hclust result")

# NOTE: hclust's "ward" method was renamed "ward.D" in R >= 3.1.0; the old
# name still works but emits a warning and is mapped to ward.D.
mymethod <- 'ward'
heatmap_2(m, do.dendro = c(TRUE, FALSE), legend = 1,
          #legfrac = 10,
          hclustfun = function(c) hclust( c, method= mymethod),
          col = gray(0:10/10),
          )
heatmap_2(m)                                  # a simple heat map
heatmap_2(m, legend = 1 )                     # add a figure legend
heatmap_2(m, legend = 1, col = RGBColVec(64)) # customize the color
############
############ END
############
| /pics-hclust-demo/spelman_logo/spelLogo_key.R | no_license | hongqin/ORAU-R | R | false | false | 913 | r | # source("http://bioconductor.org/biocLite.R");
# biocLite("Heatplus");
library(Heatplus); #load Heatplus package into R
library(adimpro)
library(pixmap)
logo = read.pnm("spelman.pbm")
m = matrix(logo@grey, logo@size[1], logo@size[2])
image(m, col=gray(0:10/10), main="original") #this is the original image
m2 = m[sample(1:g@size[1], g@size[1]), ]
image(m2, col=gray(0:10/10), main="shuffled") #this is the shuffled image
h = hclust(dist(m2))
m3 = m2[h$order, ]
image(m3, col=gray(0:10/10), main="hclust result")
mymethod = 'ward';
heatmap_2(m, do.dendro = c(T, FALSE), legend = 1,
#legfrac = 10,
hclustfun = function(c) hclust( c, method= mymethod),
col = gray(0:10/10),
)
heatmap_2(m); #A simple heat map
heatmap_2(m, legend = 1 ) #Add a figure legend
heatmap_2(m, legend = 1, col = RGBColVec(64) ) #customerize the color
############
############ END
############
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kernelCollection.R
\name{mu_arcsin}
\alias{mu_arcsin}
\title{Arcsin moments}
\usage{
mu_arcsin(support = c(0, 1), param = NULL)
}
\arguments{
\item{support}{lower and upper bound of kernel window}
\item{param}{optional parameters to kernel}
}
\description{
Returns a vector of the first two uncentered moments \eqn{(\mu_1,\mu_2)} for the
arcsin kernel on the desired \code{support}. Equivalent to \code{mu_beta(param=c(1/2,1/2))}.
}
| /man/mu_arcsin.Rd | no_license | ajmcneil/spectralBacktest | R | false | true | 504 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kernelCollection.R
\name{mu_arcsin}
\alias{mu_arcsin}
\title{Arcsin moments}
\usage{
mu_arcsin(support = c(0, 1), param = NULL)
}
\arguments{
\item{support}{lower and upper bound of kernel window}
\item{param}{optional parameters to kernel}
}
\description{
Returns a vector of the first two uncentered moments \eqn{(\mu_1,\mu_2)} for the
arcsin kernel on the desired \code{support}. Equivalent to \code{mu_beta(param=c(1/2,1/2))}.
}
|
# Author: Robert J. Hijmans
# April 2010
# version 0.1
# license GPL3
# See http://local.wasp.uwa.edu.au/~pbourke/geometry/polyarea/
# Centroid of a planar polygon supplied as a two-column (lon, lat) matrix of
# vertices, via the standard shoelace-based formula:
#   centroid = sum((v_i + v_{i+1}) * cross_i) / (6 * signed area),
# where cross_i = x_i*y_{i+1} - x_{i+1}*y_i. Returns a 1x2 matrix with
# columns "lon" and "lat".
.basiccentroid <- function(p) {
	nxt <- rbind(p[-1, , drop=FALSE], p[1, , drop=FALSE])
	cross <- p[, 1] * nxt[, 2] - nxt[, 1] * p[, 2]
	denom <- 3 * sum(cross)   # == 6 * (signed area), since area = sum(cross)/2
	cbind(lon = sum((p[, 1] + nxt[, 1]) * cross),
		lat = sum((p[, 2] + nxt[, 2]) * cross)) / denom
}
# Register the "centroid" S4 generic, unless another loaded package has
# already defined one (in which case our methods attach to that generic).
if (!isGeneric("centroid")) {
	setGeneric("centroid", function(x, ...)
		standardGeneric("centroid"))
}
# data.frame input: coerce to a matrix and dispatch to the matrix method.
setMethod("centroid", signature(x='data.frame'),
	function(x) {
		centroid(as.matrix(x))
	})
# matrix input: centroid of a lon/lat polygon. The polygon is projected to
# the Mercator plane (unit radius), the planar centroid is computed there,
# and the result is projected back to degrees.
setMethod("centroid", signature(x='matrix'),
	function(x) {
		x <- .pointsToMatrix(x, poly=TRUE)
		# A longitude span > 180 degrees suggests the polygon crosses the
		# antimeridian; try rotating longitudes by 180 degrees and keep the
		# rotation when it makes the span smaller.
		dif1 <- max(x[,1]) - min(x[,1])
		rotated <- FALSE
		if (dif1 > 180) {
			x2 <- x
			x2[,1] <- x2[,1]%%(360) - 180
			# NOTE(review): dif1 is recomputed on the unrotated x here, so it
			# is unchanged -- harmless but redundant.
			dif1 <- max(x[,1]) - min(x[,1])
			dif2 <- max(x2[,1]) - min(x2[,1])
			if (dif2 < dif1) {
				rotated <- TRUE
				x <- x2
			}
		}
		# project, compute the planar centroid, and unproject
		x <- mercator(x, r=1)
		cenM <- .basiccentroid(x)
		cenG <- mercator(cenM, r=1, inverse=TRUE)
		if (rotated) {
			# undo the 180-degree rotation applied above and renormalize
			cenG[,1] <- cenG[,1] + 180
			cenG[,1] <- .normalizeLonDeg(cenG[,1])
		}
		rownames(cenG) <- NULL
		return(cenG)
	}
)
# SpatialPolygons input: one centroid per feature. Projected data fall back
# to sp's coordinates(); lon/lat data use the matrix method on each
# feature's largest non-hole ring.
setMethod("centroid", signature(x='SpatialPolygons'),
	function(x) {
		if ( isTRUE(is.projected(x)) ) {
			return( coordinates(x))
		}
		x <- x@polygons
		n <- length(x)
		res <- matrix(nrow=n, ncol=2)
		for (i in seq_len(n)) {
			# area and hole flag for every ring of feature i
			parea <- sapply(x[[i]]@Polygons, function(y){ methods::slot(y, "area")} )
			hole <- sapply(x[[i]]@Polygons, function(y){ methods::slot(y, "hole")} )
			# holes must never be selected; push their area below any ring's
			parea[hole] <- -1
			j <- which.max(parea)
			crd <- x[[i]]@Polygons[[j]]@coords
			res[i,] <- centroid(crd)
		}
		return(res)
	} )
| /R/centroid.R | no_license | edzer/geosphere | R | false | false | 1,759 | r | # Author: Robert J. Hijmans
# April 2010
# version 0.1
# license GPL3
# See http://local.wasp.uwa.edu.au/~pbourke/geometry/polyarea/
.basiccentroid <- function(p) {
p2 = rbind(p[-1,], p[1,])
P = p[,1] * p2[,2] - p2[,1] * p[,2]
area6 <- 6 * sum(P) / 2
lon <- sum((p[,1] + p2[,1]) * P)
lat <- sum((p[,2] + p2[,2]) * P)
return(cbind(lon, lat) / area6 )
}
if (!isGeneric("centroid")) {
setGeneric("centroid", function(x, ...)
standardGeneric("centroid"))
}
setMethod("centroid", signature(x='data.frame'),
function(x) {
centroid(as.matrix(x))
})
setMethod("centroid", signature(x='matrix'),
function(x) {
x <- .pointsToMatrix(x, poly=TRUE)
dif1 <- max(x[,1]) - min(x[,1])
rotated <- FALSE
if (dif1 > 180) {
x2 <- x
x2[,1] <- x2[,1]%%(360) - 180
dif1 <- max(x[,1]) - min(x[,1])
dif2 <- max(x2[,1]) - min(x2[,1])
if (dif2 < dif1) {
rotated <- TRUE
x <- x2
}
}
x <- mercator(x, r=1)
cenM <- .basiccentroid(x)
cenG <- mercator(cenM, r=1, inverse=TRUE)
if (rotated) {
cenG[,1] <- cenG[,1] + 180
cenG[,1] <- .normalizeLonDeg(cenG[,1])
}
rownames(cenG) <- NULL
return(cenG)
}
)
setMethod("centroid", signature(x='SpatialPolygons'),
function(x) {
if ( isTRUE(is.projected(x)) ) {
return( coordinates(x))
}
x <- x@polygons
n <- length(x)
res <- matrix(nrow=n, ncol=2)
for (i in 1:n) {
parts <- length(x[[i]]@Polygons )
parea <- sapply(x[[i]]@Polygons, function(y){ methods::slot(y, "area")} )
hole <- sapply(x[[i]]@Polygons, function(y){ methods::slot(y, "hole")} )
parea[hole] <- -1
j <- which.max(parea)
crd <- x[[i]]@Polygons[[j]]@coords
res[i,] <- centroid(crd)
}
return(res)
} )
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/addInfoFilename.R
\name{addInfoFilename}
\alias{addInfoFilename}
\title{Add extra info to file}
\usage{
addInfoFilename(albumFolder, extraInfo, musicExtension = ".mp3")
}
\arguments{
\item{albumFolder}{Folder where the files are located.}
\item{extraInfo}{String with the extra info that will be located at the end of the file.}
\item{musicExtension}{Extension of the files that will be matched for the substitution.}
}
\value{
\code{NULL}. This function just makes changes to file names.
}
\description{
Add extra info to file
}
| /man/addInfoFilename.Rd | no_license | LuisLauM/ruisu | R | false | true | 611 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/addInfoFilename.R
\name{addInfoFilename}
\alias{addInfoFilename}
\title{Add extra info to file}
\usage{
addInfoFilename(albumFolder, extraInfo, musicExtension = ".mp3")
}
\arguments{
\item{albumFolder}{Folder where the files are located.}
\item{extraInfo}{String with the extra info that will be located at the end of the file.}
\item{musicExtension}{Extension of the files that will be matched for the substitution.}
}
\value{
\code{NULL}. This function just makes changes to file names.
}
\description{
Add extra info to file
}
|
# accersi: fetch/summon
# divido: divide!
# expello: banish
# mundus: clean
# percursant: scour
# revelare: reveal
#### General Graphic Stuff ####
default_minor_time_axis_size <- 4.285714285;
make_font_Arial <- function() {
arial <- quartzFonts(arial = c("Arial","Arial-Bold","Arial-Italic","Avenir-BoldItalic"))
}
makeTransparent <- function(someColor, alpha=100) {
newColor <- col2rgb(someColor)
apply(newColor, 2, function(curcoldata)X = {
rgb(red=curcoldata[1], green=curcoldata[2],blue=curcoldata[3],alpha=alpha, maxColorValue=255)}
)
}
# Lookup table relating three reference cex sizes to the measured widths of
# the basic pch outline shapes (circle, square, triangle, diamond) at those
# sizes; used by accersi_cex_size_for_shape() for area matching.
shape_size_rosetta_stone <- function() {
data.frame(size      = c(1.5, 1.0, 2/3),
           circles   = c(8.11, 5.4, 3.6),
           squares   = c(7.18, 4.79, 3.19),
           triangles = c(10.905, 7.27, 4.846667),
           diamonds  = c(10.15, 6.765, 4.51))
}
# Convert a cex value calibrated for squares into the cex needed for another
# pch so that the plotted symbol covers (approximately) the same area.
# `cex_size`: square-calibrated size; `shape_pch`: target pch code. pch
# codes not in the circle/triangle/diamond groups (i.e. squares and
# anything unrecognized) are returned unchanged.
accersi_cex_size_for_shape <- function(cex_size,shape_pch) {
rosetta_stone <- shape_size_rosetta_stone();
# pch codes grouped by basic outline shape
circle_pchs <- c(1,8,10,16,19,20,21);
square_pchs <- c(0,3,4,7,12,13,14,15,22);
triangle_pchs <- c(2,6,11,17,24,25);
diamond_pchs <- c(5,9,18,23);
# target area: that of a square of width (measured cex=1 square width * cex)
area_to_match <- (rosetta_stone$squares[2]*cex_size)^2
if (!is.na(match(shape_pch,circle_pchs))) {
# circle: solve area = pi*(d/2)^2 for the diameter, then scale by the
# measured cex=1 circle width
rosetta_circle <- match("circles",colnames(rosetta_stone));
new_cex <- 2*sqrt(area_to_match/pi)/rosetta_stone[2,rosetta_circle];
} else if (!is.na(match(shape_pch,triangle_pchs))) {
# equilateral triangle: area = sqrt(3)*b^2/4 at cex=1; scale linearly
rosetta_triangle <- match("triangles",colnames(rosetta_stone));
#	h <- sqrt(((rosetta_stone[2,rosetta_triangle]/2)^2)+(rosetta_stone[2,rosetta_triangle]^2))
b <- rosetta_stone[2,rosetta_triangle];
tri_area <- sqrt(3)*(b^2)/4
new_cex <- sqrt(area_to_match/tri_area)
#	tri_area2 <- sqrt(3)*((new_cex*b)^2)/4
#	sqrt(3)*(((sqrt(area_to_match/tri_area)*rosetta_stone[2,rosetta_triangle]))^2)/4
} else if (!is.na(match(shape_pch,diamond_pchs))) {
# diamond: area = d^2/2 where d is the measured diagonal at cex=1
rosetta_diamond <- match("diamonds",colnames(rosetta_stone));
diagon <- rosetta_stone[2,rosetta_diamond];
dia_area <- (diagon^2)/2
new_cex <- sqrt(area_to_match/dia_area)
#	side <- sqrt((rosetta_stone[2,rosetta_diamond]^2)/2)
#	dia_area <- (side^2)
} else {
# squares (and unrecognized pchs): no conversion needed
new_cex <- cex_size;
}
return(new_cex);
#rossetta_stone_shape_size[1,]/rossetta_stone_shape_size[2,]}
}
#### Stratigraphic Figures ####
# Draw a horizontal stratigraphic time-scale bar onto an existing plot:
# colored boxes for each interval between onset & end, vertical boundary
# lines, and (optionally) interval names centered inside the boxes.
# time_scale: vector of interval boundary ages (one more entry than intervals).
# onset, end: plotted age window (negative ages after flipping).
# strat_colors, strat_names: per-interval fill colors & labels.
# ord, height: lower & upper y-coordinates of the bar.
# strat_label_size: cex for interval labels. axis: unused in this body.
draw_time_scale <- function(time_scale,onset,end,strat_colors="",strat_names="",line_color="black",ord,height,strat_label_size,axis=1) {
# flip a positive time scale to negative ages when onset is negative
if (onset[1]<0 && time_scale[1]>0)
time_scale <- -1*time_scale;
# indices of the first & last time-scale bins overlapping the window
first_period <- sum(abs(time_scale)>=abs(onset));
last_period <- sum(abs(time_scale)>=abs(end));
# NOTE(review): the default strat_colors="" has length 1, so this white
# fallback never fires for the default; the rep() count also looks one
# short of (last_period-first_period+1) bins — confirm intent.
if (length(strat_colors)==0) strat_colors <- rep("white",last_period-first_period);
# one filled box per interval, clipped to the onset/end window
for (b in first_period:last_period)
rect(max(onset,time_scale[b]),ord,min(end,time_scale[b+1]),height,col=as.character(strat_colors[b]),border=as.character(strat_colors[b]))
# thin vertical lines at every interval boundary
for (s in first_period:last_period) segments(time_scale[s],ord,time_scale[s],height,lwd=0.5,col=line_color)
# put in decisive final line
if (!is.na(match(end,time_scale))) segments(end,ord,end,height,col=line_color);
# heavy bottom & top rails of the bar.
# NOTE(review): the second call relies on positional matching — the
# trailing `height` lands in y1 because col/lwd are named — it works, but
# the argument order is misleading.
segments(onset,ord,end,ord,col=line_color,lwd=2)
segments(onset,height,end,col=line_color,height,lwd=2)
if (length(strat_names)>0) {
dummy <- mid <- c();
# midpoints of each (clipped) interval for label placement
for (i in first_period:last_period) {
mid <- c(mid,(max(onset,time_scale[i])+min(end,time_scale[(i+1)]))/2)
dummy <- c(dummy,(height+ord)/2);
}
#add something
# NOTE(review): mid/dummy are built by appending from first_period on, so
# their indices start at 1; subscripting them with first_period:last_period
# below is only correct when first_period==1 — confirm for windows that do
# not start at the first bin.
text(mid[first_period:last_period],dummy[first_period:last_period],label=strat_names[first_period:last_period],cex=strat_label_size);
}
}
# use this for simulated expectations (Supplementary figures)
# Set up a Phanerozoic-scale plot panel: an empty frame spanning onset..end
# (Ma, as negative ages), tiered x-axis ticks every 100/50/10 Myr, optional
# alternating background bands, an optional colored period bar along the
# base, and hard-coded period abbreviations Cm..Ng.
# NOTE(review): labels assume an 11-period Cambrian-Neogene time_scale, and
# main_time_tick is accepted but never used in this body — confirm.
# mxy, mny: y-axis limits; ordinate: y-axis label; xsize, ysize: panel size
# in inches for par(pin=...); hues: draw IGS period colors in the basal bar;
# alt_back: color (or FALSE) for shading every other time-scale bin.
Phanerozoic_Timescale_Plot <- function(onset=-541, end=0, time_scale, mxy, mny, ordinate, xsize=6, ysize=4.285714285, hues=FALSE, alt_back=FALSE, main_time_tick=100) {
# normalize all ages to negative values (time before present)
if (time_scale[1]>0) time_scale <- -1*time_scale;
if (onset>0) onset <- -1*onset;
if (end>0) end <- -1*end;
lst <- length(time_scale)
par(pin=c(xsize,ysize));
# labels for the major (100 Myr) ticks, in positive Ma, oldest first
ages <- vector(length=1+abs(ceiling(onset/100))-abs(ceiling(end/100)))
st <- abs(ceiling(onset/100))+1
for (i in 1:length(ages)) {
ages[i] <- 100*(st-i)
}
plot(NA,type='n',axes=FALSE,main="",xlab="",ylab=ordinate,xlim=c(onset,end),ylim=c(mny,mxy));
#axis(1,at=seq(onset,end,by=1),tcl=-0.0,labels=FALSE,lwd=4/3)
# major ticks (labeled), then medium (50 Myr) and minor (10 Myr) ticks,
# each excluding positions already drawn at a coarser level
axis(1,at=seq(100*ceiling(onset/100),end,by=100),tcl=-0.3,labels=ages,lwd=0,lwd.ticks=4/3)
axis(1,at=seq(100*ceiling(onset/100),end,by=50)[!seq(100*ceiling(onset/100),end,by=50)%in%seq(100*ceiling(onset/100),end,by=100)],tcl=-0.2,labels=FALSE,lwd=0,lwd.ticks=4/3)
axis(1,at=seq(10*round(onset/10),end,by=10)[!seq(10*round(onset/10),end,by=10)%in%seq(100*ceiling(onset/100),end,by=50)],tcl=-0.1,labels=FALSE,lwd=0,lwd.ticks=4/3)
# exy: half-height of the basal time-scale bar (1/25 of the y-range)
exy <- (mxy-mny)/25
if (alt_back!=FALSE) {
# shade every other time-scale bin with the alt_back color
for (b in 1:(lst-1)) {
if (b%%2==0) {
rect(time_scale[b],(mny+exy),time_scale[b+1],mxy,col=alt_back,border=alt_back)
}
}
}
#segments(onset,mny+exy,onset,mny-exy)
if (hues) {
# fill the basal bar with the standard IGS period colors
per_colors <- Phanerozoic_Period_Colors();
for (b in 1:(lst-1)) {
rect(time_scale[b],(mny+exy),time_scale[b+1],(mny-exy),col=per_colors[b],border=per_colors[b])
}
}
# boundary ticks, then top & bottom rails of the basal bar
for (s in 1:lst) segments(max(onset,time_scale[s]),mny+exy,max(onset,time_scale[s]),mny-exy)
segments(onset,mny+exy,end,mny+exy,lwd=2)
segments(onset,mny-exy,end,mny-exy,lwd=2)
# period abbreviations at the midpoint of each (clipped) bin
mid <- vector(length=(lst-1))
for (i in 1:(lst-1)) mid[i] <- (max(onset,time_scale[i])+time_scale[(i+1)])/2
# NOTE(review): label and y vectors are hard-coded to 11 entries; a
# time_scale with a different bin count will recycle or warn — confirm.
text(mid,y=c(mny,mny,mny,mny,mny,mny,mny,mny,mny,mny,mny),label=c("Cm","O","S","D","C","P","Tr","J","K","Pg","Ng"),cex=0.9)
}
# Like Phanerozoic_Timescale_Plot, but restricted to the portion of the time
# scale lying between onset & end, with caller-supplied stage_names and tick
# spacing derived from main_time_tick: major ticks every main_time_tick Myr,
# medium every main_time_tick/2, minor every main_time_tick/10.
# Relies on infer_interval_color() (defined elsewhere in this file) for the
# stage-box colors when hues=TRUE.
Phanerozoic_Timescale_Plot_Partial <- function(onset=-541, end=0, time_scale, mxy, mny, ordinate, stage_names, xsize=6, ysize=4.285714285, hues=FALSE, alt_back=FALSE, main_time_tick=100) {
#quartzFonts(arial = c("Arial", "Arial Black", "Arial Oblique","Arial Black Oblique"))
# normalize all ages to negative values (time before present)
if (time_scale[1]>0) time_scale <- -1*time_scale;
if (onset>0) onset <- -1*onset;
if (end>0) end <- -1*end;
# keep only the boundaries inside the plotted window
time_scale <- time_scale[time_scale>=onset]
time_scale <- time_scale[time_scale<=end]
lst <- length(time_scale)
par(pin=c(xsize,ysize));
#ages <- vector(length=1+abs(ceiling(onset/main_time_tick))-abs(ceiling(end/main_time_tick)))
st <- abs(ceiling(onset/main_time_tick))+1 # get increment distance
#ages <- c()
#for (i in 1:length(ages)) {
# ages[i] <- main_time_tick*(st-i)
# ages <- c(ages,main_time_tick*(st-i))
# }
# major tick positions as positive Ma values (negated when drawn below)
ages1 <- seq(main_time_tick*floor(abs(onset)/main_time_tick),main_time_tick*ceiling(abs(end)/main_time_tick),by=-1*abs(main_time_tick))
med_time_tick <- main_time_tick/2
# medium ticks, excluding positions already covered by major ticks
ages2 <- seq(med_time_tick*floor(abs(onset)/med_time_tick),med_time_tick*ceiling(abs(end)/med_time_tick),by=-1*abs(med_time_tick))[!seq(med_time_tick*floor(abs(onset)/med_time_tick),med_time_tick*ceiling(abs(end)/med_time_tick),by=-1*abs(med_time_tick)) %in% ages1]
minor_time_tick <- main_time_tick/10
# minor ticks, excluding positions covered by major or medium ticks
ages3 <- seq(minor_time_tick*floor(abs(onset)/minor_time_tick),minor_time_tick*ceiling(abs(end)/minor_time_tick),by=-1*abs(minor_time_tick))
ages3 <- ages3[!ages3 %in% c(ages1,ages2)]
# exy: half-height of the basal time-scale bar (1/25 of the y-range)
exy <- (mxy-mny)/25
plot(NA,type='n',axes=FALSE,main="",xlab="",ylab=ordinate,xlim=c(onset,end),ylim=c(mny,mxy))
#axis(1,at=seq(onset,end,by=abs(onset-end)),tcl=-0.0,labels=FALSE,lwd=4/3)
#axis(1,at=seq(main_time_tick*ceiling(onset/main_time_tick),end,by=main_time_tick),tcl=-0.3,labels=ages,lwd=0,lwd.ticks=4/3)
#axis(1,at=seq(main_time_tick*ceiling(onset/main_time_tick),end,by=(main_time_tick/2))[!seq(main_time_tick*ceiling(onset/main_time_tick),end,by=(main_time_tick/2))%in%seq(main_time_tick*ceiling(onset/main_time_tick),end,by=main_time_tick)],tcl=-0.2,labels=FALSE,lwd=0,lwd.ticks=4/3)
#axis(1,at=seq(10*round(onset/10),end,by=10)[!seq(10*round(onset/10),end,by=10)%in%seq(main_time_tick*ceiling(onset/main_time_tick),end,by=(main_time_tick/2))],tcl=-0.1,labels=FALSE,lwd=0,lwd.ticks=4/3)
if (hues) {
# per_colors <- Phanerozoic_Period_Colors()
# color each stage box via infer_interval_color() (defined elsewhere)
for (b in 1:(lst-1)) {
per_col <- infer_interval_color(abs(time_scale[b]),abs(time_scale[b+1]))
rect(time_scale[b],(mny+exy),time_scale[b+1],(mny-exy),col=per_col,border=per_col)
# rect(time_scale[b],(mny+exy),time_scale[b+1],(mny-exy),col=per_colors[b],border=per_colors[b])
}
}
#segments(onset,mny+exy,onset,mny-exy)
# boundary ticks, rails, then the tiered x-axis ticks
for (s in 1:lst) segments(time_scale[s],mny+exy,time_scale[s],mny-exy)
segments(onset,mny+exy,end,mny+exy,lwd=2)
segments(onset,mny-exy,end,mny-exy,lwd=2)
axis(1,at=-1*ages1,tcl=-0.3,labels=ages1,lwd=0,lwd.ticks=4/3)
axis(1,at=-1*ages2,tcl=-0.2,labels=FALSE,lwd=0,lwd.ticks=4/3)
axis(1,at=-1*ages3,tcl=-0.1,labels=FALSE,lwd=0,lwd.ticks=4/3)
# NOTE(review): this alternating shading is drawn AFTER the colored bar and
# axes, so it can overpaint them — confirm the intended layer order (the
# sibling Phanerozoic_Timescale_Plot draws it before).
if (alt_back!=FALSE) {
for (b in 1:(lst-1)) {
if (b%%2==0) {
rect(time_scale[b],(mny+exy),time_scale[b+1],mxy,col=alt_back,border=alt_back)
}
}
}
#mid <- c()
# label each stage at the midpoint of its box
for (i in 1:(lst-1)) {
# mid <- c(mid,(time_scale[i]+time_scale[(i+1)])/2)
mid <- (time_scale[i]+time_scale[(i+1)])/2
text(mid,mny,label=stage_names[i],cex=0.9)
}
}
# Set up a time-scale plot panel with caller-controlled tick levels, interval
# colors and labels. Draws the frame, tiered x-axis ticks (one level per
# entry of yearbreaks), optional alternating background, a colored interval
# bar ("base") or full-height colored background ("backdrop"), and interval
# labels sized to the thinnest bin.
Phanerozoic_Timescale_Plot_Flexible <- function(onset=-541,end=0,time_scale_to_plot,mxy,mny,use_strat_labels=TRUE,strat_names,strat_colors,plot_title="",ordinate="",abscissa="Ma",yearbreaks,xsize=6, ysize=4.285714285,hues=TRUE,colored="base",alt_back=FALSE,alt_back_hue="gray90",strat_label_size=1) {
# UPDATED 2016-12-31
# Smaller updates 2018-08-31
# onset: when to start x-axis (use -100 for 100 Million years ago)
# end: when to end x-axis 0
# time_scale: vector giving start of each bin, with one more than bins
# mxy: maximum value for y-axis
# mny: minimum value for y-axis
# use_strat_labels: true means that you put labels on stratigraphic units
# strat_names: names to use for those labels
# strat_colors
# ordinate: label for Y-axis
# yearbreaks: where to put minor & major breaks; c(10,50,100) will put minor
# medium and major breaks at 10, 50 & 100 Ma marks
# xsize: dimension of X-axis for plot for par(pin=c(xsize,ysize)) command
# ysize: dimension of Y-axis for plot for par(pin=c(xsize,ysize)) command
# hues: we want colors
# colored: where the colors should be: base or background
# alt_back: alternating white and light gray backgrounds
# use_strat_labels added 2016/09/23
# normalize all ages to negative values (time before present)
if (time_scale_to_plot[1]>0) time_scale_to_plot <- -1*time_scale_to_plot;
if (onset>0) onset <- -1*onset;
if (end>0) end <- -1*end;
lst <- length(time_scale_to_plot)
# first & last time-scale bins overlapping the plotted window
first_period <- sum(time_scale_to_plot<=onset)
last_period <- sum(time_scale_to_plot<=end)
#if (is.na(match(end,time_scale))) {
# last_period <- match(end,time_scale)
# } else {
# last_period <- match(end,time_scale)-1
# }
draws <- length(yearbreaks)
stx <- vector(length=draws)
for (i in 1:draws) stx[i] <- yearbreaks[i]*ceiling(onset/yearbreaks[i])
# exy: height unit for the basal interval bar (1/25 of the y-range)
exy <- (mxy-mny)/25;
#print(c(xsize,ysize));
par(pin=c(xsize,ysize));
plot(NA,type='n',axes=FALSE,main=plot_title,xlab=abscissa,ylab=ordinate,xlim=c(onset,end),ylim=c(mny-exy,mxy))
if (max(time_scale_to_plot)<end)
end <- max(time_scale_to_plot);
#axis(1,at=seq(onset,end,by=abs(onset-end)),tcl=0.0,labels=FALSE,lwd=2)
# tick lengths per break level, shortest for the finest breaks.
# BUG FIX: the original hard-coded only draws==2 (-0.15,-0.30) and draws==3
# (-0.1,-0.2,-0.3), leaving `pts` undefined — a runtime error — for any
# other yearbreaks length. This seq() reproduces those two cases exactly
# and generalizes to any draws >= 1.
pts <- seq(-0.3/draws,-0.3,length.out=draws)
for (i in 1:draws) {
on <- yearbreaks[i]*ceiling(onset/yearbreaks[i]);
en <- yearbreaks[i]*floor(end/yearbreaks[i]);
tcs <- seq(on,en,by=yearbreaks[i]);
if (i < draws) {
# drop tick positions already drawn at the next-coarser level
on2 <- yearbreaks[i+1]*ceiling(onset/yearbreaks[i+1]);
en2 <- yearbreaks[i+1]*floor(end/yearbreaks[i+1]);
tcs <- tcs[!tcs %in% seq(on2,en2,by=yearbreaks[i+1])];
axis(1,at=tcs,tcl=pts[i],labels=FALSE,lwd=0,lwd.ticks=4/3);
} else {
# coarsest level gets labels (as positive Ma)
axis(1,at=tcs,tcl=pts[i],labels=abs(tcs),lwd=0.0,lwd.ticks=4/3);
}
}
# alternating background shading for every other bin
if (alt_back && colored!="backdrop")
for (b in first_period:last_period)
if (gtools::even(b)) rect(max(onset,time_scale_to_plot[b]),mny,min(end,time_scale_to_plot[b+1]),mxy,col=alt_back_hue,border=alt_back_hue)
if (hues) {
if (colored=="base") {
# colored strip along the base only
for (b in first_period:last_period)
rect(max(onset,time_scale_to_plot[b]),mny,min(end,time_scale_to_plot[b+1]),(mny-2*exy),col=as.character(strat_colors[b]),border=as.character(strat_colors[b]),lwd=0);
} else {
# full-height colored background, washed out with translucent white
for (b in first_period:last_period)
rect(max(onset,time_scale_to_plot[b]),mny,min(end,time_scale_to_plot[b+1]),mxy,col=strat_colors[b],border=strat_colors[b],lwd=0);
rect(onset,mny+exy,end,mxy,col=makeTransparent("white",50),border=makeTransparent("white",50))
}
}
# put in lines separating stages
for (s in first_period:last_period) segments(time_scale_to_plot[s],mny,time_scale_to_plot[s],mny-2*exy,lwd=0.5,col="gray25")
# put in decisive final line
if (!is.na(match(end,time_scale_to_plot))) segments(end,mny,end,mny-2*exy);
segments(onset,mny,end,mny,lwd=2)
segments(onset,mny-2*exy,end,mny-2*exy,lwd=2)
if (use_strat_labels) {
# scale label size to the thinnest bin's share of the plotted width
thin_bin <- min(abs(time_scale_to_plot[2:length(time_scale_to_plot)]-time_scale_to_plot[1:(length(time_scale_to_plot)-1)])/abs(onset-end));
mid <- vector(length=(lst-1));
for (i in 1:(lst-1)) mid[i] <- (max(onset,time_scale_to_plot[i])+min(end,time_scale_to_plot[(i+1)]))/2;
dummy <- vector(length=(lst-1));
for (i in 1:(lst-1)) dummy[i] <- mny-exy;
#add something
font_size <- min(2,thin_bin*xsize*strat_label_size);
# print(c(thin_bin,xsize,strat_label_size));
# print(font_size);
text(mid[first_period:last_period],dummy[first_period:last_period],label=strat_names[first_period:last_period],cex=font_size);
}
}
# Vertical-axis counterpart of Phanerozoic_Timescale_Plot_Flexible: time runs
# along the y-axis, the colored interval bar occupies the left margin (width
# controlled by stage_box_width), and tick levels come from yearbreaks.
Phanerozoic_Timescale_Plot_Flexible_Ordinate <- function(onset=-541,end=0,time_scale_to_plot,mxx,mnx,use_strat_labels=TRUE,strat_names,strat_colors,plot_title="",abscissa="",ordinate="Ma",yearbreaks,ysize=6,xsize=4.285714285,hues=TRUE,colored="base",alt_back=FALSE,alt_back_hue="gray90",strat_label_size=1,stage_box_width=0.025) {
# UPDATED 2016-12-31
# Smaller updates 2018-08-31
# Smaller updates 2019-09-16
# Smaller updates 2019-10-18
# onset: when to start x-axis (use -100 for 100 Million years ago)
# end: when to end x-axis 0
# time_scale: vector giving start of each bin, with one more than bins
# mxy: maximum value for y-axis
# mny: minimum value for y-axis
# use_strat_labels: true means that you put labels on stratigraphic units
# strat_names: names to use for those labels
# strat_colors
# ordinate: label for Y-axis
# yearbreaks: where to put minor & major breaks; c(10,50,100) will put minor
# medium and major breaks at 10, 50 & 100 Ma marks
# xsize: dimension of X-axis for plot for par(pin=c(xsize,ysize)) command
# ysize: dimension of Y-axis for plot for par(pin=c(xsize,ysize)) command
# hues: we want colors
# colored: where the colors should be: base or background
# alt_back: alternating white and light gray backgrounds
# use_strat_labels added 2016/09/23
# normalize all ages to negative values (time before present)
if (time_scale_to_plot[1]>0) time_scale_to_plot <- -1*time_scale_to_plot;
if (onset>0) onset <- -1*onset;
if (end>0) end <- -1*end;
lst <- length(time_scale_to_plot)
# first & last time-scale bins overlapping the plotted window
first_period <- sum(time_scale_to_plot<=onset)
last_period <- sum(time_scale_to_plot<=end)
draws <- length(yearbreaks)
stx <- vector(length=draws)
for (i in 1:draws) stx[i] <- yearbreaks[i]*ceiling(onset/yearbreaks[i])
# exx: width of the stage-box strip on the left margin
exx <- stage_box_width*(mxx-mnx);
#print(c(xsize,ysize));
par(pin=c(xsize,ysize));
#plot(NA,type='n',axes=FALSE,main=plot_title,ylab=ordinate,xlab=abscissa,ylim=c(onset,end),xlim=c(mnx-exx,mxx));
plot(NA,type='n',axes=FALSE,main=plot_title,ylab=ordinate,xlab=abscissa,ylim=c(onset,end),xlim=c(mnx-exx,mxx),xaxs="i",yaxs="i");
#axis(2,at=seq(onset,end,by=abs(onset-end)),tcl=0.0,labels=FALSE,lwd=2);
# set size of ticks
# BUG FIX: the original hard-coded only draws==2 (-0.15,-0.30) and draws==3
# (-0.1,-0.2,-0.3), leaving `pts` undefined — a runtime error — for any
# other yearbreaks length. This seq() reproduces those two cases exactly
# and generalizes to any draws >= 1.
pts <- seq(-0.3/draws,-0.3,length.out=draws)
for (i in 1:draws) {
on <- yearbreaks[i]*ceiling(onset/yearbreaks[i]);
en <- yearbreaks[i]*floor(end/yearbreaks[i]);
tcs <- seq(on,en,by=yearbreaks[i]);
if (i < draws) {
# drop tick positions already drawn at the next-coarser level
on2 <- yearbreaks[i+1]*ceiling(onset/yearbreaks[i+1]);
en2 <- yearbreaks[i+1]*floor(end/yearbreaks[i+1]);
tcs <- tcs[!tcs %in% seq(on2,en2,by=yearbreaks[i+1])]
axis(2,at=tcs,tcl=pts[i],labels=FALSE,lwd=0,lwd.ticks=4/3);
} else {
# coarsest level gets horizontal labels (as positive Ma)
axis(2,at=tcs,tcl=pts[i],labels=abs(tcs),lwd=0.0,lwd.ticks=4/3,las=2)
}
}
# alternating background shading for every other bin
if (alt_back && colored!="backdrop")
for (b in first_period:last_period)
if (gtools::even(b)) rect(max(onset,time_scale_to_plot[b]),mnx,min(end,time_scale_to_plot[b+1]),mxx,col=alt_back_hue,border=alt_back_hue)
if (hues) {
if (colored=="base") {
# colored strip along the left margin only
for (b in first_period:last_period)
rect(0,max(onset,time_scale_to_plot[b]),(mnx-exx),min(end,time_scale_to_plot[b+1]),col=as.character(strat_colors[b]),border=as.character(strat_colors[b]));
# rect(mnx,max(onset,time_scale_to_plot[b]),0,min(end,time_scale_to_plot[b+1]),col=as.character(strat_colors[b]),border=as.character(strat_colors[b]));
} else {
# full-width colored background, washed out with translucent white
for (b in first_period:last_period)
rect(mnx,max(onset,time_scale_to_plot[b]),mxx,min(end,time_scale_to_plot[b+1]),col=strat_colors[b],border=strat_colors[b])
rect(mnx+exx,onset,mxx,end,col=makeTransparent("white",50),border=makeTransparent("white",50))
}
}
# put in lines separating stages
for (s in first_period:last_period) segments(0,time_scale_to_plot[s],mnx-exx,time_scale_to_plot[s],lwd=0.5,col="gray25");
# put in decisive final line
if (!is.na(match(end,time_scale_to_plot))) segments(0,end,mnx-exx,end);
segments(0,onset,0,end,lwd=2);
axis(2,at=seq(onset,end,by=abs(onset-end)),tcl=0.0,labels=FALSE,lwd=2);
#points(0,-450)
#points(mnx,-450)
#points(mnx-exx,-450)
if (use_strat_labels) {
# scale label size to the thinnest bin's share of the plotted span
thin_bin <- min(abs(time_scale_to_plot[2:length(time_scale_to_plot)]-time_scale_to_plot[1:(length(time_scale_to_plot)-1)])/abs(onset-end));
mid <- vector(length=(lst-1));
for (i in 1:(lst-1)) mid[i] <- (max(onset,time_scale_to_plot[i])+min(end,time_scale_to_plot[(i+1)]))/2;
dummy <- vector(length=(lst-1));
for (i in 1:(lst-1)) dummy[i] <- (mnx-exx)/2;
#add something
text(dummy[first_period:last_period],mid[first_period:last_period],label=strat_names[first_period:last_period],cex=min(2,thin_bin*xsize*strat_label_size));
}
}
#### routines to get IGS colors for geological ages
Phanerozoic_Period_Colors <- function(Carboniferous=TRUE) {
# Return a named character vector of IGS/ICS hex colors for Phanerozoic periods.
#
# Carboniferous: if TRUE (default), return 11 entries with a single
#	"Carboniferous" period; otherwise return 12 entries with the
#	Carboniferous split into "Mississippian" & "Pennsylvanian".
#
# Returns: character vector of "#RRGGBB" strings named by period.
if (Carboniferous) {
	# use character() (not vector(), which is logical) so no coercion occurs
	period_col <- character(11)
	names(period_col) <- c("Cambrian","Ordovician","Silurian","Devonian","Carboniferous","Permian","Triassic","Jurassic","Cretaceous","Paleogene","Neogene")
	period_col[5] <- "#67A599" # Carboniferous
	period_col[6] <- "#F04028" # Permian
	period_col[7] <- "#812B92" # Triassic
	period_col[8] <- "#34B2C9" # Jurassic
	period_col[9] <- "#7FC64E" # Cretaceous
	period_col[10] <- "#F4B470" # Paleogene
	period_col[11] <- "#FFE619" # Neogene
	} else {
	# NOTE: the vector must exist before its names can be assigned; the
	# original code did these two steps in the wrong order, which made
	# Carboniferous=FALSE error out ("object 'period_col' not found").
	period_col <- character(12)
	names(period_col) <- c("Cambrian","Ordovician","Silurian","Devonian","Mississippian","Pennsylvanian","Permian","Triassic","Jurassic","Cretaceous","Paleogene","Neogene")
	period_col[5] <- "#678F66" # Mississippian
	period_col[6] <- "#99C2B5" # Pennsylvanian
	period_col[7] <- "#F04028" # Permian
	period_col[8] <- "#812B92" # Triassic
	period_col[9] <- "#34B2C9" # Jurassic
	period_col[10] <- "#7FC64E" # Cretaceous
	period_col[11] <- "#F4B470" # Paleogene
	period_col[12] <- "#FFE619" # Neogene
	}
# the first four periods share the same colors in both schemes
period_col[1] <- "#7FA056" # Cambrian
period_col[2] <- "#009270" # Ordovician
period_col[3] <- "#B3E1B6" # Silurian
period_col[4] <- "#CB8C37" # Devonian
return(period_col)
}
Phanerozoic_Interval_Colors <- function() {
# Era-level IGS colors for the Phanerozoic.
#
# Returns: a 3x2 character matrix; column 1 holds the era name,
#	column 2 the corresponding "#RRGGBB" hex color.
interval_colors <- c("Cenozoic","#F2F91D")
# stack the remaining eras in one rbind() call
interval_colors <- rbind(interval_colors,
	c("Mesozoic","#67C5CA"),
	c("Paleozoic","#99C08D"))
return(interval_colors)
}
accersi_time_scale_color <- function() {
interval <- c("Eoarchean","Archean","Precambrian","Precambrian-Cambrian","Precambrian-Paleozoic","Precambrian-Phanerozoic","Paleoarchean","Mesoarchean","Kenoran","Neoarchean","Siderian","Aphebian","Paleoproterozoic","Proterozoic","Proterozoic-Cambrian","Proterozoic-Paleozoic","Rhyacian","Orosirian","Hudsonian","Statherian","Calymmian","Paleohelikian","Mesoproterozoic","Elsonian","Ectasian","Neohelikian","Stenian","Tonian","Neoproterozoic","Neoproterozoic-Cambrian","Neoproterozoic-Paleozoic","Cryogenian","Hadrynian");
interval <- c(interval,"Ediacaran","Fortunian","Terreneuvian","Begadean","Early Cambrian","Waucoban","Cambrian","Cambrian-Ordovician","Paleozoic","Phanerozoic","Stage 2","Wyattia","Fritzaspis","Stage 3","Epoch 2","Series 2","Fallotaspis","Montezuman","Nevadella","Olenellus","Dyeran","Stage 4","Middle Cambrian","Albertan","Eokochaspis nodosa","Delamaran","Amecephalus arrojosensis","Plagiura-Poliella","Wuliuan","Stage 5","Epoch 3","Series 3","Albertella","Ptychagnostus praecurrens","Topazan","Ptychagnostus gibbus","Drumian","Bolaspidella","Marjuman","Dresbachian","Late Cambrian","Guzhangian","Cedaria","Crepicephalus","Aphelaspis","Paibian","Steptoean","Furongian","Franconian","Dunderbergia","Jiangshanian","Elvinia","Taenicephalus","Sunwaptan","Pseudoyuepingia asaphoides","Trempealeauan","Ellipsocephaloides-Idahoia","Saukiella junia/Saukiella pyrene","Nelegerian","Stage 10","Saukiella serotina","Eurekia apopsis","Missisquoia","Skullrockian","Symphysurina brevispicata","Symphysurina bulbosa","Iapetognathus fluctivagus","Tremadocian","Early Ordovician","Ordovician","Ordovician-Silurian","Cordylodus angulatus","Rossodus manitouensis","Macerodus dianae","Stairsian","Acodus delatus/Oneotodus costatus","Tulean","Floian","Arenigian","Oepikodus communis","Blackhillsian","Reutterodus andinus","Microzarkodina flabellum/Tripodus laevus","Dapingian","Middle Ordovician","Whiterock","Histiodella altifrons","Histiodella sinuosa","Darriwilian","Llanvirnian","Histiodella holodentata","Chazyan","Phragmodus polonicus","Cahabagnathus friendsvillensis","Llandeilo","Cahabagnathus sweeti","Blackriverian","Sandbian","Caradocian","Late Ordovician","Plectodina aculeata","Rocklandian-Kirkfield","Rocklandian-Shermanian","Rocklandian","Mohawkian","Erismodus quadridactylus","Kirkfield","Belodina compressa","Shermanian","Phragmodus undatus","Edenian-Maysvillian","Plectodina tenuis","Katian","Belodina confluens","Edenian","Oulodus velicuspis","Maysvillian","Richmondian-Hirnantian","Oulodus 
robustus","Aphelognathus grandis","Richmondian","Ashgill","Aphelognathus divergens","Aphelognathus shatzeri","Hirnantian","Gamachian","Distomodus kentuckyensis","Rhuddanian","Alexandrian","Llandovery","Early Silurian","Silurian","Silurian-Devonian","Aspelunda expansa","Aeronian","Pterospathodus tenuis","Distomodus staurognathoides","Telychian","Niagaran","Pterospathodus eopennatus","Pterospathodus amorphognathoides angulatus","Pterospathodus amorphognathoides lennarti","Pterospathodus amorphognathoides lithuanicus","Pterospathodus amorphognathoides amorphognathoides","Pterospathodus pennatus procerus","Sheinwoodian","Wenlock","Kockelella ranuliformis","Ozarkodina sagitta rhenana","Kockelella walliseri","Kockelella ortus ortus","Ozarkodina sagitta sagitta","Lockportian","Homerian","Ozarkodina bohemica longa","Kockelella ortus obsidata","Kockelella crassa","Gorstian","Ludlow","Late Silurian","Kockelella variabilis variabilis","Ancoradella ploeckensis","Ludfordian","Cayugan","Polygnathoides siluricus","Ozarkodina snajdri","Ozarkodina crispa","Ozarkodina eosteinhornensis","Pridoli","Oulodus elegans detortus","Gedinnian","Lochkovian","Helderbergian","Early Devonian","Ulsterian","Devonian","Devonian-Mississippian","Caudicriodus hesperius","Caudicriodus postwoschmidti","Lanea omoalpha","Lanea eleanorae","Leanea transitans","Ancryodelloides trigonicus","Masaraella pandora morpho. Beta","Pedavis gilberti","Gondwania irregularis","Siegenian","Pragian","Gondwania kindlei","Eocostapolygnathus pireneae","Eocostapolygnathus kitabicus","Emsian","Sawkillian","Deerparkian","Eocostapolygnathus excavatus");
interval <- c(interval,"Eocostapolygnathus gronbergi","Eocostapolygnathus nothoperbonus","Polygnathus inversus","Linguipolygnathus serotinus","Polygnathus costatus patulus","Polygnathus costatus partitus","Eifelian","Southwoodian","Erian","Cazenovian","Middle Devonian","Cazenovia","Polygnathus costatus costatus","Tortodus knockelianus knockelianus","Polygnathus ensensis","Polygnathus hemiansatus","Givetian","Tioughniogan","Polygnathus varcus","Schmidtognathus hermanni","Senecan","Klapperina disparilis","Mesotaxis guanwushanensis","Fingerlakesian","Frasnian","Late Devonian","Palmatolepis transitans","Palmatolepis punctata","Palmatolepis hassi","Chemungian","Palmatolepis rhenana","Palmatolepis linguiformis","Palmatolepis triangularis","Cassadagan","Famennian","Palmatolepis crepida","Chatauquan","Palmatolepis rhomboidea","Palmatolepis marginifera","Conewangan","Palmatolepis rugosa trachytera","Palmatolepis perlobata postera","Palmatolepis gracilis expansa","Siphonodella praesulcata","Siphonodella sulcata","Kinderhookian","Tournaisian","Mississippian","Carboniferous","Siphonodella duplicata","Siphonodella sandbergi-Siphonodella belkai","Siphonodella quadruplicata-Patrognathus andersoni","Gnathodus typicus-Siphonodella isosticha","Osagean","Dollimae bouckaerti","Gnathodus semiglaber-Polygnathus communis","Gnathodus pseudosemiglaber-Scallioganthus anchoralis","Visean","Gnathodus texanus","Meramecian","Gnathodus praebillineatus","Gnathodus bilineatus","Lochriea mononodosa","Chesterian","Lochriea nodosa","Lochriea ziegleri","Serpukhovian","Namurian","Lochriea cruciformis","Gnathodus bollandensis","Gnathodus postbilineatus","Declinognathodus noduliferus","Morrowan","Bashkirian","Pennsylvanian","Pennsylvanian-Permian","Idiognathoides sinuatus","Neognathodus askynensis","Idiognathodus sinuosus","Atokan","Declinognathodus marginodosus","Neognathodus atokaensis","Declinognathodus donetzianus","Moscovian","Westphalian","Neognathodus uralicus","Streptognathodus 
dissectus","Desmoinian","Neoghanthodus medexultimus-Streptognathodus-concinnus","Neoghanthodus round-Streptognathodus cancellosus","Streptognathodus subexcelsus","Kasimovian","Stephanian","Missourian","Idiognathodus sagittalis","Streptognathodus cancellosus","Idiognathodus toretzianus","Streptognathodus firmus","Streptognathodus simulator","Gzhelian","Virgilian","Streptognathodus vitali","Streptognathodus virgilicus","Streptognathodus simplex-Streptognathodus bellus","Streptognathodus wabaunsensis","Streptognathodus isolatus","Asselian","Wolfcampian","Early Permian","Cisuralian","Permian","Streptognathodus sigmoidalis-Streptognathodus cristellaris","Streptognathodus constrictus-Mesogondolella belladontae","Streptognathodus fusus","Streptognathodus postfusus","Sweetognathus merrilli-Mesogondolella uralensis","Sakmarian","Sweetognathus binodosus","Sweetognathus anceps-Mesogondolella bisselli","Sweetognathus whitei","Artinskian","Sweetognathus clarki","Neostreptognathodus pequopensis","Leonardian","Neostreptognathodus pnevi","Kungurian","Neostreptognathodus prayi","Sweetognathus guizhouensis","Mesogondolella lamberti-Neostreptognathodus sulcoplicatus","Roadian","Ufimian","Jinogondolella nankingensis","Kazanian","Guadalupian","Wordian","Jinogondolella aserrata","Jinogondolella postserrata","Capitanian","Jinogondolella shannoni","Jinogondolella altudaensis","Jinogondolella prexuanhanensis","Jinogondolella granti","Clarkina postitteri hongshuiensis","Ochoan","Clarkina postitteri postitteri","Dzhulfian","Wuchiapingian","Tatarian","Late Permian","Lopingian","Clarkina dukouensis","Clarkina asymmetrica","Clarkina leveni","Clarkina guangyuanensis","Clarkina transcaucasica","Clarkina orientalis","Clarkina longicuspidata","Clarkina wangi","Changhsingian","Clarkina subarinata","Clarkina changxingensis","Clarkina deflecta-Clarkina yini","Clarkina zhejiangensis-Clarkina meishanensis","lower Otoceras boreale","Hindeodus parvus","upper Otoceras 
boreale","Griesbachian","Induan","Scythian","Early Triassic","Triassic","Triassic-Jurassic","Mesozoic","Ophiceras commune");
interval <- c(interval,"Isarcicella isarcica","Proptychites rosenkrantzi strigatus","Neogondollela krystyni","Sweetospathodus kummeli","Proptychites candidus","Dienerian","Neospathodus dieneri Morph 3","Vavilovites sverdrupi","Neospathodus waageni","Hedenstroemia hedenstroemi","Smithian","Olenekian","Euflemingites romunderi","Borinella buurensis-Scythogondolella milleri","Anawasatchites tardus","Bajarunia eumphala","Spathian","Neospathodus pingdingshanensis","Icriospathodus collinsoni","Olenikites pilaticus","Neospathodus triangularis","Triassospathodus sosioensis","Chiosella gondolelloides","Chiosella timorensis","Anisian","Middle Triassic","Siberlingites mulleri","Neogondolella? regalis","Lenotropites caurus","Anagymnotoceras varium","Paragondolella excelsa","Eogymnotoceras deleeni","Frechites chischa","Eoprotrachyceras matutinum","Ladinian","Budurovignathus truempyi","Tuchodiceras poseidon","Budurovignathus hungaricus","Meginoceras meginae","Macl. maclearni","Paragondolella inclinata","Frankites sutherlandi","Metapolygnathus intermedius","Daxatina canadensis","Julian","Carnian","Late Triassic","Metapolygnathus tadpole","Tachyceras desatoyense","Austrotrachyceras obesum","Sirenites nanseni","Metapolygnathus polygnathiformis","Tropites dilleri","Metapolygnathus carpathicus","Tropites welleri","Metapolygnathus nodosus","Tuvalian","Klamathites macrolobatus","Metapolygnathus primitius","Lacian","Stikinoceras kerri","Norian","Epigondolella quadrata","Malayites dawsoni","Epigondolella triangularis","Juvavites magnus","Drepanites rutherfordi","Cypriodella postera","Alaunian","Mesohimavatites columbianus","Cypriodella spiculata","Cypriodella postera","Sevatian","Cypriodella serrulata","Cypriodella bidentata","Gnomohalorites cordilleranus","Cypriodella mosheri","Chochloceras amoenum","Rhaetian","Norigondolella sp.","Choristoceras crickmayi","Misikella posternstenini","Psiloceras planorbis","Hettangian","NJ1","Early Jurassic","Jurassic","Jurassic-Cretaceous","Alsatites 
liasicus","Schlotheimia angulata","Arietites bucklandi","NJ2","Sinemurian","Arnioceras semicostatum","Caenisites turneri","Asteroceras obtusum","Oxynoticeras oxynotum","NJ3","Echinoceras raricostatum","Uptonia jamesoni","Pliensbachian","Tragophylloceras ibex","NJ4","Prodactylioceras davoei","Amaltheus margaritatus","NJ5","Pleuroceras spinatum","NJ6","Dactylioceras tenuicostatum","Toarcian","NJ7","Harpoceras falciferum","Hildoceras bifrons","Haugia variabilis","NJ8","Grammoceras thouarsense","Dumortieria levesquei","Pleydellia aalensis","Leioceras opalinum","Aalenian","Middle Jurassic","Ludwigia murchisonae","Brasilia bradfordensis","NJ9","Graphoceras concavum","Hyperlioceras discites","Bajocian","NJ10","Witchellia laeviuscula","Stephanoceras humphriesianum","Strenoceras niortense","Garantiana garantiana","Parkinsonia parkinsoni","Zigzagiceras zigzag","Bathonian","NJ11","Procerites progracilis","Tulites subcontractus","Morrisiceras morrisi","Procerites hodsoni","Oxycerites orbis","Clydoniceras discus","Macrocephalites herveyi","Callovian","Prolanulites koenigi","NJ12","Sigloceras calloviense","Kosmoceras jason","Erymnoceras coronatum","Peltoceras athleta","Quenstedtoceras lamberti","Quenstedtoceras mariae","Oxfordian","Late Jurassic","NJ13","Cardioceras cordatum","NJ14","Perisphinctes plicatilis","Perisphinctes pumilus","Perisphinctes cautisnigrae","NJ15","Ringsteadia pseudocordata","Pictonia baylei","Kimmeridgian","Rasenia cymodoce","Aulacostephanus mutabilis","Aulacostephanus eudoxus","Aulacostephanus autissiodorensis","Tithonian","Pectinatites elegans","Pectinatites scitulus","NJ16","Pectinatites wheatleyensis","Pectinatites hudlestoni","Pavlovia pallasioides","NJ17","Pavlovia rotunda","Virgatopavlovia fittoni","Progalbanites albani","Glaucolithites glaucolithus","Galbanites okusensis","Galbanites kerberus","Titanites anguiformes","Paracraspedites oppressus","Subcraspedites primitivus","Subcraspedites preplicomphalus","Portlandian","Berriasian","Early 
Cretaceous","Cretaceous","Subcraspedites lamplughi","CC1","Runctonia runctoni");
interval <- c(interval,"CC2","Hectoroceras kochi","Surites icenii","Surites stenomphalus","Peregrinoceras albidum","CC3","Valanginian","Paratollia/Platylenticeras","Polyptychites","CC4","Prodichotomites","Dichotomites","Stolcoceras tuberulatum","Eleniceras paucinodum","Endemoceras amblygonium","Endemoceras noricum","Endemoceras regale","Speetoniceras inversum","Milanowskia speetonensis","Hauterivian","Craspedodiscus gottschei","CC5","Simbirskites marginatus","Simbirskites variabilis","Paracrioceras rarocinctum","Barremian","Haplocrioceras fissicostatum","CC6","Paracrioceras elegans","Paracrioceras denckmanni","Ancyloceras inexum/S. pingue","Simanocyloceras stolleyi","Parancyloceras bidentatum/Parancyloceras scalare","CC7","Deshayesites forbesi","Aptian","Deshayesites deshayesi","Tropaeum bowerbanki","Epicheloniceras martinoides","Parahoplites nutfieldiensis","Korangan","Hypacanthoplites jacobi","Leymeriella schrammeni","Albian","CC8","Douvilleiceras mammillatum","Hoplites dentatus","Euhoplites loricatus","Urutawan","Euhoplites lautus","Diploceras cristatum","Mortoniceras pricei","Mortoniceras inflatum","Mortoniceras fallax","Motuan","CC9","Mortoniceras rostratum","Mortoniceras perinflatum","Arrhaphoceras briacensis","Cenomanian","Late Cretaceous","Cretaceous-Paleogene","Neogastroplites haasi","Mantelliceras mantelli","Ngaterian","Neogastroplites cornutus","Neogastroplites muelleri","Neogastroplites americanus","Mantelliceras dixoni","Neogastroplites maclearni","Conlinoceras tarrantense - Conlinoceras gilberti","Acanthoceras granerosense","Acanthoceras muldoonense","Acanthoceras bellense","Acanthoceras amphibolum","Pleisacanthoceras wyomingense","CC10","Dunveganoceras pondi","Dunveganoceras problematicum","Dunveganoceras albertense","Arowhanan","Dunveganoceras conditum","Sciponoceras gracile (Vasoceras diartianum)","Sciponoceras gracile (Euomphaloceras septemseriatum)","Burroceras clydense","Neocardioceras juddii","Nigericeras scotti","Watinoceras 
devonense","Pseudaspidoceras flexuosum","CC11","Turonian","Vascoceras birchbyi","Mammites nodosoides","Collingnoniceras woollgari","Collingnoniceras praecox","Mangaotanean","Prionocyclus hyatti","Prionocyclus macombi","Scaphites warreni","Scaphites whitfieldi","CC12","Scaphites nigricollensis","Prionocyclus germari","Scaphites mariasensis","Scaphites preventricosus","CC13","Coniacian","Scaphites ventricosus","Teratan","Scaphites depressus","CC14","Clioscaphites saxitonianus","Piripauan","CC15","Santonian","Clioscaphites vermiformis","Clioscaphites choteauensis","Desmoscaphites erdmanni","CC16","Desmoscaphites bassleri","CC17","Scaphites leei","Haumurian","CC18","Scaphites hippocrepis","Campanian","Scaphites hippocrepis II","Scaphites hippocrepis III","Baculites sp. (smooth)","Baculites sp. (weak flank ribs)","Baculites obtusus","CC19","Baculites maclearni","Baculites asperiformis","Baculites sp. (smooth)","Baculites perplexus","Baculites gregoryensis","CC20","Baculites reduncus","Baculites scotti","CC21","Didymoceras nebrascense","CC22","Didymoceras stevensoni","CC23","Exiteloceras jenneyi","Didymoceras cheyennense","Baculites compressus","Baculites cuneatus","Baculites reesidei","Baculites jenseni","Baculites eliasi","Baculites baculus","CC24","Maastrichtian","Baculites clinolobatus","Hoploscaphites birkelundae","CC25","Hoploscaphites nicolleti","Jeletzkytes nebrascensis","CC26","Puercan","NP1","Danian","Early Paleocene","Paleocene","Teurian","Early Tertiary","Paleogene","Tertiary","Cenozoic","Torrejonian","NP2","NP3","Tiffanian","NP4","Selandian","Middle Paleocene","Selandian-Thanetian","NP5","Thanetian","Late Paleocene","NP6","NP7","Clarkforkian","NP8","NP9","Wasatchian","Ypresian","Early Eocene","Eocene","Waipawan","NP10","NP11","Mangaorapan","NP12","Bridgerian","NP13","NP14","Heretaungan","Uintan","Lutetian","Middle Eocene","NP15","Porangan","NP16","Bortonian","Bartonian","NP17","Duchesnean","Jacksonian","Priabonian","Chadronian","Late 
Eocene","NP18","Kaiatan","Runangan","NP19-20","Whaingaroan","NP21","Orellan","Rupelian","Early Oligocene");
interval <- c(interval,"Oligocene","Middle Tertiary","NP22","NP23","Whitneyan","Geringian","NP24","Arikareean","Chattian","Late Oligocene","NP25","Duntroonian","Monroecreekian","Waitakian","Harrisonian","NN1","Aquitanian","Early Miocene","Miocene","Neogene","Late Tertiary","NN2","Otaian","Burdigalian","NN3","Altonian","Hemingfordian","NN4","Barstovian","Langhian","Middle Miocene","Clifdenian","NN5","Lillburnian","Serravallian","NN6","Clarendonian","Waiauan","NN7","Tortonian","Late Miocene","Tongaporutuan","NN8","NN9","Hemphillian","NN10","NN11","Messinian","Kapitean","NN12","Zanclean","Early Pliocene","Pliocene","Opoitian","NN13","Blancan","NN14","NN15","NN16","Waipipian","Piacenzian","Late Pliocene","Mangapanian","NN17","Gelasian","Early Pleistocene","Pleistocene","Quaternary","NN18","Nukumaruan","NN19","Calabrian","Irvingtonian","Castlecliffian","Middle Pleistocene","Ionian","NN20","Haweran","Rancholabrean","NN21","Wisconsinan","Late Pleistocene","Holocene");
interval <- c(interval,"Oandu","Rakvere","Nabala","Vormsi","Pirgu","Tremadoc");
interval <- c(interval,"Sa1","Sa2","Ka1","Ka2-3","Ka4","Stage 10");
color <- c("#DA037F","#F0047F","#F04370","#D6D6D6","#EBEBEB","#FFFFFF","#F444A9","#F768A9","#F99BC1","#F99BC1","#F74F7C","#F74370","#F74370","#F73563","#C7C7C7","#B5B5B5","#F75B89","#F76898","#F875A7","#F875A7","#FDC07A","#FDC07A","#FDB462","#F8C682","#F3CC8A","#FED99A","#FED99A","#FEBF4E","#FEB342","#A6A6A6","#B5B5B5","#FECC5C","#FECC5C");
color <- c(color,"#FED96A","#99B575","#8CB06C","#8CB06C","#8CB06C","#8CB06C","#7FA056","#409963","#99C08D","#9AD9DD","#A6BA80","#A6BA80","#A6C583","#A6C583","#99C078","#99C078","#A6C583","#A6C583","#A6C583","#99C078","#99C078","#B3CA8E","#BFD99D","#BFD99D","#B3CA8E","#BFD99D","#B3CA8E","#B3D492","#B3D492","#B3D492","#A6CF86","#A6CF86","#B3D492","#B3D492","#B3D492","#B3D492","#BFD99D","#BFD99D","#A6CF86","#D3E5B2","#B3E095","#CCDFAA","#CCDFAA","#CCDFAA","#CCEBAE","#CCEBAE","#B3E095","#B3E095","#D9EABA","#B3E095","#D9F0BB","#D9F0BB","#D9F0BB","#B3E095","#D9F0BB","#E0F0C1","#D9F0BB","#B3E095","#E6F5C9","#E6F5C9","#E6F5C9","#E6F5C9","#E6F5C9","#99C08D","#E6F5C9","#33A97E","#33A97E","#33A97E","#1A9D6F","#009270","#86CDA5","#33A97E","#33A97E","#33A97E","#33A97E","#1A9D6F","#1A9D6F","#41B087","#139B77","#41B087","#41B087","#41B087","#009270","#66C092","#4DB47E","#27A37F","#66C092","#4DB47E","#74C69C","#3AAC86","#74C69C","#4DB58D","#74C69C","#74C69C","#61BD95","#009270","#8ED195","#8CD094","#8FD297","#7FCA93","#8CD094","#93D39A","#94D49B","#91D298","#7FCA93","#8CD094","#96D59C","#8CD094","#97D59E","#8CD094","#9DD8A3","#99D69F","#99D69F","#99D69F","#9BD7A1","#99D69F","#A0D9A5","#A4DAA9","#99D69F","#99D69F","#A2D9A7","#A4DAA9","#99D69F","#7FCA93","#A6DBAB","#A6DBAB","#A6DCB5","#A6DCB5","#A6DCB5","#99D7B3","#99D7B3","#B3E1B6","#BFB777","#99D7B3","#B3E1C2","#B3E1C2","#99D7B3","#BFE6CF","#BFE6CF","#BFE6CF","#BFE6CF","#BFE6CF","#BFE6CF","#BFE6CF","#BFE6C3","#BFE6C3","#B3E1C2","#BFE6C3","#BFE6C3","#BFE6C3","#BFE6C3","#CCEBD1","#CCEBD1","#CCEBD1","#CCEBD1","#CCEBD1","#CCECDD","#CCECDD","#BFE6CF","#E6F5FF","#CCECDD","#D9F0DF","#D9F0DF","#E0F3E0","#D9F0DF","#D9F0DF","#D9F0DF","#E6F5E1","#E6F5E1","#E6F5E1","#E5B75A","#E5B75A","#E5AC4D","#E5AC4D","#E5AC4D","#CB8C37","#998E4F","#E5B75A","#E5B75A","#E5B75A","#E5B75A","#E5B75A","#E5B75A","#E5B75A","#E5B75A","#E5AC4D","#E5C468","#E5C468","#E5C468","#E5C468","#E5D075","#E5D075","#E5D075","#E5D075","#E5D075","#E5D075","#E5D075","#E5D075","#E
5D075","#E5D075","#F1D576","#F1D576","#F1D576","#F1C868","#F1C868","#F1C868","#F1C868","#F1D576","#F1D576","#F1D576","#F1E185","#F1E185","#F1E185","#F1E185","#F1E185","#F1CE78","#F1E185","#CB8C37","#F1D487","#F2EDAD","#F1E19D","#F2EDAD","#F2EDAD","#F2EDAD","#F2DB97","#F2EDAD","#F2EDAD","#F2EDC5","#F2E1A6","#F2EDC5","#F2EDC5","#F2EDC5","#F2EDC5","#F2EDC5","#F2E7B6","#F2EDC5","#F2EDC5","#F2EDC5","#F2EDC5","#8CB06C","#96B46C","#8CB06C","#678F66","#67A599","#8CB06C","#8CB06C","#8CB06C","#8CB06C","#A0B76C","#8CB06C","#8CB06C","#A6B96C","#A6B96C","#A6B96C","#ABBB6B","#A6B96C","#A6B96C","#A6B96C","#B5BE6B","#A6B96C","#BFC26B","#BFC26B","#A6C093","#BFC26B","#BFC26B","#BFC26B","#99C2B5","#9FC4B7","#99C2B5","#99C2B5","#C5816F","#99C2B5","#99C2B5","#99C2B5","#ACC9BC","#99C2B5","#99C2B5","#C7CBB9","#C7CBB9","#A6C7BA","#C7CBB9","#C7CBB9","#B3CBBE","#C7CBB9","#C7CBB9","#BFD0C5","#BFD0C5","#BFD0C3","#B9CDC0","#BFD0C5","#BFD0C5","#BFD0C5","#BFD0C5","#CCD4C7","#CCD4C7","#C6D2C5","#CCD4C7","#CCD4C7","#CCD4C7","#CCD4C7","#E36350","#E36350","#E36956","#EE5845","#EF5845","#F04028","#E36350","#E36350","#E36350","#E36350","#E36F5C","#E36F5C","#E36F5C","#E36F5C","#E37B68","#E37B68","#E37B68","#E37B68","#E3816F","#E38776","#E38776","#E38776","#E38776","#E38776","#FB8069","#F57F71","#FB8069","#EE7E79","#FB745C","#FB8D76","#FB8D76","#FB9A85","#FB9A85","#FB9A85","#FB9A85","#FB9A85","#FB9A85","#FB9A85","#E27C88","#FCB4A2","#E87D80","#FCB4A2","#E27C88","#FBA794","#FBA794","#FCB4A2","#FCB4A2","#FCB4A2","#FCB4A2","#FCB4A2","#FCB4A2","#FCB4A2","#FCC0B2","#FCC0B2","#FCC0B2","#FCC0B2","#FCC0B2","#FCC0B2","#FCC0B2","#A4469F","#A4469F","#DC7B90","#A4469F","#D57998","#983999","#812B92","#5B6FAE","#67C5CA","#A4469F","#A4469F","#A4469F","#983999","#A4469F","#A4469F");
color <- c(color,"#CF78A0","#A4469F","#A4469F","#B051A5","#B051A5","#C977A7","#B051A5","#B051A5","#B051A5","#B051A5","#B051A5","#C276AF","#B051A5","#B051A5","#B051A5","#B051A5","#B051A5","#B051A5","#BC75B7","#BC75B7","#B168B1","#BC75B7","#BC75B7","#BC75B7","#BC75B7","#B168B1","#BC75B7","#BC75B7","#C983BF","#C983BF","#C983BF","#C983BF","#C983BF","#C983BF","#C983BF","#C983BF","#C983BF","#C99BCB","#C99BCB","#C99BCB","#C99BCB","#BD8CC3","#C99BCB","#C99BCB","#C99BCB","#C99BCB","#C99BCB","#C99BCB","#C99BCB","#C99BCB","#C99BCB","#C99BCB","#C99BCB","#BD8CC3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#E3B9DB","#E3B9DB","#E3B9DB","#E3B9DB","#E3B9DB","#E3B9DB","#4EB3D3","#4EB3D3","#56B6D5","#42AED0","#34B2C9","#5ABC8C","#4EB3D3","#4EB3D3","#67BCD8","#5FB9D6","#67BCD8","#67BCD8","#67BCD8","#67BCD8","#67BCD8","#74C1DB","#67BCD8","#80C5DD","#80C5DD","#80C5DD","#88C8DF","#80C5DD","#80C5DD","#91CBE1","#80C5DD","#99D1E2","#99CEE3","#99CEE3","#9AD4E0","#99CEE3","#99CEE3","#99CEE3","#9AD6DF","#99CEE3","#99CEE3","#9AD9DD","#9AD9DD","#9AD9DD","#80CFD8","#9AD9DD","#9AD9DD","#A9DEE1","#9AD9DD","#A6DDE0","#A6DDE0","#ADE0E2","#A6DDE0","#A6DDE0","#A6DDE0","#A6DDE0","#A6DDE0","#B3E2E3","#B3E2E3","#B0E1E2","#B3E2E3","#B3E2E3","#B3E2E3","#B3E2E3","#B3E2E3","#B3E2E3","#BFE7E5","#BFE7E5","#BFE7E5","#B9E5E4","#BFE7E5","#BFE7E5","#BFE7E5","#BFE7E5","#BFE7E5","#BFE7F1","#BFE7F1","#B3E3EE","#BFE7E9","#BFE7F1","#BFE7ED","#BFE7F1","#BFE7F1","#BFE7F1","#C6EAF3","#BFE7F1","#CCECF4","#CCECF4","#CCECF4","#CCECF4","#CCECF4","#CCECF4","#D9F1F7","#D9F1F7","#D9F1F7","#D3EFF6","#D9F1F7","#D9F1F7","#D9F1F7","#D9F1EE","#D9F1F7","#D9F1F7","#D9F1F7","#D9F1F7","#D9F1F7","#D9F1F7","#D9F1F7","#D9F1F7","#D9F1F7","#D9F1F7","#8CCD60","#8CCD60","#8CCD57","#7FC64E","#8CCD60","#90CF63","#8CCD60","#95D167","#8CCD60","#8CCD60","#8CCD60","#99D36A","#9DD56E","#99D36A","#99D36A","#99D36A","#A2D771","#9
9D36A","#99D36A","#99D36A","#99D36A","#99D36A","#99D36A","#99D36A","#99D36A","#8CCD57","#A6D975","#A6D975","#ADDC7A","#A6D975","#A6D975","#B3DF7F","#B3DF7F","#B3DF7F","#B7E183","#B3DF7F","#B3DF7F","#B3DF7F","#B3DF7F","#B3DF7F","#BBE286","#BFE48A","#BFE48A","#BFE48A","#BFE48A","#BFE48A","#BFE48A","#70C189","#BFE48A","#CCEA97","#CCEA97","#C6E791","#CCEA97","#CCEA97","#CCEA97","#77C48E","#CCEA97","#CCEA97","#CCEA97","#CCEA97","#CCEA97","#7DC693","#C0E475","#CCEA97","#CCEA97","#B3DE55","#B3DE53","#A6D84A","#D2C055","#B3DE53","#B3DE53","#84C998","#B3DE53","#B3DE53","#B3DE53","#B3DE53","#B3DE53","#B3DE53","#B3DE53","#B3DE53","#B3DE53","#B3DE53","#B3DE53","#B7E056","#B3DE53","#B3DE53","#B3DE53","#91CEA3","#B3DE53","#B3DE53","#B3DE53","#B3DE53","#B3DE53","#B3DE53","#B3DE53","#BFE35D","#BBE15A","#BFE35D","#BFE35D","#BFE35D","#BFE35D","#BFE35D","#98D1A8","#BFE35D","#BFE35D","#BFE35D","#BFE35D","#C3E561","#BFE35D","#BFE35D","#BFE35D","#A6D84A","#C8E764","#CCE968","#CCE968","#9ED3AE","#CCE968","#D0EB6C","#A6D84A","#AEDABA","#D5ED70","#D9EF74","#D9EF74","#D9EF74","#D9EF74","#DCF077","#D9EF74","#E0F27A","#D9EF74","#B8DEC2","#E3F37C","#A6D84A","#E6F47F","#E6F47F","#E6F47F","#E6F47F","#E6F47F","#E6F47F","#E8F581","#E6F47F","#E6F47F","#E6F47F","#E6F47F","#E6F47F","#E9F683","#E6F47F","#E6F47F","#EBF785","#E6F47F","#EDF786","#E6F47F","#EFF888","#E6F47F","#E6F47F","#E6F47F","#E6F47F","#E6F47F","#E6F47F","#E6F47F","#A6D84A","#F0F98A","#F2FA8C","#F2FA8C","#F2FA8C","#F2FA81","#F2FA8C","#F2FA8C","#F2FA76","#FDB469","#FDB663","#FDB462","#FDB462","#FDA75F","#F4AA70","#FDB46C","#FD9A52","#F4B470","#F2F91D","#FEBA64","#FDB863","#FEBB64","#FEBF6A","#FEBD64","#FEBF65","#FEBF65","#FEBF6A","#FEBF6A","#FDBF6F","#FDBF6F","#FDBA70","#FDB571","#FDB371","#FCB171","#FCAC72","#FCAB78","#FCA773","#FCA773","#FDB46C","#EFA76E","#FCA976","#FCAB78","#EAA46D","#FCAE7B","#FCB07D","#FCB07D","#FCB280","#E4A06B","#FCB887","#FCB482","#FCB482","#FCB887","#DF9D69","#FDBC8C","#E3A570","#FDC091","#FDC799","#FDBC8C","#F
DCDA1","#FDCDA1","#FDC799","#FED99A","#FDD09F","#EBB07A","#F4BA83");
color <- c(color,"#FED39E","#FCCA96","#FED69C","#FED39E","#FED99A","#FED99A","#FDC07A","#FDC07A","#FEDC9E","#FEE0A2","#FEE39F","#FFECA5","#FEE3A6","#FEE9B2","#FEE6AA","#FEE6AA","#FEE6AA","#FDCFA0","#FEEE82","#FDD5AA","#FFF75B","#FFF75B","#FFFF33","#FFFF33","#FFFF00","#FFE619","#FFFF00","#FFFF3A","#FEDAB4","#FFFF41","#FFFF45","#FDD6A7","#FFFF3A","#FFFF49","#FFFF47","#FFFF4D","#FFFF4D","#FDCD90","#FFFF53","#FDCA7B","#FFFF59","#FFFF5D","#FFFF60","#FDD07C","#FFFF62","#FFFF66","#FFFF66","#FED67E","#FFFF69","#FFFF6B","#FFFF6D","#FFFF6E","#FFFF70","#FFFF73","#FEE582","#FFFF93","#FFFFB3","#FFFFB3","#FFFF99","#FFF78A","#FFFFB5","#FFFFB9","#FFFFB8","#FFFFBA","#FFFFBD","#FFF88E","#FFFFBF","#FFFFBF","#FFF891","#FFF6B9","#FFEDB3","#FFEDB3","#FFF2AE","#F9F97F","#FFEFBA","#FFF994","#FFF0C0","#FFF2C7","#FFF0BD","#FFF997","#FFF2C7","#FFF2D3","#FFF2D8","#FFFA9B","#FFF2E1","#FFF2DC","#FFF2D3","#FFF2C7","#FEF2E0");
color <- c(color,"#7FCA93","#97D59E","#99D69F","#99D69F","#A2D9A7","#33A97E");
color <- c(color,"#8ED195","#96D59C","#99D69F","#A0D9A5","#A2D9A7","#E6F5C9");
onset <- c(4000,4000,4000,4000,4000,4000,3600,3200,2800,2800,2500,2500,2500,2500,2500,2500,2300,2050,1800,1800,1600,1600,1600,1400,1400,1300,1200,1000,1000,1000,1000,850,850);
onset <- c(onset,635,541,541,541,541,541,541,541,541,541,529,524,521,521,521,521,520,520,518.5,515.5,515.5,514,513,513,511.1,511,510.5,509,509,509,509,509,507.3,506.5,506.5,505.25,504.5,504.5,504.5,501,501,500.5,499.5,498.5,497,497,497,497,496.8,495.2,494,493.9,493,493,493,492.5,492.3,490.9,489.5,489.5,487.8,487.1,486.7,486.7,486.2,485.4,485.4,485.4,485.4,485.4,485.4,484.4,480.3,480,480,479.8,479.5,477.7,477.7,477.5,475,473.7,470.3,470.0,470.0,470.0,468.7,467.4,467.3,465.5,464.4,464,463.7,462.4,461.4,461.2,458.4,458.4,458.4,458.4,456.6,456.5,456.5,456.5,456.5,455.8,455.5,454.7,454.5,453.7,453.5,453,453,452.3,451,450,450,450,449.7,449,449,449,447,445.5,445.2,445.2,443.8,443.8,443.8,443.8,443.8,443.8,443.8,441.1,440.8,440.5,439.4,438.5,438.5,437.5,436.4,435.8,435.5,435.2,433.4,433.4,433.4,432.6,432.3,431.8,431.2,430.5,430.5,430.5,429.4,428.3,427.4,427.4,427.4,427.4,427,425.6,425.6,425.6,425.2,424.8,423.6,423,423,421.2,419.2,419.2,419.2,419.2,419.2,419.2,419.2,419,417.2,415.2,414.5,414,413.5,412.5,412,411,410.8,410.8,410,408.5,407.6,407.6,407.6,407.6,406,404.2,403.5,401,397.5,395,393.3,393.3,393.3,393.3,393.3,393.3,393.3,391.5,389.2,388,387.7,387.7,387.7,387.5,385,385,384,383,382.7,382.7,382.7,381.2,380.2,379,379,376.5,373,372.2,372.2,372.2,370,370,367.5,366.5,365,364,363,362.2,360,358.9,358.9,358.9,358.9,358.9,358.4,357.4,355,353,351.5,349,348,346.7,346.7,344,343.5,340,336.8,335,335,332,330.9,330.9,330.9,329.5,327.2,325.5,323.2,323.2,323.2,323.2,323.2,322.5,320.8,320,319,318.6,317,315.2,315.2,315,314.5,312.5,312,310.8,309,307,307,307,306,305.7,305.2,304.5,304,303.7,303.7,303.7,302.7,302,301,300,298.9,298.9,298.9,298.9,298.9,298.9,298,297.5,297,296.5,295.5,295.5,294.1,293.5,290.1,290.1,285,282,282,279.3,279.3,275.5,274.7,273.7,272.3,272.3,272.3,272.3,272.3,268.8,268.8,265.1,265.1,264.5,264,263.7,261.8,260.3,259.9,259.9,259.9,259.9,259.9,259.9,259.9,259,258.5,257.7,257.2,256.5,255.3,254.5,254.14,254.14,253.5,253.1,252.8,252.5,252.4,252.17,252.17,252.17,252.17,252.17,252
.17,252.17,252.17,252.17,251.9,251.9,251.8,251.7,251.6,251.6,251.6,251.4,251.3,251.2,251.2,251.2,251.2,249.3,249.3,248.55,248.5,248.5,248.4,248.3,248,247.6,247.4,247.3,247.2,247.2,247.2,246.8,246.7,246.5,246.3,244.3,244,243.5,242,242,241,240.4,240.3,239,238.3,238,238,237,237,237,237,237,236.5,236.3,234.6,234,233.5,233.5,233,232.6,231.4,230.5,229.7,229.5,228.4,228,228,226.5,224.5,221.4,218.3,217.5,217.4,217.4,217,216.9,216.5,215.3,214.8,214,214,208.5,208.5,208.5,202.3,202.2,201.9,201.3,201.3,201.3,201.3,201.3,201.3,201.1,200.2,199.3,199.3,199.3,197.8,196.4,195.4,193.8,193.3,192.8,190.8,190.8,189.5,189,188.5,187.5,185.5,184.2,182.8,182.7,182.7,181,180.7,180.4,178.4,177.2,176.4,175,174.4,174.1,174.1,174.1,172.2,171.3,171.2,170.9,170.3,170.3,170.3,170,169.8,169.5,169,168.6,168.3,168.3,168,167.3,167,166.8,166.6,166.4,166.2,166.1,166.1,165.5,165.4,165,164.6,164.5,164.2,163.8,163.5,163.5,163.5,163.4,161.4,161.4,160.8,160.1,159.7,159.7,159,157.3,157.3,156,154.7,153.7,152.4,152.1,152,151.5,151.2,151,150.7,150.2,149.2,149,148.3,148,147.6,147.4,147.2,147,146.7,146.4,146,146,145,145,145,144.6,144,144,142.4,141.6,141,140.2,139.8,139.8,139.8,138.5,137.6,137.4,136.8,135.6,135,134.3,133.95,133.85,133.5,133.4,133,132.9,132.8,132.7,132.5,132.3,129.4,129.4,129,129,128.8,128.2,127.8,127.3,125.5,125.4,125,125,124.5,124,123,118,117.5,116.8,113,113,112.2,111.3,110.3,109.5,108.4,108.3,107.5,107,104.9,104.1,103.3,103,101.7,101.3,100.8,100.5,100.5,100.5,100.25,100.2,100.2,99.81,99.17,98.75,98.5,98.19,97.26,96.24,96.08,95.98,95.9,95.81,95.7,95.67,95.47,95.24,95.2,95.01,94.78,94.57,94.39,94.27,94.15,93.98,93.9,93.9,93.9,93.55,93.45,93.35,92.9,92.1,92.08,91.6,91.41,91.34,91,90.65,90.24,89.98,89.87,89.8,89.8,89.77,89.1,88.77,88,87.86,86.5,86.3,86.3,86.26,85.56,85.23,85,84.52,84.2,84.08,84,83.8,83.64,83.6,82.7,82,81.53,81.28,81.13,81,80.97,80.67,80.21,79.64,79.01,79,78.34,77.63,77.6,76.94,76.8,76.27,76,75.64,75.08,74.6,74.21)
onset <- c(onset,73.91,73.63,73.27,72.74,72.1,72.1,72.05,70.44,70.2,69.91,69.3,67.8,66,66,66,66,66,66,66,66,66,66,64.75,64.5,63.8,62.25,62.2,61.6,61.6,61.6,59.7,59.2,59.2,58.4,57.5,57.5,57.3,56.2,56,56,56,56,55.5,55,53.61,53,52.85,52,50.6,49.7,49.5,47.9,47.8,47.8,47.3,46.2,43.4,43,41.3,41.3,39.9,38,37.8,37.8,37.8,37,37,36,36,34.3,34.2,33.9,33.9,33.9,33.9,33.9,32.8,32.3,32.1,30.8,29.9,29.8,28.1,28.1,27.5,27.3,26.3,25.2,24.8,23.9,23.03,23.03,23.03,23.03,23.03,22.2,21.7,20.44,19,19,18.6,18.3,16.3,15.97,15.97,15.9,15.6,15.1,13.82,13.6,13.6,12.7,11.8,11.62,11.62,10.92,10.9,10.7,10.3,9.4,8.6,7.246,6.5,5.6,5.333,5.333,5.333,5.28,5,4.9,4.15,4,3.75,3.6,3.6,3.6,3,2.6,2.588,2.588,2.588,2.588,2.5,2.4,2,1.806,1.806,1.63,0.781,0.781,0.5,0.34,0.3,0.3,0.15,0.126,0.0117)
onset <- c(onset,453.0,452.0,450.4,449.4,448.5,485.4)
onset <- c(onset,458.4,456.5,453.0,450.3,448.3,489.5)
end <- c(3600,2500,541,485.4,252.17,0,3200,2800,2500,2500,2300,1600,1600,541,485.4,252.17,2050,1800,1600,1600,1400,1300,1000,1300,1200,850,1000,850,541,485.4,252.17,635)
end <- c(end,541,541,529,521,520,513,513,485.4,443.8,252.17,0,521,521,520,514,509,509,518.5,515.5,515.5,511.1,511,509,501,501,510.5,506.5,509,507.3,504.5,504.5,497,497,506.5,505.25,504.5,504.5,500.5,499.5,497,496.8,485.4,497,498.5,497,495.2,494,493,485.4,492.5,493.9,489.5,493,492.3,486.7,488.27,485.4,490.9,487.8,485.4,485.4,487.1,486.7,486.2,480,485.4,484.4,484.4,477.7,470.0,443.8,419.2,480.3,480,479.8,479.5,477.5,475,470.0,465.5,473.7,470.0,470.3,468.7,467.3,458.4,456.5,467.4,464.4,458.4,461.9,463.7,458.4,462.4,461.2,458.4,456.6,456.5,453,449,443.8,455.8,457,453.5,455.5,451,454.7,454.5,453.7,451,453,449,452.3,445.2,450,450,449.7,449,443.8,449,447,445.2,443.8,445.5,443.8,443.8,443.8,441.1,440.8,438.5,433.4,427.4,419.2,358.9,440.5,438.5,439.4,437.5,433.4,425.6,436.4,435.8,435.5,435.2,433.4,432.6,430.5,427.4,432.3,431.8,431.2,430.5,429.4,427.4,427.4,428.3,427.4,427,425.6,423,419.2,425.6,425.2,423,419.2,424.8,423.6,423,421.2,419.2,419,410.8,410.8,393.3,393.3,393.3,358.9,323.2,417.2,415.2,414.5,414,413.5,412.5,412,411,410,407.6,407.6,408.5,407.6,406,393.3,393.3,393.3,404.2,403.5,401,397.5,395,393.3,391.5,387.7,387.7,385,382.7,382.7,382.7,389.2,388,387.7,387.5,382.7,382.7,385,384,370,383,381.2,379,372.2,358.9,380.2,379,376.5,372.2,373,372.2,370,365,358.9,367.5,358.9,366.5,364,358.9,363,362.2,360,358.9,358.4,351.5,346.7,323.2,298.9,357.4,355,353,349,343.5,348,346.7,344,330.9,340,335,336.8,335,332,323.2,330.9,329.5,323.2,315,327.2,325.5,323.2,322.5,319,315.2,298.9,252.17,320.8,320,318.6,312,317,315.2,314.5,307,307,312.5,310.8,306,309,307,305.7,303.7,298.9,303.7,305.2,304.5,304,303.7,302.7,298.9,298.9,302,301,300,298.9,298,295.5,282,272.3,272.3,252.17,297.5,297,296.5,295.5,294.1,290.1,293.5,290.1,285,279.3,282,279.3,272.3,275.5,272.3,274.7,273.7,272.3,268.8,268.8,268.8,259.9,259.9,265.1,265.1,264.5,259.9,264,263.7,261.8,260.3,259.9,259,259,254.14,254.14,252.17,252.17,252.17,258.5,257.7,257.2,256.5,255.3,254.5,254.14,253.5,252.17,253.1,252.8,252.5,252.17,252.17,251.9,251.9,2
51.6,251.2,247.2,247.2,201.3,145,66,251.8,251.7,251.6,251.6,251.4,251.3,251.2,251.2,251.2,249.3,249.3,248.5,247.2,248.55,248.4,248.5,248,247.2,248.3,247.6,247.7,247.4,247.3,247.2,246.7,242,237,246.5,244.3,246.3,244,241,243.5,242,240.4,237,240.3,239,238,238.3,238,237,237,236.5,236.3,230.5,228,201.3,233.5,234.6,234,233.5,233,232.6,231.4,229.7,229.5,228,228,226.5,217.4,224.5,208.5,221.4,218.3,217.4,217.5,217,216.9,215.3,214,216.5,214.8,208.5,214,208.5,208.5,202.3,202.2,201.3,201.9,201.3,201.3,201.1,199.3,199.3,174.1,145,66,200.2,199.3,197.8,193.3,190.8,196.4,195.4,193.8,192.8,189,190.8,189.5,182.7,188.5,185.5,187.5,184.2,182.8,182.7,181,180.7,174.1,177.2,180.4,178.4,176.4,171.2,175,174.4,174.1,172.2,170.3,163.5,171.3,170.9,170.3,170.3,170,168.3,168,169.8,169.5,169,168.6,168.3,167.3,166.1,165.4,167,166.8,166.6,166.4,166.2,166.1,165.5,163.5,165,163.4,164.6,164.5,164.2,163.8,163.5,161.4,157.3,145,161.4,160.8,159.7,160.1,159.7,159,151.2,157.3,156,152.1,154.7,153.7,152.4,152,145,151.5,151,149.2,150.7,150.2,149,144,148.3,148,147.6,147.4,147.2,147,146.7,146.4,146,144.6,142,139.8,100.5,66,144,142.4,141.6,139.8,141,140.2,139.8,138.5,137.4,132.9,137.6,136.8,132.7,135.6,135,134.3,133.95,133.85,133.5,133.4,133,132.8,129.4,132.5,129,132.3,129.4,129,125,128.8,125.4,128.2,127.8,127.3,125.5,125,112.2,124.5,113,124,123,118,116.8,108.4,113,111.3,100.5,103,110.3,109.5,108.3,103.3,107.5,107,104.9,104.1,101.7,100.2,95.7,101.3,100.8,100.2,93.9,66,56,99.81,98.5,95.2,99.17,98.75,98.19,97.5,97.76,96.24,96.08,95.98,95.9,95.81,95.67,93.9,95.47,95.24,95.01,92.1,94.78,94.57,94.39,94.27,94.15,93.98,93.9,93.55,91,89.8,93.45,93.35,92.9,92.08,89.1,91.6,91.41,91.34,90.65,89.8,90.24,89.98,89.87,89.77,88,86.3,88.77,86.5,87.86,86.3,86.26,84,85,83.6,85.56,85.23,84.52,84.2,84.08,83.8,83.64,66,81,82.7,72.1,82,81.53,81.28,81.13,80.97,79,80.67,80.21,79.64,79.01,78.34,77.6,77.63,76.94,76.8,76.27,76,75.64,72.1,75.08,74.6,74.21,73.91,73.63,73.27);
end <- c(end,72.74,72.05,70.2,66,70.44,69.91,67.8,69.3,68.69,66,64.75,64.5,61.6,61.6,56,55.5,33.9,23.03,2.588,0,62.25,63.8,62.2,57.5,59.7,59.2,59.2,56,58.4,56,56,57.5,57.3,56,56.2,55,52,47.8,47.8,33.9,53,53.61,52.85,49.5,50.6,47.9,49.7,47.3,46.2,39.9,41.3,38,43.4,43,41.3,37,37.8,37,37.8,33.9,33.9,33.9,33.9,36,36,34.3,34.2,27.3,32.8,32.1,28.1,28.1,23.03,23.03,32.3,29.9,29.8,26.3,27.5,18.6,23.03,23.03,23.9,25.2,24.8,21.7,20.6,22.2,20.44,15.97,5.333,2.588,2.588,19,19,15.97,18.3,15.9,16.3,15.6,13.6,13.82,11.62,15.1,13.6,12.7,11.62,11.8,10.3,10.92,10.9,7.246,5.333,6.5,10.7,9.4,4.9,8.6,5.6,5.333,5.28,5,3.6,3.6,2.588,3.6,4.15,1.806,4,3.75,2.6,3,2.588,2.588,2.4,2.5,1.806,1.806,0.0117,0,2,1.63,0.5,0.781,0.3,0.34,0.126,0.0117,0.3,0,0.0114,0,0.05,0.0117,0);
end <- c(end,452.0,450.4,449.4,448.5,445.2,477.7);
end <- c(end,456.5,453.0,450.3,448.3,445.2,485.4);
output <- data.frame(cbind(interval,onset,end,color));
return(output)
}
accersi_stage_colors <- function() {
# Returns a lookup table of Ediacaran-Permian stages with their onset/end
# ages (Ma) and hex plotting colors.
# Columns: interval (character), onset (numeric, Ma), end (numeric, Ma),
# color (character hex code).
interval <- c("Ediacaran","Fortunian","Stage 2","Stage 3","Stage 4","Wuliuan","Drumian","Guzhangian","Paibian","Jiangshanian","Stage 10","Tremadoc","Floian","Dapingian","Darriwilian","Sandbian","Katian","Hirnantian","Rhuddanian","Aeronian","Telychian","Sheinwoodian","Homerian","Gorstian","Ludfordian","Pridoli","Lochkovian","Pragian","Emsian","Eifelian","Givetian","Frasnian","Famennian","Tournaisian","Visean","Serpukhovian","Bashkirian","Asselian","Sakmarian","Artinskian","Kungurian","Roadian","Wordian","Capitanian","Wuchiapingian","Changhsingian");
onset <- c(635.0,541.0,529.0,521.0,514.0,509.0,504.5,500.5,497.0,494.0,489.5,485.4,477.7,470.0,467.3,458.4,453.0,445.2,443.8,440.8,438.5,433.4,430.5,427.4,425.6,423.0,419.2,410.8,407.6,393.3,387.7,382.7,372.2,358.9,346.7,330.9,323.2,298.9,295.5,290.1,279.3,272.3,268.8,265.1,259.9,254.1);
# NOTE(review): end[1] is 542.0 but onset[2] ("Fortunian") is 541.0; these
# normally coincide -- confirm whether 542.0 is intentional.
end <- c(542.0,529.0,521.0,514.0,509.0,504.5,500.5,497.0,494.0,489.5,485.4,477.7,470.0,467.3,458.4,453.0,445.2,443.8,440.8,438.5,433.4,430.5,427.4,425.6,423.0,419.2,410.8,407.6,393.3,387.7,382.7,372.2,358.9,346.7,330.9,323.2,315.5,295.5,290.1,279.3,272.3,268.8,265.1,259.9,254.1,252.2);
color <- c("#FED96A","#99B575","#A6BA80","#A6C583","#B3CA8E","#B3D492","#BFD99D","#CCDFAA","#CCEBAE","#D9F0BB","#E6F5C9","#33A97E","#41B087","#66C092","#74C69C","#8CD094","#99D69F","#A6DBAB","#A6DCB5","#B3E1C2","#BFE6CF","#BFE6C3","#CCEBD1","#CCECDD","#D9F0DF","#E6F5E1","#E5B75A","#E5C468","#E5D075","#F1D576","#F1E185","#F2EDAD","#F2EDC5","#8CB06C","#A6B96C","#BFC26B","#99C2B5","#E36350","#E36F5C","#E37B68","#E38776","#FB8069","#FB8D76","#FB9A85","#FCB4A2","#FCC0B2");
# Build the data.frame directly rather than via cbind(): cbind() would
# coerce the numeric ages into a character matrix, forcing callers to
# round-trip through as.numeric(as.character(...)).
return(data.frame(interval,onset,end,color,stringsAsFactors=FALSE));
}
infer_interval_color <- function(onset,end) {
# Returns the color of the time-scale interval whose onset & end ages most
# closely match the supplied pair (smallest summed absolute difference).
# onset, end: ages in Ma; sign is ignored (abs() is applied).
color_scale <- accersi_time_scale_color();
# the lookup table may store ages as factor/character; force numeric
onsets <- as.numeric(as.character(color_scale$onset));
ends <- as.numeric(as.character(color_scale$end));
start_diff <- abs(abs(onsets)-abs(onset));
end_diff <- abs(abs(ends)-abs(end));
ttl_diff <- start_diff+end_diff;
# which.min() replaces match(min(x),x): same index (first minimum), one
# pass, no floating-point re-comparison
color <- color_scale$color[which.min(ttl_diff)];
return(color);
}
infer_stage_color_given_age <- function(ma) {
# Returns the color of the time-scale interval that contains age `ma` (Ma).
# If several nested intervals bracket `ma`, the tightest (shortest) wins.
get_colors <- accersi_time_scale_color();
# distance from each interval's onset down to ma, and from ma down to end
after <- as.numeric(as.character(get_colors$onset))-abs(ma);
before <- abs(ma)-as.numeric(as.character(get_colors$end));
get_colors <- cbind(get_colors,after,before);
# keep only intervals that actually bracket ma (onset >= ma >= end)
get_colors <- subset(get_colors,get_colors$after>=0 & get_colors$before>=0);
span <- get_colors$before+get_colors$after;
# tightest bracketing interval = smallest total span
ma_color <- get_colors$color[which.min(span)];
return(ma_color);
}
infer_stage_color_given_stage_onset_and_end <- function(age_range) {
# Returns the color of the tightest time-scale interval that contains the
# whole age range.
# age_range: c(onset, end) in Ma; signs are ignored (abs() is applied).
# (debug print(age_range) removed)
get_colors <- accersi_time_scale_color();
after <- as.numeric(as.character(get_colors$onset))-abs(age_range[1]);
before <- abs(age_range[2])-as.numeric(as.character(get_colors$end));
get_colors <- cbind(get_colors,after,before);
# keep only intervals that fully contain [end, onset]
get_colors <- subset(get_colors,get_colors$after>=0 & get_colors$before>=0);
span <- get_colors$before+get_colors$after;
# tightest containing interval = smallest total span
ma_color <- get_colors$color[which.min(span)];
return(ma_color);
}
infer_stage_color_given_strat_unit <- function(strat_unit) {
# Returns the color for a named interval (e.g., "Katian").
# Returns NA if strat_unit is not present in the time scale.
get_colors <- accersi_time_scale_color();
su <- match(strat_unit,get_colors$interval);
# use the named column instead of positional index 4 so this survives any
# column reordering in accersi_time_scale_color()
return(get_colors$color[su]);
}
#### Taxon Shapes ####
pentagon_symbol <- function(x,y,abc,ord,size) {
# Regular pentagon centered on (x,y), returned as a 2 x 5 matrix of (x;y)
# vertex coordinates (row 1 = x, row 2 = y) suitable for polygon().
# abc, ord: relative scaling of the x and y axes (aspect control)
# size: overall symbol size
# Vertices sit at 18, 90, 162, 234, 306 degrees on the unit circle;
# computed vectorized instead of point-by-point.
angles <- pi*(18+72*(0:4))/180;
pent <- rbind(cos(angles),sin(angles));
dimnames(pent) <- NULL;
# rescale to size and aspect, then translate to the symbol center
pent <- size*pent;
pent[1,] <- x+abc*pent[1,];
pent[2,] <- y+ord*pent[2,];
return(pent);
}
bug_symbol <- function(x,y,abc,ord,size) {
# Six-vertex "bug" outline centered on (x,y): pointed top and bottom plus
# four shoulder vertices.  Returns a 2 x 6 matrix (row 1 = x, row 2 = y)
# suitable for polygon().
# abc, ord: relative x/y axis scaling; size: overall symbol size.
shoulder_x <- 0.75*abc*size*(sqrt(2)/2);
shoulder_y <- 0.75*ord*size*(sqrt(2)/2);
# vertices in drawing order: upper-left, top, upper-right,
# lower-right, bottom, lower-left
xs <- x+c(-shoulder_x,0,shoulder_x,shoulder_x,0,-shoulder_x);
ys <- y+c(shoulder_y,ord*size,shoulder_y,-shoulder_y,-(ord*size),-shoulder_y);
bug <- matrix(c(xs,ys),nrow=2,byrow=TRUE);
return(bug);
}
star_symbol <- function(x,y,abc,ord,size) {
# Five-pointed star centered on (x,y), returned as a 2 x 10 matrix of
# (x;y) vertex coordinates (row 1 = x, row 2 = y) for polygon().
# x: x-coordinate
# y: y-coordinate
# abc: relative size of X-axis
# ord: relative size of Y-axis
# size: size of shell
# Vertices alternate outer (radius 1/2) and inner (radius 1/4) points,
# starting at 18 degrees and stepping 36 degrees per vertex.
vertex_angles <- pi*(18+36*(0:9))/180;
vertex_radii <- rep(c(1/2,1/4),5);
star <- rbind(vertex_radii*cos(vertex_angles),vertex_radii*sin(vertex_angles));
dimnames(star) <- NULL;
# rescale to size and aspect, then translate to (x,y)
star <- size*star;
star[1,] <- x+abc*star[1,];
star[2,] <- y+ord*star[2,];
return(star);
}
flower_symbol <- function(x,y,abc,ord,size) {
# Twelve-vertex "flower" outline centered on (x,y), returned as a 2 x 12
# matrix of (x;y) coordinates (row 1 = x, row 2 = y) for polygon().
# abc, ord: relative x/y axis scaling; size: overall symbol size.
# Vertices alternate outer (radius 1/2) and inner (radius 1/4) points,
# starting at 0 degrees and stepping 30 degrees per vertex.
petal_angles <- pi*(30*(0:11))/180;
petal_radii <- rep(c(1/2,1/4),6);
flower <- rbind(petal_radii*cos(petal_angles),petal_radii*sin(petal_angles));
dimnames(flower) <- NULL;
# rescale to size and aspect, then translate to (x,y)
flower <- size*flower;
flower[1,] <- x+abc*flower[1,];
flower[2,] <- y+ord*flower[2,];
return(flower);
}
mollusc_symbol <- function(x,y,abc,ord,size,whorls,W) {
# Logarithmic-spiral "snail shell" outline, returned as a 2 x stops matrix
# of (x;y) coordinates (row 1 = x, row 2 = y) for polygon().
# x, y: center of the symbol
# abc, ord: relative x/y axis scaling
# size: overall symbol size (mean of the x- and y-spans is scaled to size)
# whorls: number of whorls to draw
# W: Raup's whorl expansion rate
# The outline is the outer spiral (72 points per whorl) plus the spiral one
# whorl in, retraced backwards to close the aperture.
stops <- (72*whorls)+1+(72*(whorls-1)+1)
snail <- matrix(0,2,stops)
# innermost radius, chosen so the spiral reaches radius ~1 before rescaling
r <- 1/(W^whorls)
snail[1,1] <- -r
snail[2,1] <- 0
# outer spiral: radius grows by a factor of W per 360 degrees, stepped in
# 5-degree increments
ang <- 5
for (i in 2:((72*whorls)+1)) {
ri <- r*W^(5*(i-1)/360)
snail[1,i] <- ri*cos(pi*((180-ang)/180))
snail[2,i] <- -ri*sin(pi*((180-ang)/180))
ang <- ang+5
}
# close the outline by retracing the spiral one whorl in, backwards
# (removed dead assignment `i <- (72*whorls)+2`, which the for loop
# immediately overwrote)
a <- 72*(whorls-1)+1
for (i in ((72*whorls)+2):stops) {
snail[1,i] <- snail[1,a]
snail[2,i] <- snail[2,a]
a <- a-1
}
# rescale so the mean of the x- and y-spans equals `size`, then stretch by
# abc/ord and translate to (x,y)
mn <- (max(snail[1,])-min(snail[1,])+max(snail[2,])-min(snail[2,]))/2
snail <- size*snail/mn
snail[1,] <- abc*snail[1,]
snail[2,] <- ord*snail[2,]
snail[1,] <- x+snail[1,]
snail[2,] <- y+snail[2,]
return (snail)
}
brachiopod_symbol <- function(x,y,abc,ord,pedicle_width=5,sulcus_width=10,sulcus_depth=0.1,shell_width=2,size) {
# Brachiopod shell outline (hinge line, lateral margin, optional sulcus),
# returned as a 2 x n matrix of (x;y) coordinates for polygon().
# x, y: center of the symbol
# abc, ord: relative x/y axis scaling
# pedicle_width: number of hinge steps over which the hinge slope grows
# sulcus_width: angular width (degrees) of the sulcus; 0 draws no sulcus
# sulcus_depth: NOTE(review) never used in this body -- confirm intent
# shell_width: relative width of the hinge line
# size: overall symbol size
# have the angle of the hinge start high and decrease
#seq(sin(pedicle_angle),0,by=-sin(pedicle_angle)/60)
#yy <- seq(sin(pedicle_angle),0,by=-sin(pedicle_angle)/60)/60
#for (i in 2:length(yy)) yy[i] <- yy[i]+yy[i-1]
#plot(xx,yy);
#pa <- pedicle_angle
## draw the "base" (hinge) of the shell in 60 steps
xx <- seq(0,shell_width/2,by=(shell_width/2)/59)
yy <- c()
pa <- 0.01
# hinge slope ramps up (x1.3 per step) for the first pedicle_width steps...
for (i in 1:pedicle_width) {
yy <- c(yy,sin(pa));
pa <- 1.3*pa;
# i <- i+1;
}
# ...then decays (x0.95 per step) for the remaining steps
for (i in (pedicle_width+1):60) {
yy <- c(yy,sin(pa));
pa <- 0.95*pa;
# i <- i+1;
}
# cumulative sum turns the per-step slopes into hinge heights
for (i in 2:length(yy)) yy[i] <- yy[i]+yy[i-1];
yy <- 0.2*yy
#plot(xx,yy,xlim=c(-1,1),ylim=c(-1,1),type="l");
# distance from the origin to the end of the hinge
radius <- ((xx[length(xx)]^2)+(yy[length(yy)])^2)^0.5;
# NOTE(review): tan() of the y/x ratio looks like it was meant to be
# atan() (the angle of the hinge tip); for the small ratios produced above
# the two are nearly equal, so the drawn shape is similar -- confirm.
base <- tan(yy[length(yy)]/xx[length(xx)]);
### draw the outer margin up to the sulcus
sul_wdth <- pi*sulcus_width/180
top <- (pi/2)-sul_wdth;
# sweep the lateral margin from the hinge-tip angle toward the top in 108
# equal angular steps
edges <- seq(base+((top-base)/108),top,(top-base)/108);
yyy <- radius*sin(edges);
xxx <- radius*cos(edges);
#lines(xxx,yyy);
if (sulcus_width > 0) {
### draw the sulcus (median fold at the top of the shell)
edges2 <- seq(0+(pi/20),pi/2,by=pi/20);
xxx <- c(xxx,0,xxx[length(xxx)])
yyy <- c(yyy,0,yyy[length(yyy)])
xxxx <- xxx[length(xxx)]*cos(edges2);
yyyy <- max(yy,yyy)-xxx[length(xxx)]*sin(edges2);
brachiopod_x <- c(xx,xxx,xxxx);
brachiopod_y <- c(yy,yyy,yyyy);
} else {
# if there is no sulcus, then we skip that part
brachiopod_x <- c(xx,xxx);
brachiopod_y <- c(yy,yyy);
}
# draw the left side of the shell by mirroring the right side in reverse
brachiopod_x <- c(brachiopod_x,-brachiopod_x[length(brachiopod_x):1]);
brachiopod_y <- c(brachiopod_y,brachiopod_y[length(brachiopod_y):1]);
# recenter vertically, then rescale and translate like the other symbols
brachiopod_y <- brachiopod_y-mean(brachiopod_y);
brachiopod <- rbind(brachiopod_x,brachiopod_y);
brachiopod <- size*brachiopod;
brachiopod[1,] <- abc*brachiopod[1,];
brachiopod[2,] <- ord*brachiopod[2,];
brachiopod[1,] <- x+brachiopod[1,];
brachiopod[2,] <- y+brachiopod[2,];
return(brachiopod);
}
trilobite_symbol <- function(x,y,abc,ord,size,cph,tho,thow,pyg,pygwu,pygwb) {
# Trilobite outline (cephalon arc + genal spines + segmented thorax +
# pygidium), returned as a 2 x 62 matrix of (x;y) coordinates for polygon().
# x, y: x & y coordinates (center of symbol)
# abc, ord: length of x & y axes (to make it "square")
# cph: relative height of cephalon (head)
# tho: relative height of thorax
# thow: relative width of thorax
# pyg: relative height of pygidium (tail)
# pygwu: relative width of pygidium at top
# pygwb: relative width of pygidium at bottom
l <- cph+tho+pyg # total length
# normalize the three body sections so they sum to 1
cph <- cph/l
tho <- tho/l
pyg <- pyg/l
hby <- (0.5-cph) #where body starts relative to y
bty <- (0.5-(cph+tho)) #where body ends relative to y
a <- 0.5 # semi-axis of the cephalon ellipse (x)
b <- cph # semi-axis of the cephalon ellipse (y)
# NOTE(review): xxx/yyy are allocated with length 34 but the arc loop below
# writes 35 entries; R silently extends them, so this works but is fragile.
xxx <- vector(length=34)
yyy <- vector(length=34)
trilo <- matrix(0,2,62)
j <- 1
# left genal spine and head/body junction (columns 1-4)
trilo[1,j+0] <- -(thow*a) #head/body connction
trilo[1,j+1] <- -(((1+thow)/2)*a) #head/body connction
trilo[2,j+3] <- trilo[2,j+1] <- trilo[2,j+0] <- hby
trilo[1,j+3] <- trilo[1,j+2] <- -a #tip of spine
trilo[2,j+2] <- -(hby-(bty/2))
ang <- 175
j <- 4
# cephalon: upper half-ellipse swept left-to-right in 5-degree steps
# (columns 5-39)
for (i in 1:35) {
# go in 5-degree increments
# ddd[i] <- (0.25*(abs(ang-90)/90))
trilo[1,i+j] <- (a*cos(ang*pi/180))
xxx[i] <- a*cos(ang*pi/180)
yyy[i] <- sqrt(((b^2)-((b^2)*xxx[i]^2)/(a^2))) # ellipse height at xxx[i]
trilo[2,i+j] <- trilo[2,1]+yyy[i]
ang <- ang-5
}
j <- j+35
# right genal spine and head/body junction (columns 39-42)
trilo[1,j+3] <- (thow*a) #head/body connction
trilo[1,j+2] <- (((1+thow)/2)*a) #head/body connction
trilo[2,j] <- trilo[2,j+2] <- trilo[2,j+3] <- hby
trilo[1,j] <- trilo[1,j+1] <- a #tip of spine
trilo[2,j+1] <- -(hby-(bty/2))
j <- j+4
# right thorax margin: 8 segment edges alternating between two widths
# (columns 43-50)
trilo[1,j] <- trilo[1,j+2] <- trilo[1,j+4] <- trilo[1,j+6] <- ((7/6)*thow*a)
trilo[1,j+1] <- trilo[1,j+3] <- trilo[1,j+5] <- trilo[1,j+7] <- (thow*a)
tip <- (hby-bty)/8 # height of one thorax segment
for (i in 1:8) {
trilo[2,j] <- hby-(i*tip)
j <- j+1
}
rev <- j-1 # last right-margin column; NOTE(review): shadows base::rev()
# do pygidium (columns 51-54): trapezoid from bty down to -0.5
trilo[1,j] <- pygwu*a
trilo[2,j] <- bty
j <- j+1
trilo[1,j] <- pygwb*a
trilo[2,j] <- -0.5
j <- j+1
trilo[1,j] <- -pygwb*a
trilo[2,j] <- -0.5
j <- j+1
trilo[1,j] <- -pygwu*a
trilo[2,j] <- bty
j <- j+1
# do rest of thorax: mirror the right margin onto the left, bottom-up
# (columns 55-62)
for (i in 1:8) {
k <- i-1
trilo[1,j] <- -trilo[1,(rev-k)]
trilo[2,j] <- trilo[2,(rev-k)]
j <- j+1
}
# center and rescale
trilo <- size*trilo
trilo[1,] <- abc*trilo[1,]
trilo[2,] <- ord*trilo[2,]
trilo[1,] <- x+trilo[1,]
trilo[2,] <- y+trilo[2,]
#polygon(trilo[1,],trilo[2,],col="orange",lwd=0.25)
return(trilo)
}
fish_symbol <- function(x,y,abc,ord,size,a,b) {
# Fish outline (elliptical body + triangular tail fin), returned as a
# 2 x 53 matrix of (x;y) coordinates (row 1 = x, row 2 = y) for polygon().
# x, y: center of the symbol
# abc, ord: relative x/y axis scaling
# size: overall symbol size
# a, b: semi-axes of the body ellipse (e.g., a=0.5, b=0.25)
# a <- 0.5
# b <- 0.25
spin <- 25 # number of 5-degree steps along the upper body margin
rev <- 3+(2*spin) # total points; NOTE(review): shadows base::rev() here
xxx <- vector(length=52)
yyy <- vector(length=52)
#ddd <- vector(length=34)
fish <- matrix(0,2,rev)
ang <- 160
# body: sweep the upper half-ellipse, mirroring each point into the lower
# half (columns 1..spin and rev-1 down to rev-spin)
for (i in 1:spin) {
# go in 5-degree increments
# ddd[i] <- (0.25*(abs(ang-90)/90))
fish[1,i] <- fish[1,rev-i] <- (a*cos(ang*pi/180))
xxx[i] <- a*cos(ang*pi/180)
yyy[i] <- sqrt(((b^2)-((b^2)*xxx[i]^2)/(a^2))) # ellipse height at xxx[i]
fish[2,i] <- yyy[i]
fish[2,rev-i] <- yyy[i]*-1
ang <- ang-5
}
# tail fin (columns 26-27): relies on i retaining its final loop value
# (i == spin) after the for loop -- fragile but intentional here
fish[1,i+1] <- fish[1,i+2] <- 0.5
fish[2,i+1] <- 1.5*max(fish[2,])
fish[2,i+2] <- fish[2,i+1]*-1
# final point (column rev) closes the outline at the nose
fish[1,rev] <- -0.30
fish[2,rev] <- 0
# center and rescale fish
scx <- max(fish[1,])-min(fish[1,]) # current horizontal span
fish <- fish/scx
scxx <- max(fish[1,])-0.5
fish[1,] <- fish[1,]-scxx
# set the size so that the area is about equal to a circle with r=a
fish <- (a/b)*size*fish
fish[1,] <- abc*fish[1,]
fish[2,] <- ord*fish[2,]
fish[1,] <- x+fish[1,]
fish[2,] <- y+fish[2,]
return (fish)
}
#### Spindle like it's the 70's ####
spindle_diagram <- function(bin_onsets,spindled_midpts,spindled_counts,bin_colors,plot_on_y=T,bar_legend=T,legend_width=1,legend_case="Unit") {
# Draws a stack of spindle (abundance) diagrams on the current plot.
# bin_onsets: bin boundaries (on X or Y axis); length = bins+1
# spindled_midpts: position where the middle of each spindle segment plots
# spindled_counts: matrix [bin, segment] of observation counts
# bin_colors: fill colors for the separate spindle diagrams
# plot_on_y: if TRUE, spindles go "up" the Y-axis, with different ones
#	plotted along the X-axis
# bar_legend: if TRUE, print a legend for width
# legend_width: the count that the legend bar represents
# legend_case: unit name (e.g., "case" or "clade") that one example represents
# Requires an open plotting device; draws with rect()/text().
bins <- dim(spindled_counts)[1];
bin_mids <- (bin_onsets[1:bins]+bin_onsets[2:(bins+1)])/2;
bin_widths <- abs(bin_onsets[1:bins]-bin_onsets[2:(bins+1)]);
bin_maxs <- vector(length=bins);
for (i in 1:bins) bin_maxs[i] <- max(spindled_counts[i,]);
# width representing one observation: the tightest bin constrains it
width_of_one <- min(bin_widths/bin_maxs);
y_hts <- abs(spindled_midpts[1]-spindled_midpts[2])/2;
#cbind(bin_maxs,bin_widths,bin_widths/bin_maxs)
#which(spindled_counts==mxwd,arr.ind=TRUE)
# (debug print(c(width_of_one,legend_width)) removed)
for (s in 1:bins) {
toplot <- spindled_midpts[spindled_counts[s,]>0];
tocount <- spindled_counts[s,spindled_counts[s,]>0];
# seq_along() safely skips bins with no positive counts (empty toplot)
for (i in seq_along(toplot)) {
if (plot_on_y) {
x1 <- as.numeric(bin_mids[s]-(width_of_one/2)*tocount[i]);
x2 <- as.numeric(bin_mids[s]+(width_of_one/2)*tocount[i]);
y1 <- as.numeric(toplot[i])-y_hts;
y2 <- as.numeric(toplot[i])+y_hts;
} else {
y1 <- as.numeric(bin_mids[s]-(width_of_one/2)*tocount[i]);
y2 <- as.numeric(bin_mids[s]+(width_of_one/2)*tocount[i]);
x1 <- as.numeric(toplot[i])-y_hts;
x2 <- as.numeric(toplot[i])+y_hts;
}
rect(x1,y1,x2,y2,col=bin_colors[s],lwd=0.5);
}
}
# do legend if requested: a gray bar whose width represents legend_width
if (bar_legend) {
xleg <- max(bin_onsets)-(0.2*(max(bin_onsets)-min(bin_onsets)));
if (plot_on_y) {
x1 <- xleg-((width_of_one/2)*legend_width);
x2 <- xleg+((width_of_one/2)*legend_width);
x3 <- xleg+((width_of_one/2)*legend_width);
y1 <- max(spindled_midpts)+y_hts;
y2 <- max(spindled_midpts)-y_hts;
y3 <- (y1+y2)/2;
} else {
y1 <- xleg-((width_of_one/2)*legend_width);
y2 <- xleg+((width_of_one/2)*legend_width);
y3 <- xleg+((width_of_one/2)*legend_width);
x1 <- max(spindled_midpts)+y_hts;
x2 <- max(spindled_midpts)-y_hts;
x3 <- (x1+x2)/2;
}
rect(x1,y1,x2,y2,col="gray50",lwd=0.5);
legend_text <- paste(":",legend_width,legend_case,sep=" ");
text(x3,y3,legend_text,cex=0.75,pos=4);
}
}
single_spindle <- function(axe=2,midpoint,spindle,max_width,min_axe,max_axe,spindle_color="white",spindle_lwdth=4/3,spindle_linecol="black") {
# Draws one filled, outlined spindle on the current plot.
# axe: 2 or 4 for a spindle running along the y-axis; otherwise it runs
#	along the x-axis
# midpoint: midpoint on the other axis
# spindle: width of spindle at each point along axis (= height of
#	histogram for the same data)
# max_width: maximum width on the other axis
# min_axe, max_axe: extent of the spindle along the chosen axis
# spindle_color: fill color of spindle bars
# spindle_lwdth: width of outline segments
# spindle_linecol: color of outline segments
# Requires an open plotting device.
spinds <- length(spindle)
axe_incr <- seq(min_axe,max_axe,by=(max_axe-min_axe)/spinds)
scaled_spindle <- max_width*spindle
# fill the bars first so the outline is drawn on top
for (sp in 1:spinds) {
if (scaled_spindle[sp]>0 && (axe==2 || axe==4)) {
rect(midpoint-scaled_spindle[sp]/2,axe_incr[sp],midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],col=spindle_color,lwd=4/3,border=spindle_color)
} else if (scaled_spindle[sp]>0) {
rect(axe_incr[sp],midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp]/2,col=spindle_color,lwd=4/3,border=spindle_color)
}
}
if (axe==2 || axe==4) {
# vertical spindle: outline every occupied segment
for (sp in 1:spinds) {
if (scaled_spindle[sp]>0) {
# draw bottom if there is no segment beneath it
if (sp==1 || (sp>1 && scaled_spindle[sp-1]==0))
segments(midpoint-scaled_spindle[sp]/2,axe_incr[sp],midpoint+scaled_spindle[sp]/2,axe_incr[sp],lwd=spindle_lwdth,col=spindle_linecol)
# draw sides
segments(midpoint-scaled_spindle[sp]/2,axe_incr[sp],midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],lwd=spindle_lwdth,col=spindle_linecol)
segments(midpoint+scaled_spindle[sp]/2,axe_incr[sp],midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],lwd=spindle_lwdth,col=spindle_linecol)
# draw top if necessary
if (sp==spinds || (sp<spinds && scaled_spindle[sp+1]==0)) {
segments(midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],lwd=spindle_lwdth,col=spindle_linecol)
} else if (scaled_spindle[sp]!=scaled_spindle[sp+1]) {
# step between adjacent segments of different widths (the former
# separate > and < branches drew identical segments; merged)
segments(midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint-scaled_spindle[sp+1]/2,axe_incr[sp+1],lwd=spindle_lwdth,col=spindle_linecol)
segments(midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp+1]/2,axe_incr[sp+1],lwd=spindle_lwdth,col=spindle_linecol)
}
}
}
} else {
# horizontal spindle: same outline logic with x/y swapped
for (sp in 1:spinds) {
if (scaled_spindle[sp]>0) {
# draw left edge if there is no segment before it
if (sp==1 || (sp>1 && scaled_spindle[sp-1]==0))
segments(axe_incr[sp],midpoint-scaled_spindle[sp]/2,axe_incr[sp],midpoint+scaled_spindle[sp]/2,lwd=spindle_lwdth,col=spindle_linecol)
# draw sides
segments(axe_incr[sp],midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint-scaled_spindle[sp]/2,lwd=spindle_lwdth,col=spindle_linecol)
# bug fix: this call had its x/y arguments swapped, drawing a stray
# vertical segment instead of the upper side of the spindle
segments(axe_incr[sp],midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp]/2,lwd=spindle_lwdth,col=spindle_linecol)
# draw right edge if necessary
if (sp==spinds || (sp<spinds && scaled_spindle[sp+1]==0)) {
segments(axe_incr[sp+1],midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp]/2,lwd=spindle_lwdth,col=spindle_linecol)
} else if (scaled_spindle[sp]!=scaled_spindle[sp+1]) {
# step between adjacent segments of different widths
segments(axe_incr[sp+1],midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint-scaled_spindle[sp+1]/2,lwd=spindle_lwdth,col=spindle_linecol)
segments(axe_incr[sp+1],midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp+1]/2,lwd=spindle_lwdth,col=spindle_linecol)
}
}
}
}
}
single_spindle_set_increments <- function(axe=2,midpoint,axe_incr,spindle,max_width,spindle_color="white",spindle_lwdth=4/3,spindle_linecol="black") {
# Draws one filled, outlined spindle on the current plot, using caller-
# supplied axis increments (segment boundaries) rather than computing them.
# axe: 2 or 4 for a spindle running along the y-axis; otherwise x-axis
# midpoint: midpoint on the other axis
# axe_incr: boundaries of the spindle segments along the chosen axis
#	(length = length(spindle)+1)
# spindle: width of spindle at each point along axis (= height of
#	histogram for the same data); rescaled so its maximum is 1
# max_width: maximum width on the other axis
# spindle_color: fill color of spindle bars
# spindle_lwdth: width of outline segments
# spindle_linecol: color of outline segments
# Requires an open plotting device.
spindle <- spindle/max(spindle)
spinds <- length(spindle)
scaled_spindle <- max_width*spindle
# fill the bars first so the outline is drawn on top
for (sp in 1:spinds) {
if (scaled_spindle[sp]>0 && (axe==2 || axe==4)) {
rect(midpoint-scaled_spindle[sp]/2,axe_incr[sp],midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],col=spindle_color,lwd=4/3,border=spindle_color)
} else if (scaled_spindle[sp]>0) {
rect(axe_incr[sp],midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp]/2,col=spindle_color,lwd=4/3,border=spindle_color)
}
}
if (axe==2 || axe==4) {
# vertical spindle: outline every occupied segment
for (sp in 1:spinds) {
if (scaled_spindle[sp]>0) {
# draw bottom if there is no segment beneath it
if (sp==1 || (sp>1 && scaled_spindle[sp-1]==0))
segments(midpoint-scaled_spindle[sp]/2,axe_incr[sp],midpoint+scaled_spindle[sp]/2,axe_incr[sp],lwd=spindle_lwdth,col=spindle_linecol)
# draw sides
segments(midpoint-scaled_spindle[sp]/2,axe_incr[sp],midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],lwd=spindle_lwdth,col=spindle_linecol)
segments(midpoint+scaled_spindle[sp]/2,axe_incr[sp],midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],lwd=spindle_lwdth,col=spindle_linecol)
# draw top if necessary
if (sp==spinds || (sp<spinds && scaled_spindle[sp+1]==0)) {
segments(midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],lwd=spindle_lwdth,col=spindle_linecol)
} else if (scaled_spindle[sp]!=scaled_spindle[sp+1]) {
# step between adjacent segments of different widths (the former
# separate > and < branches drew identical segments; merged)
segments(midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint-scaled_spindle[sp+1]/2,axe_incr[sp+1],lwd=spindle_lwdth,col=spindle_linecol)
segments(midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp+1]/2,axe_incr[sp+1],lwd=spindle_lwdth,col=spindle_linecol)
}
}
}
} else {
# horizontal spindle: same outline logic with x/y swapped
for (sp in 1:spinds) {
if (scaled_spindle[sp]>0) {
# draw left edge if there is no segment before it
if (sp==1 || (sp>1 && scaled_spindle[sp-1]==0))
segments(axe_incr[sp],midpoint-scaled_spindle[sp]/2,axe_incr[sp],midpoint+scaled_spindle[sp]/2,lwd=spindle_lwdth,col=spindle_linecol)
# draw sides
segments(axe_incr[sp],midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint-scaled_spindle[sp]/2,lwd=spindle_lwdth,col=spindle_linecol)
# bug fix: this call had its x/y arguments swapped, drawing a stray
# vertical segment instead of the upper side of the spindle
segments(axe_incr[sp],midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp]/2,lwd=spindle_lwdth,col=spindle_linecol)
# draw right edge if necessary
if (sp==spinds || (sp<spinds && scaled_spindle[sp+1]==0)) {
segments(axe_incr[sp+1],midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp]/2,lwd=spindle_lwdth,col=spindle_linecol)
} else if (scaled_spindle[sp]!=scaled_spindle[sp+1]) {
# step between adjacent segments of different widths
segments(axe_incr[sp+1],midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint-scaled_spindle[sp+1]/2,lwd=spindle_lwdth,col=spindle_linecol)
segments(axe_incr[sp+1],midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp+1]/2,lwd=spindle_lwdth,col=spindle_linecol)
}
}
}
}
}
#### Stylized Axes ####
log_axes <- function(axe,min_ax,max_ax,increment,numbers,linewd=4/3,orient) {
# Draws a natural-log-scaled axis: labelled major ticks at `numbers` and
# unlabelled minor ticks at numbers+increment.
# axe: side (1=bottom, 2=left, 3=top, 4=right)
# min_ax, max_ax: axis limits (already log-transformed)
# increment: offset added to each number for the minor ticks
# numbers: untransformed values to label
# linewd: line width; orient: las orientation for the labels
# axis line only (no ticks)
axis(axe,at=seq(min_ax,max_ax,by=(max_ax-min_ax)),tcl=0,labels=NA,lwd.ticks=NA,lwd=linewd)
ticks <- length(numbers)
#if ((numbers[2]/numbers[1])>=increment) {
for (i in 1:ticks) {
l <- numbers[i]
# bug fix: major/minor ticks were hard-coded to axis(2,...), ignoring
# the `axe` parameter; draw on the requested side instead
axis(axe,at=log(l),tcl=-.3,labels=numbers[i],lwd=0,lwd.ticks=linewd,las=orient)
}
for (i in 1:(ticks-1)) {
l <- numbers[i]+increment
axis(axe,at=log(l),tcl=-.15,labels=NA,lwd=0,lwd.ticks=linewd)
}
}
#log10_axes(axe=2,min_ax=mny,max_ax=mxy,numbers=10^(mny:mxy),linewd=4/3,orient=2)
# produces y-axis with 10^-7 to 1.0 with increments from 2:9 & labels on 10^x for x=integer
log10_axes <- function(axe,min_ax,max_ax,numbers,linewd=4/3,font_size=1,orient=1) {
#log10_axes(axe=2,min_ax=mny,max_ax=mxy,numbers,linewd=1.5,orient=2)
# Draws a log10-scaled axis: major ticks at integer powers of 10, labels
# at the values in `numbers`, and minor ticks at the 2x..9x multiples
# within each decade.
# axe: 1 for x; 2 for y
# min_ax: minimum, already log10 transformed
# max_ax: maximum, already log10 transformed
# numbers: array of (untransformed) numbers to label
# linewd: line width
# font_size: cex.axis for the labels
# orient: orientation of text, with 2 making y-axis the way I like it
# axis line only (no ticks)
axis(axe,at=seq(min_ax,max_ax,by=(max_ax-min_ax)),tcl=0,labels=NA,lwd.ticks=NA, lwd=linewd, las=orient);
mnn <- ceiling(min_ax);
mxn <- floor(max_ax)
# unlabelled major ticks at every whole power of 10 within the limits
for (i in mnn:mxn) {
# l <- numbers[i]
axis(axe,at=i,tcl=-.3,labels=FALSE,lwd=0,lwd.ticks=linewd)
}
# labels (text only, no tick marks) at the requested numbers
for (i in 1:length(numbers)) {
l <- numbers[i]
axis(axe,at=log10(l),tcl=-.3,labels=numbers[i],lwd=0,lwd.ticks=0,las=orient,cex.axis=font_size)
}
# minor ticks in the partial decade below the first whole power of 10
if (ceiling(min_ax)>min_ax) {
strt <- match(min_ax,ceiling(min_ax)-(1-log10(1:9)))
if (is.na(strt)) {
# min_ax is not exactly on a 1..9 multiple; start at the next one up
strt <- 1+sum((ceiling(min_ax)-(1-log10(1:9)))<min_ax)
}
ticks <- (strt:10)*10^floor(min_ax)
} else {
ticks <- c()
}
# minor ticks for each complete decade between the limits
if (ceiling(min_ax)<floor(max_ax)) {
for (i in ceiling(min_ax):(floor(max_ax)-1)) {
ticks <- c(ticks,(1:10)*10^i)
}
}
# minor ticks in the partial decade above the last whole power of 10:
# extend by the decade's step until the limit is passed
if (max_ax>floor(max_ax)) {
add <- max(ticks)
while (log10(max(ticks)+add)<=max_ax) ticks <- c(ticks,max(ticks)+add)
# end <- match(round(max_ax,4),round(floor(max_ax)+log10(1:9),4))
# ticks <- c(ticks,(1:end)*10^floor(max_ax))
}
# drop minor ticks that coincide with the labelled whole powers of 10
ticks <- ticks[!ticks %in% 10^(mnn:mxn)]
#axis(axe,at=log10(0.2),tcl=-.15,labels=FALSE,lwd=0,lwd.ticks=linewd)
#axis(axe,at=log10(0.4),tcl=-.15,labels=FALSE,lwd=0,lwd.ticks=linewd)
axis(axe,at=log10(ticks),tcl=-.15,labels=FALSE,lwd=0,lwd.ticks=linewd)
}
log2_axes <- function(axe,min_ax,max_ax,numbers,linewd=4/3,orient) {
# Draws a log2-scaled axis with labelled ticks at `numbers`.
# axe: 1 for x; 2 for y
# min_ax, max_ax: limits, already log2 transformed
# numbers: array of (untransformed) numbers to label
# linewd: line width
# orient: orientation of text, with 2 making y-axis the way I like it
# axis line only (no ticks)
axis(axe,at=seq(min_ax,max_ax,by=(max_ax-min_ax)),tcl=0,labels=NA,lwd.ticks=NA, lwd=linewd, las=orient)
zero_start <- numbers[1]==0
for (tick_no in seq_along(numbers)) {
# a leading 0 cannot be log2-transformed: when the series starts at 0,
# each value v is drawn at log2(max(1,2v)) so 0 lands at position 0
tick_at <- if (zero_start) max(1,2*numbers[tick_no]) else numbers[tick_no]
axis(axe,at=log2(tick_at),tcl=-.3,labels=numbers[tick_no],lwd=0,lwd.ticks=linewd,las=orient)
}
}
# Pick major/medium/minor tick spacings for a linear axis spanning
# (max_no - min_no).  Returns a one-row data.frame with columns
# maj_break (labelled), med_break (intermediate) and min_break (smallest).
set_axis_breaks <- function(max_no,min_no) {
span <- max_no-min_no;
# upper span limit for each row, and the break triple that goes with it
limits <- c(10, 20, 50, 100, 200, 500, 1000, 2000, 5000, Inf);
majors <- c( 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000);
mediums <- c(0.5, 1, 1, 5, 10, 10, 50, 100, 100, 500);
minors <- c(0.5, 0.5, 1, 1, 5, 5, 10, 50, 50, 100);
slot <- which(span<=limits)[1]; # first limit that the span fits under
tick_tock <- data.frame(maj_break=majors[slot],med_break=mediums[slot],min_break=minors[slot],stringsAsFactors=FALSE);
return(tick_tock);
}
# Scale-aware version of set_axis_breaks(): max_no is normalized into
# [1,10) by its power of ten, breaks are chosen on that scale, and the
# resulting triple is scaled back up.  Returns a one-row data.frame with
# columns maj_break, med_break, min_break.
# NOTE(review): min_no is NOT rescaled by 10^fact10; the routine appears
# designed for min_no = 0 -- confirm before passing other minima.
set_axis_breaks_new <- function(max_no,min_no=0) {
fact10 <- floor(log10(max_no));
max_no <- max_no/(10^fact10)
if ((max_no-min_no)<=1) {
maj_break <- 0.10;
med_break <- 0.05;
min_break <- 0.01;
} else if ((max_no-min_no)<=2) {
maj_break <- 0.20;
med_break <- 0.10;
min_break <- 0.05;
} else if ((max_no-min_no)<5) {
maj_break <- 0.50;
med_break <- 0.10;
min_break <- 0.05;
} else if ((max_no-min_no)<=10) {
maj_break <- 1.0;
med_break <- 0.5;
min_break <- 0.1;
} else {
# fallback: previously no branch covered a scaled span > 10 (possible
# with a negative min_no), leaving maj_break undefined and erroring below
maj_break <- 1.0;
med_break <- 0.5;
min_break <- 0.1;
}
tick_tock <- (10^fact10)*data.frame(maj_break=as.numeric(maj_break),med_break=as.numeric(med_break),min_break=as.numeric(min_break),stringsAsFactors = FALSE);
return(tick_tock);
}
# Build tick positions, labels and tick strengths (tcl values) for an axis
# spanning ax_min..ax_max.  Returns a list with elements Ticks (matrix of
# tick positions, one row per strength level), Labels, and Tick_Strength.
# The label values are negated (-1*) -- presumably for time axes plotted
# as "millions of years ago"; confirm against callers.
wagner_set_axes <- function (ax_min,ax_max,y_add=0) {
if ((ax_max-ax_min)<=10) {
# NOTE(review): rbind() of the length-3 c(1,0.5,0.1) with the longer
# integer sequence recycles with a warning, and tick_str is never set
# on this branch -- list() below finds tick_str only if one exists in
# the enclosing environment.  Verify this branch is actually exercised.
min_ax <- floor(ax_min)
max_ax <- ceiling(ax_max)
lbl_prn <- tcs <- c()
tcs <- c(1,0.5,0.1)
tcs <- rbind(tcs,seq(min_ax,max_ax,by=1))
lbl_prn <- rbind(lbl_prn,-1*(min_ax:max_ax))
} else if ((ax_max-ax_min)<=25) {
# major ticks on evens, minor ticks on the remaining integers;
# 'add' pads whichever row is shorter with zeros so rbind() lines up
min_ax <- floor(ax_min)
max_ax <- ceiling(ax_max)
tcs <- seq(2*ceiling(min_ax/2),2*floor(max_ax/2),by=2)
lbl_prn <- -1*tcs
add <- length(tcs)-length((min_ax:max_ax)[!(min_ax:max_ax) %in% tcs])
added <- c((min_ax:max_ax)[!(min_ax:max_ax) %in% tcs])
if (add==0) {
tcs <- rbind(tcs,added)
lbl_prn <- rbind(lbl_prn,rep("",length(added)))
} else if (add>0) {
added <- c(added,rep(0,add))
tcs <- rbind(tcs,added)
lbl_prn <- rbind(lbl_prn,rep("",length(added)))
} else {
# NOTE(review): single-argument rbind(c(...)) yields ONE row here,
# unlike the parallel branch below (<=50) which builds two rows --
# looks like a misplaced parenthesis; confirm intended shape.
tcs <- rbind(c(tcs,rep(0,abs(add))),added)
lbl_prn <- rbind(c(lbl_prn,rep("",abs(add)),rep("",length(added))))
}
tick_str <- c(-0.30,-0.15)
} else if ((ax_max-ax_min)<=50) {
# major ticks on multiples of 5, minor ticks on remaining integers
min_ax <- floor(ax_min);
max_ax <- ceiling(ax_max);
tcs <- seq(5*ceiling(min_ax/5),5*floor(max_ax/5),by=5)
lbl_prn <- -1*tcs
add <- length(tcs)-length((min_ax:max_ax)[!(min_ax:max_ax) %in% tcs])
added <- c((min_ax:max_ax)[!(min_ax:max_ax) %in% tcs])
if (add==0) {
tcs <- rbind(tcs,added)
lbl_prn <- rbind(lbl_prn,rep("",length(added)))
} else if (add>0) {
added <- c((min_ax:max_ax)[!(min_ax:max_ax) %in% tcs],rep(0,abs(add)))
tcs <- rbind(tcs,added);
lbl_prn <- rbind(c(lbl_prn,rep("",add)),rep("",length(added)))
} else {
tcs <- rbind(c(tcs,rep(0,abs(add))),added)
lbl_prn <- rbind(c(lbl_prn,rep("",abs(add))),rep("",length(added)))
}
tick_str <- c(-0.30,-0.15)
# tcs <- rbind(tcs,(min_ax:max_ax)[!(min_ax:max_ax) %in% tcs])
# lbl_prn <- c(TRUE,FALSE)
} else if ((ax_max-ax_min)<=200) {
# three levels: 10s (labelled), 5s (medium), remaining integers (minor)
min_ax <- floor(ax_min);
max_ax <- ceiling(ax_max);
tcs <- seq(10*ceiling(min_ax/10),10*floor(max_ax/10),by=10);
tcs <- rbind(tcs,seq(5*ceiling(min_ax/5),5*floor(max_ax/5),by=5)[!seq(5*ceiling(min_ax/5),5*floor(max_ax/5),by=5) %in% seq(10*ceiling(min_ax/10),10*floor(max_ax/10),by=10)])
tcs <- rbind(tcs,(min_ax:max_ax)[!(min_ax:max_ax) %in% seq(5*ceiling(min_ax/5),5*floor(max_ax/5),by=5)])
tick_str <- c(-0.30,-0.20,-0.10)
lbl_prn <- c(TRUE,FALSE,FALSE)
} else {
# NOTE(review): empty branch -- for spans > 200 nothing is assigned and
# the list() construction below fails with "object 'tcs' not found"
}
output <- list(tcs,lbl_prn,tick_str)
names(output) <- c("Ticks","Labels","Tick_Strength")
return(output)
}
# Older version of wagner_set_axes(), retained for reference.  Returns a
# list (Ticks, Labels, Tick_Strength) of tick positions, labels and tcl
# strengths for an axis spanning ax_min..ax_max.
# Fixes relative to the archived copy, aligning it with wagner_set_axes():
#   * 'rbin' typos -> rbind() (calls on the add==0 branches errored);
#   * references to a global 'divergences' -> ceiling(ax_max);
#   * misplaced parenthesis in the add<0 branch of the <=25 case, which
#     collapsed the two label rows into one.
wagner_set_axes_old <- function (ax_min,ax_max,y_add=0) {
if ((ax_max-ax_min)<=10) {
# NOTE(review): tick_str is never set on this branch; list() below
# finds it only if one exists in the enclosing environment
min_ax <- floor(ax_min)
max_ax <- ceiling(ax_max)
lbl_prn <- tcs <- c()
tcs <- rbind(tcs,seq(min_ax,max_ax,by=1))
lbl_prn <- rbind(lbl_prn,-1*(min_ax:max_ax))
} else if ((ax_max-ax_min)<=25) {
# majors on evens, minors on remaining integers; pad rows to equal length
min_ax <- floor(ax_min)
max_ax <- ceiling(ax_max)
tcs <- seq(2*ceiling(min_ax/2),2*floor(max_ax/2),by=2)
lbl_prn <- -1*tcs
add <- length(tcs)-length((min_ax:max_ax)[!(min_ax:max_ax) %in% tcs])
added <- c((min_ax:max_ax)[!(min_ax:max_ax) %in% tcs])
if (add==0) {
tcs <- rbind(tcs,added)
lbl_prn <- rbind(lbl_prn,rep("",length(added)))
} else if (add>0) {
added <- c(added,rep(0,add))
tcs <- rbind(tcs,added)
lbl_prn <- rbind(lbl_prn,rep("",length(added)))
} else {
tcs <- rbind(c(tcs,rep(0,abs(add))),added)
lbl_prn <- rbind(c(lbl_prn,rep("",abs(add))),rep("",length(added)))
}
tick_str <- c(-0.30,-0.15)
} else if ((ax_max-ax_min)<=50) {
# majors on multiples of 5, minors on remaining integers
min_ax <- floor(ax_min)
max_ax <- ceiling(ax_max)
tcs <- seq(5*ceiling(min_ax/5),5*floor(max_ax/5),by=5)
lbl_prn <- -1*tcs
add <- length(tcs)-length((min_ax:max_ax)[!(min_ax:max_ax) %in% tcs])
added <- c((min_ax:max_ax)[!(min_ax:max_ax) %in% tcs])
if (add==0) {
tcs <- rbind(tcs,added)
lbl_prn <- rbind(lbl_prn,rep("",length(added)))
} else if (add>0) {
added <- c((min_ax:max_ax)[!(min_ax:max_ax) %in% tcs],rep(0,abs(add)))
tcs <- rbind(tcs,added)
lbl_prn <- rbind(c(lbl_prn,rep("",add)),rep("",length(added)))
} else {
tcs <- rbind(c(tcs,rep(0,abs(add))),added)
lbl_prn <- rbind(c(lbl_prn,rep("",abs(add))),rep("",length(added)))
}
tick_str <- c(-0.30,-0.15)
# tcs <- rbind(tcs,(min_ax:max_ax)[!(min_ax:max_ax) %in% tcs])
# lbl_prn <- c(TRUE,FALSE)
} else if ((ax_max-ax_min)<=200) {
# three levels: 10s (labelled), 5s (medium), remaining integers (minor)
min_ax <- floor(ax_min)
max_ax <- ceiling(ax_max)
tcs <- seq(10*ceiling(min_ax/10),10*floor(max_ax/10),by=10);
tcs <- rbind(tcs,seq(5*ceiling(min_ax/5),5*floor(max_ax/5),by=5)[!seq(5*ceiling(min_ax/5),5*floor(max_ax/5),by=5) %in% seq(10*ceiling(min_ax/10),10*floor(max_ax/10),by=10)])
tcs <- rbind(tcs,(min_ax:max_ax)[!(min_ax:max_ax) %in% seq(5*ceiling(min_ax/5),5*floor(max_ax/5),by=5)])
tick_str <- c(-0.30,-0.20,-0.10)
lbl_prn <- c(TRUE,FALSE,FALSE)
} else {
# NOTE(review): spans > 200 are unsupported and fall through to an
# "object 'tcs' not found" error, same as the current version
}
output <- list(tcs,lbl_prn,tick_str)
names(output) <- c("Ticks","Labels","Tick_Strength")
return(output)
}
# Draw a linear axis whose tick spacing is chosen from the magnitude of
# max_val: labelled majors, then medium and minor ticks at smaller tcl
# strengths.  Pure graphics side effects; no return value of interest.
# NOTE(review): ranges with max_val > 1000 silently draw nothing.
fitted_linear_axis <- function(axe,max_val,min_val,linewd=4/3,orient=1,decimals=TRUE) {
if (max_val<=10) {
# axis line only, then integer majors plus (optionally) 0.5/0.1 minors
axis(axe,at=seq(min_val,max_val,by=max_val-min_val),tcl=-0.0,labels = FALSE,lwd=linewd,las=orient)
if (decimals==TRUE) {
ticks <- old_ticks <- seq(min_val,max_val,by=1)
axis(axe,at=ticks,tcl=-0.30,labels=FALSE,lwd=0.0,lwd.ticks=linewd,las=orient)
bs <- 0.5
ticks <- seq(min_val,max_val,by=bs)[!seq(min_val,max_val,by=bs) %in% old_ticks]
axis(axe,at=ticks,tcl=-0.20,labels=FALSE,lwd=0.0,lwd.ticks=linewd,las=orient)
axis(axe,at=seq(min_val,max_val,by=bs),tcl=-0.0,labels=TRUE,lwd=0.0,lwd.ticks=0.0,las=orient)
bs <- 0.1
ticks <- seq(min_val,max_val,by=bs)[!seq(min_val,max_val,by=bs) %in% old_ticks]
axis(axe,at=ticks,tcl=-0.10,labels=FALSE,lwd=0.0,lwd.ticks=linewd,las=orient)
} else {
axis(axe,at=seq(min_val,max_val,by=1),tcl=-0.3,labels=TRUE,lwd=0.0,lwd.ticks=linewd,las=orient)
}
} else if (max_val <=20) {
# majors every 2, minors on the remaining integers
axis(axe,at=seq(min_val,max_val,by=(max_val-min_val)),tcl=0.0,labels=FALSE,lwd=linewd,las=orient)
ticks <- seq(min_val,max_val,by=2)
axis(axe,at=ticks,tcl=-0.30,labels=TRUE,lwd=0.0,lwd.ticks=linewd,las=orient)
old_ticks <- ticks
bs <- 1
ticks <- seq(min_val,max_val,by=bs)[!seq(min_val,max_val,by=bs) %in% old_ticks]
axis(axe,at=ticks,tcl=-0.15,labels=FALSE,lwd=0.0,lwd.ticks=linewd)
} else if (max_val <=50) {
# majors every 5, minors on the remaining integers
axis(axe,at=seq(min_val,max_val,by=(max_val-min_val)),tcl=0.0,labels=FALSE,lwd=linewd,las=orient)
ticks <- seq(min_val,max_val,by=5)
axis(axe,at=ticks,tcl=-0.30,labels=TRUE,lwd=0.0,lwd.ticks=linewd,las=orient)
old_ticks <- ticks
bs <- 1
ticks <- seq(min_val,max_val,by=bs)[!seq(min_val,max_val,by=bs) %in% old_ticks]
axis(axe,at=ticks,tcl=-0.15,labels=FALSE,lwd=0.0,lwd.ticks=linewd)
} else if (max_val <=100) {
# majors every 10, mediums every 5, minors on remaining integers
axis(axe,at=seq(min_val,max_val,by=(max_val-min_val)),tcl=0.0,labels=FALSE,lwd=linewd,las=orient)
ticks <- seq(min_val,max_val,by=10)
axis(axe,at=ticks,tcl=-0.30,labels=TRUE,lwd=0.0,lwd.ticks=linewd,las=orient)
old_ticks <- ticks
bs <- 5
ticks <- seq(min_val,max_val,by=bs)[!seq(min_val,max_val,by=bs) %in% old_ticks]
axis(axe,at=ticks,tcl=-0.20,labels=FALSE,lwd=0.0,lwd.ticks=linewd)
old_ticks <- ticks
bs <- 1
ticks <- seq(min_val,max_val,by=bs)[!seq(min_val,max_val,by=bs) %in% old_ticks]
axis(axe,at=ticks,tcl=-0.10,labels=FALSE,lwd=0.0,lwd.ticks=linewd)
} else if (max_val <=250) {
# majors every 25, minors every 5
axis(axe,at=seq(min_val,max_val,by=(max_val-min_val)),tcl=0.0,labels=FALSE,lwd=linewd,las=orient)
ticks <- seq(min_val,max_val,by=25)
axis(axe,at=ticks,tcl=-0.30,labels=TRUE,lwd=0.0,lwd.ticks=linewd,las=orient)
old_ticks <- ticks
bs <- 5
ticks <- seq(min_val,max_val,by=bs)[!seq(min_val,max_val,by=bs) %in% old_ticks]
axis(axe,at=ticks,tcl=-0.15,labels=FALSE,lwd=0.0,lwd.ticks=linewd)
} else if (max_val <=500) {
# majors every 50, mediums every 25, minors every 5
# NOTE(review): unlike the <=100 branch, old_ticks is NOT refreshed
# before the bs=5 pass, so the minor ticks are also drawn on top of
# the bs=25 medium-tick positions -- confirm whether intentional
axis(axe,at=seq(min_val,max_val,by=(max_val-min_val)),tcl=0.0,labels=FALSE,lwd=linewd,las=orient)
ticks <- seq(min_val,max_val,by=50)
axis(axe,at=ticks,tcl=-0.30,labels=TRUE,lwd=0.0,lwd.ticks=linewd,las=orient)
old_ticks <- ticks
bs <- 25
ticks <- seq(min_val,max_val,by=bs)[!seq(min_val,max_val,by=bs) %in% old_ticks]
axis(axe,at=ticks,tcl=-0.20,labels=FALSE,lwd=0.0,lwd.ticks=linewd)
bs <- 5
ticks <- seq(min_val,max_val,by=bs)[!seq(min_val,max_val,by=bs) %in% old_ticks]
axis(axe,at=ticks,tcl=-0.10,labels=FALSE,lwd=0.0,lwd.ticks=linewd)
} else if (max_val <=1000) {
# majors every 100, mediums every 50, minors every 10
# NOTE(review): same un-refreshed old_ticks pattern as the <=500 branch
axis(axe,at=seq(min_val,max_val,by=(max_val-min_val)),tcl=0.0,labels=FALSE,lwd=linewd,las=orient)
ticks <- seq(min_val,max_val,by=100)
axis(axe,at=ticks,tcl=-0.30,labels=TRUE,lwd=0.0,lwd.ticks=linewd,las=orient)
old_ticks <- ticks
bs <- 50
ticks <- seq(min_val,max_val,by=bs)[!seq(min_val,max_val,by=bs) %in% old_ticks]
axis(axe,at=ticks,tcl=-0.20,labels=FALSE,lwd=0.0,lwd.ticks=linewd)
bs <- 10
ticks <- seq(min_val,max_val,by=bs)[!seq(min_val,max_val,by=bs) %in% old_ticks]
axis(axe,at=ticks,tcl=-0.10,labels=FALSE,lwd=0.0,lwd.ticks=linewd)
}
}
# Open an empty, axis-free plotting region of xsize-by-ysize inches
# spanning [mnx,mxx] on x and [mny,mxy] on y.  Title/label text and cex
# settings pass straight through to plot(); axes are expected to be added
# afterwards (e.g. via specified_axis()).
specify_basic_plot <- function(mxx, mnx, mxy, mny, main="",subtitle="",abcissa="", ordinate="", xsize=3, ysize=3, cexaxis=1, cexlab=1, cexmain=1, cexsub=1) {
# fix the plot region size in inches before opening the frame
par(pin=c(xsize,ysize));
plot(NA,
     type='n',
     axes=FALSE,
     main=main,
     sub=subtitle,
     xlab=abcissa,
     ylab=ordinate,
     xlim=c(mnx,mxx),
     ylim=c(mny,mxy),
     cex.axis=cexaxis,
     cex.lab=cexlab,
     cex.main=cexmain,
     cex.sub=cexsub);
}
# routine to make axes as you want them.
# axe: axis # (1 = x; 2= y)
# max_val: maximum value
# min_val: minimum value
# maj_break: major (labelled) breaks
# med_break: intermediate breaks
# min_break: minor breaks. NOTE: if med_break or min_break=0, then just two breaks
# specified_axis(axe=2,max_val=mxy,min_val=mny,maj_break=100,med_break=50,min_break=10,linewd=4/3,orient=2) gives y-axis with 0:700 labeled,
# 2ndary ticks on 50 & tertiary ticks on 10
# Draw an axis with labelled major ticks every maj_break, medium ticks
# every med_break and minor ticks every min_break (a break of 0 disables
# that level).  Tick strengths (tcl) adapt to how many levels are active.
specified_axis <- function(axe,max_val,min_val,maj_break,med_break,min_break,linewd=4/3,font_size=1,orient=1,print_label=TRUE) {
#axis(axe,at=seq(min_val,max_val,by=max_val-min_val),tcl=-0.0,labels = FALSE,lwd=linewd,las=orient)
if ((min_val/maj_break)-floor(min_val/maj_break)<(10^-10)) {
# this is a kluge necessitated by tiny rounding errors....
mnv1 <- min_val;
mxv1 <- max_val;
} else {
# min_val is not on a major break: start majors at the next one up
mnv1 <- (maj_break*ceiling(min_val/maj_break));
mxv1 <- med_break*ceiling(max_val/med_break);
}
strt <- min(min_val,mnv1);
endy <- max(max_val,mxv1);
# axis line (no ticks), then the labelled majors
axis(axe,at=seq(strt,endy,by=endy-strt),tcl=-0.0,labels = FALSE,lwd=linewd,las=orient);
ticks <- old_ticks <- seq(mnv1,mxv1,by=maj_break);
axis(axe,at=ticks,tcl=-0.30,labels=print_label,lwd=0.0,lwd.ticks=linewd,las=orient,cex.axis=font_size);
if (med_break!=0) {
mnv2 <- (med_break*ceiling(min_val/med_break));
# NOTE(review): the logical subscript below is built from
# seq(mnv2,max_val,...) while the subsetted vector uses
# seq(mnv2,mxv1,...); when mxv1 != max_val their lengths differ and
# the index recycles -- confirm intended behavior.
ticks <- seq(mnv2,mxv1,by=med_break)[!seq(mnv2,max_val,by=med_break) %in% old_ticks]
if (min_break!=0) {
tck_sz <- -0.20
} else tck_sz <- -0.15
axis(axe,at=ticks,tcl=tck_sz,labels=FALSE,lwd=0.0,lwd.ticks=linewd,las=orient)
old_ticks <- sort(c(ticks,old_ticks))
}
if (min_break!=0) {
# minor ticks over the full plotted span, skipping existing ticks
ticks <- seq(min(min_val,mnv1),max(mxv1,max_val),by=min_break);
ticks <- ticks[!ticks %in% old_ticks];
if (med_break!=0) {
tck_sz <- -0.10
} else tck_sz <- -0.15
axis(axe,at=ticks,tcl=tck_sz,labels=FALSE,lwd=0.0,lwd.ticks=linewd,las=orient)
}
}
# Like specified_axis(), but prints an arbitrary vector of axis_labels.
# label_pos="mid" with numeric labels centers each label half a unit below
# its value (for category-style axes); otherwise labels are spread evenly
# from min_val to max_val.  Labels are printed in two alternating passes
# so that axis() does not drop every other label when they crowd.
specified_axis_w_labels <- function(axe,max_val,min_val,maj_break,med_break,min_break,axis_labels,linewd=4/3,label_pos="tick",font_size=1,orient=1,print_label=T) {
#axis(axe,at=seq(min_val,max_val,by=max_val-min_val),tcl=-0.0,labels = FALSE,lwd=linewd,las=orient)
if ((min_val/maj_break)-floor(min_val/maj_break)<(10^-10)) {
# this is a kluge necessitated by tiny rounding errors....
mnv1 <- min_val;
mxv1 <- max_val;
} else {
mnv1 <- (maj_break*ceiling(min_val/maj_break));
mxv1 <- med_break*ceiling(max_val/med_break);
}
strt <- min(min_val,mnv1);
endy <- max(max_val,mxv1);
if (label_pos=="mid" && is.numeric(axis_labels[1])) {
# trim the axis end so mid-positioned labels stay inside the frame
label_span <- abs(axis_labels[2]-axis_labels[1]);
mxv1 <- endy <- min(endy,max(axis_labels)+(label_span-1));
}
# axis line (no ticks), then unlabelled major ticks
axis(axe,at=seq(strt,endy,by=endy-strt),tcl=-0.0,labels = FALSE,lwd=linewd,las=orient);
ticks <- old_ticks <- seq(mnv1,mxv1,by=maj_break);
axis(axe,at=ticks,tcl=-0.30,labels=F,lwd=0.0,lwd.ticks=linewd,las=orient);
axis_span <- max_val-min_val;
if (label_pos=="mid" && is.numeric(axis_labels[1])) {
labels_ticks <- axis_labels-0.5
# odd- then even-indexed labels, so crowded neighbors are not culled
pass_one <- (1:length(axis_labels))[(1:length(axis_labels)) %% 2==1];
pass_two <- (1:length(axis_labels))[(1:length(axis_labels)) %% 2==0];
# axis(axe,at=labels_ticks,tcl=-0.30,labels=axis_labels,lwd=0.0,lwd.ticks=0.0,las=orient,cex.axis=font_size);
axis(axe,at=labels_ticks[pass_one],tcl=-0.30,labels=axis_labels[pass_one],lwd=0.0,lwd.ticks=0.0,las=orient,cex.axis=font_size);
axis(axe,at=labels_ticks[pass_two],tcl=-0.30,labels=axis_labels[pass_two],lwd=0.0,lwd.ticks=0.0,las=orient,cex.axis=font_size);
} else {
labels_ticks <- seq(min_val,max_val,by=axis_span/(length(axis_labels)-1));
pass_one <- (1:length(axis_labels))[(1:length(axis_labels)) %% 2==1];
pass_two <- (1:length(axis_labels))[(1:length(axis_labels)) %% 2==0];
axis(axe,at=labels_ticks[pass_one],tcl=-0.30,labels=axis_labels[pass_one],lwd=0.0,lwd.ticks=linewd,las=orient,cex.axis=font_size);
axis(axe,at=labels_ticks[pass_two],tcl=-0.30,labels=axis_labels[pass_two],lwd=0.0,lwd.ticks=linewd,las=orient,cex.axis=font_size);
}
if (med_break!=0 && med_break!=maj_break) {
mnv2 <- (med_break*ceiling(min_val/med_break));
# NOTE(review): subscript uses seq(mnv2,max_val,...) against a vector
# built from seq(mnv2,mxv1,...); lengths differ when mxv1 != max_val
# and the logical index recycles -- confirm (same as specified_axis()).
ticks <- seq(mnv2,mxv1,by=med_break)[!seq(mnv2,max_val,by=med_break) %in% old_ticks]
if (min_break!=0) {
tck_sz <- -0.20
} else tck_sz <- -0.15
axis(axe,at=ticks,tcl=tck_sz,labels=FALSE,lwd=0.0,lwd.ticks=linewd,las=orient)
old_ticks <- sort(c(ticks,old_ticks))
}
if (min_break!=0 && !min_break %in% c(maj_break,med_break)) {
# minor ticks over the full plotted span, skipping existing ticks
ticks <- seq(min(min_val,mnv1),max(mxv1,max_val),by=min_break);
ticks <- ticks[!ticks %in% old_ticks];
if (med_break!=0) {
tck_sz <- -0.10
} else tck_sz <- -0.15
axis(axe,at=ticks,tcl=tck_sz,labels=FALSE,lwd=0.0,lwd.ticks=linewd,las=orient)
}
}
# Older labelled-axis routine kept for reference: draws the axis line,
# unlabelled majors every maj_break, optional medium (med_break) and minor
# (min_break) ticks, then prints axis_labels (numeric labels mark their
# own positions unless axis_label_pts is given; otherwise labels sit on
# the majors).
# Fix relative to the archived copy: the minor-tick start previously read
# min(min_val,mnv1), but mnv1 was never defined (its computation was
# commented out), so any call with min_break != 0 errored.  In that
# commented code mnv1 >= min_val by construction, so min_val is the
# equivalent starting point.
specified_axis_w_labels_old <- function(axe,max_val,min_val,maj_break,med_break=0,min_break=0,axis_labels,axis_label_pts=NULL,axis_label_size=1,linewd=4/3,orient=1) {
# axis line (no ticks), then unlabelled major ticks
axis(axe,at=seq(min_val,max_val,by=max_val-min_val),tcl=-0.0,labels = FALSE,lwd=linewd,las=orient);
ticks <- old_ticks <- seq(min_val,max_val,by=maj_break);
axis(axe,at=ticks,tcl=-0.30,labels=FALSE,lwd=0.0,lwd.ticks=linewd,las=orient);
if (med_break!=0) {
mnv2 <- (med_break*ceiling(min_val/med_break))
ticks <- seq(mnv2,max_val,by=med_break)[!seq(mnv2,max_val,by=med_break) %in% old_ticks]
if (min_break!=0) {
tck_sz <- -0.20
} else tck_sz <- -0.15
axis(axe,at=ticks,tcl=tck_sz,labels=FALSE,lwd=0.0,lwd.ticks=linewd,las=orient)
old_ticks <- sort(c(ticks,old_ticks))
}
if (min_break!=0) {
# minor ticks, skipping positions already drawn at a stronger tcl
ticks <- seq(min_val,max_val,by=min_break);
ticks <- ticks[!ticks %in% old_ticks];
if (med_break!=0) {
tck_sz <- -0.10
} else tck_sz <- -0.15
axis(axe,at=ticks,tcl=tck_sz,labels=FALSE,lwd=0.0,lwd.ticks=linewd,las=orient)
}
# label placement: numeric labels mark their own positions unless
# explicit positions were supplied; otherwise labels go on the majors
if(is.numeric(axis_labels[1]) && is.null(axis_label_pts)) {
axis_label_pts <- axis_labels;
} else {
axis_label_pts <- seq(min_val,max_val,by=maj_break);
}
axis(axe,at=axis_label_pts,tcl=-0.30,labels=axis_labels,lwd=0.0,lwd.ticks=0.0,las=orient,cex.axis=axis_label_size);
}
# Draw a secondary (right-hand, side=4) y-axis whose values min_val..max_val
# are rescaled onto the main y-axis range mny1..mxy1, with labelled major
# ticks every maj_break and optional medium/minor ticks.
# Fix relative to the archived copy: the axis-line seq() step previously
# read (mxy-mny) -- free variables resolved from the global environment --
# instead of the function's own (mxy1-mny1), erroring (or silently using
# stale globals) whenever mxy/mny did not happen to exist.
specified_right_y_axis <- function(mxy1,mny1,max_val,min_val,maj_break,med_break,min_break,linewd=4/3,orient=2,print_label=TRUE) {
# mxy1: maximum value on main (left) y-axis
# mny1: minimum value on main (left) y-axis; we rescale (max_val-min_val)/(mxy1-mny1)
# axis line only (single step spanning the whole main-axis range)
axis(side=4,at=seq(mny1,mxy1,by=(mxy1-mny1)),tcl=-0.0,labels = FALSE,lwd=linewd,las=orient)
if ((min_val/maj_break)-floor(min_val/maj_break)<(10^-10)) {
# this is a kluge necessitated by tiny rounding errors....
mnv1 <- min_val
} else {
mnv1 <- (maj_break*ceiling(min_val/maj_break))
}
labels <- seq(mnv1,max_val,by=maj_break);
# map right-axis values onto the main axis' coordinate range
# NOTE(review): the divisor is the label span, not (max_val-min_val) --
# confirm this is the intended rescaling when mnv1 > min_val
ticks <- old_ticks <- mny1 + (mxy1-mny1)*labels/(max(labels)-min(labels))
#ticks <- old_ticks <- seq(mnv1,mxy1,by=(mxy1-mny1)/(max_val/maj_break))
if (print_label) {
axis(side=4,at=ticks,tcl=-0.30,labels=seq(mnv1,max_val,by=maj_break),lwd=0.0,lwd.ticks=linewd,las=orient)
} else {
axis(side=4,at=ticks,tcl=-0.30,labels=print_label,lwd=0.0,lwd.ticks=linewd,las=orient)
}
if (med_break!=0) {
ticks <- mny1 + (mxy1-mny1)*seq(min_val,max_val,by=med_break)/(max(labels)-min(labels))
mnv2 <- (med_break*ceiling(min_val/med_break))
ticks <- ticks[!ticks %in% old_ticks]
if (min_break!=0 && min_break!=med_break) {
tck_sz <- -0.20
} else tck_sz <- -0.15
axis(side=4,at=ticks,tcl=tck_sz,labels=FALSE,lwd=0.0,lwd.ticks=linewd,las=orient)
old_ticks <- sort(c(ticks,old_ticks))
}
if (min_break!=0 && min_break!=med_break) {
ticks <- mny1 + (mxy1-mny1)*seq(min_val,max_val,by=min_break)/(max(labels)-min(labels))
# ticks <- seq(mny1,mxy1,by=(mxy1-mny1)/(max_val/min_break))[!seq(mny1,mxy1,by=(mxy1-mny1)/(max_val/min_break)) %in% old_ticks]
ticks <- ticks[!ticks %in% old_ticks]
if (med_break!=0) {
tck_sz <- -0.10
} else tck_sz <- -0.15
axis(side=4,at=ticks,tcl=tck_sz,labels=FALSE,lwd=0.0,lwd.ticks=linewd,las=orient)
}
}
#slice_coll
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
#### Phylogeny drawing routines ####
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
# updated 2020-05-11
# Compute vertical plotting positions for all taxa (otus) and nodes (htus)
# on a budding phylogeny, centering each node's daughters around the node
# and spacing clades in proportion to their richness.
# vector_tree: tree in ancestor-vector form; durations: matrix whose first
#   column holds origins (per otu, or per otu+node when nrow matches);
# sampled_ancestors: per-htu otu number of a sampled ancestor (0 = none);
#   padded with leading zeros if supplied per-node only.
# Returns final_pos: integer rank positions, one per otu and htu.
# NOTE(review): relies on project helpers (transform_vector_tree_to_*,
# tally_node_richness_from_vector_tree) defined elsewhere in this file.
center_budding_phylogeny <- function(vector_tree,durations,sampled_ancestors) {
# function to get relative positions of lineages on a phylogeny;
venn_tree <- transform_vector_tree_to_venn_tree(vector_tree);
mtree <- transform_vector_tree_to_matrix_tree(vector_tree);
node_richness <- tally_node_richness_from_vector_tree(vector_tree = vector_tree);
nNode <- nrow(mtree);
notu <- length(vector_tree)-nNode;
node_ages <- c();
# branch ages: taken directly from durations when it covers otus+nodes,
# otherwise node ages are the oldest first appearance among descendants
if (nrow(durations)==(notu+nNode)) {
branch_ages <- durations[,1];
} else {
for (nd in 1:nNode) node_ages <- c(node_ages,min(durations[venn_tree[nd,venn_tree[nd,]>0],1]));
branch_ages <- c(durations[1:notu,1],node_ages);
}
if (length(sampled_ancestors) < (notu+nNode))
sampled_ancestors <- c(rep(0,notu),sampled_ancestors);
ttl_richness <- c(rep(1,notu),node_richness);
##patristic_distances <- accersi_patristic_distance_from_base(atree=mtree);#max_nodes <- max(patristic_distances);
last_left <- "left"; # move up the axis
last_right <- "right"; # move down the axis
accounted <- c();
nd <- 0;
phy_pos <- rep(0,nNode+notu);
#for (nd in 1:nNode) {
# walk the nodes from the base, placing each node's daughters (f1) around
# it; already-placed lineages further from the node are pushed outward
while (nd < nNode) {
nd <- nd+1;
htu <- nd+notu; # htu number of node;
tf1 <- sum(mtree[nd,]>0);
# a sampled ancestor sits at the same height as its node
if (sampled_ancestors[htu]!=0) {
tf1 <- tf1-1;
phy_pos[sampled_ancestors[htu]] <- phy_pos[notu+nd];
}
f1 <- mtree[nd,!mtree[nd,] %in% c(sampled_ancestors[htu],0)];
f1 <- f1[order(-ttl_richness[f1])];
if (length(f1)>2) {
# polytomy: partition daughters into "left" and "right" sets so each
# side carries roughly half the richness, then stack by branch age
right <- left <- 0;
prop_richness <- ttl_richness[f1]/sum(ttl_richness[f1]);
f1cc <- length(f1);
left <- f1[1];
sum_prop <- prop_richness[1];
while (sum_prop <= 0.45) {
sum_prop <- sum_prop+prop_richness[f1cc];
left <- c(left,f1[f1cc]);
f1cc <- f1cc-1;
}
right <- f1[!f1 %in% left];
right <- right[order(-abs(branch_ages[right]))];
left <- left[order(-abs(branch_ages[left]))];
# shift rest of the tree away from ancestral node
# phy_pos[phy_pos<phy_pos[htu]] <- phy_pos[phy_pos<phy_pos[htu]]-sum(ttl_richness[right]);
# phy_pos[phy_pos>phy_pos[htu]] <- phy_pos[phy_pos>phy_pos[htu]]+sum(ttl_richness[left]);
rr <- 1;
phy_pos[phy_pos<phy_pos[htu]] <- phy_pos[phy_pos<phy_pos[htu]]-ttl_richness[right[rr]]
phy_pos[right[rr]] <- phy_pos[htu]-ttl_richness[right[rr]];
while (rr < length(right)) {
rr <- rr+1;
if (sum(phy_pos<phy_pos[htu])>0)
phy_pos[phy_pos<phy_pos[htu]] <- phy_pos[phy_pos<phy_pos[htu]]-(2*ttl_richness[right[rr]]);
# phy_pos[right[rr]] <- (phy_pos[right[rr-1]]-ttl_richness[right[rr-1]])-ttl_richness[right[rr]];
phy_pos[right[rr]] <- phy_pos[htu]-ttl_richness[right[rr]];
# phy_pos[c(htu,right)];
}
ll <- 1;
phy_pos[phy_pos>phy_pos[htu]] <- phy_pos[phy_pos>phy_pos[htu]]+ttl_richness[left[ll]]
# phy_pos[phy_pos>phy_pos[htu]] <- phy_pos[phy_pos>phy_pos[htu]]+sum(ttl_richness[left[ll]]);
phy_pos[left[ll]] <- phy_pos[htu] + ttl_richness[left[ll]];
while (ll < length(left)) {
ll <- ll+1;
if (sum(phy_pos>phy_pos[htu])>0)
phy_pos[phy_pos>phy_pos[htu]] <- phy_pos[phy_pos>phy_pos[htu]]+(2*ttl_richness[left[ll]]);
# phy_pos[phy_pos>phy_pos[htu]] <- phy_pos[phy_pos>phy_pos[htu]]+2*ttl_richness[left[ll]]);
# phy_pos[left[ll]] <- (phy_pos[left[ll-1]]+ttl_richness[left[ll-1]])+ttl_richness[left[ll]];
phy_pos[left[ll]] <- phy_pos[htu]+ttl_richness[left[ll]];
}
} else if (length(f1)==2) {
# dichotomy: richer daughter alternates sides to balance the drawing
f1 <- f1[order(-ttl_richness[f1])];
if (phy_pos[htu]<phy_pos[1+notu]) {
# going left is positive, so shift everything above this up by this amount
if (last_right=="right") {
right <- f1[2];
left <- f1[1];
last_right <- "left"
} else {
right <- f1[1];
left <- f1[2];
last_right <- "right"
}
} else {
# going right is negative, so shift everything below this down by this amount
if (last_left=="left") {
right <- f1[1];
left <- f1[2];
last_left <- "right";
} else {
right <- f1[2];
left <- f1[1];
last_left <- "left";
}
}
# shift rest of the tree away from ancestral node
phy_pos[phy_pos>phy_pos[htu]] <- phy_pos[phy_pos>phy_pos[htu]] + ttl_richness[left];
phy_pos[phy_pos<phy_pos[htu]] <- phy_pos[phy_pos<phy_pos[htu]] - ttl_richness[right];
phy_pos[left] <- phy_pos[htu] + ttl_richness[left];
phy_pos[right] <- phy_pos[htu] - ttl_richness[right];
} else if (length(f1)==1) {
# single (non-ancestor) daughter: alternate which side it goes on
if (phy_pos[htu]<phy_pos[1+notu]) {
if (last_right=="right") {
# going left is positive, so shift everything above this up by this amount
# phy_y[phy_y>(phy_y[htu]+ttl_richness[f1])] <- phy_y[phy_y>phy_y[htu]] + ttl_richness[f1];
# shift rest of the tree away from ancestral node
phy_pos[phy_pos>phy_pos[htu]] <- phy_pos[phy_pos>phy_pos[htu]] + ttl_richness[f1];
phy_pos[f1] <- phy_pos[htu] + ttl_richness[f1];
last_right <- "left";
} else {
# going right is negative, so shift everything below this down by this amount
# phy_y[phy_y<(phy_y[htu]-ttl_richness[f1])] <- phy_y[phy_y>ttl_richness[f1]] - ttl_richness[f1];
# shift rest of the tree away from ancestral node
phy_pos[phy_pos<phy_pos[htu]] <- phy_pos[phy_pos<phy_pos[htu]] - ttl_richness[f1];
phy_pos[f1] <- phy_pos[htu] - ttl_richness[f1];
last_right <- "right";
}
} else {
if (last_left=="right") {
# phy_y[phy_y>(phy_y[htu]+ttl_richness[f1])] <- phy_y[phy_y>phy_y[htu]] + ttl_richness[f1];
# shift rest of the tree away from ancestral node
phy_pos[phy_pos>phy_pos[htu]] <- phy_pos[phy_pos>phy_pos[htu]] + ttl_richness[f1];
phy_pos[f1] <- phy_pos[htu] + ttl_richness[f1];
last_left <- "left";
} else {
# going right is negative, so shift everything below this down by this amount
# phy_y[phy_y<(phy_y[htu]-ttl_richness[f1])] <- phy_y[phy_y>ttl_richness[f1]] - ttl_richness[f1];
# shift rest of the tree away from ancestral node
phy_pos[phy_pos<phy_pos[htu]] <- phy_pos[phy_pos<phy_pos[htu]] - ttl_richness[f1];
phy_pos[f1] <- phy_pos[htu] - ttl_richness[f1];
last_left <- "right";
}
}
}
phy_pos <- phy_pos-phy_pos[notu+1]; # recenter around the base of the tree
# print(c(nd,phy_pos));
# now do species
accounted <- c(accounted,mtree[nd,mtree[nd,]>0]);
}
# convert centered positions to ranks 1..k (ties share a rank)
final_pos <- match(phy_pos,sort(unique(phy_pos)));
# spread apart any rank shared by 3+ lineages (2 is allowed: a sampled
# ancestor legitimately shares its node's position)
needed_edits <- hist(final_pos,breaks=0:max(final_pos),plot=F)$counts
too_many_here <- (1:length(needed_edits))[needed_edits>2];
while (length(too_many_here)>0) {
ttl_minions <- 1:(notu+nNode);
tmh <- 0;
while (tmh < length(too_many_here)) {
tmh <- tmh+1;
problems <- ttl_minions[final_pos==too_many_here[tmh]]; # taxa overlapping each other
these_nodes <- c();
for (pp in 1:length(problems))
these_nodes <- c(these_nodes,which(mtree==problems[pp],arr.ind = T)[1]); # get the nodes containing problem cases;
problem_ancestors <- problems[(notu+these_nodes) %in% problems]; # separate out sampled ancestors
problem_ancestors_htu <- notu+these_nodes[match(problem_ancestors,problems)]; # keep track of the htu to which they belong, however!
these_nodes <- these_nodes[!problems %in% problem_ancestors];
problems <- problems[!problems %in% problem_ancestors]; # remove sampled ancestors for now
starting_points <- final_pos[notu+these_nodes]; # positions of ancestral nodes/taxa
adjust2 <- adjust <- starting_points-too_many_here[tmh];
adjust2[adjust<0]<- -(length(adjust[adjust<0]):1);
adjust2[adjust>0]<- 1:length(adjust[adjust>0]);
# open gaps on both sides, then fan the overlapping taxa into them
final_pos[final_pos<too_many_here[tmh]] <- final_pos[final_pos<too_many_here[tmh]]+min(adjust2);
final_pos[final_pos>too_many_here[tmh]] <- final_pos[final_pos>too_many_here[tmh]]+max(adjust2);
phy_pos[phy_pos<phy_pos[problems[1]]] <- phy_pos[phy_pos<phy_pos[problems[1]]]+min(adjust2);
phy_pos[phy_pos>phy_pos[problems[1]]] <- phy_pos[phy_pos>phy_pos[problems[1]]]+max(adjust2);
final_pos[problems] <- final_pos[problems]+adjust;
phy_pos[problems] <- phy_pos[problems]+adjust2;
final_pos[problem_ancestors] <- final_pos[problem_ancestors_htu];
phy_pos[problem_ancestors] <- phy_pos[problem_ancestors_htu];
too_many_here <- too_many_here+max(adjust2);
}
# re-rank and re-check until no position is shared by more than two
final_pos <- match(phy_pos,sort(unique(phy_pos)));
needed_edits <- hist(final_pos,breaks=0:max(final_pos),plot=F)$counts
too_many_here <- (1:length(needed_edits))[needed_edits>2]
}
return(final_pos);
}
center_budding_phylogeny_effed <- function(vector_tree,durations) {
venn_tree <- transform_vector_tree_to_venn_tree(vector_tree);
mtree <- transform_vector_tree_to_matrix_tree(vector_tree);
node_richness <- tally_node_richness_from_vector_tree(vector_tree = vector_tree);
node_ages <- c();
nNodes <- nrow(venn_tree)
for (nd in 1:nNodes) node_ages <- c(node_ages,min(durations[venn_tree[nd,venn_tree[nd,]>0],1]));
branch_ages <- c(durations[,1],node_ages)
nNode <- nrow(mtree);
notu <- length(vector_tree)-nNode;
ttl_richness <- c(rep(1,notu),node_richness);
patristic_distances <- accersi_patristic_distance_from_base(atree=mtree);
max_nodes <- max(patristic_distances);
last_left <- "left"; # move up the axis
last_right <- "right"; # move down the axis
accounted <- c();
nd <- 0;
phy_pos <- rep(0,nNode+notu);
for (nd in 1:nNodes) {
# nd <- nd+1;
htu <- nd+notu; # htu number of node;
tf1 <- sum(mtree[nd,]>0);
if (sampled_ancestors[htu]!=0) {
tf1 <- tf1-1;
phy_pos[sampled_ancestors[htu]] <- phy_pos[notu+nd];
}
f1 <- mtree[nd,!mtree[nd,] %in% c(sampled_ancestors[htu],0)];
f1 <- f1[order(-ttl_richness[f1])];
if (length(f1)>2) {
right <- left <- 0;
prop_richness <- ttl_richness[f1]/sum(ttl_richness[f1]);
f1cc <- 1;
left <- f1[1];
sum_prop <- prop_richness[1];
while (sum_prop < 0.5) {
f1cc <- 1+f1cc;
sum_prop <- sum_prop+prop_richness[f1cc];
left <- c(left,f1);
}
right <- f1[!f1 %in% left];
right <- right[order(-branch_ages[right])];
left <- left[order(-branch_ages[left])];
# shift rest of the tree away from ancestral node
phy_pos[phy_pos<phy_pos[htu]] <- phy_pos[phy_pos<phy_pos[htu]]-sum(ttl_richness[right]);
phy_pos[phy_pos>phy_pos[htu]] <- phy_pos[phy_pos>phy_pos[htu]]+sum(ttl_richness[left]);
rr <- 1;
phy_pos[right[rr]] <- phy_pos[htu]-ttl_richness[right[rr]];
while (rr < length(right)) {
rr <- rr+1;
phy_pos[right[rr]] <- (phy_pos[right[rr-1]]-ttl_richness[right[rr-1]])-ttl_richness[right[rr]];
}
ll <- 1;
phy_pos[left[ll]] <- phy_pos[htu] + ttl_richness[left[ll]];
while (ll < length(left)) {
ll <- ll+1;
phy_pos[left[ll]] <- (phy_pos[left[ll-1]]+ttl_richness[left[ll-1]])+ttl_richness[left[ll]];
}
} else if (length(f1)==2) {
f1 <- f1[order(-ttl_richness[f1])];
if (phy_pos[htu]<phy_pos[1+notu]) {
# going left is positive, so shift everything above this up by this amount
if (last_right=="right") {
right <- f1[2];
left <- f1[1];
last_right <- "left"
} else {
right <- f1[1];
left <- f1[2];
last_right <- "right"
}
} else {
# going right is negative, so shift everything below this down by this amount
if (last_left=="left") {
right <- f1[1];
left <- f1[2];
last_left <- "right";
} else {
right <- f1[2];
left <- f1[1];
last_left <- "left";
}
}
# shift rest of the tree away from ancestral node
phy_pos[phy_pos>phy_pos[htu]] <- phy_pos[phy_pos>phy_pos[htu]] + ttl_richness[left];
phy_pos[phy_pos<phy_pos[htu]] <- phy_pos[phy_pos<phy_pos[htu]] - ttl_richness[right];
phy_pos[left] <- phy_pos[htu] + ttl_richness[left];
phy_pos[right] <- phy_pos[htu] - ttl_richness[right];
} else if (length(f1)==1) {
if (phy_pos[htu]<phy_pos[1+notu]) {
if (last_right=="right") {
# going left is positive, so shift everything above this up by this amount
# phy_y[phy_y>(phy_y[htu]+ttl_richness[f1])] <- phy_y[phy_y>phy_y[htu]] + ttl_richness[f1];
# shift rest of the tree away from ancestral node
phy_pos[phy_pos>phy_pos[htu]] <- phy_pos[phy_pos>phy_pos[htu]] + ttl_richness[f1];
phy_pos[f1] <- phy_pos[htu] + ttl_richness[f1];
last_right <- "left";
} else {
# going right is negative, so shift everything below this down by this amount
# phy_y[phy_y<(phy_y[htu]-ttl_richness[f1])] <- phy_y[phy_y>ttl_richness[f1]] - ttl_richness[f1];
# shift rest of the tree away from ancestral node
phy_pos[phy_pos<phy_pos[htu]] <- phy_pos[phy_pos<phy_pos[htu]] - ttl_richness[f1];
phy_pos[f1] <- phy_pos[htu] - ttl_richness[f1];
last_right <- "right";
}
} else {
if (last_left=="right") {
# phy_y[phy_y>(phy_y[htu]+ttl_richness[f1])] <- phy_y[phy_y>phy_y[htu]] + ttl_richness[f1];
# shift rest of the tree away from ancestral node
phy_pos[phy_pos>phy_pos[htu]] <- phy_pos[phy_pos>phy_pos[htu]] + ttl_richness[f1];
phy_pos[f1] <- phy_pos[htu] + ttl_richness[f1];
last_left <- "left";
} else {
# going right is negative, so shift everything below this down by this amount
# phy_y[phy_y<(phy_y[htu]-ttl_richness[f1])] <- phy_y[phy_y>ttl_richness[f1]] - ttl_richness[f1];
# shift rest of the tree away from ancestral node
phy_pos[phy_pos<phy_pos[htu]] <- phy_pos[phy_pos<phy_pos[htu]] - ttl_richness[f1];
phy_pos[f1] <- phy_pos[htu] - ttl_richness[f1];
last_left <- "right";
}
}
}
phy_pos <- phy_pos-phy_pos[notu+1]; # recenter around the base of the tree
# print(c(nd,phy_pos));
# now do species
accounted <- c(accounted,mtree[nd,mtree[nd,]>0]);
}
final_pos <- match(phy_pos,sort(unique(phy_pos)));
needed_edits <- hist(final_pos,breaks=0:max(final_pos),plot=F)$counts
too_many_here <- (1:length(needed_edits))[needed_edits>2];
while (length(too_many_here)>0) {
ttl_minions <- 1:(notu+nNode);
tmh <- 0;
while (tmh < length(too_many_here)) {
tmh <- tmh+1;
problems <- ttl_minions[final_pos==too_many_here[tmh]]; # taxa overlapping each other
these_nodes <- c();
for (pp in 1:length(problems))
these_nodes <- c(these_nodes,which(mtree==problems[pp],arr.ind = T)[1]); # get the nodes containing problem cases;
problem_ancestors <- problems[(notu+these_nodes) %in% problems]; # separate out sampled ancestors
problem_ancestors_htu <- notu+these_nodes[match(problem_ancestors,problems)]; # keep track of the htu to which they belong, however!
these_nodes <- these_nodes[!problems %in% problem_ancestors];
problems <- problems[!problems %in% problem_ancestors]; # remove sampled ancestors for now
starting_points <- final_pos[notu+these_nodes]; # positions of ancestral nodes/taxa
adjust2 <- adjust <- starting_points-too_many_here[tmh];
adjust2[adjust<0]<- -(length(adjust[adjust<0]):1);
adjust2[adjust>0]<- 1:length(adjust[adjust>0]);
final_pos[final_pos<too_many_here[tmh]] <- final_pos[final_pos<too_many_here[tmh]]+min(adjust2);
final_pos[final_pos>too_many_here[tmh]] <- final_pos[final_pos>too_many_here[tmh]]+max(adjust2);
phy_pos[phy_pos<phy_pos[problems[1]]] <- phy_pos[phy_pos<phy_pos[problems[1]]]+min(adjust2);
phy_pos[phy_pos>phy_pos[problems[1]]] <- phy_pos[phy_pos>phy_pos[problems[1]]]+max(adjust2);
final_pos[problems] <- final_pos[problems]+adjust;
phy_pos[problems] <- phy_pos[problems]+adjust2;
final_pos[problem_ancestors] <- final_pos[problem_ancestors_htu];
phy_pos[problem_ancestors] <- phy_pos[problem_ancestors_htu];
too_many_here <- too_many_here+max(adjust2);
}
final_pos <- match(phy_pos,sort(unique(phy_pos)));
needed_edits <- hist(final_pos,breaks=0:max(final_pos),plot=F)$counts
too_many_here <- (1:length(needed_edits))[needed_edits>2]
}
return(final_pos);
}
# otu_cols <- rep("blue",notu)
# vtree <- vector tree in which each number gives the node from which a species evolved
# strat_ranges <- first and last appearance times of taxa
# durations <- originations and extinctions of taxa
# apos <- rep(1,length(divergence_times_1))
draw_calibrated_phylogeny_vertical <- function(vtree,strat_ranges,durations,apos,oldest=NULL,youngest=NULL,taxon_labels="",otu_cols,lazarus_col="gray50",branching_col="black") {
# draws a vertically oriented, time-calibrated phylogeny onto an already configured plot
# vtree: vector tree in which each number gives the node from which a species evolved
# strat_ranges: first and last appearance times of taxa (notu x 2)
# durations: originations and extinctions of taxa (rows for otus and htus)
# apos: apomorphy information used to identify possible sampled ancestors
# oldest/youngest: optional time bounds; oldest defaults to the oldest origination
notu <- nrow(strat_ranges)
svtree <- cbind(rank(vtree),vtree)
otu_order <- order(vtree[1:notu])
sampled_ancestors <- accersi_poss_ancestors_for_nodes(vtree,FA=strat_ranges[,1],apos)
if (is.null(oldest)) {
oldest <- min(durations)
}
phy_x <- vector(length=length(vtree))
# FIXED: compare only taxon_labels[1]; using the whole vector inside && is an error in R >= 4.3
if ((taxon_labels[1]!="numbers" && taxon_labels[1]!="Numbers") && taxon_labels[1]!="") {
# NOTE(review): mxy & mny are not defined in this function; presumably plot limits from the caller's environment — confirm
y_adj <- (mxy-mny)/50
x_adj <- -(notu+1)/37.5
} else {
y_adj <- x_adj <- 0;
}
# draw observed taxa: ghost ranges for sampled ancestors, unsampled ("lazarus") spans, & stratigraphic ranges
for (n in 1:notu) {
phy_x[n] <- match(n,otu_order)
if (!is.na(match(n,sampled_ancestors))) {
segments(phy_x[n],durations[n,2],phy_x[n],strat_ranges[n,1],col=branching_col,lwd=4)
segments(phy_x[n],durations[n,2],phy_x[n],strat_ranges[n,1],col=otu_cols[n],lwd=2)
}
segments(phy_x[n],durations[n,1],phy_x[n],durations[n,2],col=lazarus_col,lwd=4)
if (strat_ranges[n,1]!=strat_ranges[n,2]) {
rect((phy_x[n]-0.25),strat_ranges[n,1],(phy_x[n]+0.25),min(youngest,strat_ranges[n,2]),col=otu_cols[n])
} else {
# single find: draw a bar rather than a box
segments((phy_x[n]-0.25),strat_ranges[n,1],(phy_x[n]+0.25),strat_ranges[n,1],lwd=4)
segments((phy_x[n]-0.25),strat_ranges[n,1],(phy_x[n]+0.25),strat_ranges[n,1],lwd=2,col=otu_cols[n])
}
if (taxon_labels[1]=="numbers" || taxon_labels[1]=="Numbers") {
text(phy_x[n],strat_ranges[n,2],n,pos=3)
} else if (!is.na(taxon_labels[1])) {
text(phy_x[n]+x_adj,strat_ranges[n,2]+y_adj,taxon_labels[n],srt=90,pos=4)
}
}
# now place hypothetical ancestors (htus) and connect them to their daughters
Nnode <- max(vtree) - notu
mtree <- transform_vector_tree_to_matrix_tree(vtree)
for (nn in Nnode:1) {
n <- notu+nn
if (sampled_ancestors[n]==0) {
phy_x[n] <- mean(phy_x[mtree[nn,]])
segments(min(phy_x[mtree[nn,]]),durations[n,2],max(phy_x[mtree[nn,]]),durations[n,2],lwd=1)
segments(phy_x[n],durations[n,1],phy_x[n],durations[n,2],col=lazarus_col,lwd=4)
} else {
phy_x[n] <- phy_x[sampled_ancestors[n]]
# FIXED: exclude the node's sampled ancestor (indexed by htu n, not node row nn) & any 0 padding,
#   matching the sibling horizontal/flex routines
f1 <- mtree[nn,!mtree[nn,] %in% c(0,sampled_ancestors[n])]
for (f in 1:length(f1)) {
segments(phy_x[f1[f]],durations[f1[f],1],phy_x[sampled_ancestors[n]],durations[f1[f],1],lwd=1)
}
segments(phy_x[n],durations[n,2],phy_x[n],durations[n,1],lwd=4,col=lazarus_col)
}
}
}
#draw_calibrated_phylogeny_horizontal <- function(vtree,finds,durations,apomorphies,oldest=NA,youngest=NA,taxon_labels=NA,otu_cols,lazarus_col="gray50",branching_col="black",plot_stratigraphy="ranges",new_plot=F,xsize=4,ysize=6) {
draw_calibrated_phylogeny_horizontal <- function(vtree,finds,durations,apomorphies,taxon_labels=NA,otu_cols,lazarus_col="gray50",branching_col="black",plot_stratigraphy="ranges",new_plot=F,xsize=4,ysize=6) {
# draws a horizontally oriented, time-calibrated phylogeny onto an already configured plot (or a new one)
# vtree: vector tree; -1 marks the base of the tree
# finds: per-taxon find dates (0 = no find); durations: originations & extinctions
# apomorphies: apomorphy information used to identify possible sampled ancestors
# returns the phylogeny-axis positions of all otus & htus
notu <- match(-1,vtree)-1;
durations <- -abs(durations); # work in negative time so "older" plots farther from zero
finds <- -abs(finds);
if (plot_stratigraphy=="ranges") {
strat_ranges <- data.frame(FAD=as.numeric(rep(0,notu)),LAD=as.numeric(rep(0,notu)),stringsAsFactors = F);
for (n in 1:notu) {
if (sum(finds[n,]!=0)>0) {
strat_ranges$FAD[n] <- min(finds[n,]);
strat_ranges$LAD[n] <- max(finds[n,finds[n,]!=0]);
}
}
}
# FIXED: sampled_ancestors & mtree were used below but never computed (cf. draw_calibrated_phylogeny)
sampled_ancestors <- accersi_poss_ancestors_for_nodes(vtree=vtree,FA=durations[,1],apos=apomorphies);
mtree <- transform_vector_tree_to_matrix_tree(vtree);
# FIXED: plot bounds are needed for adj_y even when new_plot=FALSE, so compute them unconditionally
mxx <- -abs(0.5*ceiling(max(durations)/0.5));
mnx <- -abs(0.5*ceiling(min(durations)/0.5));
if (new_plot) {
par(pin=c(ysize,xsize));
plot(NA,type='n',axes=FALSE,main="",xlab="",ylab="",xlim=c(mnx,mxx),ylim=c(1,notu))
}
phy_y <- center_budding_phylogeny(vtree,durations,sampled_ancestors)
adj_y <- (mxx-mnx)*0.005; # nudge branching lines off the exact divergence dates
y_adj <- x_adj <- 0; # FIXED: were undefined but used when drawing taxon labels
nNode <- max(vtree) - notu;
for (nn in nNode:1) {
n <- notu+nn;
f1 <- mtree[nn,!mtree[nn,] %in% c(0,sampled_ancestors[n])];
for (f in 1:length(f1))
segments(durations[f1[f],1]-adj_y,phy_y[f1[f]],durations[f1[f],1]-adj_y,phy_y[n],lwd=1);
}
n <- 0;
while (n < notu) {
n <- n+1;
segments(durations[n,1],phy_y[n],durations[n,2],phy_y[n],col=lazarus_col,lwd=3);
if (plot_stratigraphy=="ranges") {
if (strat_ranges[n,1]!=strat_ranges[n,2]) {
# FIXED: clamp ranges to the plot's young edge (max_no was undefined; mxx mirrors "youngest" in the vertical version)
rect(strat_ranges[n,1],(phy_y[n]-0.25),min(mxx,strat_ranges[n,2]),(phy_y[n]+0.25),col=otu_cols[n])
} else if (strat_ranges[n,1]!=0) {
points(strat_ranges[n,1],phy_y[n],pch=22,cex=1,bg=otu_cols[n]);
}
} else if (plot_stratigraphy=="points") {
these_finds <- finds[n,finds[n,]!=0];
tf <- 0;
while (tf < length(these_finds)) {
tf <- 1+tf;
points(these_finds[tf],phy_y[n],pch=21,cex=1,bg=otu_cols[n])
}
}
# FIXED: test only the first element; is.na() on the whole vector breaks if() for multi-label input
if (!is.na(taxon_labels[1])) {
if (taxon_labels[1]=="numbers" || taxon_labels[1]=="Numbers") {
# NOTE(review): coordinates look transposed for a horizontal plot (x=phy_y, y=duration) — confirm intent
text(phy_y[n],durations[n,2],n,pos=3)
} else {
text(phy_y[n]+x_adj,durations[n,2]+y_adj,taxon_labels[n],srt=90,pos=4)
}
}
}
# FIXED: atu_order was never defined; return the computed phylogeny-axis positions instead
return(phy_y);
}
draw_calibrated_phylogeny_flex <- function(vtree,finds,durations,apos,orientation="vertical",oldest=NA,youngest=NA,taxon_labels=NA,otu_cols,lazarus_col="gray50",branching_col="black",plot_stratigraphy="ranges",new_plot=F,xsize=4,ysize=6) {
# draws a time-calibrated phylogeny in either orientation onto an already configured plot (or a new one)
# vtree: vector tree (-1 marks the base); finds: find dates per taxon (0 = none)
# durations: originations & extinctions; apos: apomorphy information for sampled-ancestor detection
# returns the phylogeny-axis positions of all otus & htus
notu <- match(-1,vtree)-1;
durations <- -abs(durations); # negative time: older values plot farther from zero
finds <- -abs(finds);
if (plot_stratigraphy=="ranges") {
strat_ranges <- data.frame(FAD=as.numeric(rep(0,notu)),LAD=as.numeric(rep(0,notu)),stringsAsFactors = F);
for (n in 1:notu) {
if (sum(finds[n,]!=0)>0) {
strat_ranges$FAD[n] <- min(finds[n,]);
strat_ranges$LAD[n] <- max(finds[n,finds[n,]!=0]);
}
}
}
# FIXED: sampled_ancestors & mtree were used below but never computed (cf. draw_calibrated_phylogeny)
sampled_ancestors <- accersi_poss_ancestors_for_nodes(vtree=vtree,FA=durations[,1],apos=apos);
mtree <- transform_vector_tree_to_matrix_tree(vtree);
phy_z <- center_budding_phylogeny(vtree,durations,sampled_ancestors);
# FIXED: plot bounds are needed for adj_z even when new_plot=FALSE, so compute them unconditionally
mxz <- -abs(0.5*ceiling(max(durations)/0.5));
mnz <- -abs(0.5*ceiling(min(durations)/0.5));
if (new_plot) {
if (orientation=="vertical") {
par(pin=c(min(ysize,xsize),max(ysize,xsize)));
# NOTE(review): two back-to-back plot() calls reproduce the original behavior; the first (axes=FALSE) is immediately overplotted
plot(NA,type='n',axes=FALSE,main="",xlab="",ylab="",ylim=c(mnz,mxz),xlim=c(1,max(phy_z)));
plot(NA,type='n',axes=T,main="",xlab="",ylab="",ylim=c(mnz,mxz),xlim=c(1,max(phy_z)));
} else {
par(pin=c(max(ysize,xsize),min(ysize,xsize)));
plot(NA,type='n',axes=FALSE,main="",xlab="",ylab="",xlim=c(mnz,mxz),ylim=c(1,max(phy_z)));
plot(NA,type='n',axes=T,main="",xlab="",ylab="",xlim=c(mnz,mxz),ylim=c(1,max(phy_z)));
}
}
adj_z <- (mxz-mnz)*0.0025; # nudge branching lines off the exact divergence dates
y_adj <- x_adj <- 0; # FIXED: were undefined but used when drawing taxon labels
nNode <- max(vtree) - notu;
for (nn in nNode:1) {
n <- notu+nn;
f1 <- mtree[nn,!mtree[nn,] %in% c(0,sampled_ancestors[n])];
for (f in 1:length(f1)) {
if (orientation=="horizontal") {
segments(durations[f1[f],1]-adj_z,phy_z[f1[f]],durations[f1[f],1]-adj_z,phy_z[n],lwd=1);
} else {
segments(phy_z[f1[f]],durations[f1[f],1]-adj_z,phy_z[n],durations[f1[f],1]-adj_z,lwd=1);
}
}
}
n <- 0;
while (n < notu) {
n <- n+1;
if (orientation=="horizontal") {
segments(durations[n,1],phy_z[n],durations[n,2],phy_z[n],col=lazarus_col,lwd=3);
} else {
segments(phy_z[n],durations[n,1],phy_z[n],durations[n,2],col=lazarus_col,lwd=3);
}
if (plot_stratigraphy=="ranges") {
# NOTE(review): range boxes/points use horizontal coordinates regardless of orientation — confirm intent
if (strat_ranges[n,1]!=strat_ranges[n,2]) {
rect(strat_ranges[n,1],(phy_z[n]-0.25),min(youngest,strat_ranges[n,2]),(phy_z[n]+0.25),col=otu_cols[n])
} else if (strat_ranges[n,1]!=0) {
points(strat_ranges[n,1],phy_z[n],pch=22,cex=1,bg=otu_cols[n]);
}
} else if (plot_stratigraphy=="points") {
these_finds <- finds[n,finds[n,]!=0];
tf <- 0;
while (tf < length(these_finds)) {
tf <- 1+tf;
points(these_finds[tf],phy_z[n],pch=21,cex=1,bg=otu_cols[n])
}
}
# FIXED: test only the first element; is.na() on a vector inside if() fails for multi-label input
if (!is.na(taxon_labels[1])) {
if (taxon_labels[1]=="numbers" || taxon_labels[1]=="Numbers") {
text(phy_z[n],durations[n,2],n,pos=3)
} else {
text(phy_z[n]+x_adj,durations[n,2]+y_adj,taxon_labels[n],srt=90,pos=4)
}
}
}
# FIXED: atu_order was never defined; return the computed phylogeny-axis positions instead
return(phy_z);
}
# modified 2020-05-11
draw_calibrated_phylogeny <- function(vector_tree,finds,durations,phylo_axis,apomorphies,orientation="vertical",taxon_labels="",otu_cols,lazarus_col="gray50",plot_stratigraphy="no",branching_col="black",lineage_lwd=4,branching_lwd=2,new_plot=F,height=4,width=6,taxon_cex=0.5) {
# working as of 2019-07-10
# draws phylogeny onto an already configured plot or makes a new one
# vector_tree: vector giving the node (htu) number to which each taxon or node is attached; -1 signifies the base of the tree.
# phylo_axis: precomputed axis position of every otu & htu (e.g. from get_phylo_axis_from_newick_string)
# plot_stratigraphy: default is "no"; if "range" or "ranges", it plots those
notu <- match(-1,vector_tree)-1;
durations <- -abs(durations); # negative time so that older is farther from zero
## add routine to add nodal ranges if they are not present
finds <- -abs(finds);
if (plot_stratigraphy=="ranges" || plot_stratigraphy=="range") {
strat_ranges <- data.frame(FAD=as.numeric(rep(0,notu)),LAD=as.numeric(rep(0,notu)),stringsAsFactors = F);
for (n in 1:notu) {
if (sum(finds[n,]!=0)>0) {
strat_ranges$FAD[n] <- min(finds[n,finds[n,]!=0]);
strat_ranges$LAD[n] <- max(finds[n,finds[n,]!=0]);
}
}
}
# get ancestral species that obviate "ghost taxon" nodes
sampled_ancestors <- accersi_poss_ancestors_for_nodes(vtree=vector_tree,FA=durations[,1],apos=apomorphies);
if (length(otu_cols)==1)
otu_cols <- rep(otu_cols,notu);
# FIXED: mxz & mnz are needed below for adj_z even when new_plot=FALSE, so compute them unconditionally
mxz <- -abs(0.5*ceiling(min(abs(durations))/0.5));
mnz <- -abs(0.5*ceiling(max(abs(durations))/0.5));
if (new_plot) {
# choose tick spacing appropriate to the total time span
if (abs(mxz-mnz) <= 25) {
maj_break <- 1;
} else if (abs(mxz-mnz) <= 50) {
maj_break <- 5;
} else if (abs(mxz-mnz) <= 200) {
maj_break <- 10;
} else {
maj_break <- 25;
}
med_break <- maj_break/2;
min_break <- maj_break/10;
if (orientation=="vertical") {
par(pin=c(width,height));
if (taxon_labels[1]=="") {
plot(NA,type='n',axes=FALSE,main="",xlab="",ylab="Time",ylim=c(mnz,mxz),xlim=c(1,max(phylo_axis)));
} else {
adj_y <- mxz + (mxz - mnz)/10; # leave headroom for the taxon labels
plot(NA,type='n',axes=FALSE,main="",xlab="",ylab="Time",ylim=c(mnz,adj_y),xlim=c(1,max(phylo_axis)));
}
specified_axis(axe=2,max_val=mxz,min_val=mnz,maj_break,med_break,min_break,linewd=4/3,orient=2,print_label=TRUE);
} else {
par(pin=c(height,width));
plot(NA,type='n',axes=FALSE,main="",xlab="Time",ylab="",xlim=c(mnz,mxz),ylim=c(1,max(phylo_axis)));
specified_axis(axe=1,max_val=mxz,min_val=mnz,maj_break,med_break,min_break,linewd=4/3,orient=1,print_label=TRUE);
}
}
adj_z <- (mxz-mnz)*0.0025; # nudge branching lines off the exact divergence dates
nNode <- max(vector_tree) - notu;
mtree <- transform_vector_tree_to_matrix_tree(vector_tree);
nn <- nNode+1;
gotcha <- vector(length=length(vector_tree)); # marks lineages already drawn so they are not re-stroked
while (nn > 1) {
# draw lines from ancestor to descendants, most derived nodes first
nn <- nn-1;
n <- notu+nn;
fo <- mtree[nn,!mtree[nn,] %in% 0];
f1 <- mtree[nn,!mtree[nn,] %in% c(0,sampled_ancestors[n])];
# NOTE(review): assumes durations has columns named FAD & LAD — confirm with callers
if (sampled_ancestors[n]!=0) {
durations$LAD[n] <- durations$LAD[sampled_ancestors[n]];
}
durations$LAD[n] <- max(c(durations$LAD[n],durations$FAD[f1]));
if (orientation=="horizontal") {
segments(durations[n,1],phylo_axis[n],durations[n,2],phylo_axis[n],lwd=lineage_lwd,col=branching_col);
if (sampled_ancestors[n]==0) {
segments(durations[n,1],phylo_axis[n],durations[n,2],phylo_axis[n],lwd=0.75*lineage_lwd,col=lazarus_col);
gotcha[n] <- 1;
}
} else {
segments(phylo_axis[n],durations[n,1],phylo_axis[n],durations[n,2],lwd=lineage_lwd,col=branching_col);
if (sampled_ancestors[n]==0) {
segments(phylo_axis[n],durations[n,1],phylo_axis[n],durations[n,2],lwd=0.75*lineage_lwd,col=lazarus_col);
gotcha[n] <- 1;
}
}
for (f in 1:length(f1)) {
if (orientation=="horizontal") {
segments(durations[f1[f],1]-adj_z,phylo_axis[f1[f]],durations[f1[f],1]-adj_z,phylo_axis[n],lwd=branching_lwd,col=branching_col);
if (gotcha[f1[f]]==0)
segments(durations[f1[f],1],phylo_axis[f1[f]],durations[f1[f],2],phylo_axis[f1[f]],lwd=lineage_lwd,col=branching_col);
} else {
segments(phylo_axis[f1[f]],durations[f1[f],1]-adj_z,phylo_axis[n],durations[f1[f],1]-adj_z,lwd=branching_lwd,col=branching_col);
if (gotcha[f1[f]]==0)
segments(phylo_axis[f1[f]],durations[f1[f],1],phylo_axis[f1[f]],durations[f1[f],2],lwd=lineage_lwd,col=branching_col);
}
gotcha[f1[f]] <- 1;
}
}
# overstroke the observed taxa in their own colors & add labels
for (n in 1:notu) {
if (orientation=="horizontal") {
segments(durations[n,1],phylo_axis[n],durations[n,2],phylo_axis[n],lwd=0.75*lineage_lwd,col=otu_cols[n]);
if (taxon_labels[1]!="") {
text(durations[n,2],phylo_axis[n],taxon_labels[n],pos=3);
}
} else {
segments(phylo_axis[n],durations[n,1],phylo_axis[n],durations[n,2],lwd=0.75*lineage_lwd,col=otu_cols[n]);
if (taxon_labels[1]!="") {
text(phylo_axis[n],durations[n,2],taxon_labels[n],pos=3,cex=taxon_cex);
}
}
}
}
draw_calibrated_phylogeny_new <- function(vtree,finds,durations,phylo_axis,apomorphies,orientation="vertical",taxon_labels="",otu_cols,lazarus_col="gray50",plot_stratigraphy=F,branching_col="black",lineage_lwd=4,branching_lwd=2,new_plot=F,height=4,width=6,max_age=NULL) {
# working as of 2019-07-10
# draws phylogeny onto an already configured plot or makes a new one
# vtree: vector giving the node (htu) number to which each taxon or node is attached; -1 signifies the base of the tree.
# plot_stratigraphy: logical; if TRUE, observed stratigraphic ranges are used
notu <- match(-1,vtree)-1;
# NOTE(review): only finds is negated here (and only when durations$onset is positive); durations is assumed already negative — confirm
if (sum(durations$onset)>0)
finds <- -abs(finds);
# FIXED: strat_ranges is needed below for sampled-ancestor detection, so compute it unconditionally
strat_ranges <- data.frame(FAD=as.numeric(rep(0,notu)),LAD=as.numeric(rep(0,notu)),stringsAsFactors = F);
for (n in 1:notu) {
if (sum(finds[n,]!=0)>0) {
strat_ranges$FAD[n] <- min(finds[n,finds[n,]!=0]);
strat_ranges$LAD[n] <- max(finds[n,finds[n,]!=0]);
}
}
# get ancestral species that obviate "ghost taxon" nodes
# FIXED: spell out $FAD (was $FA, which only worked through partial matching)
sampled_ancestors <- accersi_poss_ancestors_for_nodes(vtree=vtree,FA=strat_ranges$FAD,apos=apomorphies);
if (length(otu_cols)==1)
otu_cols <- rep(otu_cols,notu);
# FIXED: mxz & mnz are needed below for adj_z even when new_plot=FALSE, so compute them unconditionally
mxz <- -abs(0.5*ceiling(min(abs(durations))/0.5));
if (is.null(max_age)) {
mnz <- -abs(0.5*ceiling(max(abs(durations))/0.5));
} else {
mnz <- -abs(0.5*ceiling(max_age)/0.5);
}
if (new_plot) {
# choose tick spacing appropriate to the total time span
if (abs(mxz-mnz) <= 25) {
maj_break <- 1;
} else if (abs(mxz-mnz) <= 50) {
maj_break <- 5;
} else if (abs(mxz-mnz) <= 200) {
maj_break <- 10;
} else {
maj_break <- 25;
}
med_break <- maj_break/2;
min_break <- maj_break/10;
if (orientation=="vertical") {
par(pin=c(width,height));
plot(NA,type='n',axes=FALSE,main="",xlab="",ylab="Time",ylim=c(mnz,mxz),xlim=c(1,max(phylo_axis)));
specified_axis(axe=2,max_val=mxz,min_val=mnz,maj_break,med_break,min_break,linewd=4/3,orient=2,print_label=TRUE);
} else {
par(pin=c(height,width));
plot(NA,type='n',axes=FALSE,main="",xlab="Time",ylab="",xlim=c(mnz,mxz),ylim=c(1,max(phylo_axis)));
specified_axis(axe=1,max_val=mxz,min_val=mnz,maj_break,med_break,min_break,linewd=4/3,orient=1,print_label=TRUE);
}
}
adj_z <- (mxz-mnz)*0.0025; # nudge branching lines off the exact divergence dates
nNode <- max(vtree) - notu;
mtree <- transform_vector_tree_to_matrix_tree(vtree);
gotcha <- vector(length=length(vtree)); # marks lineages already drawn so they are not re-stroked
for (nn in nNode:1) {
# draw lines from ancestor to descendants, most derived nodes first
n <- notu+nn;
if (orientation=="horizontal") {
segments(durations[n,1],phylo_axis[n],durations[n,2],phylo_axis[n],lwd=lineage_lwd,col=branching_col);
if (sampled_ancestors[n]==0) {
segments(durations[n,1],phylo_axis[n],durations[n,2],phylo_axis[n],lwd=0.75*lineage_lwd,col=lazarus_col);
gotcha[n] <- 1;
}
} else {
segments(phylo_axis[n],durations[n,1],phylo_axis[n],durations[n,2],lwd=lineage_lwd,col=branching_col);
if (sampled_ancestors[n]==0) {
segments(phylo_axis[n],durations[n,1],phylo_axis[n],durations[n,2],lwd=0.75*lineage_lwd,col=lazarus_col);
gotcha[n] <- 1;
}
}
f1 <- mtree[nn,!mtree[nn,] %in% c(0,sampled_ancestors[n])];
for (f in 1:length(f1)) {
if (orientation=="horizontal") {
segments(durations[f1[f],1]-adj_z,phylo_axis[f1[f]],durations[f1[f],1]-adj_z,phylo_axis[n],lwd=branching_lwd,col=branching_col);
if (gotcha[f1[f]]==0)
segments(durations[f1[f],1],phylo_axis[f1[f]],durations[f1[f],2],phylo_axis[f1[f]],lwd=lineage_lwd,col=branching_col);
} else {
segments(phylo_axis[f1[f]],durations[f1[f],1]-adj_z,phylo_axis[n],durations[f1[f],1]-adj_z,lwd=branching_lwd,col=branching_col);
if (gotcha[f1[f]]==0)
segments(phylo_axis[f1[f]],durations[f1[f],1],phylo_axis[f1[f]],durations[f1[f],2],lwd=lineage_lwd,col=branching_col);
}
gotcha[f1[f]] <- 1;
}
}
# overstroke the observed taxa in their own colors
for (n in 1:notu) {
if (orientation=="horizontal") {
segments(durations[n,1],phylo_axis[n],durations[n,2],phylo_axis[n],lwd=0.75*lineage_lwd,col=otu_cols[n]);
} else {
segments(phylo_axis[n],durations[n,1],phylo_axis[n],durations[n,2],lwd=0.75*lineage_lwd,col=otu_cols[n]);
}
}
}
#vtree_old <- vtree <- rangeomorph_vtree;
plot_calibrated_phylogeny <- function(vtree,strat_ranges,durations,apos,oldest=NULL,youngest=NULL,taxon_labels="",otu_cols,xsize=3.5,ysize=3.5) {
# sets up a new plot and draws a vertically oriented, time-calibrated phylogeny onto it
# vtree: vector tree; strat_ranges: first & last appearances; durations: originations & extinctions
# apos: apomorphy information used to identify possible sampled ancestors
# oldest/youngest: optional y-axis (time) bounds; defaulted from the data when NULL
#vtree <- ladderize_vector_tree(vtree);
venn_tree <- transform_vector_tree_to_venn_tree(vtree)
mtree <- transform_venn_tree_to_matrix_tree(venn_tree)
notu <- dim(strat_ranges)[1]
svtree <- cbind(rank(vtree),vtree)
ordinate <- "Ma"
otu_order <- order(vtree[1:notu])
sampled_ancestors <- accersi_poss_ancestors_for_nodes(vtree,FA=strat_ranges[,1],apos)
# set up Y-axis (time) scale
# FIXED: NULL oldest/youngest were passed straight to plot() & the axis routines (length-0 ylim error);
#   default them from the data instead.  (Also removed dead ax_min/ax_max/y_add computations.)
if (is.null(oldest)) {
oldest <- min(durations)
}
if (is.null(youngest)) {
youngest <- max(strat_ranges)
}
par(pin=c(xsize,ysize))
plot(NA,type='n',axes=FALSE,xlab="",ylab=ordinate,xlim=c(0,notu+1),ylim=c(oldest,youngest));
tick_tock <- set_axis_breaks(max_no = youngest,min_no=oldest);
specified_axis(axe=2,max_val=youngest,min_val=oldest,maj_break=tick_tock$maj_break,med_break=tick_tock$med_break,min_break=tick_tock$min_break,linewd=4/3,orient=1,print_label=TRUE);
phy_x <- vector(length=length(vtree));
if ((taxon_labels[1]!="numbers" && taxon_labels[1]!="Numbers") && taxon_labels[1]!="") {
y_adj <- (youngest-oldest)/50;
x_adj <- -(notu+1)/37.5
}
# draw the observed taxa
for (n in 1:notu) {
phy_x[n] <- match(n,otu_order);
if (!is.na(match(n,sampled_ancestors))) {
# ghost range between a sampled ancestor's divergence & its first appearance
segments(phy_x[n],durations[n,2],phy_x[n],strat_ranges[n,1],col="black",lwd=4);
ghost_col <- paste(otu_cols[n],"4",sep="");
segments(phy_x[n],durations[n,2],phy_x[n],strat_ranges[n,1],col=ghost_col,lwd=2);
}
segments(phy_x[n],durations[n,1],phy_x[n],durations[n,2],col="gray50",lwd=4)
if (strat_ranges[n,1]!=strat_ranges[n,2]) {
rect((phy_x[n]-0.25),strat_ranges[n,1],(phy_x[n]+0.25),strat_ranges[n,2],col=otu_cols[n])
} else {
# single find: draw a bar rather than a box
segments((phy_x[n]-0.25),strat_ranges[n,1],(phy_x[n]+0.25),strat_ranges[n,1],lwd=4);
segments((phy_x[n]-0.25),strat_ranges[n,1],(phy_x[n]+0.25),strat_ranges[n,1],lwd=2,col=otu_cols[n])
}
if (taxon_labels[1]=="numbers" || taxon_labels[1]=="Numbers") {
text(phy_x[n],strat_ranges[n,2],n,pos=3)
} else if (taxon_labels[1]!="") {
text(phy_x[n]+x_adj,strat_ranges[n,2]+y_adj,taxon_labels[n],srt=90,pos=4)
}
}
# now place & connect the hypothetical ancestors, most derived first
Nnode <- max(vtree) - notu;
nn <- Nnode+1;
while (nn>1) {
nn <- nn-1;
n <- notu+nn;
f1 <- mtree[nn,mtree[nn,]>0];
if (sampled_ancestors[n]==0) {
phy_x[n] <- mean(phy_x[f1])
# NOTE(review): assumes durations has a column named "end" — confirm
segments(min(phy_x[f1]),durations$end[n],max(phy_x[f1]),durations$end[n],lwd=1)
segments(phy_x[n],durations[n,1],phy_x[n],durations[n,2],col="gray50",lwd=4)
} else {
phy_x[n] <- phy_x[sampled_ancestors[n]];
f1a <- f1[f1!=sampled_ancestors[n]];
for (f in 1:length(f1a)) {
segments(phy_x[f1a[f]],durations[f1a[f],1],phy_x[sampled_ancestors[n]],durations[f1a[f],1],lwd=1)
}
segments(phy_x[n],durations[n,2],phy_x[n],durations[n,1],lwd=4,col="gray50")
}
}
}
get_phylo_axis_from_newick_string <- function(newick_string,sampled_ancestors,root_low=T) {
# returns plotting-axis positions for every otu & htu given a numbered-taxon newick string
# sampled_ancestors: 1 for otus identified as sampled ancestors of their node, 0 otherwise
# root_low: if TRUE, flip the axis so that root-ward taxa sit at the low end
# tokenize the string so that tip numbers come out in left-to-right plot order
prepped <- gsub(")","),",newick_string,fixed=TRUE);
prepped <- gsub(",,","),",prepped,fixed=TRUE);
tokens <- simplify2array(strsplit(prepped,split=",")[1])[,1];
tokens <- gsub("(","",tokens,fixed=TRUE);
tokens <- gsub(")","",tokens,fixed=TRUE);
tokens <- as.numeric(tokens[!tokens %in% c("",";")]);
otu_order <- match(1:max(tokens),tokens);
notu <- length(otu_order);
cladogram <- read_newick_string(newick_string);
mat_tree <- transform_vector_tree_to_matrix_tree(vector_tree=cladogram);
nNodes <- nrow(mat_tree);
if (root_low) {
# flip the tip order when the last-listed taxon attaches near the root
last_otu <- (1:notu)[match(max(otu_order),otu_order)];
end_node <- which(mat_tree==last_otu,arr.ind=T)[,1];
if (end_node < (nNodes/2)) {
otu_order <- notu-otu_order;
}
} else {
otu_order <- otu_order-min(otu_order);
}
phylo_axis <- c(otu_order,rep(0,nNodes));
anc_flags <- c(sampled_ancestors,rep(0,nNodes));
# place nodes from the most derived back to the root so daughters exist before their ancestors
nd <- nNodes;
while (nd>0) {
htu <- nd+notu;
daughters <- mat_tree[nd,mat_tree[nd,]>0];
if (sum(anc_flags[daughters])>0) {
# a sampled ancestor sits on this node: the node inherits its position
phylo_axis[htu] <- otu_order[daughters[anc_flags[daughters]==1][1]];
} else {
phylo_axis[htu] <- mean(phylo_axis[daughters]);
}
nd <- nd-1;
}
return(phylo_axis);
}
get_phylo_axis_from_newick_string_w_anagenesis <- function(newick_string,sampled_ancestors,anagenetic_ancestors=0,root_low=T) {
# returns plotting-axis positions for every otu & htu given a numbered-taxon newick string,
#   collapsing anagenetic ancestors onto the positions of their descendants
# newick_string: newick string in which every taxon appears as a number
# sampled_ancestors: 1 for otus identified as sampled ancestors of their node, 0 otherwise
# anagenetic_ancestors: 1 for otus that are anagenetic ancestors; scalar 0 means "none"
# root_low: if TRUE, orient the tree so that root-ward taxa sit at the low end of the axis
if (length(anagenetic_ancestors)==1)
anagenetic_ancestors <- rep(0,length(sampled_ancestors));
# tokenize the string so that tip numbers come out in left-to-right plot order
newick_string_new <- gsub(")","),",newick_string);
newick_string_new <- gsub(",,","),",newick_string_new);
otu_order_string <- simplify2array(strsplit(newick_string_new,split=",")[1])[,1]
otu_order_string <- gsub("\\(","",otu_order_string);
otu_order_string <- gsub("\\)","",otu_order_string);
otu_order_string <- otu_order_string[!otu_order_string %in% c("",";")];
otu_order_string <- as.numeric(otu_order_string);
otu_order <- match(1:max(otu_order_string),otu_order_string);
notu <- length(otu_order);
cladogram <- read_newick_string(newick_string);
mat_tree <- transform_vector_tree_to_matrix_tree(vector_tree=cladogram);
nNodes <- nrow(mat_tree);
if (root_low) {
# flip the tip order when the last-listed taxon attaches near the root
max_tax <- (1:notu)[match(max(otu_order),otu_order)];
end_node <- which(mat_tree==max_tax,arr.ind=T)[,1];
if (end_node < (nNodes/2)) {
otu_order <- notu-otu_order
}
} else {
otu_order <- otu_order-min(otu_order)
}
# observed_nodes[nd] holds the anagenetic otu whose position node nd should inherit
observed_nodes <- rep(0,nNodes);
if (sum(anagenetic_ancestors)>0) {
anas <- (1:notu)[anagenetic_ancestors==1];
names(anas) <- names(anagenetic_ancestors)[anas];
# for (an in 1:length(anas)) {
an <- 0;
while (an < length(anas)) {
an <- an+1;
# locate the node containing this anagenetic ancestor & collect its sister units
acells <- which(mat_tree==anas[an],arr.ind = T);
f1 <- mat_tree[acells[1],mat_tree[acells[1],]!=anas[an]];
f1 <- f1[f1<=notu]; # reduce to just species
if (length(f1)>0) {
# sister species exist: stack them onto the ancestor's position
otu_order[f1] <- otu_order[anas[an]];
} else {
# sisters are all nodes: mark the first daughter node as "observed" at this ancestor
f1 <- mat_tree[acells[1],mat_tree[acells[1],]!=anas[an]];
observed_nodes[f1[1]-notu] <-anas[an];
for (ff in 1:length(f1)) {
f2 <- mat_tree[f1[ff]-notu,mat_tree[f1[ff]-notu,]>0];
if (sum(sampled_ancestors[f2[f2<=notu]])>0) {
# pass the position down to the daughter node's own sampled ancestor
next_anc <-f2[sampled_ancestors[f2]==1][1];
otu_order[next_anc] <- otu_order[anas[an]];
} else if (sum(f2<=notu)>0) {
# if ancestral to node
if (root_low) {
otu_order[anas[an]] <- min(otu_order[f2[f2<=notu]])+0.5;
} else {
otu_order[anas[an]] <- min(otu_order[f2[f2<=notu]])-0.5;
}
}
}
}
}
# compact positions onto consecutive integers starting at 0
min_ord <- min(otu_order);
otu_order <- match(otu_order,sort(unique(otu_order)));
otu_order <- otu_order-min(otu_order);
}
#hist(otu_order,breaks=-1:max(otu_order))
#hist(otu_order,breaks=sort(c(min(otu_order)-1,unique(otu_order))))
phylo_axis <- c(otu_order,rep(0,nNodes));
relv_ancestors <- c(sampled_ancestors,rep(0,nNodes));
# place nodes from the most derived back to the root so daughters exist before their ancestors
for (nd in nNodes:1) {
htu <- nd+notu;
f1 <- mat_tree[nd,mat_tree[nd,]>0];
if (sum(relv_ancestors[f1])>0) {
# print(nd);
# a sampled ancestor sits on this node: the node inherits its position
obs_anc <- f1[relv_ancestors[f1]==1][1];
# if (anagenetic_ancestors[obs_anc]==1) {
# }
phylo_axis[htu] <- otu_order[obs_anc];
} else if (observed_nodes[nd]!=0) {
# node inherits the position of its anagenetic ancestor recorded above
phylo_axis[htu] <- phylo_axis[observed_nodes[nd]];
} else {
phylo_axis[htu] <- mean(phylo_axis[mat_tree[nd,mat_tree[nd,]>0]])
}
}
return(phylo_axis);
}
### This needs work!!!!
get_phylo_axis_from_vector_tree <- function(vector_tree,sampled_ancestors,root_low=T) {
# returns plotting-axis positions for every otu & htu in a vector tree
# FIXED: the original referenced undefined variables (cladogram, xx, newick_string) and could not run;
#   this reimplementation works directly from the vector tree.
# vector_tree: element i gives the htu from which unit i descends; -1 marks the root htu (position notu+1)
# sampled_ancestors: 1 for otus identified as sampled ancestors of their node, 0 otherwise
# root_low: if FALSE, tip positions are shifted to start at 0 (mirrors the newick-string variant)
tttu <- length(vector_tree);
notu <- match(-1,vector_tree)-1;
nNodes <- tttu-notu;
# depth-first traversal from the root yields a left-to-right tip order
lineup <- c();
to_do <- notu+1; # the root htu
while (length(to_do)>0) {
unit <- to_do[1];
to_do <- to_do[-1];
if (unit<=notu) {
lineup <- c(lineup,unit);
} else {
to_do <- c((1:tttu)[vector_tree==unit],to_do);
}
}
otu_order <- match(1:notu,lineup);
if (!root_low)
otu_order <- otu_order-min(otu_order);
phylo_axis <- c(otu_order,rep(0,nNodes));
relv_ancestors <- c(sampled_ancestors,rep(0,nNodes));
# place nodes from the most derived back to the root so daughters exist before their ancestors
for (nd in nNodes:1) {
htu <- nd+notu;
f1 <- (1:tttu)[vector_tree==htu];
if (sum(relv_ancestors[f1])>0) {
# a sampled ancestor sits on this node: the node inherits its position
phylo_axis[htu] <- phylo_axis[f1[relv_ancestors[f1]==1][1]];
} else {
phylo_axis[htu] <- mean(phylo_axis[f1]);
}
}
return(phylo_axis);
}
draw_cladogram_from_newick_string <- function(newick_string) {
# draws a simple (non-calibrated) cladogram from a numbered-taxon newick string
### NOTE: This assumes that each numbered taxon is in the cladogram
### IF you have only SOME taxa in the string, then this needs to be rewritten
# tokenize the string so that tip numbers come out in left-to-right plot order
newick_string_new <- gsub(")","),",newick_string);
newick_string_new <- gsub(",,","),",newick_string_new);
otu_order_string <- simplify2array(strsplit(newick_string_new,split=",")[1])[,1]
otu_order_string <- gsub("\\(","",otu_order_string);
otu_order_string <- gsub("\\)","",otu_order_string);
otu_order_string <- otu_order_string[!otu_order_string %in% c("",";")];
otu_order_string <- as.numeric(otu_order_string);
otu_order <- match(1:max(otu_order_string),otu_order_string);
cladogram <- read_newick_string(newick_string);
need_to_be_drawn <- sort(otu_order_string);
notu <- length(need_to_be_drawn);
tttu <- length(cladogram);
nhtu <- tttu-notu;
otu_order <- c(otu_order,rep(0,nhtu));
# FIXED: removed the unused "undrawn" variable & the dead "nn <- nn-1" inside the for loop
mxy <- mxx <- notu+1;
mnx <- 0;
mny <- -mxy;
par(pin=c(4.5,4.5));
lineage_tops <- array(0,dim=tttu); # vertical position where each unit's branch ends
node_span <- array(0,dim=c(tttu,2)); # horizontal span covered by each unit's descendants
node_span[1:notu,1] <- node_span[1:notu,2] <- otu_order[1:notu];
plot(NA,type='n',axes=FALSE,main="",xlab="",ylab="",xlim=c(mnx,mxx),ylim=c(mny,mxy));
# draw each node once all of its descendants have been placed
for (nn in nhtu:1) {
node <- nn+notu;
descendants <- (1:tttu)[cladogram==(notu+nn)];
if (sum(descendants %in% need_to_be_drawn)==length(descendants)) {
node_span[node,1] <- min(node_span[descendants,1])
node_span[node,2] <- max(node_span[descendants,2])
otu_order[node] <- mean(node_span[node,]);
lineage_tops[node] <- -(node_span[node,2]-node_span[node,1]);
for (dd in 1:length(descendants)) {
segments(otu_order[descendants[dd]],lineage_tops[descendants[dd]],otu_order[node],lineage_tops[node],lwd=4);
}
need_to_be_drawn <- need_to_be_drawn[!need_to_be_drawn %in% descendants];
need_to_be_drawn <- sort(c(need_to_be_drawn,node));
}
}
}
draw_cladogram_from_vector_tree <- function(vector_tree) {
# draws a simple (non-calibrated) cladogram directly from a vector tree
# FIXED: the original body was a copy of the newick-string version and referenced the undefined
#   variable "newick_string"; this version derives everything from vector_tree itself.
# vector_tree: element i gives the htu from which unit i descends; -1 marks the root htu
### NOTE: This assumes that each numbered taxon is in the cladogram
cladogram <- vector_tree;
tttu <- length(cladogram);
notu <- match(-1,cladogram)-1;
nhtu <- tttu-notu;
# depth-first traversal from the root yields a left-to-right tip order
lineup <- c();
to_do <- notu+1; # the root htu
while (length(to_do)>0) {
unit <- to_do[1];
to_do <- to_do[-1];
if (unit<=notu) {
lineup <- c(lineup,unit);
} else {
to_do <- c((1:tttu)[cladogram==unit],to_do);
}
}
otu_order <- c(match(1:notu,lineup),rep(0,nhtu));
need_to_be_drawn <- sort(1:notu);
mxy <- mxx <- notu+1;
mnx <- 0;
mny <- -mxy;
par(pin=c(4.5,4.5));
lineage_tops <- array(0,dim=tttu); # vertical position where each unit's branch ends
node_span <- array(0,dim=c(tttu,2)); # horizontal span covered by each unit's descendants
node_span[1:notu,1] <- node_span[1:notu,2] <- otu_order[1:notu];
plot(NA,type='n',axes=FALSE,main="",xlab="",ylab="",xlim=c(mnx,mxx),ylim=c(mny,mxy));
# draw each node once all of its descendants have been placed
for (nn in nhtu:1) {
node <- nn+notu;
descendants <- (1:tttu)[cladogram==(notu+nn)];
if (sum(descendants %in% need_to_be_drawn)==length(descendants)) {
node_span[node,1] <- min(node_span[descendants,1])
node_span[node,2] <- max(node_span[descendants,2])
otu_order[node] <- mean(node_span[node,]);
lineage_tops[node] <- -(node_span[node,2]-node_span[node,1]);
for (dd in 1:length(descendants)) {
segments(otu_order[descendants[dd]],lineage_tops[descendants[dd]],otu_order[node],lineage_tops[node],lwd=4);
}
need_to_be_drawn <- need_to_be_drawn[!need_to_be_drawn %in% descendants];
need_to_be_drawn <- sort(c(need_to_be_drawn,node));
}
}
}
#### Add Image Files ####
add_png <- function(png_info, x = NULL,y = NULL,width = NULL,height = NULL,interpol = TRUE,x_cent=T,y_cent=T) {
# paints an image array (e.g. from png::readPNG) onto the current plot
# x, y: image coordinates (center by default; lower/left corner when x_cent/y_cent are FALSE)
# width/height: size in user coordinate units; supply one & the other follows from the pixel aspect ratio
# interpol: passed to graphics::rasterImage as interpolate
# x_cent/y_cent: center the image on x/y (TRUE) or anchor it there (FALSE)
if (is.null(x) | is.null(y) | (is.null(width) && is.null(height))) {
stop("Must provide args 'x', 'y', and/or 'width'")
}
usr <- par()$usr; # user-coordinate extremes c(x1,x2,y1,y2) of the plotting region
pin <- par()$pin; # current plot dimensions (width,height) in inches
px <- dim(png_info); # pixel dimensions of the image
if (!is.null(width)) {
# width given: derive height from the pixel aspect ratio (y/x)
wid_in <- width/(usr[2]-usr[1])*pin[1];
height <- (wid_in*(px[1]/px[2]))/pin[2]*(usr[4]-usr[3]);
} else {
# height given: derive width from the pixel aspect ratio (x/y)
hei_in <- height/(usr[4]-usr[3])*pin[2];
width <- (hei_in*(px[2]/px[1]))/pin[1]*(usr[2]-usr[1]);
}
if (!x_cent) x <- x+width/2;
if (!y_cent) y <- y+height/2;
rasterImage(image=png_info,xleft=x-(width/2),xright=x+(width/2),ybottom=y-(height/2),ytop=y+(height/2),interpolate=interpol);
}
add_jpeg <- function(jpg_file,x,y,width=NULL,height=NULL) {
# reads a jpeg from file and paints it onto the current plot with (x,y) as the lower-left corner
# width/height: size in user coordinate units; if only one is given the other preserves the
#   pixel aspect ratio; if neither is given the raw pixel counts are used as user units
require('jpeg'); # NOTE(review): non-standard dependency, loaded here as in the original
jpg <- readJPEG(jpg_file, native=T); # read the file
res <- dim(jpg)[2:1]; # image resolution c(x,y) in pixels
if (is.null(width) && is.null(height)) {
width <- res[1];
height <- res[2];
} else if (is.null(width)) {
width <- height*(res[1]/res[2]); # preserve aspect ratio
} else if (is.null(height)) {
height <- width*(res[2]/res[1]); # preserve aspect ratio
}
# FIXED: the original drew the image twice (once inside the height branch with mismatched
#   coordinates & once unconditionally with absolute pixel coordinates); draw exactly once,
#   anchored at (x,y)
rasterImage(jpg,x,y,x+width,y+height);
}
add_Img <- function(obj, x = NULL,y = NULL,width = NULL,height = NULL,interpol = TRUE) {
	# Draw a raster image centered on (x, y), preserving its pixel aspect ratio.
	# obj: an image imported as an array/raster (e.g. png::readPNG, jpeg::readJPEG)
	# x, y: mid coordinates for the image (user units)
	# width: width of image in x-axis user units; height is derived from it
	# height: height of image in y-axis user units; used only when width is NULL
	# interpol: passed to graphics::rasterImage as 'interpolate'
	# Fixes vs original: scalar '||' instead of vector '|' in the guard, and an
	# error message that mentions 'height' as an alternative to 'width'.
	if (is.null(x) || is.null(y) || (is.null(width) && is.null(height))) {
		stop("Must provide args 'x', 'y', and either 'width' or 'height'");
	}
	USR <- par()$usr; # plotting-region extremes c(x1, x2, y1, y2) in user units
	PIN <- par()$pin; # current plot dimensions c(width, height) in inches
	DIM <- dim(obj); # image pixel dimensions (rows = y, cols = x)
	if (!is.null(width)) {
		# derive height (user units) from width so the pixel aspect is kept
		asp_yx <- DIM[1]/DIM[2]; # pixel aspect ratio (y/x)
		wid_in <- width/(USR[2]-USR[1])*PIN[1]; # width in inches
		hei_in <- wid_in * asp_yx; # height in inches
		height <- hei_in/PIN[2]*(USR[4]-USR[3]); # height in user units
	} else {
		# derive width (user units) from height
		hei_in <- height/(USR[4]-USR[3])*PIN[2]; # height in inches
		asp_xy <- DIM[2]/DIM[1]; # pixel aspect ratio (x/y)
		wid_in <- hei_in * asp_xy; # width in inches
		width <- wid_in/PIN[1]*(USR[2]-USR[1]); # width in user units
	}
	rasterImage(image = obj,
		xleft = x-(width/2), xright = x+(width/2),
		ybottom = y-(height/2), ytop = y+(height/2),
		interpolate = interpol);
}
| /R/General_Plot_Templates.r | no_license | PeterJWagner3/Inverse_Modelling_with_Compatibility | R | false | false | 152,676 | r | # accersi: fetch/summon
# divido: divide!
# expello: banish
# mundus: clean
# percursant: scour
# revelare: reveal
#### General Graphic Stuff ####
# Default height of the time-axis strip in inches (4.285714285 = 30/7);
# presumably matches the hard-coded ysize defaults of the timescale plotting
# routines below -- TODO confirm.
default_minor_time_axis_size <- 4.285714285;
make_font_Arial <- function() {
	# Register an "arial" font family for the macOS quartz device.
	# Faces are (plain, bold, italic, bold-italic). The original listed
	# "Avenir-BoldItalic" for the bold-italic face, which looks like a typo
	# for "Arial-BoldItalic" given the other three faces -- corrected here.
	quartzFonts(arial = c("Arial","Arial-Bold","Arial-Italic","Arial-BoldItalic"))
}
makeTransparent <- function(someColor, alpha=100) {
	# Return the input color(s) as "#RRGGBBAA" hex strings with the given
	# alpha (0-255 opacity), one string per input color.
	# Bug fix: the original had a stray "X = " before the anonymous function
	# body, which is a parse error inside a call (only a symbol or string may
	# precede '=' in an argument list).
	newColor <- col2rgb(someColor)
	apply(newColor, 2, function(curcoldata) {
		rgb(red=curcoldata[1], green=curcoldata[2], blue=curcoldata[3],
			alpha=alpha, maxColorValue=255)
	})
}
shape_size_rosetta_stone <- function() {
	# Reference table relating cex settings (rows: 1.5, 1.0, 2/3) to the
	# measured dimensions of the plotting symbols for each shape family.
	cex_sizes <- c(1.5, 1.0, 2/3);
	data.frame(size = cex_sizes,
		circles = c(8.11, 5.4, 3.6),
		squares = c(7.18, 4.79, 3.19),
		triangles = c(10.905, 7.27, 4.846667),
		diamonds = c(10.15, 6.765, 4.51));
}
accersi_cex_size_for_shape <- function(cex_size,shape_pch) {
	# Rescale cex for a plotting symbol so that shapes of different pch cover
	# roughly the same plotted area as a square symbol drawn at cex_size.
	# cex_size: cex of the reference square; shape_pch: target symbol's pch.
	# Returns the adjusted cex (unchanged for squares / unrecognized pch).
	ref_sizes <- shape_size_rosetta_stone();
	circle_pchs <- c(1, 8, 10, 16, 19, 20, 21);
	square_pchs <- c(0, 3, 4, 7, 12, 13, 14, 15, 22);
	triangle_pchs <- c(2, 6, 11, 17, 24, 25);
	diamond_pchs <- c(5, 9, 18, 23);
	# target area: a square of side squares[2]*cex_size (row 2 = cex 1.0)
	area_to_match <- (ref_sizes$squares[2]*cex_size)^2;
	if (shape_pch %in% circle_pchs) {
		# circle diameter that yields the same area
		new_cex <- 2*sqrt(area_to_match/pi)/ref_sizes$circles[2];
	} else if (shape_pch %in% triangle_pchs) {
		base_len <- ref_sizes$triangles[2];
		equilateral_area <- sqrt(3)*(base_len^2)/4;
		new_cex <- sqrt(area_to_match/equilateral_area);
	} else if (shape_pch %in% diamond_pchs) {
		diag_len <- ref_sizes$diamonds[2];
		diamond_area <- (diag_len^2)/2;
		new_cex <- sqrt(area_to_match/diamond_area);
	} else {
		# squares (and any pch not listed above) keep the caller's cex
		new_cex <- cex_size;
	}
	return(new_cex);
}
#### Stratigraphic Figures ####
draw_time_scale <- function(time_scale,onset,end,strat_colors="",strat_names="",line_color="black",ord,height,strat_label_size,axis=1) {
	# Draw a colored, labeled time-scale bar between y=ord and y=height on an
	# existing plot.
	# time_scale: interval boundaries (Ma); sign-flipped to match a negative onset
	# onset, end: x-range of the bar
	# strat_colors, strat_names: per-interval fills and labels; "" (the default)
	#   means white fill / no labels
	# ord, height: lower and upper y of the bar; strat_label_size: label cex
	# axis: unused; kept for interface compatibility
	if (onset[1]<0 && time_scale[1]>0)
		time_scale <- -1*time_scale;
	first_period <- max(1, sum(abs(time_scale)>=abs(onset)));
	# strict '>' so an end landing exactly on a boundary does not index one bin
	# past the scale (the original '>=' read time_scale[lst+1] = NA)
	last_period <- min(length(time_scale)-1, sum(abs(time_scale)>abs(end)));
	# default ""/empty -> plain white boxes; the original passed col="" through,
	# which is an invalid color, and its white fallback was one element short
	if (length(strat_colors)==0 || all(strat_colors==""))
		strat_colors <- rep("white", last_period);
	for (b in first_period:last_period)
		rect(max(onset,time_scale[b]),ord,min(end,time_scale[b+1]),height,col=as.character(strat_colors[b]),border=as.character(strat_colors[b]))
	for (s in first_period:last_period) segments(time_scale[s],ord,time_scale[s],height,lwd=0.5,col=line_color)
	# put in decisive final line when end coincides with a boundary
	if (!is.na(match(end,time_scale))) segments(end,ord,end,height,col=line_color);
	segments(onset,ord,end,ord,col=line_color,lwd=2)
	# args now in the conventional x0,y0,x1,y1 order (original interleaved them)
	segments(onset,height,end,height,col=line_color,lwd=2)
	if (length(strat_names)>0 && !all(strat_names=="")) {
		mids <- labs_y <- c();
		for (i in first_period:last_period) {
			mids <- c(mids,(max(onset,time_scale[i])+min(end,time_scale[(i+1)]))/2)
			labs_y <- c(labs_y,(height+ord)/2);
		}
		# labels indexed by bin number; positions follow the loop order (the
		# original mis-indexed the midpoints when first_period > 1)
		text(mids,labs_y,label=strat_names[first_period:last_period],cex=strat_label_size);
	}
}
# use this for simulated expectations (Supplementary figures)
Phanerozoic_Timescale_Plot <- function(onset=-541, end=0, time_scale, mxy, mny, ordinate, xsize=6, ysize=4.285714285, hues=FALSE, alt_back=FALSE, main_time_tick=100, strat_names=c("Cm","O","S","D","C","P","Tr","J","K","Pg","Ng")) {
	# Set up an empty plot over [onset, end] Ma with a period strip along the
	# base and three tiers of time ticks.
	# time_scale: period boundaries (one more than the number of periods)
	# mxy, mny: y-axis limits; ordinate: y-axis label
	# hues: fill the strip with Phanerozoic_Period_Colors()
	# alt_back: if not FALSE, a color used to shade alternate periods
	# main_time_tick: major tick spacing in Myr (the original accepted but
	#   ignored this parameter; it is now honored)
	# strat_names: strip labels, one per period; default = the abbreviations
	#   the original hard-coded (new parameter, backward compatible)
	if (time_scale[1]>0) time_scale <- -1*time_scale;
	if (onset>0) onset <- -1*onset;
	if (end>0) end <- -1*end;
	lst <- length(time_scale)
	par(pin=c(xsize,ysize));
	plot(NA,type='n',axes=FALSE,main="",xlab="",ylab=ordinate,xlim=c(onset,end),ylim=c(mny,mxy));
	# labels come directly from the major tick positions; the original built a
	# separate 'ages' vector whose length could mismatch the tick count when
	# end was not a multiple of the tick spacing
	major <- seq(main_time_tick*ceiling(onset/main_time_tick),end,by=main_time_tick);
	axis(1,at=major,tcl=-0.3,labels=abs(major),lwd=0,lwd.ticks=4/3)
	med <- seq(main_time_tick*ceiling(onset/main_time_tick),end,by=main_time_tick/2);
	axis(1,at=med[!med %in% major],tcl=-0.2,labels=FALSE,lwd=0,lwd.ticks=4/3)
	minor <- seq((main_time_tick/10)*round(onset/(main_time_tick/10)),end,by=main_time_tick/10);
	axis(1,at=minor[!minor %in% med],tcl=-0.1,labels=FALSE,lwd=0,lwd.ticks=4/3)
	exy <- (mxy-mny)/25
	if (alt_back!=FALSE) {
		for (b in 1:(lst-1)) {
			if (b%%2==0) {
				rect(time_scale[b],(mny+exy),time_scale[b+1],mxy,col=alt_back,border=alt_back)
			}
		}
	}
	if (hues) {
		per_colors <- Phanerozoic_Period_Colors();
		for (b in 1:(lst-1)) {
			rect(time_scale[b],(mny+exy),time_scale[b+1],(mny-exy),col=per_colors[b],border=per_colors[b])
		}
	}
	for (s in 1:lst) segments(max(onset,time_scale[s]),mny+exy,max(onset,time_scale[s]),mny-exy)
	segments(onset,mny+exy,end,mny+exy,lwd=2)
	segments(onset,mny-exy,end,mny-exy,lwd=2)
	# midpoints of the plotted periods, clipped at onset
	mid <- (pmax(onset,time_scale[-lst])+time_scale[-1])/2;
	# y positions sized to the number of periods (original hard-coded 11 of each)
	text(mid,y=rep(mny,lst-1),label=strat_names[seq_len(lst-1)],cex=0.9)
}
Phanerozoic_Timescale_Plot_Partial <- function(onset=-541, end=0, time_scale, mxy, mny, ordinate, stage_names, xsize=6, ysize=4.285714285, hues=FALSE, alt_back=FALSE, main_time_tick=100) {
	# Set up an empty plot over [onset, end] Ma with a stage strip along the
	# base and three tiers of time ticks.
	# onset, end: plotted window (positive inputs are negated)
	# time_scale: stage boundaries; clipped to the window
	# mxy, mny: y-axis limits; ordinate: y-axis label
	# stage_names: labels for the stage boxes
	# hues: color each stage box via infer_interval_color()
	# alt_back: if not FALSE, a color used to shade alternate stages
	# main_time_tick: major tick spacing (Myr); medium = half, minor = tenth
	if (time_scale[1] > 0) time_scale <- -1*time_scale;
	if (onset > 0) onset <- -1*onset;
	if (end > 0) end <- -1*end;
	# keep only boundaries inside the plotted window
	time_scale <- time_scale[time_scale >= onset & time_scale <= end];
	lst <- length(time_scale);
	par(pin=c(xsize, ysize));
	# tick positions in positive Ma: major tier, then medium and minor tiers
	# with positions already claimed by a coarser tier removed
	ages1 <- seq(main_time_tick*floor(abs(onset)/main_time_tick), main_time_tick*ceiling(abs(end)/main_time_tick), by=-1*abs(main_time_tick));
	med_tick <- main_time_tick/2;
	ages2 <- setdiff(seq(med_tick*floor(abs(onset)/med_tick), med_tick*ceiling(abs(end)/med_tick), by=-1*abs(med_tick)), ages1);
	minor_tick <- main_time_tick/10;
	ages3 <- setdiff(seq(minor_tick*floor(abs(onset)/minor_tick), minor_tick*ceiling(abs(end)/minor_tick), by=-1*abs(minor_tick)), c(ages1, ages2));
	exy <- (mxy-mny)/25;
	plot(NA, type='n', axes=FALSE, main="", xlab="", ylab=ordinate, xlim=c(onset,end), ylim=c(mny,mxy));
	if (hues) {
		for (b in seq_len(lst-1)) {
			stage_col <- infer_interval_color(abs(time_scale[b]), abs(time_scale[b+1]));
			rect(time_scale[b], (mny+exy), time_scale[b+1], (mny-exy), col=stage_col, border=stage_col);
		}
	}
	# boundary lines and the top/bottom edges of the stage strip
	segments(time_scale, mny+exy, time_scale, mny-exy);
	segments(onset, mny+exy, end, mny+exy, lwd=2);
	segments(onset, mny-exy, end, mny-exy, lwd=2);
	axis(1, at=-1*ages1, tcl=-0.3, labels=ages1, lwd=0, lwd.ticks=4/3);
	axis(1, at=-1*ages2, tcl=-0.2, labels=FALSE, lwd=0, lwd.ticks=4/3);
	axis(1, at=-1*ages3, tcl=-0.1, labels=FALSE, lwd=0, lwd.ticks=4/3);
	if (alt_back != FALSE) {
		for (b in seq_len(lst-1)) {
			if (b %% 2 == 0) {
				rect(time_scale[b], (mny+exy), time_scale[b+1], mxy, col=alt_back, border=alt_back);
			}
		}
	}
	stage_mids <- (time_scale[-lst] + time_scale[-1])/2;
	text(stage_mids, mny, labels=stage_names[seq_len(lst-1)], cex=0.9);
}
Phanerozoic_Timescale_Plot_Flexible <- function(onset=-541,end=0,time_scale_to_plot,mxy,mny,use_strat_labels=TRUE,strat_names,strat_colors,plot_title="",ordinate="",abscissa="Ma",yearbreaks,xsize=6,ysize=4.285714285,hues=TRUE,colored="base",alt_back=FALSE,alt_back_hue="gray90",strat_label_size=1) {
	# Set up a plot over [onset, end] Ma with a colored stratigraphic bar
	# below the axis line and one tick tier per entry in yearbreaks.
	# time_scale_to_plot: bin boundaries (one more than the number of bins)
	# mxy, mny: y-axis limits; use_strat_labels: label the bins
	# strat_names, strat_colors: per-bin labels and fills
	# yearbreaks: tick spacings from finest to coarsest, e.g. c(10,50,100)
	# colored: "base" colors only the strip; otherwise the whole background
	# alt_back: TRUE shades alternate bins with alt_back_hue
	if (time_scale_to_plot[1]>0) time_scale_to_plot <- -1*time_scale_to_plot;
	if (onset>0) onset <- -1*onset;
	if (end>0) end <- -1*end;
	lst <- length(time_scale_to_plot)
	# bins overlapping [onset, end]; max(1,...) guards an onset older than the scale
	first_period <- max(1, sum(time_scale_to_plot<=onset));
	# strict '<' so an end on a bin boundary does not index past the last bin
	# (the original '<=' read time_scale_to_plot[lst+1] = NA)
	last_period <- min(lst-1, sum(time_scale_to_plot<end));
	draws <- length(yearbreaks)
	exy <- (mxy-mny)/25;
	par(pin=c(xsize,ysize));
	plot(NA,type='n',axes=FALSE,main=plot_title,xlab=abscissa,ylab=ordinate,xlim=c(onset,end),ylim=c(mny-exy,mxy))
	if (max(time_scale_to_plot)<end)
		end <- max(time_scale_to_plot);
	# tick lengths per tier; -0.3*i/draws reproduces the original hard-coded
	# draws==2 (-0.15,-0.30) and draws==3 (-0.1,-0.2,-0.3) cases and also
	# works for any other number of tiers (the original errored there)
	pts <- -0.3*seq_len(draws)/draws;
	for (i in 1:draws) {
		on <- yearbreaks[i]*ceiling(onset/yearbreaks[i]);
		en <- yearbreaks[i]*floor(end/yearbreaks[i]);
		tcs <- seq(on,en,by=yearbreaks[i]);
		if (i < draws) {
			# drop ticks that coincide with the next (coarser) tier
			on2 <- yearbreaks[i+1]*ceiling(onset/yearbreaks[i+1]);
			en2 <- yearbreaks[i+1]*floor(end/yearbreaks[i+1]);
			tcs <- tcs[!tcs %in% seq(on2,en2,by=yearbreaks[i+1])];
			axis(1,at=tcs,tcl=pts[i],labels=FALSE,lwd=0,lwd.ticks=4/3);
		} else {
			# coarsest tier carries the labels (as positive Ma)
			axis(1,at=tcs,tcl=pts[i],labels=abs(tcs),lwd=0.0,lwd.ticks=4/3);
		}
	}
	if (alt_back && colored!="backdrop")
		for (b in first_period:last_period)
			if (b %% 2 == 0) # replaces gtools::even(): drops the gtools dependency
				rect(max(onset,time_scale_to_plot[b]),mny,min(end,time_scale_to_plot[b+1]),mxy,col=alt_back_hue,border=alt_back_hue)
	if (hues) {
		if (colored=="base") {
			# colored boxes only in the strip below the axis line
			for (b in first_period:last_period)
				rect(max(onset,time_scale_to_plot[b]),mny,min(end,time_scale_to_plot[b+1]),(mny-2*exy),col=as.character(strat_colors[b]),border=as.character(strat_colors[b]),lwd=0);
		} else {
			# wash the whole plotting area, then fade it with translucent white
			for (b in first_period:last_period)
				rect(max(onset,time_scale_to_plot[b]),mny,min(end,time_scale_to_plot[b+1]),mxy,col=strat_colors[b],border=strat_colors[b],lwd=0);
			rect(onset,mny+exy,end,mxy,col=makeTransparent("white",50),border=makeTransparent("white",50))
		}
	}
	# lines separating bins within the strip
	for (s in first_period:last_period) segments(time_scale_to_plot[s],mny,time_scale_to_plot[s],mny-2*exy,lwd=0.5,col="gray25")
	# decisive final line when end coincides with a bin boundary
	if (!is.na(match(end,time_scale_to_plot))) segments(end,mny,end,mny-2*exy);
	segments(onset,mny,end,mny,lwd=2)
	segments(onset,mny-2*exy,end,mny-2*exy,lwd=2)
	if (use_strat_labels) {
		# size labels so names fit the narrowest bin
		thin_bin <- min(abs(time_scale_to_plot[2:lst]-time_scale_to_plot[1:(lst-1)])/abs(onset-end));
		mid <- (pmax(onset,time_scale_to_plot[-lst])+pmin(end,time_scale_to_plot[-1]))/2;
		font_size <- min(2,thin_bin*xsize*strat_label_size);
		text(mid[first_period:last_period],rep(mny-exy,last_period-first_period+1),label=strat_names[first_period:last_period],cex=font_size);
	}
}
Phanerozoic_Timescale_Plot_Flexible_Ordinate <- function(onset=-541,end=0,time_scale_to_plot,mxx,mnx,use_strat_labels=TRUE,strat_names,strat_colors,plot_title="",abscissa="",ordinate="Ma",yearbreaks,ysize=6,xsize=4.285714285,hues=TRUE,colored="base",alt_back=FALSE,alt_back_hue="gray90",strat_label_size=1,stage_box_width=0.025) {
	# Vertical-time variant of Phanerozoic_Timescale_Plot_Flexible: time runs
	# along the y-axis; the stage boxes occupy x in [mnx-exx, 0].
	# mxx, mnx: x-axis limits; stage_box_width: stage-box width as a fraction
	# of the x-range; other arguments as in the x-axis version.
	if (time_scale_to_plot[1]>0) time_scale_to_plot <- -1*time_scale_to_plot;
	if (onset>0) onset <- -1*onset;
	if (end>0) end <- -1*end;
	lst <- length(time_scale_to_plot)
	# bins overlapping [onset, end]; guards as in the x-axis version
	first_period <- max(1, sum(time_scale_to_plot<=onset));
	# strict '<' so an end on a bin boundary does not index past the last bin
	last_period <- min(lst-1, sum(time_scale_to_plot<end));
	draws <- length(yearbreaks)
	exx <- stage_box_width*(mxx-mnx);
	par(pin=c(xsize,ysize));
	plot(NA,type='n',axes=FALSE,main=plot_title,ylab=ordinate,xlab=abscissa,ylim=c(onset,end),xlim=c(mnx-exx,mxx),xaxs="i",yaxs="i");
	# tick lengths per tier; generalizes the original draws==2/3 hard-coding
	pts <- -0.3*seq_len(draws)/draws;
	for (i in 1:draws) {
		on <- yearbreaks[i]*ceiling(onset/yearbreaks[i]);
		en <- yearbreaks[i]*floor(end/yearbreaks[i]);
		tcs <- seq(on,en,by=yearbreaks[i]);
		if (i < draws) {
			# drop ticks that coincide with the next (coarser) tier
			on2 <- yearbreaks[i+1]*ceiling(onset/yearbreaks[i+1]);
			en2 <- yearbreaks[i+1]*floor(end/yearbreaks[i+1]);
			tcs <- tcs[!tcs %in% seq(on2,en2,by=yearbreaks[i+1])]
			axis(2,at=tcs,tcl=pts[i],labels=FALSE,lwd=0,lwd.ticks=4/3);
		} else {
			axis(2,at=tcs,tcl=pts[i],labels=abs(tcs),lwd=0.0,lwd.ticks=4/3,las=2)
		}
	}
	if (alt_back && colored!="backdrop")
		for (b in first_period:last_period)
			if (b %% 2 == 0) # replaces gtools::even()
				# orientation fix: times belong on the y-coordinates here (the
				# original passed them as x, carried over from the x-axis version)
				rect(mnx,max(onset,time_scale_to_plot[b]),mxx,min(end,time_scale_to_plot[b+1]),col=alt_back_hue,border=alt_back_hue)
	if (hues) {
		if (colored=="base") {
			# colored stage boxes in the strip left of the axis line
			for (b in first_period:last_period)
				rect(0,max(onset,time_scale_to_plot[b]),(mnx-exx),min(end,time_scale_to_plot[b+1]),col=as.character(strat_colors[b]),border=as.character(strat_colors[b]));
		} else {
			# wash the whole plotting area, then fade it with translucent white
			for (b in first_period:last_period)
				rect(mnx,max(onset,time_scale_to_plot[b]),mxx,min(end,time_scale_to_plot[b+1]),col=strat_colors[b],border=strat_colors[b])
			rect(mnx+exx,onset,mxx,end,col=makeTransparent("white",50),border=makeTransparent("white",50))
		}
	}
	# lines separating bins within the strip
	for (s in first_period:last_period) segments(0,time_scale_to_plot[s],mnx-exx,time_scale_to_plot[s],lwd=0.5,col="gray25");
	# decisive final line when end coincides with a bin boundary
	if (!is.na(match(end,time_scale_to_plot))) segments(0,end,mnx-exx,end);
	segments(0,onset,0,end,lwd=2);
	axis(2,at=seq(onset,end,by=abs(onset-end)),tcl=0.0,labels=FALSE,lwd=2);
	if (use_strat_labels) {
		# size labels so names fit the narrowest bin
		thin_bin <- min(abs(time_scale_to_plot[2:lst]-time_scale_to_plot[1:(lst-1)])/abs(onset-end));
		mid <- (pmax(onset,time_scale_to_plot[-lst])+pmin(end,time_scale_to_plot[-1]))/2;
		# NOTE(review): sizing uses xsize although time runs along ysize here;
		# preserved from the original -- confirm intended.
		text(rep((mnx-exx)/2,last_period-first_period+1),mid[first_period:last_period],label=strat_names[first_period:last_period],cex=min(2,thin_bin*xsize*strat_label_size));
	}
}
#### routines to get IGS colors for geological ages
Phanerozoic_Period_Colors <- function(Carboniferous=TRUE) {
# Return a named character vector of IGS/CGMW hex colors for Phanerozoic periods.
#   Carboniferous: if TRUE, use a single "Carboniferous" entry (11 periods);
#                  if FALSE, split into "Mississippian" & "Pennsylvanian" (12 periods).
# Returns: character vector named by period, ordered oldest (Cambrian) to youngest.
# Fixes vs. original: the FALSE branch set names(period_col) before period_col
# existed (a runtime error), and the Paleogene/Neogene comments were swapped.
if (Carboniferous) {
period_col <- character(11)
names(period_col) <- c("Cambrian","Ordovician","Silurian","Devonian","Carboniferous","Permian","Triassic","Jurassic","Cretaceous","Paleogene","Neogene")
period_col[5] <- "#67A599" # Carboniferous
period_col[6] <- "#F04028" # Permian
period_col[7] <- "#812B92" # Triassic
period_col[8] <- "#34B2C9" # Jurassic
period_col[9] <- "#7FC64E" # Cretaceous
period_col[10] <- "#F4B470" # Paleogene (slot 10 is Paleogene per names above)
period_col[11] <- "#FFE619" # Neogene (ICS Neogene yellow)
} else {
# Create the vector BEFORE assigning its names (original did the reverse and errored).
period_col <- character(12)
names(period_col) <- c("Cambrian","Ordovician","Silurian","Devonian","Mississippian","Pennsylvanian","Permian","Triassic","Jurassic","Cretaceous","Paleogene","Neogene")
period_col[5] <- "#678F66" # Mississippian
period_col[6] <- "#99C2B5" # Pennsylvanian
period_col[7] <- "#F04028" # Permian
period_col[8] <- "#812B92" # Triassic
period_col[9] <- "#34B2C9" # Jurassic
period_col[10] <- "#7FC64E" # Cretaceous
period_col[11] <- "#F4B470" # Paleogene
period_col[12] <- "#FFE619" # Neogene
}
# Colors common to both branches: the four oldest Paleozoic periods.
period_col[1] <- "#7FA056" # Cambrian
period_col[2] <- "#009270" # Ordovician
period_col[3] <- "#B3E1B6" # Silurian
period_col[4] <- "#CB8C37" # Devonian
return(period_col)
}
Phanerozoic_Interval_Colors <- function() {
# Return a 3x2 character matrix pairing each Phanerozoic era with its hex color.
# Column 1: era name; column 2: hex color. Row names match the original
# construction ("interval_colors" for the first row, "" for the rest).
rbind(interval_colors = c("Cenozoic", "#F2F91D"),
      c("Mesozoic", "#67C5CA"),
      c("Paleozoic", "#99C08D"))
}
accersi_time_scale_color <- function() {
interval <- c("Eoarchean","Archean","Precambrian","Precambrian-Cambrian","Precambrian-Paleozoic","Precambrian-Phanerozoic","Paleoarchean","Mesoarchean","Kenoran","Neoarchean","Siderian","Aphebian","Paleoproterozoic","Proterozoic","Proterozoic-Cambrian","Proterozoic-Paleozoic","Rhyacian","Orosirian","Hudsonian","Statherian","Calymmian","Paleohelikian","Mesoproterozoic","Elsonian","Ectasian","Neohelikian","Stenian","Tonian","Neoproterozoic","Neoproterozoic-Cambrian","Neoproterozoic-Paleozoic","Cryogenian","Hadrynian");
interval <- c(interval,"Ediacaran","Fortunian","Terreneuvian","Begadean","Early Cambrian","Waucoban","Cambrian","Cambrian-Ordovician","Paleozoic","Phanerozoic","Stage 2","Wyattia","Fritzaspis","Stage 3","Epoch 2","Series 2","Fallotaspis","Montezuman","Nevadella","Olenellus","Dyeran","Stage 4","Middle Cambrian","Albertan","Eokochaspis nodosa","Delamaran","Amecephalus arrojosensis","Plagiura-Poliella","Wuliuan","Stage 5","Epoch 3","Series 3","Albertella","Ptychagnostus praecurrens","Topazan","Ptychagnostus gibbus","Drumian","Bolaspidella","Marjuman","Dresbachian","Late Cambrian","Guzhangian","Cedaria","Crepicephalus","Aphelaspis","Paibian","Steptoean","Furongian","Franconian","Dunderbergia","Jiangshanian","Elvinia","Taenicephalus","Sunwaptan","Pseudoyuepingia asaphoides","Trempealeauan","Ellipsocephaloides-Idahoia","Saukiella junia/Saukiella pyrene","Nelegerian","Stage 10","Saukiella serotina","Eurekia apopsis","Missisquoia","Skullrockian","Symphysurina brevispicata","Symphysurina bulbosa","Iapetognathus fluctivagus","Tremadocian","Early Ordovician","Ordovician","Ordovician-Silurian","Cordylodus angulatus","Rossodus manitouensis","Macerodus dianae","Stairsian","Acodus delatus/Oneotodus costatus","Tulean","Floian","Arenigian","Oepikodus communis","Blackhillsian","Reutterodus andinus","Microzarkodina flabellum/Tripodus laevus","Dapingian","Middle Ordovician","Whiterock","Histiodella altifrons","Histiodella sinuosa","Darriwilian","Llanvirnian","Histiodella holodentata","Chazyan","Phragmodus polonicus","Cahabagnathus friendsvillensis","Llandeilo","Cahabagnathus sweeti","Blackriverian","Sandbian","Caradocian","Late Ordovician","Plectodina aculeata","Rocklandian-Kirkfield","Rocklandian-Shermanian","Rocklandian","Mohawkian","Erismodus quadridactylus","Kirkfield","Belodina compressa","Shermanian","Phragmodus undatus","Edenian-Maysvillian","Plectodina tenuis","Katian","Belodina confluens","Edenian","Oulodus velicuspis","Maysvillian","Richmondian-Hirnantian","Oulodus 
robustus","Aphelognathus grandis","Richmondian","Ashgill","Aphelognathus divergens","Aphelognathus shatzeri","Hirnantian","Gamachian","Distomodus kentuckyensis","Rhuddanian","Alexandrian","Llandovery","Early Silurian","Silurian","Silurian-Devonian","Aspelunda expansa","Aeronian","Pterospathodus tenuis","Distomodus staurognathoides","Telychian","Niagaran","Pterospathodus eopennatus","Pterospathodus amorphognathoides angulatus","Pterospathodus amorphognathoides lennarti","Pterospathodus amorphognathoides lithuanicus","Pterospathodus amorphognathoides amorphognathoides","Pterospathodus pennatus procerus","Sheinwoodian","Wenlock","Kockelella ranuliformis","Ozarkodina sagitta rhenana","Kockelella walliseri","Kockelella ortus ortus","Ozarkodina sagitta sagitta","Lockportian","Homerian","Ozarkodina bohemica longa","Kockelella ortus obsidata","Kockelella crassa","Gorstian","Ludlow","Late Silurian","Kockelella variabilis variabilis","Ancoradella ploeckensis","Ludfordian","Cayugan","Polygnathoides siluricus","Ozarkodina snajdri","Ozarkodina crispa","Ozarkodina eosteinhornensis","Pridoli","Oulodus elegans detortus","Gedinnian","Lochkovian","Helderbergian","Early Devonian","Ulsterian","Devonian","Devonian-Mississippian","Caudicriodus hesperius","Caudicriodus postwoschmidti","Lanea omoalpha","Lanea eleanorae","Leanea transitans","Ancryodelloides trigonicus","Masaraella pandora morpho. Beta","Pedavis gilberti","Gondwania irregularis","Siegenian","Pragian","Gondwania kindlei","Eocostapolygnathus pireneae","Eocostapolygnathus kitabicus","Emsian","Sawkillian","Deerparkian","Eocostapolygnathus excavatus");
interval <- c(interval,"Eocostapolygnathus gronbergi","Eocostapolygnathus nothoperbonus","Polygnathus inversus","Linguipolygnathus serotinus","Polygnathus costatus patulus","Polygnathus costatus partitus","Eifelian","Southwoodian","Erian","Cazenovian","Middle Devonian","Cazenovia","Polygnathus costatus costatus","Tortodus knockelianus knockelianus","Polygnathus ensensis","Polygnathus hemiansatus","Givetian","Tioughniogan","Polygnathus varcus","Schmidtognathus hermanni","Senecan","Klapperina disparilis","Mesotaxis guanwushanensis","Fingerlakesian","Frasnian","Late Devonian","Palmatolepis transitans","Palmatolepis punctata","Palmatolepis hassi","Chemungian","Palmatolepis rhenana","Palmatolepis linguiformis","Palmatolepis triangularis","Cassadagan","Famennian","Palmatolepis crepida","Chatauquan","Palmatolepis rhomboidea","Palmatolepis marginifera","Conewangan","Palmatolepis rugosa trachytera","Palmatolepis perlobata postera","Palmatolepis gracilis expansa","Siphonodella praesulcata","Siphonodella sulcata","Kinderhookian","Tournaisian","Mississippian","Carboniferous","Siphonodella duplicata","Siphonodella sandbergi-Siphonodella belkai","Siphonodella quadruplicata-Patrognathus andersoni","Gnathodus typicus-Siphonodella isosticha","Osagean","Dollimae bouckaerti","Gnathodus semiglaber-Polygnathus communis","Gnathodus pseudosemiglaber-Scallioganthus anchoralis","Visean","Gnathodus texanus","Meramecian","Gnathodus praebillineatus","Gnathodus bilineatus","Lochriea mononodosa","Chesterian","Lochriea nodosa","Lochriea ziegleri","Serpukhovian","Namurian","Lochriea cruciformis","Gnathodus bollandensis","Gnathodus postbilineatus","Declinognathodus noduliferus","Morrowan","Bashkirian","Pennsylvanian","Pennsylvanian-Permian","Idiognathoides sinuatus","Neognathodus askynensis","Idiognathodus sinuosus","Atokan","Declinognathodus marginodosus","Neognathodus atokaensis","Declinognathodus donetzianus","Moscovian","Westphalian","Neognathodus uralicus","Streptognathodus 
dissectus","Desmoinian","Neoghanthodus medexultimus-Streptognathodus-concinnus","Neoghanthodus round-Streptognathodus cancellosus","Streptognathodus subexcelsus","Kasimovian","Stephanian","Missourian","Idiognathodus sagittalis","Streptognathodus cancellosus","Idiognathodus toretzianus","Streptognathodus firmus","Streptognathodus simulator","Gzhelian","Virgilian","Streptognathodus vitali","Streptognathodus virgilicus","Streptognathodus simplex-Streptognathodus bellus","Streptognathodus wabaunsensis","Streptognathodus isolatus","Asselian","Wolfcampian","Early Permian","Cisuralian","Permian","Streptognathodus sigmoidalis-Streptognathodus cristellaris","Streptognathodus constrictus-Mesogondolella belladontae","Streptognathodus fusus","Streptognathodus postfusus","Sweetognathus merrilli-Mesogondolella uralensis","Sakmarian","Sweetognathus binodosus","Sweetognathus anceps-Mesogondolella bisselli","Sweetognathus whitei","Artinskian","Sweetognathus clarki","Neostreptognathodus pequopensis","Leonardian","Neostreptognathodus pnevi","Kungurian","Neostreptognathodus prayi","Sweetognathus guizhouensis","Mesogondolella lamberti-Neostreptognathodus sulcoplicatus","Roadian","Ufimian","Jinogondolella nankingensis","Kazanian","Guadalupian","Wordian","Jinogondolella aserrata","Jinogondolella postserrata","Capitanian","Jinogondolella shannoni","Jinogondolella altudaensis","Jinogondolella prexuanhanensis","Jinogondolella granti","Clarkina postitteri hongshuiensis","Ochoan","Clarkina postitteri postitteri","Dzhulfian","Wuchiapingian","Tatarian","Late Permian","Lopingian","Clarkina dukouensis","Clarkina asymmetrica","Clarkina leveni","Clarkina guangyuanensis","Clarkina transcaucasica","Clarkina orientalis","Clarkina longicuspidata","Clarkina wangi","Changhsingian","Clarkina subarinata","Clarkina changxingensis","Clarkina deflecta-Clarkina yini","Clarkina zhejiangensis-Clarkina meishanensis","lower Otoceras boreale","Hindeodus parvus","upper Otoceras 
boreale","Griesbachian","Induan","Scythian","Early Triassic","Triassic","Triassic-Jurassic","Mesozoic","Ophiceras commune");
interval <- c(interval,"Isarcicella isarcica","Proptychites rosenkrantzi strigatus","Neogondollela krystyni","Sweetospathodus kummeli","Proptychites candidus","Dienerian","Neospathodus dieneri Morph 3","Vavilovites sverdrupi","Neospathodus waageni","Hedenstroemia hedenstroemi","Smithian","Olenekian","Euflemingites romunderi","Borinella buurensis-Scythogondolella milleri","Anawasatchites tardus","Bajarunia eumphala","Spathian","Neospathodus pingdingshanensis","Icriospathodus collinsoni","Olenikites pilaticus","Neospathodus triangularis","Triassospathodus sosioensis","Chiosella gondolelloides","Chiosella timorensis","Anisian","Middle Triassic","Siberlingites mulleri","Neogondolella? regalis","Lenotropites caurus","Anagymnotoceras varium","Paragondolella excelsa","Eogymnotoceras deleeni","Frechites chischa","Eoprotrachyceras matutinum","Ladinian","Budurovignathus truempyi","Tuchodiceras poseidon","Budurovignathus hungaricus","Meginoceras meginae","Macl. maclearni","Paragondolella inclinata","Frankites sutherlandi","Metapolygnathus intermedius","Daxatina canadensis","Julian","Carnian","Late Triassic","Metapolygnathus tadpole","Tachyceras desatoyense","Austrotrachyceras obesum","Sirenites nanseni","Metapolygnathus polygnathiformis","Tropites dilleri","Metapolygnathus carpathicus","Tropites welleri","Metapolygnathus nodosus","Tuvalian","Klamathites macrolobatus","Metapolygnathus primitius","Lacian","Stikinoceras kerri","Norian","Epigondolella quadrata","Malayites dawsoni","Epigondolella triangularis","Juvavites magnus","Drepanites rutherfordi","Cypriodella postera","Alaunian","Mesohimavatites columbianus","Cypriodella spiculata","Cypriodella postera","Sevatian","Cypriodella serrulata","Cypriodella bidentata","Gnomohalorites cordilleranus","Cypriodella mosheri","Chochloceras amoenum","Rhaetian","Norigondolella sp.","Choristoceras crickmayi","Misikella posternstenini","Psiloceras planorbis","Hettangian","NJ1","Early Jurassic","Jurassic","Jurassic-Cretaceous","Alsatites 
liasicus","Schlotheimia angulata","Arietites bucklandi","NJ2","Sinemurian","Arnioceras semicostatum","Caenisites turneri","Asteroceras obtusum","Oxynoticeras oxynotum","NJ3","Echinoceras raricostatum","Uptonia jamesoni","Pliensbachian","Tragophylloceras ibex","NJ4","Prodactylioceras davoei","Amaltheus margaritatus","NJ5","Pleuroceras spinatum","NJ6","Dactylioceras tenuicostatum","Toarcian","NJ7","Harpoceras falciferum","Hildoceras bifrons","Haugia variabilis","NJ8","Grammoceras thouarsense","Dumortieria levesquei","Pleydellia aalensis","Leioceras opalinum","Aalenian","Middle Jurassic","Ludwigia murchisonae","Brasilia bradfordensis","NJ9","Graphoceras concavum","Hyperlioceras discites","Bajocian","NJ10","Witchellia laeviuscula","Stephanoceras humphriesianum","Strenoceras niortense","Garantiana garantiana","Parkinsonia parkinsoni","Zigzagiceras zigzag","Bathonian","NJ11","Procerites progracilis","Tulites subcontractus","Morrisiceras morrisi","Procerites hodsoni","Oxycerites orbis","Clydoniceras discus","Macrocephalites herveyi","Callovian","Prolanulites koenigi","NJ12","Sigloceras calloviense","Kosmoceras jason","Erymnoceras coronatum","Peltoceras athleta","Quenstedtoceras lamberti","Quenstedtoceras mariae","Oxfordian","Late Jurassic","NJ13","Cardioceras cordatum","NJ14","Perisphinctes plicatilis","Perisphinctes pumilus","Perisphinctes cautisnigrae","NJ15","Ringsteadia pseudocordata","Pictonia baylei","Kimmeridgian","Rasenia cymodoce","Aulacostephanus mutabilis","Aulacostephanus eudoxus","Aulacostephanus autissiodorensis","Tithonian","Pectinatites elegans","Pectinatites scitulus","NJ16","Pectinatites wheatleyensis","Pectinatites hudlestoni","Pavlovia pallasioides","NJ17","Pavlovia rotunda","Virgatopavlovia fittoni","Progalbanites albani","Glaucolithites glaucolithus","Galbanites okusensis","Galbanites kerberus","Titanites anguiformes","Paracraspedites oppressus","Subcraspedites primitivus","Subcraspedites preplicomphalus","Portlandian","Berriasian","Early 
Cretaceous","Cretaceous","Subcraspedites lamplughi","CC1","Runctonia runctoni");
interval <- c(interval,"CC2","Hectoroceras kochi","Surites icenii","Surites stenomphalus","Peregrinoceras albidum","CC3","Valanginian","Paratollia/Platylenticeras","Polyptychites","CC4","Prodichotomites","Dichotomites","Stolcoceras tuberulatum","Eleniceras paucinodum","Endemoceras amblygonium","Endemoceras noricum","Endemoceras regale","Speetoniceras inversum","Milanowskia speetonensis","Hauterivian","Craspedodiscus gottschei","CC5","Simbirskites marginatus","Simbirskites variabilis","Paracrioceras rarocinctum","Barremian","Haplocrioceras fissicostatum","CC6","Paracrioceras elegans","Paracrioceras denckmanni","Ancyloceras inexum/S. pingue","Simanocyloceras stolleyi","Parancyloceras bidentatum/Parancyloceras scalare","CC7","Deshayesites forbesi","Aptian","Deshayesites deshayesi","Tropaeum bowerbanki","Epicheloniceras martinoides","Parahoplites nutfieldiensis","Korangan","Hypacanthoplites jacobi","Leymeriella schrammeni","Albian","CC8","Douvilleiceras mammillatum","Hoplites dentatus","Euhoplites loricatus","Urutawan","Euhoplites lautus","Diploceras cristatum","Mortoniceras pricei","Mortoniceras inflatum","Mortoniceras fallax","Motuan","CC9","Mortoniceras rostratum","Mortoniceras perinflatum","Arrhaphoceras briacensis","Cenomanian","Late Cretaceous","Cretaceous-Paleogene","Neogastroplites haasi","Mantelliceras mantelli","Ngaterian","Neogastroplites cornutus","Neogastroplites muelleri","Neogastroplites americanus","Mantelliceras dixoni","Neogastroplites maclearni","Conlinoceras tarrantense - Conlinoceras gilberti","Acanthoceras granerosense","Acanthoceras muldoonense","Acanthoceras bellense","Acanthoceras amphibolum","Pleisacanthoceras wyomingense","CC10","Dunveganoceras pondi","Dunveganoceras problematicum","Dunveganoceras albertense","Arowhanan","Dunveganoceras conditum","Sciponoceras gracile (Vasoceras diartianum)","Sciponoceras gracile (Euomphaloceras septemseriatum)","Burroceras clydense","Neocardioceras juddii","Nigericeras scotti","Watinoceras 
devonense","Pseudaspidoceras flexuosum","CC11","Turonian","Vascoceras birchbyi","Mammites nodosoides","Collingnoniceras woollgari","Collingnoniceras praecox","Mangaotanean","Prionocyclus hyatti","Prionocyclus macombi","Scaphites warreni","Scaphites whitfieldi","CC12","Scaphites nigricollensis","Prionocyclus germari","Scaphites mariasensis","Scaphites preventricosus","CC13","Coniacian","Scaphites ventricosus","Teratan","Scaphites depressus","CC14","Clioscaphites saxitonianus","Piripauan","CC15","Santonian","Clioscaphites vermiformis","Clioscaphites choteauensis","Desmoscaphites erdmanni","CC16","Desmoscaphites bassleri","CC17","Scaphites leei","Haumurian","CC18","Scaphites hippocrepis","Campanian","Scaphites hippocrepis II","Scaphites hippocrepis III","Baculites sp. (smooth)","Baculites sp. (weak flank ribs)","Baculites obtusus","CC19","Baculites maclearni","Baculites asperiformis","Baculites sp. (smooth)","Baculites perplexus","Baculites gregoryensis","CC20","Baculites reduncus","Baculites scotti","CC21","Didymoceras nebrascense","CC22","Didymoceras stevensoni","CC23","Exiteloceras jenneyi","Didymoceras cheyennense","Baculites compressus","Baculites cuneatus","Baculites reesidei","Baculites jenseni","Baculites eliasi","Baculites baculus","CC24","Maastrichtian","Baculites clinolobatus","Hoploscaphites birkelundae","CC25","Hoploscaphites nicolleti","Jeletzkytes nebrascensis","CC26","Puercan","NP1","Danian","Early Paleocene","Paleocene","Teurian","Early Tertiary","Paleogene","Tertiary","Cenozoic","Torrejonian","NP2","NP3","Tiffanian","NP4","Selandian","Middle Paleocene","Selandian-Thanetian","NP5","Thanetian","Late Paleocene","NP6","NP7","Clarkforkian","NP8","NP9","Wasatchian","Ypresian","Early Eocene","Eocene","Waipawan","NP10","NP11","Mangaorapan","NP12","Bridgerian","NP13","NP14","Heretaungan","Uintan","Lutetian","Middle Eocene","NP15","Porangan","NP16","Bortonian","Bartonian","NP17","Duchesnean","Jacksonian","Priabonian","Chadronian","Late 
Eocene","NP18","Kaiatan","Runangan","NP19-20","Whaingaroan","NP21","Orellan","Rupelian","Early Oligocene");
interval <- c(interval,"Oligocene","Middle Tertiary","NP22","NP23","Whitneyan","Geringian","NP24","Arikareean","Chattian","Late Oligocene","NP25","Duntroonian","Monroecreekian","Waitakian","Harrisonian","NN1","Aquitanian","Early Miocene","Miocene","Neogene","Late Tertiary","NN2","Otaian","Burdigalian","NN3","Altonian","Hemingfordian","NN4","Barstovian","Langhian","Middle Miocene","Clifdenian","NN5","Lillburnian","Serravallian","NN6","Clarendonian","Waiauan","NN7","Tortonian","Late Miocene","Tongaporutuan","NN8","NN9","Hemphillian","NN10","NN11","Messinian","Kapitean","NN12","Zanclean","Early Pliocene","Pliocene","Opoitian","NN13","Blancan","NN14","NN15","NN16","Waipipian","Piacenzian","Late Pliocene","Mangapanian","NN17","Gelasian","Early Pleistocene","Pleistocene","Quaternary","NN18","Nukumaruan","NN19","Calabrian","Irvingtonian","Castlecliffian","Middle Pleistocene","Ionian","NN20","Haweran","Rancholabrean","NN21","Wisconsinan","Late Pleistocene","Holocene");
interval <- c(interval,"Oandu","Rakvere","Nabala","Vormsi","Pirgu","Tremadoc");
interval <- c(interval,"Sa1","Sa2","Ka1","Ka2-3","Ka4","Stage 10");
color <- c("#DA037F","#F0047F","#F04370","#D6D6D6","#EBEBEB","#FFFFFF","#F444A9","#F768A9","#F99BC1","#F99BC1","#F74F7C","#F74370","#F74370","#F73563","#C7C7C7","#B5B5B5","#F75B89","#F76898","#F875A7","#F875A7","#FDC07A","#FDC07A","#FDB462","#F8C682","#F3CC8A","#FED99A","#FED99A","#FEBF4E","#FEB342","#A6A6A6","#B5B5B5","#FECC5C","#FECC5C");
color <- c(color,"#FED96A","#99B575","#8CB06C","#8CB06C","#8CB06C","#8CB06C","#7FA056","#409963","#99C08D","#9AD9DD","#A6BA80","#A6BA80","#A6C583","#A6C583","#99C078","#99C078","#A6C583","#A6C583","#A6C583","#99C078","#99C078","#B3CA8E","#BFD99D","#BFD99D","#B3CA8E","#BFD99D","#B3CA8E","#B3D492","#B3D492","#B3D492","#A6CF86","#A6CF86","#B3D492","#B3D492","#B3D492","#B3D492","#BFD99D","#BFD99D","#A6CF86","#D3E5B2","#B3E095","#CCDFAA","#CCDFAA","#CCDFAA","#CCEBAE","#CCEBAE","#B3E095","#B3E095","#D9EABA","#B3E095","#D9F0BB","#D9F0BB","#D9F0BB","#B3E095","#D9F0BB","#E0F0C1","#D9F0BB","#B3E095","#E6F5C9","#E6F5C9","#E6F5C9","#E6F5C9","#E6F5C9","#99C08D","#E6F5C9","#33A97E","#33A97E","#33A97E","#1A9D6F","#009270","#86CDA5","#33A97E","#33A97E","#33A97E","#33A97E","#1A9D6F","#1A9D6F","#41B087","#139B77","#41B087","#41B087","#41B087","#009270","#66C092","#4DB47E","#27A37F","#66C092","#4DB47E","#74C69C","#3AAC86","#74C69C","#4DB58D","#74C69C","#74C69C","#61BD95","#009270","#8ED195","#8CD094","#8FD297","#7FCA93","#8CD094","#93D39A","#94D49B","#91D298","#7FCA93","#8CD094","#96D59C","#8CD094","#97D59E","#8CD094","#9DD8A3","#99D69F","#99D69F","#99D69F","#9BD7A1","#99D69F","#A0D9A5","#A4DAA9","#99D69F","#99D69F","#A2D9A7","#A4DAA9","#99D69F","#7FCA93","#A6DBAB","#A6DBAB","#A6DCB5","#A6DCB5","#A6DCB5","#99D7B3","#99D7B3","#B3E1B6","#BFB777","#99D7B3","#B3E1C2","#B3E1C2","#99D7B3","#BFE6CF","#BFE6CF","#BFE6CF","#BFE6CF","#BFE6CF","#BFE6CF","#BFE6CF","#BFE6C3","#BFE6C3","#B3E1C2","#BFE6C3","#BFE6C3","#BFE6C3","#BFE6C3","#CCEBD1","#CCEBD1","#CCEBD1","#CCEBD1","#CCEBD1","#CCECDD","#CCECDD","#BFE6CF","#E6F5FF","#CCECDD","#D9F0DF","#D9F0DF","#E0F3E0","#D9F0DF","#D9F0DF","#D9F0DF","#E6F5E1","#E6F5E1","#E6F5E1","#E5B75A","#E5B75A","#E5AC4D","#E5AC4D","#E5AC4D","#CB8C37","#998E4F","#E5B75A","#E5B75A","#E5B75A","#E5B75A","#E5B75A","#E5B75A","#E5B75A","#E5B75A","#E5AC4D","#E5C468","#E5C468","#E5C468","#E5C468","#E5D075","#E5D075","#E5D075","#E5D075","#E5D075","#E5D075","#E5D075","#E5D075","#E
5D075","#E5D075","#F1D576","#F1D576","#F1D576","#F1C868","#F1C868","#F1C868","#F1C868","#F1D576","#F1D576","#F1D576","#F1E185","#F1E185","#F1E185","#F1E185","#F1E185","#F1CE78","#F1E185","#CB8C37","#F1D487","#F2EDAD","#F1E19D","#F2EDAD","#F2EDAD","#F2EDAD","#F2DB97","#F2EDAD","#F2EDAD","#F2EDC5","#F2E1A6","#F2EDC5","#F2EDC5","#F2EDC5","#F2EDC5","#F2EDC5","#F2E7B6","#F2EDC5","#F2EDC5","#F2EDC5","#F2EDC5","#8CB06C","#96B46C","#8CB06C","#678F66","#67A599","#8CB06C","#8CB06C","#8CB06C","#8CB06C","#A0B76C","#8CB06C","#8CB06C","#A6B96C","#A6B96C","#A6B96C","#ABBB6B","#A6B96C","#A6B96C","#A6B96C","#B5BE6B","#A6B96C","#BFC26B","#BFC26B","#A6C093","#BFC26B","#BFC26B","#BFC26B","#99C2B5","#9FC4B7","#99C2B5","#99C2B5","#C5816F","#99C2B5","#99C2B5","#99C2B5","#ACC9BC","#99C2B5","#99C2B5","#C7CBB9","#C7CBB9","#A6C7BA","#C7CBB9","#C7CBB9","#B3CBBE","#C7CBB9","#C7CBB9","#BFD0C5","#BFD0C5","#BFD0C3","#B9CDC0","#BFD0C5","#BFD0C5","#BFD0C5","#BFD0C5","#CCD4C7","#CCD4C7","#C6D2C5","#CCD4C7","#CCD4C7","#CCD4C7","#CCD4C7","#E36350","#E36350","#E36956","#EE5845","#EF5845","#F04028","#E36350","#E36350","#E36350","#E36350","#E36F5C","#E36F5C","#E36F5C","#E36F5C","#E37B68","#E37B68","#E37B68","#E37B68","#E3816F","#E38776","#E38776","#E38776","#E38776","#E38776","#FB8069","#F57F71","#FB8069","#EE7E79","#FB745C","#FB8D76","#FB8D76","#FB9A85","#FB9A85","#FB9A85","#FB9A85","#FB9A85","#FB9A85","#FB9A85","#E27C88","#FCB4A2","#E87D80","#FCB4A2","#E27C88","#FBA794","#FBA794","#FCB4A2","#FCB4A2","#FCB4A2","#FCB4A2","#FCB4A2","#FCB4A2","#FCB4A2","#FCC0B2","#FCC0B2","#FCC0B2","#FCC0B2","#FCC0B2","#FCC0B2","#FCC0B2","#A4469F","#A4469F","#DC7B90","#A4469F","#D57998","#983999","#812B92","#5B6FAE","#67C5CA","#A4469F","#A4469F","#A4469F","#983999","#A4469F","#A4469F");
color <- c(color,"#CF78A0","#A4469F","#A4469F","#B051A5","#B051A5","#C977A7","#B051A5","#B051A5","#B051A5","#B051A5","#B051A5","#C276AF","#B051A5","#B051A5","#B051A5","#B051A5","#B051A5","#B051A5","#BC75B7","#BC75B7","#B168B1","#BC75B7","#BC75B7","#BC75B7","#BC75B7","#B168B1","#BC75B7","#BC75B7","#C983BF","#C983BF","#C983BF","#C983BF","#C983BF","#C983BF","#C983BF","#C983BF","#C983BF","#C99BCB","#C99BCB","#C99BCB","#C99BCB","#BD8CC3","#C99BCB","#C99BCB","#C99BCB","#C99BCB","#C99BCB","#C99BCB","#C99BCB","#C99BCB","#C99BCB","#C99BCB","#C99BCB","#BD8CC3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#D6AAD3","#E3B9DB","#E3B9DB","#E3B9DB","#E3B9DB","#E3B9DB","#E3B9DB","#4EB3D3","#4EB3D3","#56B6D5","#42AED0","#34B2C9","#5ABC8C","#4EB3D3","#4EB3D3","#67BCD8","#5FB9D6","#67BCD8","#67BCD8","#67BCD8","#67BCD8","#67BCD8","#74C1DB","#67BCD8","#80C5DD","#80C5DD","#80C5DD","#88C8DF","#80C5DD","#80C5DD","#91CBE1","#80C5DD","#99D1E2","#99CEE3","#99CEE3","#9AD4E0","#99CEE3","#99CEE3","#99CEE3","#9AD6DF","#99CEE3","#99CEE3","#9AD9DD","#9AD9DD","#9AD9DD","#80CFD8","#9AD9DD","#9AD9DD","#A9DEE1","#9AD9DD","#A6DDE0","#A6DDE0","#ADE0E2","#A6DDE0","#A6DDE0","#A6DDE0","#A6DDE0","#A6DDE0","#B3E2E3","#B3E2E3","#B0E1E2","#B3E2E3","#B3E2E3","#B3E2E3","#B3E2E3","#B3E2E3","#B3E2E3","#BFE7E5","#BFE7E5","#BFE7E5","#B9E5E4","#BFE7E5","#BFE7E5","#BFE7E5","#BFE7E5","#BFE7E5","#BFE7F1","#BFE7F1","#B3E3EE","#BFE7E9","#BFE7F1","#BFE7ED","#BFE7F1","#BFE7F1","#BFE7F1","#C6EAF3","#BFE7F1","#CCECF4","#CCECF4","#CCECF4","#CCECF4","#CCECF4","#CCECF4","#D9F1F7","#D9F1F7","#D9F1F7","#D3EFF6","#D9F1F7","#D9F1F7","#D9F1F7","#D9F1EE","#D9F1F7","#D9F1F7","#D9F1F7","#D9F1F7","#D9F1F7","#D9F1F7","#D9F1F7","#D9F1F7","#D9F1F7","#D9F1F7","#8CCD60","#8CCD60","#8CCD57","#7FC64E","#8CCD60","#90CF63","#8CCD60","#95D167","#8CCD60","#8CCD60","#8CCD60","#99D36A","#9DD56E","#99D36A","#99D36A","#99D36A","#A2D771","#9
9D36A","#99D36A","#99D36A","#99D36A","#99D36A","#99D36A","#99D36A","#99D36A","#8CCD57","#A6D975","#A6D975","#ADDC7A","#A6D975","#A6D975","#B3DF7F","#B3DF7F","#B3DF7F","#B7E183","#B3DF7F","#B3DF7F","#B3DF7F","#B3DF7F","#B3DF7F","#BBE286","#BFE48A","#BFE48A","#BFE48A","#BFE48A","#BFE48A","#BFE48A","#70C189","#BFE48A","#CCEA97","#CCEA97","#C6E791","#CCEA97","#CCEA97","#CCEA97","#77C48E","#CCEA97","#CCEA97","#CCEA97","#CCEA97","#CCEA97","#7DC693","#C0E475","#CCEA97","#CCEA97","#B3DE55","#B3DE53","#A6D84A","#D2C055","#B3DE53","#B3DE53","#84C998","#B3DE53","#B3DE53","#B3DE53","#B3DE53","#B3DE53","#B3DE53","#B3DE53","#B3DE53","#B3DE53","#B3DE53","#B3DE53","#B7E056","#B3DE53","#B3DE53","#B3DE53","#91CEA3","#B3DE53","#B3DE53","#B3DE53","#B3DE53","#B3DE53","#B3DE53","#B3DE53","#BFE35D","#BBE15A","#BFE35D","#BFE35D","#BFE35D","#BFE35D","#BFE35D","#98D1A8","#BFE35D","#BFE35D","#BFE35D","#BFE35D","#C3E561","#BFE35D","#BFE35D","#BFE35D","#A6D84A","#C8E764","#CCE968","#CCE968","#9ED3AE","#CCE968","#D0EB6C","#A6D84A","#AEDABA","#D5ED70","#D9EF74","#D9EF74","#D9EF74","#D9EF74","#DCF077","#D9EF74","#E0F27A","#D9EF74","#B8DEC2","#E3F37C","#A6D84A","#E6F47F","#E6F47F","#E6F47F","#E6F47F","#E6F47F","#E6F47F","#E8F581","#E6F47F","#E6F47F","#E6F47F","#E6F47F","#E6F47F","#E9F683","#E6F47F","#E6F47F","#EBF785","#E6F47F","#EDF786","#E6F47F","#EFF888","#E6F47F","#E6F47F","#E6F47F","#E6F47F","#E6F47F","#E6F47F","#E6F47F","#A6D84A","#F0F98A","#F2FA8C","#F2FA8C","#F2FA8C","#F2FA81","#F2FA8C","#F2FA8C","#F2FA76","#FDB469","#FDB663","#FDB462","#FDB462","#FDA75F","#F4AA70","#FDB46C","#FD9A52","#F4B470","#F2F91D","#FEBA64","#FDB863","#FEBB64","#FEBF6A","#FEBD64","#FEBF65","#FEBF65","#FEBF6A","#FEBF6A","#FDBF6F","#FDBF6F","#FDBA70","#FDB571","#FDB371","#FCB171","#FCAC72","#FCAB78","#FCA773","#FCA773","#FDB46C","#EFA76E","#FCA976","#FCAB78","#EAA46D","#FCAE7B","#FCB07D","#FCB07D","#FCB280","#E4A06B","#FCB887","#FCB482","#FCB482","#FCB887","#DF9D69","#FDBC8C","#E3A570","#FDC091","#FDC799","#FDBC8C","#F
DCDA1","#FDCDA1","#FDC799","#FED99A","#FDD09F","#EBB07A","#F4BA83");
color <- c(color,"#FED39E","#FCCA96","#FED69C","#FED39E","#FED99A","#FED99A","#FDC07A","#FDC07A","#FEDC9E","#FEE0A2","#FEE39F","#FFECA5","#FEE3A6","#FEE9B2","#FEE6AA","#FEE6AA","#FEE6AA","#FDCFA0","#FEEE82","#FDD5AA","#FFF75B","#FFF75B","#FFFF33","#FFFF33","#FFFF00","#FFE619","#FFFF00","#FFFF3A","#FEDAB4","#FFFF41","#FFFF45","#FDD6A7","#FFFF3A","#FFFF49","#FFFF47","#FFFF4D","#FFFF4D","#FDCD90","#FFFF53","#FDCA7B","#FFFF59","#FFFF5D","#FFFF60","#FDD07C","#FFFF62","#FFFF66","#FFFF66","#FED67E","#FFFF69","#FFFF6B","#FFFF6D","#FFFF6E","#FFFF70","#FFFF73","#FEE582","#FFFF93","#FFFFB3","#FFFFB3","#FFFF99","#FFF78A","#FFFFB5","#FFFFB9","#FFFFB8","#FFFFBA","#FFFFBD","#FFF88E","#FFFFBF","#FFFFBF","#FFF891","#FFF6B9","#FFEDB3","#FFEDB3","#FFF2AE","#F9F97F","#FFEFBA","#FFF994","#FFF0C0","#FFF2C7","#FFF0BD","#FFF997","#FFF2C7","#FFF2D3","#FFF2D8","#FFFA9B","#FFF2E1","#FFF2DC","#FFF2D3","#FFF2C7","#FEF2E0");
color <- c(color,"#7FCA93","#97D59E","#99D69F","#99D69F","#A2D9A7","#33A97E");
color <- c(color,"#8ED195","#96D59C","#99D69F","#A0D9A5","#A2D9A7","#E6F5C9");
onset <- c(4000,4000,4000,4000,4000,4000,3600,3200,2800,2800,2500,2500,2500,2500,2500,2500,2300,2050,1800,1800,1600,1600,1600,1400,1400,1300,1200,1000,1000,1000,1000,850,850);
onset <- c(onset,635,541,541,541,541,541,541,541,541,541,529,524,521,521,521,521,520,520,518.5,515.5,515.5,514,513,513,511.1,511,510.5,509,509,509,509,509,507.3,506.5,506.5,505.25,504.5,504.5,504.5,501,501,500.5,499.5,498.5,497,497,497,497,496.8,495.2,494,493.9,493,493,493,492.5,492.3,490.9,489.5,489.5,487.8,487.1,486.7,486.7,486.2,485.4,485.4,485.4,485.4,485.4,485.4,484.4,480.3,480,480,479.8,479.5,477.7,477.7,477.5,475,473.7,470.3,470.0,470.0,470.0,468.7,467.4,467.3,465.5,464.4,464,463.7,462.4,461.4,461.2,458.4,458.4,458.4,458.4,456.6,456.5,456.5,456.5,456.5,455.8,455.5,454.7,454.5,453.7,453.5,453,453,452.3,451,450,450,450,449.7,449,449,449,447,445.5,445.2,445.2,443.8,443.8,443.8,443.8,443.8,443.8,443.8,441.1,440.8,440.5,439.4,438.5,438.5,437.5,436.4,435.8,435.5,435.2,433.4,433.4,433.4,432.6,432.3,431.8,431.2,430.5,430.5,430.5,429.4,428.3,427.4,427.4,427.4,427.4,427,425.6,425.6,425.6,425.2,424.8,423.6,423,423,421.2,419.2,419.2,419.2,419.2,419.2,419.2,419.2,419,417.2,415.2,414.5,414,413.5,412.5,412,411,410.8,410.8,410,408.5,407.6,407.6,407.6,407.6,406,404.2,403.5,401,397.5,395,393.3,393.3,393.3,393.3,393.3,393.3,393.3,391.5,389.2,388,387.7,387.7,387.7,387.5,385,385,384,383,382.7,382.7,382.7,381.2,380.2,379,379,376.5,373,372.2,372.2,372.2,370,370,367.5,366.5,365,364,363,362.2,360,358.9,358.9,358.9,358.9,358.9,358.4,357.4,355,353,351.5,349,348,346.7,346.7,344,343.5,340,336.8,335,335,332,330.9,330.9,330.9,329.5,327.2,325.5,323.2,323.2,323.2,323.2,323.2,322.5,320.8,320,319,318.6,317,315.2,315.2,315,314.5,312.5,312,310.8,309,307,307,307,306,305.7,305.2,304.5,304,303.7,303.7,303.7,302.7,302,301,300,298.9,298.9,298.9,298.9,298.9,298.9,298,297.5,297,296.5,295.5,295.5,294.1,293.5,290.1,290.1,285,282,282,279.3,279.3,275.5,274.7,273.7,272.3,272.3,272.3,272.3,272.3,268.8,268.8,265.1,265.1,264.5,264,263.7,261.8,260.3,259.9,259.9,259.9,259.9,259.9,259.9,259.9,259,258.5,257.7,257.2,256.5,255.3,254.5,254.14,254.14,253.5,253.1,252.8,252.5,252.4,252.17,252.17,252.17,252.17,252.17,252
.17,252.17,252.17,252.17,251.9,251.9,251.8,251.7,251.6,251.6,251.6,251.4,251.3,251.2,251.2,251.2,251.2,249.3,249.3,248.55,248.5,248.5,248.4,248.3,248,247.6,247.4,247.3,247.2,247.2,247.2,246.8,246.7,246.5,246.3,244.3,244,243.5,242,242,241,240.4,240.3,239,238.3,238,238,237,237,237,237,237,236.5,236.3,234.6,234,233.5,233.5,233,232.6,231.4,230.5,229.7,229.5,228.4,228,228,226.5,224.5,221.4,218.3,217.5,217.4,217.4,217,216.9,216.5,215.3,214.8,214,214,208.5,208.5,208.5,202.3,202.2,201.9,201.3,201.3,201.3,201.3,201.3,201.3,201.1,200.2,199.3,199.3,199.3,197.8,196.4,195.4,193.8,193.3,192.8,190.8,190.8,189.5,189,188.5,187.5,185.5,184.2,182.8,182.7,182.7,181,180.7,180.4,178.4,177.2,176.4,175,174.4,174.1,174.1,174.1,172.2,171.3,171.2,170.9,170.3,170.3,170.3,170,169.8,169.5,169,168.6,168.3,168.3,168,167.3,167,166.8,166.6,166.4,166.2,166.1,166.1,165.5,165.4,165,164.6,164.5,164.2,163.8,163.5,163.5,163.5,163.4,161.4,161.4,160.8,160.1,159.7,159.7,159,157.3,157.3,156,154.7,153.7,152.4,152.1,152,151.5,151.2,151,150.7,150.2,149.2,149,148.3,148,147.6,147.4,147.2,147,146.7,146.4,146,146,145,145,145,144.6,144,144,142.4,141.6,141,140.2,139.8,139.8,139.8,138.5,137.6,137.4,136.8,135.6,135,134.3,133.95,133.85,133.5,133.4,133,132.9,132.8,132.7,132.5,132.3,129.4,129.4,129,129,128.8,128.2,127.8,127.3,125.5,125.4,125,125,124.5,124,123,118,117.5,116.8,113,113,112.2,111.3,110.3,109.5,108.4,108.3,107.5,107,104.9,104.1,103.3,103,101.7,101.3,100.8,100.5,100.5,100.5,100.25,100.2,100.2,99.81,99.17,98.75,98.5,98.19,97.26,96.24,96.08,95.98,95.9,95.81,95.7,95.67,95.47,95.24,95.2,95.01,94.78,94.57,94.39,94.27,94.15,93.98,93.9,93.9,93.9,93.55,93.45,93.35,92.9,92.1,92.08,91.6,91.41,91.34,91,90.65,90.24,89.98,89.87,89.8,89.8,89.77,89.1,88.77,88,87.86,86.5,86.3,86.3,86.26,85.56,85.23,85,84.52,84.2,84.08,84,83.8,83.64,83.6,82.7,82,81.53,81.28,81.13,81,80.97,80.67,80.21,79.64,79.01,79,78.34,77.63,77.6,76.94,76.8,76.27,76,75.64,75.08,74.6,74.21)
onset <- c(onset,73.91,73.63,73.27,72.74,72.1,72.1,72.05,70.44,70.2,69.91,69.3,67.8,66,66,66,66,66,66,66,66,66,66,64.75,64.5,63.8,62.25,62.2,61.6,61.6,61.6,59.7,59.2,59.2,58.4,57.5,57.5,57.3,56.2,56,56,56,56,55.5,55,53.61,53,52.85,52,50.6,49.7,49.5,47.9,47.8,47.8,47.3,46.2,43.4,43,41.3,41.3,39.9,38,37.8,37.8,37.8,37,37,36,36,34.3,34.2,33.9,33.9,33.9,33.9,33.9,32.8,32.3,32.1,30.8,29.9,29.8,28.1,28.1,27.5,27.3,26.3,25.2,24.8,23.9,23.03,23.03,23.03,23.03,23.03,22.2,21.7,20.44,19,19,18.6,18.3,16.3,15.97,15.97,15.9,15.6,15.1,13.82,13.6,13.6,12.7,11.8,11.62,11.62,10.92,10.9,10.7,10.3,9.4,8.6,7.246,6.5,5.6,5.333,5.333,5.333,5.28,5,4.9,4.15,4,3.75,3.6,3.6,3.6,3,2.6,2.588,2.588,2.588,2.588,2.5,2.4,2,1.806,1.806,1.63,0.781,0.781,0.5,0.34,0.3,0.3,0.15,0.126,0.0117)
onset <- c(onset,453.0,452.0,450.4,449.4,448.5,485.4)
onset <- c(onset,458.4,456.5,453.0,450.3,448.3,489.5)
end <- c(3600,2500,541,485.4,252.17,0,3200,2800,2500,2500,2300,1600,1600,541,485.4,252.17,2050,1800,1600,1600,1400,1300,1000,1300,1200,850,1000,850,541,485.4,252.17,635)
end <- c(end,541,541,529,521,520,513,513,485.4,443.8,252.17,0,521,521,520,514,509,509,518.5,515.5,515.5,511.1,511,509,501,501,510.5,506.5,509,507.3,504.5,504.5,497,497,506.5,505.25,504.5,504.5,500.5,499.5,497,496.8,485.4,497,498.5,497,495.2,494,493,485.4,492.5,493.9,489.5,493,492.3,486.7,488.27,485.4,490.9,487.8,485.4,485.4,487.1,486.7,486.2,480,485.4,484.4,484.4,477.7,470.0,443.8,419.2,480.3,480,479.8,479.5,477.5,475,470.0,465.5,473.7,470.0,470.3,468.7,467.3,458.4,456.5,467.4,464.4,458.4,461.9,463.7,458.4,462.4,461.2,458.4,456.6,456.5,453,449,443.8,455.8,457,453.5,455.5,451,454.7,454.5,453.7,451,453,449,452.3,445.2,450,450,449.7,449,443.8,449,447,445.2,443.8,445.5,443.8,443.8,443.8,441.1,440.8,438.5,433.4,427.4,419.2,358.9,440.5,438.5,439.4,437.5,433.4,425.6,436.4,435.8,435.5,435.2,433.4,432.6,430.5,427.4,432.3,431.8,431.2,430.5,429.4,427.4,427.4,428.3,427.4,427,425.6,423,419.2,425.6,425.2,423,419.2,424.8,423.6,423,421.2,419.2,419,410.8,410.8,393.3,393.3,393.3,358.9,323.2,417.2,415.2,414.5,414,413.5,412.5,412,411,410,407.6,407.6,408.5,407.6,406,393.3,393.3,393.3,404.2,403.5,401,397.5,395,393.3,391.5,387.7,387.7,385,382.7,382.7,382.7,389.2,388,387.7,387.5,382.7,382.7,385,384,370,383,381.2,379,372.2,358.9,380.2,379,376.5,372.2,373,372.2,370,365,358.9,367.5,358.9,366.5,364,358.9,363,362.2,360,358.9,358.4,351.5,346.7,323.2,298.9,357.4,355,353,349,343.5,348,346.7,344,330.9,340,335,336.8,335,332,323.2,330.9,329.5,323.2,315,327.2,325.5,323.2,322.5,319,315.2,298.9,252.17,320.8,320,318.6,312,317,315.2,314.5,307,307,312.5,310.8,306,309,307,305.7,303.7,298.9,303.7,305.2,304.5,304,303.7,302.7,298.9,298.9,302,301,300,298.9,298,295.5,282,272.3,272.3,252.17,297.5,297,296.5,295.5,294.1,290.1,293.5,290.1,285,279.3,282,279.3,272.3,275.5,272.3,274.7,273.7,272.3,268.8,268.8,268.8,259.9,259.9,265.1,265.1,264.5,259.9,264,263.7,261.8,260.3,259.9,259,259,254.14,254.14,252.17,252.17,252.17,258.5,257.7,257.2,256.5,255.3,254.5,254.14,253.5,252.17,253.1,252.8,252.5,252.17,252.17,251.9,251.9,2
51.6,251.2,247.2,247.2,201.3,145,66,251.8,251.7,251.6,251.6,251.4,251.3,251.2,251.2,251.2,249.3,249.3,248.5,247.2,248.55,248.4,248.5,248,247.2,248.3,247.6,247.7,247.4,247.3,247.2,246.7,242,237,246.5,244.3,246.3,244,241,243.5,242,240.4,237,240.3,239,238,238.3,238,237,237,236.5,236.3,230.5,228,201.3,233.5,234.6,234,233.5,233,232.6,231.4,229.7,229.5,228,228,226.5,217.4,224.5,208.5,221.4,218.3,217.4,217.5,217,216.9,215.3,214,216.5,214.8,208.5,214,208.5,208.5,202.3,202.2,201.3,201.9,201.3,201.3,201.1,199.3,199.3,174.1,145,66,200.2,199.3,197.8,193.3,190.8,196.4,195.4,193.8,192.8,189,190.8,189.5,182.7,188.5,185.5,187.5,184.2,182.8,182.7,181,180.7,174.1,177.2,180.4,178.4,176.4,171.2,175,174.4,174.1,172.2,170.3,163.5,171.3,170.9,170.3,170.3,170,168.3,168,169.8,169.5,169,168.6,168.3,167.3,166.1,165.4,167,166.8,166.6,166.4,166.2,166.1,165.5,163.5,165,163.4,164.6,164.5,164.2,163.8,163.5,161.4,157.3,145,161.4,160.8,159.7,160.1,159.7,159,151.2,157.3,156,152.1,154.7,153.7,152.4,152,145,151.5,151,149.2,150.7,150.2,149,144,148.3,148,147.6,147.4,147.2,147,146.7,146.4,146,144.6,142,139.8,100.5,66,144,142.4,141.6,139.8,141,140.2,139.8,138.5,137.4,132.9,137.6,136.8,132.7,135.6,135,134.3,133.95,133.85,133.5,133.4,133,132.8,129.4,132.5,129,132.3,129.4,129,125,128.8,125.4,128.2,127.8,127.3,125.5,125,112.2,124.5,113,124,123,118,116.8,108.4,113,111.3,100.5,103,110.3,109.5,108.3,103.3,107.5,107,104.9,104.1,101.7,100.2,95.7,101.3,100.8,100.2,93.9,66,56,99.81,98.5,95.2,99.17,98.75,98.19,97.5,97.76,96.24,96.08,95.98,95.9,95.81,95.67,93.9,95.47,95.24,95.01,92.1,94.78,94.57,94.39,94.27,94.15,93.98,93.9,93.55,91,89.8,93.45,93.35,92.9,92.08,89.1,91.6,91.41,91.34,90.65,89.8,90.24,89.98,89.87,89.77,88,86.3,88.77,86.5,87.86,86.3,86.26,84,85,83.6,85.56,85.23,84.52,84.2,84.08,83.8,83.64,66,81,82.7,72.1,82,81.53,81.28,81.13,80.97,79,80.67,80.21,79.64,79.01,78.34,77.6,77.63,76.94,76.8,76.27,76,75.64,72.1,75.08,74.6,74.21,73.91,73.63,73.27);
end <- c(end,72.74,72.05,70.2,66,70.44,69.91,67.8,69.3,68.69,66,64.75,64.5,61.6,61.6,56,55.5,33.9,23.03,2.588,0,62.25,63.8,62.2,57.5,59.7,59.2,59.2,56,58.4,56,56,57.5,57.3,56,56.2,55,52,47.8,47.8,33.9,53,53.61,52.85,49.5,50.6,47.9,49.7,47.3,46.2,39.9,41.3,38,43.4,43,41.3,37,37.8,37,37.8,33.9,33.9,33.9,33.9,36,36,34.3,34.2,27.3,32.8,32.1,28.1,28.1,23.03,23.03,32.3,29.9,29.8,26.3,27.5,18.6,23.03,23.03,23.9,25.2,24.8,21.7,20.6,22.2,20.44,15.97,5.333,2.588,2.588,19,19,15.97,18.3,15.9,16.3,15.6,13.6,13.82,11.62,15.1,13.6,12.7,11.62,11.8,10.3,10.92,10.9,7.246,5.333,6.5,10.7,9.4,4.9,8.6,5.6,5.333,5.28,5,3.6,3.6,2.588,3.6,4.15,1.806,4,3.75,2.6,3,2.588,2.588,2.4,2.5,1.806,1.806,0.0117,0,2,1.63,0.5,0.781,0.3,0.34,0.126,0.0117,0.3,0,0.0114,0,0.05,0.0117,0);
end <- c(end,452.0,450.4,449.4,448.5,445.2,477.7);
end <- c(end,456.5,453.0,450.3,448.3,445.2,485.4);
output <- data.frame(cbind(interval,onset,end,color));
return(output)
}
accersi_stage_colors <- function() {
# Stage-level geologic time scale (Ediacaran through Changhsingian): interval
# names, onset & end ages (Ma), and hex plot colors.
# Returns a data.frame(interval, onset, end, color).  Because the columns are
# passed through cbind() first, every column is character, so callers must
# convert onset/end with as.numeric(as.character(...)) — as the infer_*
# helpers in this file do.
interval <- c("Ediacaran","Fortunian","Stage 2","Stage 3","Stage 4","Wuliuan","Drumian","Guzhangian","Paibian","Jiangshanian","Stage 10","Tremadoc","Floian","Dapingian","Darriwilian","Sandbian","Katian","Hirnantian","Rhuddanian","Aeronian","Telychian","Sheinwoodian","Homerian","Gorstian","Ludfordian","Pridoli","Lochkovian","Pragian","Emsian","Eifelian","Givetian","Frasnian","Famennian","Tournaisian","Visean","Serpukhovian","Bashkirian","Asselian","Sakmarian","Artinskian","Kungurian","Roadian","Wordian","Capitanian","Wuchiapingian","Changhsingian");
onset <- c(635.0,541.0,529.0,521.0,514.0,509.0,504.5,500.5,497.0,494.0,489.5,485.4,477.7,470.0,467.3,458.4,453.0,445.2,443.8,440.8,438.5,433.4,430.5,427.4,425.6,423.0,419.2,410.8,407.6,393.3,387.7,382.7,372.2,358.9,346.7,330.9,323.2,298.9,295.5,290.1,279.3,272.3,268.8,265.1,259.9,254.1);
# end[i] is the onset of the succeeding stage.  The first entry was 542.0,
# inconsistent with onset[2] (541.0, the ICS base-Cambrian age); corrected
# to 541.0 so the Ediacaran/Fortunian boundary matches.
end <- c(541.0,529.0,521.0,514.0,509.0,504.5,500.5,497.0,494.0,489.5,485.4,477.7,470.0,467.3,458.4,453.0,445.2,443.8,440.8,438.5,433.4,430.5,427.4,425.6,423.0,419.2,410.8,407.6,393.3,387.7,382.7,372.2,358.9,346.7,330.9,323.2,315.5,295.5,290.1,279.3,272.3,268.8,265.1,259.9,254.1,252.2);
color <- c("#FED96A","#99B575","#A6BA80","#A6C583","#B3CA8E","#B3D492","#BFD99D","#CCDFAA","#CCEBAE","#D9F0BB","#E6F5C9","#33A97E","#41B087","#66C092","#74C69C","#8CD094","#99D69F","#A6DBAB","#A6DCB5","#B3E1C2","#BFE6CF","#BFE6C3","#CCEBD1","#CCECDD","#D9F0DF","#E6F5E1","#E5B75A","#E5C468","#E5D075","#F1D576","#F1E185","#F2EDAD","#F2EDC5","#8CB06C","#A6B96C","#BFC26B","#99C2B5","#E36350","#E36F5C","#E37B68","#E38776","#FB8069","#FB8D76","#FB9A85","#FCB4A2","#FCC0B2");
return(data.frame(cbind(interval,onset,end,color)));
}
infer_interval_color <- function(onset,end) {
# Return the plot color of the chronostratigraphic interval whose onset and
# end ages (Ma) are jointly closest to the supplied pair.
# onset, end: ages of the interval to color; sign is ignored, so either
#	positive ages or negative "time axis" values work.
scale_tbl <- accersi_time_scale_color();
scale_onsets <- as.numeric(as.character(scale_tbl$onset));
scale_ends <- as.numeric(as.character(scale_tbl$end));
# total absolute mismatch between each tabulated interval and the query pair
mismatch <- abs(abs(scale_onsets)-abs(onset)) + abs(abs(scale_ends)-abs(end));
# first interval achieving the minimum mismatch wins
best <- match(min(mismatch),mismatch);
return(scale_tbl$color[best]);
}
infer_stage_color_given_age <- function(ma) {
# Return the plot color of the time-scale interval containing the age
# |ma| (in Ma).  When several tabulated intervals bracket the age (e.g. a
# stage nested inside a period), the narrowest one wins.
scale_tbl <- accersi_time_scale_color()
target <- abs(ma);
after <- as.numeric(as.character(scale_tbl$onset))-target;
before <- target-as.numeric(as.character(scale_tbl$end));
scale_tbl <- cbind(scale_tbl,after,before);
# keep only intervals whose onset..end range brackets the target age
bracketing <- subset(scale_tbl,scale_tbl$after>=0 & scale_tbl$before>=0);
# total span = interval duration; smallest (finest-grained) interval wins
total_span <- bracketing$before+bracketing$after;
best <- match(min(total_span),total_span);
return(bracketing$color[best]);
}
infer_stage_color_given_stage_onset_and_end <- function(age_range) {
# Return the plot color of the time-scale interval best matching a stage
# with onset age_range[1] and end age_range[2] (ages in Ma; sign ignored).
# The narrowest tabulated interval that fully brackets the range wins.
# (A leftover debug print() of age_range was removed.)
get_colors <- accersi_time_scale_color()
after <- as.numeric(as.character(get_colors$onset))-abs(age_range[1]);
before <- abs(age_range[2])-as.numeric(as.character(get_colors$end));
get_colors <- cbind(get_colors,after,before);
# keep only intervals whose onset..end range brackets the query range
get_colors <- subset(get_colors,get_colors$after>=0);
get_colors <- subset(get_colors,get_colors$before>=0);
# narrowest bracketing interval (finest stage) wins
span <- get_colors$before+get_colors$after;
best_bet <- match(min(span),span)
ma_color <- get_colors$color[best_bet];
return(ma_color)
}
infer_stage_color_given_strat_unit <- function(strat_unit) {
# Return the plot color for a named stratigraphic unit (e.g. "Katian").
# Yields NA when the unit is not in the time-scale table.
get_colors <- accersi_time_scale_color()
su <- match(strat_unit,get_colors$interval)
# look the color up by column name rather than the magic index 4, so the
# lookup survives any reordering of columns in the source table
return(get_colors$color[su])
}
#### Taxon Shapes ####
pentagon_symbol <- function(x,y,abc,ord,size) {
# Regular pentagon outline as a 2 x 5 matrix (row 1 = x, row 2 = y),
# centered on (x, y).  Vertex 1 sits at 18 degrees; successive vertices
# follow every 72 degrees.
# abc, ord: relative stretch of the x and y axes; size: overall scale.
vertex_angles <- pi*(18+72*(0:4))/180
pent <- rbind(cos(vertex_angles),sin(vertex_angles))
# rescale to the requested size, stretch per axis, then translate to (x, y)
pent <- size*pent
pent[1,] <- abc*pent[1,]
pent[2,] <- ord*pent[2,]
pent[1,] <- x+pent[1,]
pent[2,] <- y+pent[2,]
return(pent)
}
bug_symbol <- function(x,y,abc,ord,size) {
# Six-vertex "bug" outline as a 2 x 6 matrix (row 1 = x, row 2 = y),
# centered on (x, y): pointed at top and bottom (full ord*size reach), with
# four shoulder vertices offset by 0.75*sqrt(2)/2 on each axis.
# abc, ord: relative stretch of the x and y axes; size: overall scale.
half_diag <- 0.75*(sqrt(2)/2)  # shoulder offset in unit coordinates
unit_x <- c(-half_diag, 0, half_diag, half_diag, 0, -half_diag)
unit_y <- c( half_diag, 1, half_diag, -half_diag, -1, -half_diag)
bug <- rbind(x + abc*size*unit_x,
             y + ord*size*unit_y)
return(bug)
}
star_symbol <- function(x,y,abc,ord,size) {
# Five-pointed star outline as a 2 x 10 matrix (row 1 = x, row 2 = y).
# x: x-coordinate
# y: y-coordinate
# abc: relative size of X-axis
# ord: relative size of Y-axis
# size: size of symbol
# Vertices alternate between outer tips (radius 1/2) and inner notches
# (radius 1/4), spaced every 36 degrees starting at 18 degrees.
point_angles <- pi*(18+36*(0:9))/180
point_radii <- rep(c(1/2,1/4),5)
star <- rbind(point_radii*cos(point_angles),point_radii*sin(point_angles))
# rescale to the requested size, stretch per axis, then translate to (x, y)
star <- size*star
star[1,] <- abc*star[1,]
star[2,] <- ord*star[2,]
star[1,] <- x+star[1,]
star[2,] <- y+star[2,]
return(star)
}
flower_symbol <- function(x,y,abc,ord,size) {
# Twelve-vertex "flower" outline as a 2 x 12 matrix (row 1 = x, row 2 = y),
# centered on (x, y).  Vertices alternate between petal tips (radius 1/2)
# and notches (radius 1/4), every 30 degrees starting at 0 degrees.
# abc, ord: relative stretch of the x and y axes; size: overall scale.
petal_angles <- pi*(30*(0:11))/180
petal_radii <- rep(c(1/2,1/4),6)
flower <- rbind(petal_radii*cos(petal_angles),petal_radii*sin(petal_angles))
# rescale to the requested size, stretch per axis, then translate to (x, y)
flower <- size*flower
flower[1,] <- abc*flower[1,]
flower[2,] <- ord*flower[2,]
flower[1,] <- x+flower[1,]
flower[2,] <- y+flower[2,]
return(flower)
}
mollusc_symbol <- function(x,y,abc,ord,size,whorls,W) {
# Outline of a logarithmically coiled mollusc shell as a 2 x stops matrix
# of (x, y) vertex coordinates (row 1 = x, row 2 = y), suitable for polygon().
# x: x-coordinate
# y: y-coordinate
# abc: relative size of X-axis
# ord: relative size of Y-axis
# size: size of shell
# whorls: number of whorls to draw
# W: Raup's W (whorl expansion rate per revolution)
# outer spiral: 72 points per whorl (5-degree steps) plus the start point;
# the retraced inner edge (one whorl shorter) closes the polygon.
stops <- (72*whorls)+1+(72*(whorls-1)+1)
snail <- matrix(0,2,stops)
#r <- (size*abc)*/(W^whorls)
# starting radius chosen so the radius after `whorls` turns is ~1
# (rescaled below anyway)
r <- 1/(W^whorls)
#snail[1,1] <- x-r
snail[1,1] <- -r
#snail[2,1] <- y
snail[2,1] <- 0
ang <- 5
# trace the outer spiral in 5-degree increments; radius grows as W^(turns)
for (i in 2:((72*whorls)+1)) {
ri <- r*W^(5*(i-1)/360)
# snail[1,i] <- x+ri*cos(pi*((180-ang)/180))
# snail[2,i] <- y-(ord/abc)*ri*sin(pi*((180-ang)/180))
snail[1,i] <- ri*cos(pi*((180-ang)/180))
snail[2,i] <- -ri*sin(pi*((180-ang)/180))
ang <- ang+5
}
# retrace the spiral backwards from one whorl in (index a counts down from
# the start of the final whorl) so the aperture side of the outline closes
a <- 72*(whorls-1)+1
i <- (72*whorls)+2
for (i in ((72*whorls)+2):stops) {
snail[1,i] <- snail[1,a]
snail[2,i] <- snail[2,a]
a <- a-1
}
# normalize by the mean of the x- and y-extents, then scale, stretch per
# axis, and translate to the requested center
mn <- (max(snail[1,])-min(snail[1,])+max(snail[2,])-min(snail[2,]))/2
snail <- size*snail/mn
snail[1,] <- abc*snail[1,]
snail[2,] <- ord*snail[2,]
snail[1,] <- x+snail[1,]
snail[2,] <- y+snail[2,]
return (snail)
}
brachiopod_symbol <- function(x,y,abc,ord,pedicle_width=5,sulcus_width=10,sulcus_depth=0.1,shell_width=2,size) {
# Outline of a stylized brachiopod shell as a 2 x N matrix (row 1 = x,
# row 2 = y): a curved hinge line, a circular-arc outer margin, an optional
# sulcus notch at the top, all mirrored to the left to close the shape.
# x, y: center coordinates; abc, ord: per-axis stretch; size: overall scale
# pedicle_width: number of hinge points over which the hinge slope steepens
# sulcus_width: angular width of the sulcus in degrees; 0 disables it
# sulcus_depth: accepted but never referenced in this body — confirm intent
# shell_width: width of the hinge line before rescaling
# have the angle of the hinge start high and decrease
#seq(sin(pedicle_angle),0,by=-sin(pedicle_angle)/60)
#yy <- seq(sin(pedicle_angle),0,by=-sin(pedicle_angle)/60)/60
#for (i in 2:length(yy)) yy[i] <- yy[i]+yy[i-1]
#plot(xx,yy);
#pa <- pedicle_angle
## draw the "base" (hinge) of the shell: 60 points from the midline outward
xx <- seq(0,shell_width/2,by=(shell_width/2)/59)
yy <- c()
pa <- 0.01
# slope increases geometrically for the first pedicle_width points...
for (i in 1:pedicle_width) {
yy <- c(yy,sin(pa));
pa <- 1.3*pa;
# i <- i+1;
}
# ...then decays over the remaining points
for (i in (pedicle_width+1):60) {
yy <- c(yy,sin(pa));
pa <- 0.95*pa;
# i <- i+1;
}
# cumulative sum turns the per-point slopes into heights along the hinge
for (i in 2:length(yy)) yy[i] <- yy[i]+yy[i-1];
yy <- 0.2*yy
#plot(xx,yy,xlim=c(-1,1),ylim=c(-1,1),type="l");
# radius of the outer margin = distance from origin to the hinge tip
radius <- ((xx[length(xx)]^2)+(yy[length(yy)])^2)^0.5;
# NOTE(review): tan() of a ratio looks like it was meant to be atan() (the
# polar angle of the hinge tip) — confirm before changing, since every
# existing figure was drawn with tan().
base <- tan(yy[length(yy)]/xx[length(xx)]);
### draw the outer margin up to the sulcus
sul_wdth <- pi*sulcus_width/180
top <- (pi/2)-sul_wdth;
# 108 arc points from just above the hinge angle up to the sulcus edge
edges <- seq(base+((top-base)/108),top,(top-base)/108);
yyy <- radius*sin(edges);
xxx <- radius*cos(edges);
#lines(xxx,yyy);
if (sulcus_width > 0) {
### draw the sulcus: a small quarter-arc dimple hung from the shell crest
edges2 <- seq(0+(pi/20),pi/2,by=pi/20);
xxx <- c(xxx,0,xxx[length(xxx)])
yyy <- c(yyy,0,yyy[length(yyy)])
xxxx <- xxx[length(xxx)]*cos(edges2);
yyyy <- max(yy,yyy)-xxx[length(xxx)]*sin(edges2);
brachiopod_x <- c(xx,xxx,xxxx);
brachiopod_y <- c(yy,yyy,yyyy);
} else {
# if there is no sulcus, then we skip that part
brachiopod_x <- c(xx,xxx);
brachiopod_y <- c(yy,yyy);
}
# draw the left side of the shell: mirror of the right half, reversed so
# the outline stays a single continuous path
brachiopod_x <- c(brachiopod_x,-brachiopod_x[length(brachiopod_x):1]);
brachiopod_y <- c(brachiopod_y,brachiopod_y[length(brachiopod_y):1]);
# recenter vertically, then rescale, stretch per axis, and translate
brachiopod_y <- brachiopod_y-mean(brachiopod_y);
brachiopod <- rbind(brachiopod_x,brachiopod_y);
brachiopod <- size*brachiopod;
brachiopod[1,] <- abc*brachiopod[1,];
brachiopod[2,] <- ord*brachiopod[2,];
brachiopod[1,] <- x+brachiopod[1,];
brachiopod[2,] <- y+brachiopod[2,];
return(brachiopod);
}
trilobite_symbol <- function(x,y,abc,ord,size,cph,tho,thow,pyg,pygwu,pygwb) {
# Outline of a stylized trilobite as a 2 x 62 matrix of (x, y) vertices:
# an elliptical cephalon with genal spines, a segmented thorax, and a
# trapezoidal pygidium, for use with polygon().
# x, y: x & y coordinates (center of symbol)
# abc, ord: length of x & y axes (to make it "square")
# cph: relative height of cephalon (head)
# tho: relative height of thorax
# thow: relative width of thorax
# pyg: relative height of pygidium (tail)
# pygwu: relative width of pygidium at top
# pygwb: relative width of pygidium at bottom
l <- cph+tho+pyg # total length
# normalize the three body sections so they sum to 1
cph <- cph/l
tho <- tho/l
pyg <- pyg/l
hby <- (0.5-cph) #where body starts relative to y
bty <- (0.5-(cph+tho)) #where body ends relative to y
a <- 0.5 # half-width of the unit symbol
b <- cph # vertical semi-axis of the cephalon ellipse
xxx <- vector(length=34)
yyy <- vector(length=34)
trilo <- matrix(0,2,62)
j <- 1
# left genal spine and head/body junction (4 vertices)
trilo[1,j+0] <- -(thow*a) #head/body connection
trilo[1,j+1] <- -(((1+thow)/2)*a) #head/body connection
trilo[2,j+3] <- trilo[2,j+1] <- trilo[2,j+0] <- hby
trilo[1,j+3] <- trilo[1,j+2] <- -a #tip of spine
trilo[2,j+2] <- -(hby-(bty/2))
ang <- 175
j <- 4
# cephalon outline: upper half-ellipse traced in 5-degree increments
for (i in 1:35) {
# ddd[i] <- (0.25*(abs(ang-90)/90))
trilo[1,i+j] <- (a*cos(ang*pi/180))
xxx[i] <- a*cos(ang*pi/180)
yyy[i] <- sqrt(((b^2)-((b^2)*xxx[i]^2)/(a^2)))
trilo[2,i+j] <- trilo[2,1]+yyy[i]
ang <- ang-5
}
j <- j+35
# right genal spine and head/body junction (mirror of the left)
trilo[1,j+3] <- (thow*a) #head/body connection
trilo[1,j+2] <- (((1+thow)/2)*a) #head/body connection
trilo[2,j] <- trilo[2,j+2] <- trilo[2,j+3] <- hby
trilo[1,j] <- trilo[1,j+1] <- a #tip of spine
trilo[2,j+1] <- -(hby-(bty/2))
j <- j+4
# right thoracic margin: 8 segments alternating between the pleural tips
# (7/6 of the thorax half-width) and the thorax half-width itself
trilo[1,j] <- trilo[1,j+2] <- trilo[1,j+4] <- trilo[1,j+6] <- ((7/6)*thow*a)
trilo[1,j+1] <- trilo[1,j+3] <- trilo[1,j+5] <- trilo[1,j+7] <- (thow*a)
tip <- (hby-bty)/8 # vertical depth of one thoracic segment
for (i in 1:8) {
trilo[2,j] <- hby-(i*tip)
j <- j+1
}
rev <- j-1 # last right-margin vertex; mirrored below for the left margin
# do pygidium
trilo[1,j] <- pygwu*a
trilo[2,j] <- bty
j <- j+1
trilo[1,j] <- pygwb*a
trilo[2,j] <- -0.5
j <- j+1
trilo[1,j] <- -pygwb*a
trilo[2,j] <- -0.5
j <- j+1
trilo[1,j] <- -pygwu*a
trilo[2,j] <- bty
j <- j+1
# do rest of thorax: left margin mirrors the right, traced bottom-up
for (i in 1:8) {
k <- i-1
trilo[1,j] <- -trilo[1,(rev-k)]
trilo[2,j] <- trilo[2,(rev-k)]
j <- j+1
}
# center and rescale, stretch per axis, translate to (x, y)
trilo <- size*trilo
trilo[1,] <- abc*trilo[1,]
trilo[2,] <- ord*trilo[2,]
trilo[1,] <- x+trilo[1,]
trilo[2,] <- y+trilo[2,]
#polygon(trilo[1,],trilo[2,],col="orange",lwd=0.25)
return(trilo)
}
fish_symbol <- function(x,y,abc,ord,size,a,b) {
# Outline of a stylized fish as a 2 x rev matrix of (x, y) vertices: an
# elliptical body, a two-point triangular tail, and a final vertex tucking
# the outline back toward the body.
# x, y: center coordinates; abc, ord: per-axis stretch; size: overall scale
# a, b: horizontal and vertical semi-axes of the body ellipse
#	(e.g. a = 0.5, b = 0.25); a/b also inflates the final scale so the
#	area roughly matches a circle of radius a
# a <- 0.5
# b <- 0.25
spin <- 25 # points along each half of the body outline
rev <- 3+(2*spin) # total vertex count
xxx <- vector(length=52)
yyy <- vector(length=52)
#ddd <- vector(length=34)
fish <- matrix(0,2,rev)
ang <- 160
# body: upper-half ellipse stored at columns 1..spin, mirrored to the lower
# half at columns rev-1 .. rev-spin, in 5-degree steps
for (i in 1:spin) {
# ddd[i] <- (0.25*(abs(ang-90)/90))
fish[1,i] <- fish[1,rev-i] <- (a*cos(ang*pi/180))
xxx[i] <- a*cos(ang*pi/180)
yyy[i] <- sqrt(((b^2)-((b^2)*xxx[i]^2)/(a^2)))
fish[2,i] <- yyy[i]
fish[2,rev-i] <- yyy[i]*-1
ang <- ang-5
}
# tail: two vertices past the body; deliberately relies on the loop
# variable i retaining its final value (spin) after the loop
fish[1,i+1] <- fish[1,i+2] <- 0.5
fish[2,i+1] <- 1.5*max(fish[2,])
fish[2,i+2] <- fish[2,i+1]*-1
# last vertex: tucks the outline in ahead of the tail fork
fish[1,rev] <- -0.30
fish[2,rev] <- 0
# center and rescale fish: normalize x-extent to 1, right edge at 0.5
scx <- max(fish[1,])-min(fish[1,])
fish <- fish/scx
scxx <- max(fish[1,])-0.5
fish[1,] <- fish[1,]-scxx
# set the size so that the area is about equal to a circle with r=a
fish <- (a/b)*size*fish
fish[1,] <- abc*fish[1,]
fish[2,] <- ord*fish[2,]
fish[1,] <- x+fish[1,]
fish[2,] <- y+fish[2,]
return (fish)
}
#### Spindle like it's the 70's ####
spindle_diagram <- function(bin_onsets,spindled_midpts,spindled_counts,bin_colors,plot_on_y=TRUE,bar_legend=TRUE,legend_width=1,legend_case="Unit") {
# Draw one spindle (mirrored histogram) per bin on the current plot, plus
# an optional width legend.  (A leftover debug print() was removed.)
# bin_onsets: vector giving where (on X or Y axis) each bin begins;
#	length = nrow(spindled_counts)+1
# spindled_midpts: the position where the middle of each spindle segment is plotted
# spindled_counts: matrix giving the number of times an observation is made
#	with the mean of that variable (rows = bins, cols = segments)
# bin_colors: colors for separate spindle diagrams
# plot_on_y: if TRUE, then spindles go "up" Y-axis, with different ones plotted
#	on X-axis
# bar_legend: if TRUE, then print a legend for width
# legend_width: the count the legend bar represents
# legend_case: Unit name (e.g., "case" or "clade") that one example represents
bins <- dim(spindled_counts)[1];
bin_mids <- (bin_onsets[1:bins]+bin_onsets[2:(bins+1)])/2;
bin_widths <- abs(bin_onsets[1:bins]-bin_onsets[2:(bins+1)]);
# widest segment within each bin, used to size the width of a single count
bin_maxs <- vector(length=bins)
for (i in 1:bins) bin_maxs[i] <- max(spindled_counts[i,]);
# the largest per-count width that keeps every spindle inside its own bin
width_of_one <- min(bin_widths/bin_maxs);
y_hts <- abs(spindled_midpts[1]-spindled_midpts[2])/2
for (s in 1:bins) {
toplot <- spindled_midpts[spindled_counts[s,]>0]
tocount <- spindled_counts[s,spindled_counts[s,]>0]
# seq_along (not 1:length) so a bin with no occupied segments draws
# nothing rather than issuing rect() calls with NA coordinates
for (i in seq_along(toplot)) {
if (plot_on_y) {
x1 <- as.numeric(bin_mids[s]-(width_of_one/2)*tocount[i])
x2 <- as.numeric(bin_mids[s]+(width_of_one/2)*tocount[i])
y1 <- as.numeric(toplot[i])-y_hts
y2 <- as.numeric(toplot[i])+y_hts
} else {
y1 <- as.numeric(bin_mids[s]-(width_of_one/2)*tocount[i])
y2 <- as.numeric(bin_mids[s]+(width_of_one/2)*tocount[i])
x1 <- as.numeric(toplot[i])-y_hts
x2 <- as.numeric(toplot[i])+y_hts
}
rect(x1,y1,x2,y2,col=bin_colors[s],lwd=0.5)
}
}
# do legend if requested: one gray example bar labelled with its meaning
if (bar_legend) {
xleg <- max(bin_onsets)-(0.2*(max(bin_onsets)-min(bin_onsets)));
if (plot_on_y) {
x1 <- xleg-((width_of_one/2)*legend_width);
x2 <- xleg+((width_of_one/2)*legend_width);
x3 <- xleg+((width_of_one/2)*legend_width);
y1 <- max(spindled_midpts)+y_hts;
y2 <- max(spindled_midpts)-y_hts;
y3 <- (y1+y2)/2;
} else {
y1 <- xleg-((width_of_one/2)*legend_width)
y2 <- xleg+((width_of_one/2)*legend_width)
y3 <- xleg+((width_of_one/2)*legend_width)
x1 <- max(spindled_midpts)+y_hts
x2 <- max(spindled_midpts)-y_hts
x3 <- (x1+x2)/2
}
rect(x1,y1,x2,y2,col="gray50",lwd=0.5)
legend_text <- paste(":",legend_width,legend_case,sep=" ")
text(x3,y3,legend_text,cex=0.75,pos=4)
}
}
single_spindle <- function(axe=2,midpoint,spindle,max_width,min_axe,max_axe,spindle_color="white",spindle_lwdth=4/3,spindle_linecol="black") {
# Draw one spindle (mirrored histogram) on the current plot: fill every
# occupied segment first, then stroke only the outer silhouette.
# axe: 1 for x-axis (going left-right), 2 for y-axis (going up-down)
# midpoint: midpoint on the other axis
# spindle: width of spindle at each point along axis (= height of
#	histogram for the same data)
# max_width: maximum width on the other axis
# min_axe, max_axe: extent of the spindle along the chosen axis
# spindle_color: fill color of spindle bars
# spindle_lwdth: width of outline lines
# spindle_linecol: color of outline lines
spinds <- length(spindle)
# evenly spaced segment boundaries along the spindle's long axis
axe_incr <- seq(min_axe,max_axe,by=(max_axe-min_axe)/spinds)
scaled_spindle <- max_width*spindle
# pass 1: fill occupied segments (border matches fill so internal segment
# boundaries stay invisible)
for (sp in 1:spinds) {
if (scaled_spindle[sp]>0 && (axe==2 || axe==4)) {
rect(midpoint-scaled_spindle[sp]/2,axe_incr[sp],midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],col=spindle_color,lwd=4/3,border=spindle_color)
} else if (scaled_spindle[sp]>0) {
rect(axe_incr[sp],midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp]/2,col=spindle_color,lwd=4/3,border=spindle_color)
}
}
# pass 2: stroke the silhouette, segment by segment
if (axe==2 || axe==4) {
for (sp in 1:spinds) {
if (scaled_spindle[sp]>0) {
# draw bottom edge only when there is no occupied segment beneath it
if (sp==1 || (sp>1 && scaled_spindle[sp-1]==0))
segments(midpoint-scaled_spindle[sp]/2,axe_incr[sp],midpoint+scaled_spindle[sp]/2,axe_incr[sp],lwd=spindle_lwdth,col=spindle_linecol)
# left and right sides
segments(midpoint-scaled_spindle[sp]/2,axe_incr[sp],midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],lwd=spindle_lwdth,col=spindle_linecol)
segments(midpoint+scaled_spindle[sp]/2,axe_incr[sp],midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],lwd=spindle_lwdth,col=spindle_linecol)
# top edge: full width at the spindle's end, otherwise only the steps
# where the next segment is narrower or wider
if (sp==spinds || (sp<spinds && scaled_spindle[sp+1]==0)) {
segments(midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],lwd=spindle_lwdth,col=spindle_linecol)
} else if (scaled_spindle[sp]>scaled_spindle[sp+1]) {
segments(midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint-scaled_spindle[sp+1]/2,axe_incr[sp+1],lwd=spindle_lwdth,col=spindle_linecol)
segments(midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp+1]/2,axe_incr[sp+1],lwd=spindle_lwdth,col=spindle_linecol)
} else if (scaled_spindle[sp]<scaled_spindle[sp+1]) {
segments(midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint-scaled_spindle[sp+1]/2,axe_incr[sp+1],lwd=spindle_lwdth,col=spindle_linecol)
segments(midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp+1]/2,axe_incr[sp+1],lwd=spindle_lwdth,col=spindle_linecol)
}
}
}
} else {
for (sp in 1:spinds) {
if (scaled_spindle[sp]>0) {
# draw left edge only when there is no occupied segment before it
if (sp==1 || (sp>1 && scaled_spindle[sp-1]==0))
segments(axe_incr[sp],midpoint-scaled_spindle[sp]/2,axe_incr[sp],midpoint+scaled_spindle[sp]/2,lwd=spindle_lwdth,col=spindle_linecol)
# top and bottom sides
segments(axe_incr[sp],midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint-scaled_spindle[sp]/2,lwd=spindle_lwdth,col=spindle_linecol)
# BUG FIX: this lower side previously had its x/y arguments swapped
# (copied from the vertical branch), drawing a stray misplaced line
segments(axe_incr[sp],midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp]/2,lwd=spindle_lwdth,col=spindle_linecol)
# right edge: full height at the spindle's end, otherwise only steps
if (sp==spinds || (sp<spinds && scaled_spindle[sp+1]==0)) {
segments(axe_incr[sp+1],midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp]/2,lwd=spindle_lwdth,col=spindle_linecol)
} else if (scaled_spindle[sp]>scaled_spindle[sp+1]) {
segments(axe_incr[sp+1],midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint-scaled_spindle[sp+1]/2,lwd=spindle_lwdth,col=spindle_linecol)
segments(axe_incr[sp+1],midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp+1]/2,lwd=spindle_lwdth,col=spindle_linecol)
} else if (scaled_spindle[sp]<scaled_spindle[sp+1]) {
segments(axe_incr[sp+1],midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint-scaled_spindle[sp+1]/2,lwd=spindle_lwdth,col=spindle_linecol)
segments(axe_incr[sp+1],midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp+1]/2,lwd=spindle_lwdth,col=spindle_linecol)
}
}
}
}
}
single_spindle_set_increments <- function(axe=2,midpoint,axe_incr,spindle,max_width,spindle_color="white",spindle_lwdth=4/3,spindle_linecol="black") {
# Draw one spindle (mirrored histogram) using caller-supplied segment
# boundaries; the spindle is first normalized so its widest segment spans
# max_width.  Fill pass first, then stroke only the outer silhouette.
# axe: 1 for x-axis (going left-right), 2 for y-axis (going up-down)
# midpoint: midpoint on the other axis
# axe_incr: segment boundaries along the chosen axis
#	(length = length(spindle)+1)
# spindle: width of spindle at each point along axis (= height of
#	histogram for the same data)
# max_width: maximum width on the other axis
# spindle_color: fill color of spindle bars
# spindle_lwdth: width of outline lines
# spindle_linecol: color of outline lines
spindle <- spindle/max(spindle)
spinds <- length(spindle)
scaled_spindle <- max_width*spindle
# pass 1: fill occupied segments (border matches fill so internal segment
# boundaries stay invisible)
for (sp in 1:spinds) {
if (scaled_spindle[sp]>0 && (axe==2 || axe==4)) {
rect(midpoint-scaled_spindle[sp]/2,axe_incr[sp],midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],col=spindle_color,lwd=4/3,border=spindle_color)
} else if (scaled_spindle[sp]>0) {
rect(axe_incr[sp],midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp]/2,col=spindle_color,lwd=4/3,border=spindle_color)
}
}
# pass 2: stroke the silhouette, segment by segment
if (axe==2 || axe==4) {
for (sp in 1:spinds) {
if (scaled_spindle[sp]>0) {
# draw bottom edge only when there is no occupied segment beneath it
if (sp==1 || (sp>1 && scaled_spindle[sp-1]==0))
segments(midpoint-scaled_spindle[sp]/2,axe_incr[sp],midpoint+scaled_spindle[sp]/2,axe_incr[sp],lwd=spindle_lwdth,col=spindle_linecol)
# left and right sides
segments(midpoint-scaled_spindle[sp]/2,axe_incr[sp],midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],lwd=spindle_lwdth,col=spindle_linecol)
segments(midpoint+scaled_spindle[sp]/2,axe_incr[sp],midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],lwd=spindle_lwdth,col=spindle_linecol)
# top edge: full width at the spindle's end, otherwise only the steps
# where the next segment is narrower or wider
if (sp==spinds || (sp<spinds && scaled_spindle[sp+1]==0)) {
segments(midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],lwd=spindle_lwdth,col=spindle_linecol)
} else if (scaled_spindle[sp]>scaled_spindle[sp+1]) {
segments(midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint-scaled_spindle[sp+1]/2,axe_incr[sp+1],lwd=spindle_lwdth,col=spindle_linecol)
segments(midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp+1]/2,axe_incr[sp+1],lwd=spindle_lwdth,col=spindle_linecol)
} else if (scaled_spindle[sp]<scaled_spindle[sp+1]) {
segments(midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint-scaled_spindle[sp+1]/2,axe_incr[sp+1],lwd=spindle_lwdth,col=spindle_linecol)
segments(midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp+1]/2,axe_incr[sp+1],lwd=spindle_lwdth,col=spindle_linecol)
}
}
}
} else {
for (sp in 1:spinds) {
if (scaled_spindle[sp]>0) {
# draw left edge only when there is no occupied segment before it
if (sp==1 || (sp>1 && scaled_spindle[sp-1]==0))
segments(axe_incr[sp],midpoint-scaled_spindle[sp]/2,axe_incr[sp],midpoint+scaled_spindle[sp]/2,lwd=spindle_lwdth,col=spindle_linecol)
# top and bottom sides
segments(axe_incr[sp],midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint-scaled_spindle[sp]/2,lwd=spindle_lwdth,col=spindle_linecol)
# BUG FIX: this lower side previously had its x/y arguments swapped
# (copied from the vertical branch), drawing a stray misplaced line
segments(axe_incr[sp],midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp]/2,lwd=spindle_lwdth,col=spindle_linecol)
# right edge: full height at the spindle's end, otherwise only steps
if (sp==spinds || (sp<spinds && scaled_spindle[sp+1]==0)) {
segments(axe_incr[sp+1],midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp]/2,lwd=spindle_lwdth,col=spindle_linecol)
} else if (scaled_spindle[sp]>scaled_spindle[sp+1]) {
segments(axe_incr[sp+1],midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint-scaled_spindle[sp+1]/2,lwd=spindle_lwdth,col=spindle_linecol)
segments(axe_incr[sp+1],midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp+1]/2,lwd=spindle_lwdth,col=spindle_linecol)
} else if (scaled_spindle[sp]<scaled_spindle[sp+1]) {
segments(axe_incr[sp+1],midpoint-scaled_spindle[sp]/2,axe_incr[sp+1],midpoint-scaled_spindle[sp+1]/2,lwd=spindle_lwdth,col=spindle_linecol)
segments(axe_incr[sp+1],midpoint+scaled_spindle[sp]/2,axe_incr[sp+1],midpoint+scaled_spindle[sp+1]/2,lwd=spindle_lwdth,col=spindle_linecol)
}
}
}
}
}
#### Stylized Axes ####
log_axes <- function(axe,min_ax,max_ax,increment,numbers,linewd=4/3,orient) {
# Draw a natural-log-scaled axis: the axis line spans [min_ax, max_ax]
# (already log-transformed), labelled major ticks sit at log(numbers), and
# minor ticks at log(numbers + increment).
# axe: 1 for x-axis, 2 for y-axis
# min_ax, max_ax: axis limits, already log transformed
# increment: offset added to each labelled value for the minor ticks
# numbers: untransformed values to label
# linewd: line width; orient: label orientation (las)
axis(axe,at=seq(min_ax,max_ax,by=(max_ax-min_ax)),tcl=0,labels=NA,lwd.ticks=NA,lwd=linewd)
ticks <- length(numbers)
# BUG FIX: the tick-drawing calls below hard-coded axis(2, ...), which
# ignored the axe argument whenever an x-axis was requested; use axe.
for (i in 1:ticks) {
l <- numbers[i]
axis(axe,at=log(l),tcl=-.3,labels=numbers[i],lwd=0,lwd.ticks=linewd,las=orient)
}
# shorter, unlabelled minor ticks offset by `increment`
for (i in 1:(ticks-1)) {
l <- numbers[i]+increment
axis(axe,at=log(l),tcl=-.15,labels=NA,lwd=0,lwd.ticks=linewd)
}
}
#log10_axes(axe=2,min_ax=mny,max_ax=mxy,numbers=10^(mny:mxy),linewd=4/3,orient=2)
# produces y-axis with 10^-7 to 1.0 with increments from 2:9 & labels on 10^x for x=integer
log10_axes <- function(axe,min_ax,max_ax,numbers,linewd=4/3,font_size=1,orient=1) {
# Draw a log10-scaled axis: major ticks at whole powers of ten, labels at
# the values in `numbers`, and minor ticks at the 2x..9x multiples within
# each decade of the range.
# axe: 1 for x; 2 for y
# min_ax: minimum, already log10 transformed
# max_ax: maximum, already log10 transformed
# numbers: array of (untransformed) values to label
# linewd: line width
# font_size: cex.axis for the labels
# orient: orientation of text, with 2 making y-axis the way I like it
axis(axe,at=seq(min_ax,max_ax,by=(max_ax-min_ax)),tcl=0,labels=NA,lwd.ticks=NA, lwd=linewd, las=orient);
mnn <- ceiling(min_ax);
mxn <- floor(max_ax)
# unlabelled major ticks at each whole power of ten inside the range
for (i in mnn:mxn) {
axis(axe,at=i,tcl=-.3,labels=FALSE,lwd=0,lwd.ticks=linewd)
}
# labels (without tick marks of their own) at the requested values
for (i in 1:length(numbers)) {
l <- numbers[i]
axis(axe,at=log10(l),tcl=-.3,labels=numbers[i],lwd=0,lwd.ticks=0,las=orient,cex.axis=font_size)
}
# minor ticks in the partial decade below the first whole power of ten
if (ceiling(min_ax)>min_ax) {
strt <- match(min_ax,ceiling(min_ax)-(1-log10(1:9)))
if (is.na(strt)) {
strt <- 1+sum((ceiling(min_ax)-(1-log10(1:9)))<min_ax)
}
ticks <- (strt:10)*10^floor(min_ax)
} else {
ticks <- c()
}
# minor ticks across the complete decades
if (ceiling(min_ax)<floor(max_ax)) {
for (i in ceiling(min_ax):(floor(max_ax)-1)) {
ticks <- c(ticks,(1:10)*10^i)
}
}
# minor ticks in the partial decade above the last whole power of ten
if (max_ax>floor(max_ax)) {
# BUG FIX: when no ticks had accumulated (integer min_ax inside a single
# decade), max(ticks) was -Inf and the while loop errored; seed with the
# decade base (it is filtered out as a major tick below anyway).
if (length(ticks)==0) ticks <- 10^floor(max_ax)
add <- max(ticks)
while (log10(max(ticks)+add)<=max_ax) ticks <- c(ticks,max(ticks)+add)
}
# drop whole powers of ten: those were already drawn as major ticks
ticks <- ticks[!ticks %in% 10^(mnn:mxn)]
axis(axe,at=log10(ticks),tcl=-.15,labels=FALSE,lwd=0,lwd.ticks=linewd)
}
log2_axes <- function(axe,min_ax,max_ax,numbers,linewd=4/3,orient) {
# Draw a log2-scaled axis with labelled ticks at log2 of the requested
# values.
# axe: 1 for x; 2 for y
# min_ax: minimum, already log2 transformed
# max_ax: maximum, already log2 transformed
# numbers: array of (untransformed) values to label
# linewd: line width
# orient: orientation of text, with 2 making y-axis the way I like it
# axis line only (no ticks), spanning the full range
axis(axe,at=seq(min_ax,max_ax,by=(max_ax-min_ax)),tcl=0,labels=NA,lwd.ticks=NA, lwd=linewd, las=orient)
for (tick_no in seq_along(numbers)) {
# when the sequence starts at 0 (which has no log2), each tick is placed
# at max(1, 2*value) instead, though it keeps the original label
if (numbers[1]==0) {
at_val <- max(1,2*numbers[tick_no])
} else {
at_val <- numbers[tick_no]
}
axis(axe,at=log2(at_val),tcl=-.3,labels=numbers[tick_no],lwd=0,lwd.ticks=linewd,las=orient)
}
}
set_axis_breaks <- function(max_no, min_no) {
  # Choose major / medium / minor tick spacings for a linear axis spanning
  # (min_no, max_no).  Returns a one-row data.frame with numeric columns
  # maj_break (labelled ticks), med_break and min_break.
  span <- max_no - min_no
  breaks <- if (span <= 10) {
    c(1, 0.5, 0.5)
  } else if (span <= 20) {
    c(2, 1, 0.5)
  } else if (span <= 50) {
    c(5, 1, 1)
  } else if (span <= 100) {
    c(10, 5, 1)
  } else if (span <= 200) {
    c(20, 10, 5)
  } else if (span <= 500) {
    c(50, 10, 5)
  } else if (span <= 1000) {
    c(100, 50, 10)
  } else if (span <= 2000) {
    c(200, 100, 50)
  } else if (span <= 5000) {
    c(500, 100, 50)
  } else {
    c(1000, 500, 100)
  }
  tick_tock <- data.frame(maj_break = breaks[1],
                          med_break = breaks[2],
                          min_break = breaks[3],
                          stringsAsFactors = FALSE)
  return(tick_tock)
}
set_axis_breaks_new <- function(max_no, min_no = 0) {
  # Choose tick spacings scaled by the order of magnitude of max_no.
  # max_no is rescaled into [1, 10) by dividing by 10^floor(log10(max_no));
  # the selected breaks are then multiplied back up by that factor.
  # Returns a one-row data.frame with columns maj_break, med_break, min_break.
  # NOTE(review): min_no is NOT rescaled, so a non-zero min_no mixes scales
  # with the rescaled max_no — confirm intent with callers.
  fact10 <- floor(log10(max_no))
  max_no <- max_no / (10^fact10)
  if ((max_no - min_no) <= 1) {
    maj_break <- 0.10
    med_break <- 0.05
    min_break <- 0.01
  } else if ((max_no - min_no) <= 2) {
    maj_break <- 0.20
    med_break <- 0.10
    min_break <- 0.05
  } else if ((max_no - min_no) < 5) {
    maj_break <- 0.50
    med_break <- 0.10
    min_break <- 0.05
  } else if ((max_no - min_no) <= 10) {
    maj_break <- 1.0
    med_break <- 0.5
    min_break <- 0.1
  } else {
    # BUGFIX: formerly there was no fallback, so spans > 10 (e.g. a strongly
    # negative min_no) left maj_break undefined and the function errored.
    # Fall back to the coarsest spacing, as in the <=10 branch.
    maj_break <- 1.0
    med_break <- 0.5
    min_break <- 0.1
  }
  tick_tock <- (10^fact10) * data.frame(maj_break = as.numeric(maj_break),
                                        med_break = as.numeric(med_break),
                                        min_break = as.numeric(min_break),
                                        stringsAsFactors = FALSE)
  return(tick_tock)
}
wagner_set_axes <- function (ax_min,ax_max,y_add=0) {
# Build tick positions, labels and tick strengths for an axis spanning
# (ax_min, ax_max).  Returns a list with elements Ticks, Labels and
# Tick_Strength; Ticks/Labels come back as matrices with one row per tick
# tier, zero- / ""-padded when tiers differ in length.  y_add is unused.
# NOTE(review): the <=10 branch never sets tick_str and the final else
# branch is empty, so ranges <=10 or >200 error at the output step
# ("object 'tick_str' not found") — confirm callers only use (10,200].
if ((ax_max-ax_min)<=10) {
min_ax <- floor(ax_min)
max_ax <- ceiling(ax_max)
lbl_prn <- tcs <- c()
# NOTE(review): c(1,0.5,0.1) looks like tick strengths assigned to tcs; it
# becomes row 1 of the tick matrix and is recycled by rbind — verify intent.
tcs <- c(1,0.5,0.1)
tcs <- rbind(tcs,seq(min_ax,max_ax,by=1))
lbl_prn <- rbind(lbl_prn,-1*(min_ax:max_ax))
} else if ((ax_max-ax_min)<=25) {
min_ax <- floor(ax_min)
max_ax <- ceiling(ax_max)
# major ticks on even values; minor ticks fill the remaining integers
tcs <- seq(2*ceiling(min_ax/2),2*floor(max_ax/2),by=2)
lbl_prn <- -1*tcs
# add: how many padding zeros are needed to make the tiers equal length
add <- length(tcs)-length((min_ax:max_ax)[!(min_ax:max_ax) %in% tcs])
added <- c((min_ax:max_ax)[!(min_ax:max_ax) %in% tcs])
if (add==0) {
tcs <- rbind(tcs,added)
lbl_prn <- rbind(lbl_prn,rep("",length(added)))
} else if (add>0) {
added <- c(added,rep(0,add))
tcs <- rbind(tcs,added)
lbl_prn <- rbind(lbl_prn,rep("",length(added)))
} else {
tcs <- rbind(c(tcs,rep(0,abs(add))),added)
# NOTE(review): single-argument rbind here (paren placement differs from the
# <=50 branch below) — verify the intended two-row shape.
lbl_prn <- rbind(c(lbl_prn,rep("",abs(add)),rep("",length(added))))
}
tick_str <- c(-0.30,-0.15)
} else if ((ax_max-ax_min)<=50) {
min_ax <- floor(ax_min);
max_ax <- ceiling(ax_max);
# major ticks on multiples of 5; minor ticks fill the remaining integers
tcs <- seq(5*ceiling(min_ax/5),5*floor(max_ax/5),by=5)
lbl_prn <- -1*tcs
add <- length(tcs)-length((min_ax:max_ax)[!(min_ax:max_ax) %in% tcs])
added <- c((min_ax:max_ax)[!(min_ax:max_ax) %in% tcs])
if (add==0) {
tcs <- rbind(tcs,added)
lbl_prn <- rbind(lbl_prn,rep("",length(added)))
} else if (add>0) {
added <- c((min_ax:max_ax)[!(min_ax:max_ax) %in% tcs],rep(0,abs(add)))
tcs <- rbind(tcs,added);
lbl_prn <- rbind(c(lbl_prn,rep("",add)),rep("",length(added)))
} else {
tcs <- rbind(c(tcs,rep(0,abs(add))),added)
lbl_prn <- rbind(c(lbl_prn,rep("",abs(add))),rep("",length(added)))
}
tick_str <- c(-0.30,-0.15)
# tcs <- rbind(tcs,(min_ax:max_ax)[!(min_ax:max_ax) %in% tcs])
# lbl_prn <- c(TRUE,FALSE)
} else if ((ax_max-ax_min)<=200) {
min_ax <- floor(ax_min);
max_ax <- ceiling(ax_max);
# three tiers: multiples of 10 (labelled), of 5, then remaining integers
tcs <- seq(10*ceiling(min_ax/10),10*floor(max_ax/10),by=10);
tcs <- rbind(tcs,seq(5*ceiling(min_ax/5),5*floor(max_ax/5),by=5)[!seq(5*ceiling(min_ax/5),5*floor(max_ax/5),by=5) %in% seq(10*ceiling(min_ax/10),10*floor(max_ax/10),by=10)])
tcs <- rbind(tcs,(min_ax:max_ax)[!(min_ax:max_ax) %in% seq(5*ceiling(min_ax/5),5*floor(max_ax/5),by=5)])
tick_str <- c(-0.30,-0.20,-0.10)
lbl_prn <- c(TRUE,FALSE,FALSE)
} else {
}
output <- list(tcs,lbl_prn,tick_str)
names(output) <- c("Ticks","Labels","Tick_Strength")
return(output)
}
wagner_set_axes_old <- function (ax_min,ax_max,y_add=0) {
# Legacy version of wagner_set_axes(): builds tick positions, labels and
# tick strengths for an axis spanning (ax_min, ax_max).  Returns a list with
# elements Ticks, Labels and Tick_Strength.  y_add is unused.
# NOTE(review): as in wagner_set_axes(), the <=10 branch never sets tick_str
# and the final else branch is empty, so ranges <=10 or >200 error at the
# output step — confirm callers only use (10,200].
if ((ax_max-ax_min)<=10) {
min_ax <- floor(ax_min)
max_ax <- ceiling(ax_max)
lbl_prn <- tcs <- c()
tcs <- rbind(tcs,seq(min_ax,max_ax,by=1))
lbl_prn <- rbind(lbl_prn,-1*(min_ax:max_ax))
} else if ((ax_max-ax_min)<=25) {
min_ax <- floor(ax_min)
max_ax <- ceiling(ax_max)
# major ticks on even values; minor ticks fill the remaining integers
tcs <- seq(2*ceiling(min_ax/2),2*floor(max_ax/2),by=2)
lbl_prn <- -1*tcs
add <- length(tcs)-length((min_ax:max_ax)[!(min_ax:max_ax) %in% tcs])
added <- c((min_ax:max_ax)[!(min_ax:max_ax) %in% tcs])
if (add==0) {
tcs <- rbind(tcs,added)
# BUGFIX: was rbin(), an undefined function
lbl_prn <- rbind(lbl_prn,rep("",length(added)))
} else if (add>0) {
added <- c(added,rep(0,add))
tcs <- rbind(tcs,added)
lbl_prn <- rbind(lbl_prn,rep("",length(added)))
} else {
tcs <- rbind(c(tcs,rep(0,abs(add))),added)
# BUGFIX: the closing paren was misplaced, making this a one-argument rbind;
# now matches the two-row pattern used in the <=50 branch below
lbl_prn <- rbind(c(lbl_prn,rep("",abs(add))),rep("",length(added)))
}
tick_str <- c(-0.30,-0.15)
} else if ((ax_max-ax_min)<=50) {
min_ax <- floor(ax_min)
# BUGFIX: was ceiling(max(divergences)) — an undefined global; the newer
# wagner_set_axes() uses ceiling(ax_max) here
max_ax <- ceiling(ax_max)
tcs <- seq(5*ceiling(min_ax/5),5*floor(max_ax/5),by=5)
lbl_prn <- -1*tcs
add <- length(tcs)-length((min_ax:max_ax)[!(min_ax:max_ax) %in% tcs])
added <- c((min_ax:max_ax)[!(min_ax:max_ax) %in% tcs])
if (add==0) {
tcs <- rbind(tcs,added)
# BUGFIX: was rbin(), an undefined function
lbl_prn <- rbind(lbl_prn,rep("",length(added)))
} else if (add>0) {
added <- c((min_ax:max_ax)[!(min_ax:max_ax) %in% tcs],rep(0,abs(add)))
tcs <- rbind(tcs,added)
lbl_prn <- rbind(c(lbl_prn,rep("",add)),rep("",length(added)))
} else {
tcs <- rbind(c(tcs,rep(0,abs(add))),added)
lbl_prn <- rbind(c(lbl_prn,rep("",abs(add))),rep("",length(added)))
}
tick_str <- c(-0.30,-0.15)
# tcs <- rbind(tcs,(min_ax:max_ax)[!(min_ax:max_ax) %in% tcs])
# lbl_prn <- c(TRUE,FALSE)
} else if ((ax_max-ax_min)<=200) {
min_ax <- floor(ax_min)
# BUGFIX: was ceiling(max(divergences)) — an undefined global (see above)
max_ax <- ceiling(ax_max)
# three tiers: multiples of 10 (labelled), of 5, then remaining integers
tcs <- seq(10*ceiling(min_ax/10),10*floor(max_ax/10),by=10);
tcs <- rbind(tcs,seq(5*ceiling(min_ax/5),5*floor(max_ax/5),by=5)[!seq(5*ceiling(min_ax/5),5*floor(max_ax/5),by=5) %in% seq(10*ceiling(min_ax/10),10*floor(max_ax/10),by=10)])
tcs <- rbind(tcs,(min_ax:max_ax)[!(min_ax:max_ax) %in% seq(5*ceiling(min_ax/5),5*floor(max_ax/5),by=5)])
tick_str <- c(-0.30,-0.20,-0.10)
lbl_prn <- c(TRUE,FALSE,FALSE)
} else {
}
output <- list(tcs,lbl_prn,tick_str)
names(output) <- c("Ticks","Labels","Tick_Strength")
return(output)
}
fitted_linear_axis <- function(axe,max_val,min_val,linewd=4/3,orient=1,decimals=TRUE) {
# Draw a linear axis whose tick spacings are chosen from max_val's size:
# a bare axis line plus labelled major ticks, then one or two tiers of
# shorter unlabelled ticks, with already-drawn positions filtered out.
# axe: 1 for x; 2 for y
# max_val/min_val: axis range (branch choice uses max_val only)
# linewd: line width; orient: las for labels
# decimals: in the <=10 branch, TRUE adds 0.5 and 0.1 sub-ticks with labels
#   every 0.5; FALSE just labels the integers
if (max_val<=10) {
axis(axe,at=seq(min_val,max_val,by=max_val-min_val),tcl=-0.0,labels = FALSE,lwd=linewd,las=orient)
if (decimals==TRUE) {
# integer major ticks
ticks <- old_ticks <- seq(min_val,max_val,by=1)
axis(axe,at=ticks,tcl=-0.30,labels=FALSE,lwd=0.0,lwd.ticks=linewd,las=orient)
bs <- 0.5
ticks <- seq(min_val,max_val,by=bs)[!seq(min_val,max_val,by=bs) %in% old_ticks]
axis(axe,at=ticks,tcl=-0.20,labels=FALSE,lwd=0.0,lwd.ticks=linewd,las=orient)
# labels every 0.5, drawn without tick marks
axis(axe,at=seq(min_val,max_val,by=bs),tcl=-0.0,labels=TRUE,lwd=0.0,lwd.ticks=0.0,las=orient)
bs <- 0.1
ticks <- seq(min_val,max_val,by=bs)[!seq(min_val,max_val,by=bs) %in% old_ticks]
axis(axe,at=ticks,tcl=-0.10,labels=FALSE,lwd=0.0,lwd.ticks=linewd,las=orient)
} else {
axis(axe,at=seq(min_val,max_val,by=1),tcl=-0.3,labels=TRUE,lwd=0.0,lwd.ticks=linewd,las=orient)
}
} else if (max_val <=20) {
# labelled ticks every 2, minor ticks on remaining integers
axis(axe,at=seq(min_val,max_val,by=(max_val-min_val)),tcl=0.0,labels=FALSE,lwd=linewd,las=orient)
ticks <- seq(min_val,max_val,by=2)
axis(axe,at=ticks,tcl=-0.30,labels=TRUE,lwd=0.0,lwd.ticks=linewd,las=orient)
old_ticks <- ticks
bs <- 1
ticks <- seq(min_val,max_val,by=bs)[!seq(min_val,max_val,by=bs) %in% old_ticks]
axis(axe,at=ticks,tcl=-0.15,labels=FALSE,lwd=0.0,lwd.ticks=linewd)
} else if (max_val <=50) {
# labelled ticks every 5, minor ticks on remaining integers
axis(axe,at=seq(min_val,max_val,by=(max_val-min_val)),tcl=0.0,labels=FALSE,lwd=linewd,las=orient)
ticks <- seq(min_val,max_val,by=5)
axis(axe,at=ticks,tcl=-0.30,labels=TRUE,lwd=0.0,lwd.ticks=linewd,las=orient)
old_ticks <- ticks
bs <- 1
ticks <- seq(min_val,max_val,by=bs)[!seq(min_val,max_val,by=bs) %in% old_ticks]
axis(axe,at=ticks,tcl=-0.15,labels=FALSE,lwd=0.0,lwd.ticks=linewd)
} else if (max_val <=100) {
# labelled every 10, medium every 5, minor on remaining integers
axis(axe,at=seq(min_val,max_val,by=(max_val-min_val)),tcl=0.0,labels=FALSE,lwd=linewd,las=orient)
ticks <- seq(min_val,max_val,by=10)
axis(axe,at=ticks,tcl=-0.30,labels=TRUE,lwd=0.0,lwd.ticks=linewd,las=orient)
old_ticks <- ticks
bs <- 5
ticks <- seq(min_val,max_val,by=bs)[!seq(min_val,max_val,by=bs) %in% old_ticks]
axis(axe,at=ticks,tcl=-0.20,labels=FALSE,lwd=0.0,lwd.ticks=linewd)
old_ticks <- ticks
bs <- 1
ticks <- seq(min_val,max_val,by=bs)[!seq(min_val,max_val,by=bs) %in% old_ticks]
axis(axe,at=ticks,tcl=-0.10,labels=FALSE,lwd=0.0,lwd.ticks=linewd)
} else if (max_val <=250) {
# labelled every 25, minor every 5
axis(axe,at=seq(min_val,max_val,by=(max_val-min_val)),tcl=0.0,labels=FALSE,lwd=linewd,las=orient)
ticks <- seq(min_val,max_val,by=25)
axis(axe,at=ticks,tcl=-0.30,labels=TRUE,lwd=0.0,lwd.ticks=linewd,las=orient)
old_ticks <- ticks
bs <- 5
ticks <- seq(min_val,max_val,by=bs)[!seq(min_val,max_val,by=bs) %in% old_ticks]
axis(axe,at=ticks,tcl=-0.15,labels=FALSE,lwd=0.0,lwd.ticks=linewd)
} else if (max_val <=500) {
# labelled every 50, medium every 25, minor every 5
axis(axe,at=seq(min_val,max_val,by=(max_val-min_val)),tcl=0.0,labels=FALSE,lwd=linewd,las=orient)
ticks <- seq(min_val,max_val,by=50)
axis(axe,at=ticks,tcl=-0.30,labels=TRUE,lwd=0.0,lwd.ticks=linewd,las=orient)
old_ticks <- ticks
bs <- 25
ticks <- seq(min_val,max_val,by=bs)[!seq(min_val,max_val,by=bs) %in% old_ticks]
axis(axe,at=ticks,tcl=-0.20,labels=FALSE,lwd=0.0,lwd.ticks=linewd)
# NOTE(review): unlike the <=100 branch, old_ticks is not refreshed before
# the next pass, so the bs=5 tier re-draws the bs=25 positions (short tick
# over long tick) — confirm this overdraw is intended
bs <- 5
ticks <- seq(min_val,max_val,by=bs)[!seq(min_val,max_val,by=bs) %in% old_ticks]
axis(axe,at=ticks,tcl=-0.10,labels=FALSE,lwd=0.0,lwd.ticks=linewd)
} else if (max_val <=1000) {
# labelled every 100, medium every 50, minor every 10
axis(axe,at=seq(min_val,max_val,by=(max_val-min_val)),tcl=0.0,labels=FALSE,lwd=linewd,las=orient)
ticks <- seq(min_val,max_val,by=100)
axis(axe,at=ticks,tcl=-0.30,labels=TRUE,lwd=0.0,lwd.ticks=linewd,las=orient)
old_ticks <- ticks
bs <- 50
ticks <- seq(min_val,max_val,by=bs)[!seq(min_val,max_val,by=bs) %in% old_ticks]
axis(axe,at=ticks,tcl=-0.20,labels=FALSE,lwd=0.0,lwd.ticks=linewd)
# NOTE(review): same un-refreshed old_ticks overdraw as the <=500 branch
bs <- 10
ticks <- seq(min_val,max_val,by=bs)[!seq(min_val,max_val,by=bs) %in% old_ticks]
axis(axe,at=ticks,tcl=-0.10,labels=FALSE,lwd=0.0,lwd.ticks=linewd)
}
}
specify_basic_plot <- function(mxx, mnx, mxy, mny, main = "", subtitle = "",
                               abcissa = "", ordinate = "", xsize = 3, ysize = 3,
                               cexaxis = 1, cexlab = 1, cexmain = 1, cexsub = 1) {
  # Set the plot-region size (inches) and open an empty, axis-free plot with
  # the requested limits, titles and text sizes; axes are added afterwards by
  # the axis-drawing helpers in this file.
  par(pin = c(xsize, ysize))
  plot(NA, type = "n", axes = FALSE,
       main = main, sub = subtitle, xlab = abcissa, ylab = ordinate,
       xlim = c(mnx, mxx), ylim = c(mny, mxy),
       cex.axis = cexaxis, cex.lab = cexlab,
       cex.main = cexmain, cex.sub = cexsub)
}
# routine to make axes as you want them.
# axe: axis # (1 = x; 2= y)
# max_val: maximum value
# min_val: minimum value
# maj_break: major (labelled) breaks
# med_break: intermediate breaks
# min_break: minor breaks. NOTE: if med_break or min_break=0, then just two breaks
# specified_axis(axe=2,max_val=mxy,min_val=mny,maj_break=100,med_break=50,min_break=10,linewd=4/3,orient=2) gives y-axis with 0:700 labeled,
# 2ndary ticks on 50 & tertiary ticks on 10
specified_axis <- function(axe,max_val,min_val,maj_break,med_break,min_break,linewd=4/3,font_size=1,orient=1,print_label=TRUE) {
  # Draw a custom axis with up to three tiers of tick marks.
  # axe: side (1 = x, 2 = y); max_val/min_val: axis range.
  # maj_break: spacing of the labelled major ticks.
  # med_break: intermediate tick spacing (0 = none).
  # min_break: minor tick spacing (0 = none).
  # linewd: line width; font_size: cex.axis for labels; orient: las.
  # print_label=FALSE suppresses the major-tick labels.
  #axis(axe,at=seq(min_val,max_val,by=max_val-min_val),tcl=-0.0,labels = FALSE,lwd=linewd,las=orient)
  if ((min_val/maj_break)-floor(min_val/maj_break)<(10^-10)) {
    # this is a kluge necessitated by tiny rounding errors: treat min_val as
    # an exact multiple of maj_break
    mnv1 <- min_val
    mxv1 <- max_val
  } else {
    mnv1 <- (maj_break*ceiling(min_val/maj_break))
    mxv1 <- med_break*ceiling(max_val/med_break)
  }
  strt <- min(min_val,mnv1)
  endy <- max(max_val,mxv1)
  # axis line only: two endpoints, no ticks
  axis(axe,at=seq(strt,endy,by=endy-strt),tcl=-0.0,labels = FALSE,lwd=linewd,las=orient)
  # major (labelled) ticks
  ticks <- old_ticks <- seq(mnv1,mxv1,by=maj_break)
  axis(axe,at=ticks,tcl=-0.30,labels=print_label,lwd=0.0,lwd.ticks=linewd,las=orient,cex.axis=font_size)
  if (med_break!=0) {
    mnv2 <- (med_break*ceiling(min_val/med_break))
    # BUGFIX: the subscript mask was formerly built from seq(mnv2,max_val,...)
    # while the subscripted values came from seq(mnv2,mxv1,...); whenever
    # mxv1 != max_val the two sequences differ in length and the mask was
    # silently recycled.  Build both from the same sequence.
    med_seq <- seq(mnv2,mxv1,by=med_break)
    ticks <- med_seq[!med_seq %in% old_ticks]
    if (min_break!=0) {
      tck_sz <- -0.20
    } else tck_sz <- -0.15
    axis(axe,at=ticks,tcl=tck_sz,labels=FALSE,lwd=0.0,lwd.ticks=linewd,las=orient)
    old_ticks <- sort(c(ticks,old_ticks))
  }
  if (min_break!=0) {
    ticks <- seq(min(min_val,mnv1),max(mxv1,max_val),by=min_break)
    ticks <- ticks[!ticks %in% old_ticks]
    if (med_break!=0) {
      tck_sz <- -0.10
    } else tck_sz <- -0.15
    axis(axe,at=ticks,tcl=tck_sz,labels=FALSE,lwd=0.0,lwd.ticks=linewd,las=orient)
  }
}
specified_axis_w_labels <- function(axe,max_val,min_val,maj_break,med_break,min_break,axis_labels,linewd=4/3,label_pos="tick",font_size=1,orient=1,print_label=T) {
  # Draw a custom axis whose printed labels come from `axis_labels` rather
  # than from the tick values.
  # axe: side (1 = x, 2 = y); max_val/min_val: axis range.
  # maj_break/med_break/min_break: labelled / intermediate / minor spacings
  #   (0, or duplicating another tier's spacing, suppresses that tier).
  # axis_labels: labels to print.  label_pos="mid" with numeric labels puts
  #   each label at (value - 0.5), e.g. to centre bin labels.
  # linewd: line width; font_size: cex.axis; orient: las.
  # print_label is accepted for signature compatibility but unused below.
  #axis(axe,at=seq(min_val,max_val,by=max_val-min_val),tcl=-0.0,labels = FALSE,lwd=linewd,las=orient)
  if ((min_val/maj_break)-floor(min_val/maj_break)<(10^-10)) {
    # this is a kluge necessitated by tiny rounding errors: treat min_val as
    # an exact multiple of maj_break
    mnv1 <- min_val
    mxv1 <- max_val
  } else {
    mnv1 <- (maj_break*ceiling(min_val/maj_break))
    mxv1 <- med_break*ceiling(max_val/med_break)
  }
  strt <- min(min_val,mnv1)
  endy <- max(max_val,mxv1)
  if (label_pos=="mid" && is.numeric(axis_labels[1])) {
    # mid-positioned labels: cap the axis end just past the last label
    label_span <- abs(axis_labels[2]-axis_labels[1])
    mxv1 <- endy <- min(endy,max(axis_labels)+(label_span-1))
  }
  # axis line only: two endpoints, no ticks
  axis(axe,at=seq(strt,endy,by=endy-strt),tcl=-0.0,labels = FALSE,lwd=linewd,las=orient)
  # unlabelled major ticks; the labels are drawn separately below
  ticks <- old_ticks <- seq(mnv1,mxv1,by=maj_break)
  axis(axe,at=ticks,tcl=-0.30,labels=F,lwd=0.0,lwd.ticks=linewd,las=orient)
  axis_span <- max_val-min_val
  if (label_pos=="mid" && is.numeric(axis_labels[1])) {
    labels_ticks <- axis_labels-0.5
    # draw labels in two alternating passes so that adjacent labels are not
    # dropped by axis()'s overlap suppression
    pass_one <- (1:length(axis_labels))[(1:length(axis_labels)) %% 2==1]
    pass_two <- (1:length(axis_labels))[(1:length(axis_labels)) %% 2==0]
    # axis(axe,at=labels_ticks,tcl=-0.30,labels=axis_labels,lwd=0.0,lwd.ticks=0.0,las=orient,cex.axis=font_size);
    axis(axe,at=labels_ticks[pass_one],tcl=-0.30,labels=axis_labels[pass_one],lwd=0.0,lwd.ticks=0.0,las=orient,cex.axis=font_size)
    axis(axe,at=labels_ticks[pass_two],tcl=-0.30,labels=axis_labels[pass_two],lwd=0.0,lwd.ticks=0.0,las=orient,cex.axis=font_size)
  } else {
    # labels spread evenly over the full axis span
    labels_ticks <- seq(min_val,max_val,by=axis_span/(length(axis_labels)-1))
    pass_one <- (1:length(axis_labels))[(1:length(axis_labels)) %% 2==1]
    pass_two <- (1:length(axis_labels))[(1:length(axis_labels)) %% 2==0]
    axis(axe,at=labels_ticks[pass_one],tcl=-0.30,labels=axis_labels[pass_one],lwd=0.0,lwd.ticks=linewd,las=orient,cex.axis=font_size)
    axis(axe,at=labels_ticks[pass_two],tcl=-0.30,labels=axis_labels[pass_two],lwd=0.0,lwd.ticks=linewd,las=orient,cex.axis=font_size)
  }
  if (med_break!=0 && med_break!=maj_break) {
    mnv2 <- (med_break*ceiling(min_val/med_break))
    # BUGFIX: the subscript mask was formerly built from seq(mnv2,max_val,...)
    # while the subscripted values came from seq(mnv2,mxv1,...); whenever
    # mxv1 != max_val the mask was silently recycled against a
    # different-length vector.  Build both from the same sequence.
    med_seq <- seq(mnv2,mxv1,by=med_break)
    ticks <- med_seq[!med_seq %in% old_ticks]
    if (min_break!=0) {
      tck_sz <- -0.20
    } else tck_sz <- -0.15
    axis(axe,at=ticks,tcl=tck_sz,labels=FALSE,lwd=0.0,lwd.ticks=linewd,las=orient)
    old_ticks <- sort(c(ticks,old_ticks))
  }
  if (min_break!=0 && !min_break %in% c(maj_break,med_break)) {
    ticks <- seq(min(min_val,mnv1),max(mxv1,max_val),by=min_break)
    ticks <- ticks[!ticks %in% old_ticks]
    if (med_break!=0) {
      tck_sz <- -0.10
    } else tck_sz <- -0.15
    axis(axe,at=ticks,tcl=tck_sz,labels=FALSE,lwd=0.0,lwd.ticks=linewd,las=orient)
  }
}
specified_axis_w_labels_old <- function(axe,max_val,min_val,maj_break,med_break=0,min_break=0,axis_labels,axis_label_pts=NULL,axis_label_size=1,linewd=4/3,orient=1) {
  # Legacy version of specified_axis_w_labels(): draws the axis line and the
  # major/medium/minor ticks, then prints axis_labels without ticks — at the
  # numeric label values themselves, or else at the major-tick positions.
  # NOTE(review): a user-supplied axis_label_pts is used only when
  # axis_labels is numeric; otherwise it is overwritten — verify intent.
  axis(axe,at=seq(min_val,max_val,by=max_val-min_val),tcl=-0.0,labels = FALSE,lwd=linewd,las=orient)
  # major ticks (unlabelled; labels are drawn at the end)
  ticks <- old_ticks <- seq(min_val,max_val,by=maj_break)
  axis(axe,at=ticks,tcl=-0.30,labels=FALSE,lwd=0.0,lwd.ticks=linewd,las=orient)
  if (med_break!=0) {
    mnv2 <- (med_break*ceiling(min_val/med_break))
    ticks <- seq(mnv2,max_val,by=med_break)[!seq(mnv2,max_val,by=med_break) %in% old_ticks]
    if (min_break!=0) {
      tck_sz <- -0.20
    } else tck_sz <- -0.15
    axis(axe,at=ticks,tcl=tck_sz,labels=FALSE,lwd=0.0,lwd.ticks=linewd,las=orient)
    old_ticks <- sort(c(ticks,old_ticks))
  }
  if (min_break!=0) {
    # BUGFIX: this formerly started at min(min_val,mnv1), but mnv1 is never
    # defined in this function (its computation is commented out upstream),
    # so any call with min_break != 0 errored.  In the sibling functions
    # mnv1 >= min_val by construction, so min_val is the equivalent start.
    ticks <- seq(min_val,max_val,by=min_break)
    ticks <- ticks[!ticks %in% old_ticks]
    if (med_break!=0) {
      tck_sz <- -0.10
    } else tck_sz <- -0.15
    axis(axe,at=ticks,tcl=tck_sz,labels=FALSE,lwd=0.0,lwd.ticks=linewd,las=orient)
  }
  # print the labels, without tick marks, at the chosen positions
  if(is.numeric(axis_labels[1]) && is.null(axis_label_pts)) {
    axis_label_pts <- axis_labels
  } else {
    axis_label_pts <- seq(min_val,max_val,by=maj_break)
  }
  axis(axe,at=axis_label_pts,tcl=-0.30,labels=axis_labels,lwd=0.0,lwd.ticks=0.0,las=orient,cex.axis=axis_label_size)
}
specified_right_y_axis <- function(mxy1,mny1,max_val,min_val,maj_break,med_break,min_break,linewd=4/3,orient=2,print_label=TRUE) {
  # Draw a secondary y-axis on side 4 whose tick values (min_val..max_val)
  # are rescaled onto the main (left) y-axis coordinates.
  # mxy1: maximum value on main (left) y-axis
  # mny1: minimum value on main (left) y-axis; we rescale (max_val-min_val)/(mxy1-mny1)
  # maj_break: labelled tick spacing; med_break/min_break: finer tiers
  #   (0, or min_break==med_break, suppresses a tier).
  # print_label=FALSE suppresses the major-tick labels.
  # BUGFIX: the axis-line call formerly used free variables mxy/mny instead
  # of the mxy1/mny1 parameters, so it only worked when globals of those
  # names happened to exist.
  axis(side=4,at=seq(mny1,mxy1,by=(mxy1-mny1)),tcl=-0.0,labels = FALSE,lwd=linewd,las=orient)
  if ((min_val/maj_break)-floor(min_val/maj_break)<(10^-10)) {
    # this is a kluge necessitated by tiny rounding errors: treat min_val as
    # an exact multiple of maj_break
    mnv1 <- min_val
  } else {
    mnv1 <- (maj_break*ceiling(min_val/maj_break))
  }
  labels <- seq(mnv1,max_val,by=maj_break)
  # map the right-axis values onto left-axis plotting coordinates
  ticks <- old_ticks <- mny1 + (mxy1-mny1)*labels/(max(labels)-min(labels))
  #ticks <- old_ticks <- seq(mnv1,mxy1,by=(mxy1-mny1)/(max_val/maj_break))
  if (print_label) {
    axis(side=4,at=ticks,tcl=-0.30,labels=seq(mnv1,max_val,by=maj_break),lwd=0.0,lwd.ticks=linewd,las=orient)
  } else {
    axis(side=4,at=ticks,tcl=-0.30,labels=print_label,lwd=0.0,lwd.ticks=linewd,las=orient)
  }
  if (med_break!=0) {
    # (an unused mnv2 computation was removed here)
    ticks <- mny1 + (mxy1-mny1)*seq(min_val,max_val,by=med_break)/(max(labels)-min(labels))
    ticks <- ticks[!ticks %in% old_ticks]
    if (min_break!=0 && min_break!=med_break) {
      tck_sz <- -0.20
    } else tck_sz <- -0.15
    axis(side=4,at=ticks,tcl=tck_sz,labels=FALSE,lwd=0.0,lwd.ticks=linewd,las=orient)
    old_ticks <- sort(c(ticks,old_ticks))
  }
  if (min_break!=0 && min_break!=med_break) {
    ticks <- mny1 + (mxy1-mny1)*seq(min_val,max_val,by=min_break)/(max(labels)-min(labels))
    # ticks <- seq(mny1,mxy1,by=(mxy1-mny1)/(max_val/min_break))[!seq(mny1,mxy1,by=(mxy1-mny1)/(max_val/min_break)) %in% old_ticks]
    ticks <- ticks[!ticks %in% old_ticks]
    if (med_break!=0) {
      tck_sz <- -0.10
    } else tck_sz <- -0.15
    axis(side=4,at=ticks,tcl=tck_sz,labels=FALSE,lwd=0.0,lwd.ticks=linewd,las=orient)
  }
}
#slice_coll
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
#### Phylogeny drawing routines ####
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
# updated 2020-05-11
center_budding_phylogeny <- function(vector_tree,durations,sampled_ancestors) {
# function to get relative positions of lineages on a phylogeny;
# Returns final_pos: one integer plotting rank per otu and per node, with
# richer clades balanced to either side of each node and sampled ancestors
# placed on their own node's position.  Relies on project helpers
# (transform_vector_tree_to_*, tally_node_richness_from_vector_tree)
# defined elsewhere in this file/repository.
# vector_tree: tree encoded as a vector (ancestor per otu/node).
# durations: per-taxon ranges; column 1 is the age used to order branches.
#   If it has notu+nNode rows it already includes node ages.
# sampled_ancestors: 0, or the otu that is the sampled ancestor of a node;
#   padded to length notu+nNode if only the node entries are supplied.
venn_tree <- transform_vector_tree_to_venn_tree(vector_tree);
mtree <- transform_vector_tree_to_matrix_tree(vector_tree);
node_richness <- tally_node_richness_from_vector_tree(vector_tree = vector_tree);
nNode <- nrow(mtree);
notu <- length(vector_tree)-nNode;
node_ages <- c();
# branch ages: taken directly if durations covers nodes too; otherwise each
# node's age is the oldest first appearance among its members
if (nrow(durations)==(notu+nNode)) {
branch_ages <- durations[,1];
} else {
for (nd in 1:nNode) node_ages <- c(node_ages,min(durations[venn_tree[nd,venn_tree[nd,]>0],1]));
branch_ages <- c(durations[1:notu,1],node_ages);
}
if (length(sampled_ancestors) < (notu+nNode))
sampled_ancestors <- c(rep(0,notu),sampled_ancestors);
# richness of each unit: 1 per otu, clade size per node
ttl_richness <- c(rep(1,notu),node_richness);
##patristic_distances <- accersi_patristic_distance_from_base(atree=mtree);#max_nodes <- max(patristic_distances);
last_left <- "left"; # move up the axis
last_right <- "right"; # move down the axis
accounted <- c();
nd <- 0;
phy_pos <- rep(0,nNode+notu);
#for (nd in 1:nNode) {
# First pass: walk the nodes, assigning each daughter a provisional position
# relative to its ancestor and pushing the rest of the tree outward.
while (nd < nNode) {
nd <- nd+1;
htu <- nd+notu; # htu number of node;
tf1 <- sum(mtree[nd,]>0);
if (sampled_ancestors[htu]!=0) {
# a sampled ancestor shares its node's position
tf1 <- tf1-1;
phy_pos[sampled_ancestors[htu]] <- phy_pos[notu+nd];
}
# f1: this node's daughters (excluding the sampled ancestor), richest first
f1 <- mtree[nd,!mtree[nd,] %in% c(sampled_ancestors[htu],0)];
f1 <- f1[order(-ttl_richness[f1])];
if (length(f1)>2) {
# polytomy: split the daughters into a "left" and a "right" set with
# roughly balanced richness, then stack each set outward by age
right <- left <- 0;
prop_richness <- ttl_richness[f1]/sum(ttl_richness[f1]);
f1cc <- length(f1);
left <- f1[1];
sum_prop <- prop_richness[1];
while (sum_prop <= 0.45) {
sum_prop <- sum_prop+prop_richness[f1cc];
left <- c(left,f1[f1cc]);
f1cc <- f1cc-1;
}
right <- f1[!f1 %in% left];
right <- right[order(-abs(branch_ages[right]))];
left <- left[order(-abs(branch_ages[left]))];
# shift rest of the tree away from ancestral node
# phy_pos[phy_pos<phy_pos[htu]] <- phy_pos[phy_pos<phy_pos[htu]]-sum(ttl_richness[right]);
# phy_pos[phy_pos>phy_pos[htu]] <- phy_pos[phy_pos>phy_pos[htu]]+sum(ttl_richness[left]);
rr <- 1;
phy_pos[phy_pos<phy_pos[htu]] <- phy_pos[phy_pos<phy_pos[htu]]-ttl_richness[right[rr]]
phy_pos[right[rr]] <- phy_pos[htu]-ttl_richness[right[rr]];
while (rr < length(right)) {
rr <- rr+1;
if (sum(phy_pos<phy_pos[htu])>0)
phy_pos[phy_pos<phy_pos[htu]] <- phy_pos[phy_pos<phy_pos[htu]]-(2*ttl_richness[right[rr]]);
# phy_pos[right[rr]] <- (phy_pos[right[rr-1]]-ttl_richness[right[rr-1]])-ttl_richness[right[rr]];
phy_pos[right[rr]] <- phy_pos[htu]-ttl_richness[right[rr]];
# phy_pos[c(htu,right)];
}
ll <- 1;
phy_pos[phy_pos>phy_pos[htu]] <- phy_pos[phy_pos>phy_pos[htu]]+ttl_richness[left[ll]]
# phy_pos[phy_pos>phy_pos[htu]] <- phy_pos[phy_pos>phy_pos[htu]]+sum(ttl_richness[left[ll]]);
phy_pos[left[ll]] <- phy_pos[htu] + ttl_richness[left[ll]];
while (ll < length(left)) {
ll <- ll+1;
if (sum(phy_pos>phy_pos[htu])>0)
phy_pos[phy_pos>phy_pos[htu]] <- phy_pos[phy_pos>phy_pos[htu]]+(2*ttl_richness[left[ll]]);
# phy_pos[phy_pos>phy_pos[htu]] <- phy_pos[phy_pos>phy_pos[htu]]+2*ttl_richness[left[ll]]);
# phy_pos[left[ll]] <- (phy_pos[left[ll-1]]+ttl_richness[left[ll-1]])+ttl_richness[left[ll]];
phy_pos[left[ll]] <- phy_pos[htu]+ttl_richness[left[ll]];
}
} else if (length(f1)==2) {
# dichotomy: alternate which daughter goes left vs right so successive
# nodes zig-zag instead of always stacking the same way
f1 <- f1[order(-ttl_richness[f1])];
if (phy_pos[htu]<phy_pos[1+notu]) {
# going left is positive, so shift everything above this up by this amount
if (last_right=="right") {
right <- f1[2];
left <- f1[1];
last_right <- "left"
} else {
right <- f1[1];
left <- f1[2];
last_right <- "right"
}
} else {
# going right is negative, so shift everything below this down by this amount
if (last_left=="left") {
right <- f1[1];
left <- f1[2];
last_left <- "right";
} else {
right <- f1[2];
left <- f1[1];
last_left <- "left";
}
}
# shift rest of the tree away from ancestral node
phy_pos[phy_pos>phy_pos[htu]] <- phy_pos[phy_pos>phy_pos[htu]] + ttl_richness[left];
phy_pos[phy_pos<phy_pos[htu]] <- phy_pos[phy_pos<phy_pos[htu]] - ttl_richness[right];
phy_pos[left] <- phy_pos[htu] + ttl_richness[left];
phy_pos[right] <- phy_pos[htu] - ttl_richness[right];
} else if (length(f1)==1) {
# single daughter: place it on whichever side keeps the zig-zag going
if (phy_pos[htu]<phy_pos[1+notu]) {
if (last_right=="right") {
# going left is positive, so shift everything above this up by this amount
# phy_y[phy_y>(phy_y[htu]+ttl_richness[f1])] <- phy_y[phy_y>phy_y[htu]] + ttl_richness[f1];
# shift rest of the tree away from ancestral node
phy_pos[phy_pos>phy_pos[htu]] <- phy_pos[phy_pos>phy_pos[htu]] + ttl_richness[f1];
phy_pos[f1] <- phy_pos[htu] + ttl_richness[f1];
last_right <- "left";
} else {
# going right is negative, so shift everything below this down by this amount
# phy_y[phy_y<(phy_y[htu]-ttl_richness[f1])] <- phy_y[phy_y>ttl_richness[f1]] - ttl_richness[f1];
# shift rest of the tree away from ancestral node
phy_pos[phy_pos<phy_pos[htu]] <- phy_pos[phy_pos<phy_pos[htu]] - ttl_richness[f1];
phy_pos[f1] <- phy_pos[htu] - ttl_richness[f1];
last_right <- "right";
}
} else {
if (last_left=="right") {
# phy_y[phy_y>(phy_y[htu]+ttl_richness[f1])] <- phy_y[phy_y>phy_y[htu]] + ttl_richness[f1];
# shift rest of the tree away from ancestral node
phy_pos[phy_pos>phy_pos[htu]] <- phy_pos[phy_pos>phy_pos[htu]] + ttl_richness[f1];
phy_pos[f1] <- phy_pos[htu] + ttl_richness[f1];
last_left <- "left";
} else {
# going right is negative, so shift everything below this down by this amount
# phy_y[phy_y<(phy_y[htu]-ttl_richness[f1])] <- phy_y[phy_y>ttl_richness[f1]] - ttl_richness[f1];
# shift rest of the tree away from ancestral node
phy_pos[phy_pos<phy_pos[htu]] <- phy_pos[phy_pos<phy_pos[htu]] - ttl_richness[f1];
phy_pos[f1] <- phy_pos[htu] - ttl_richness[f1];
last_left <- "right";
}
}
}
phy_pos <- phy_pos-phy_pos[notu+1]; # recenter around the base of the tree
# print(c(nd,phy_pos));
# now do species
accounted <- c(accounted,mtree[nd,mtree[nd,]>0]);
}
# Second pass: convert positions to ranks, then resolve any rank shared by
# more than two units (only a node + its sampled ancestor may share one) by
# spreading the offenders apart and re-ranking until no collisions remain.
final_pos <- match(phy_pos,sort(unique(phy_pos)));
needed_edits <- hist(final_pos,breaks=0:max(final_pos),plot=F)$counts
too_many_here <- (1:length(needed_edits))[needed_edits>2];
while (length(too_many_here)>0) {
ttl_minions <- 1:(notu+nNode);
tmh <- 0;
while (tmh < length(too_many_here)) {
tmh <- tmh+1;
problems <- ttl_minions[final_pos==too_many_here[tmh]]; # taxa overlapping each other
these_nodes <- c();
for (pp in 1:length(problems))
these_nodes <- c(these_nodes,which(mtree==problems[pp],arr.ind = T)[1]); # get the nodes containing problem cases;
problem_ancestors <- problems[(notu+these_nodes) %in% problems]; # separate out sampled ancestors
problem_ancestors_htu <- notu+these_nodes[match(problem_ancestors,problems)]; # keep track of the htu to which they belong, however!
these_nodes <- these_nodes[!problems %in% problem_ancestors];
problems <- problems[!problems %in% problem_ancestors]; # remove sampled ancestors for now
starting_points <- final_pos[notu+these_nodes]; # positions of ancestral nodes/taxa
# adjust2: signed offsets pushing each problem taxon toward its own node
adjust2 <- adjust <- starting_points-too_many_here[tmh];
adjust2[adjust<0]<- -(length(adjust[adjust<0]):1);
adjust2[adjust>0]<- 1:length(adjust[adjust>0]);
final_pos[final_pos<too_many_here[tmh]] <- final_pos[final_pos<too_many_here[tmh]]+min(adjust2);
final_pos[final_pos>too_many_here[tmh]] <- final_pos[final_pos>too_many_here[tmh]]+max(adjust2);
phy_pos[phy_pos<phy_pos[problems[1]]] <- phy_pos[phy_pos<phy_pos[problems[1]]]+min(adjust2);
phy_pos[phy_pos>phy_pos[problems[1]]] <- phy_pos[phy_pos>phy_pos[problems[1]]]+max(adjust2);
final_pos[problems] <- final_pos[problems]+adjust;
phy_pos[problems] <- phy_pos[problems]+adjust2;
final_pos[problem_ancestors] <- final_pos[problem_ancestors_htu];
phy_pos[problem_ancestors] <- phy_pos[problem_ancestors_htu];
too_many_here <- too_many_here+max(adjust2);
}
final_pos <- match(phy_pos,sort(unique(phy_pos)));
needed_edits <- hist(final_pos,breaks=0:max(final_pos),plot=F)$counts
too_many_here <- (1:length(needed_edits))[needed_edits>2]
}
return(final_pos);
}
center_budding_phylogeny_effed <- function(vector_tree,durations) {
venn_tree <- transform_vector_tree_to_venn_tree(vector_tree);
mtree <- transform_vector_tree_to_matrix_tree(vector_tree);
node_richness <- tally_node_richness_from_vector_tree(vector_tree = vector_tree);
node_ages <- c();
nNodes <- nrow(venn_tree)
for (nd in 1:nNodes) node_ages <- c(node_ages,min(durations[venn_tree[nd,venn_tree[nd,]>0],1]));
branch_ages <- c(durations[,1],node_ages)
nNode <- nrow(mtree);
notu <- length(vector_tree)-nNode;
ttl_richness <- c(rep(1,notu),node_richness);
patristic_distances <- accersi_patristic_distance_from_base(atree=mtree);
max_nodes <- max(patristic_distances);
last_left <- "left"; # move up the axis
last_right <- "right"; # move down the axis
accounted <- c();
nd <- 0;
phy_pos <- rep(0,nNode+notu);
for (nd in 1:nNodes) {
# nd <- nd+1;
htu <- nd+notu; # htu number of node;
tf1 <- sum(mtree[nd,]>0);
if (sampled_ancestors[htu]!=0) {
tf1 <- tf1-1;
phy_pos[sampled_ancestors[htu]] <- phy_pos[notu+nd];
}
f1 <- mtree[nd,!mtree[nd,] %in% c(sampled_ancestors[htu],0)];
f1 <- f1[order(-ttl_richness[f1])];
if (length(f1)>2) {
right <- left <- 0;
prop_richness <- ttl_richness[f1]/sum(ttl_richness[f1]);
f1cc <- 1;
left <- f1[1];
sum_prop <- prop_richness[1];
while (sum_prop < 0.5) {
f1cc <- 1+f1cc;
sum_prop <- sum_prop+prop_richness[f1cc];
left <- c(left,f1);
}
right <- f1[!f1 %in% left];
right <- right[order(-branch_ages[right])];
left <- left[order(-branch_ages[left])];
# shift rest of the tree away from ancestral node
phy_pos[phy_pos<phy_pos[htu]] <- phy_pos[phy_pos<phy_pos[htu]]-sum(ttl_richness[right]);
phy_pos[phy_pos>phy_pos[htu]] <- phy_pos[phy_pos>phy_pos[htu]]+sum(ttl_richness[left]);
rr <- 1;
phy_pos[right[rr]] <- phy_pos[htu]-ttl_richness[right[rr]];
while (rr < length(right)) {
rr <- rr+1;
phy_pos[right[rr]] <- (phy_pos[right[rr-1]]-ttl_richness[right[rr-1]])-ttl_richness[right[rr]];
}
ll <- 1;
phy_pos[left[ll]] <- phy_pos[htu] + ttl_richness[left[ll]];
while (ll < length(left)) {
ll <- ll+1;
phy_pos[left[ll]] <- (phy_pos[left[ll-1]]+ttl_richness[left[ll-1]])+ttl_richness[left[ll]];
}
} else if (length(f1)==2) {
f1 <- f1[order(-ttl_richness[f1])];
if (phy_pos[htu]<phy_pos[1+notu]) {
# going left is positive, so shift everything above this up by this amount
if (last_right=="right") {
right <- f1[2];
left <- f1[1];
last_right <- "left"
} else {
right <- f1[1];
left <- f1[2];
last_right <- "right"
}
} else {
# going right is negative, so shift everything below this down by this amount
if (last_left=="left") {
right <- f1[1];
left <- f1[2];
last_left <- "right";
} else {
right <- f1[2];
left <- f1[1];
last_left <- "left";
}
}
# shift rest of the tree away from ancestral node
phy_pos[phy_pos>phy_pos[htu]] <- phy_pos[phy_pos>phy_pos[htu]] + ttl_richness[left];
phy_pos[phy_pos<phy_pos[htu]] <- phy_pos[phy_pos<phy_pos[htu]] - ttl_richness[right];
phy_pos[left] <- phy_pos[htu] + ttl_richness[left];
phy_pos[right] <- phy_pos[htu] - ttl_richness[right];
} else if (length(f1)==1) {
if (phy_pos[htu]<phy_pos[1+notu]) {
if (last_right=="right") {
# going left is positive, so shift everything above this up by this amount
# phy_y[phy_y>(phy_y[htu]+ttl_richness[f1])] <- phy_y[phy_y>phy_y[htu]] + ttl_richness[f1];
# shift rest of the tree away from ancestral node
phy_pos[phy_pos>phy_pos[htu]] <- phy_pos[phy_pos>phy_pos[htu]] + ttl_richness[f1];
phy_pos[f1] <- phy_pos[htu] + ttl_richness[f1];
last_right <- "left";
} else {
# going right is negative, so shift everything below this down by this amount
# phy_y[phy_y<(phy_y[htu]-ttl_richness[f1])] <- phy_y[phy_y>ttl_richness[f1]] - ttl_richness[f1];
# shift rest of the tree away from ancestral node
phy_pos[phy_pos<phy_pos[htu]] <- phy_pos[phy_pos<phy_pos[htu]] - ttl_richness[f1];
phy_pos[f1] <- phy_pos[htu] - ttl_richness[f1];
last_right <- "right";
}
} else {
if (last_left=="right") {
# phy_y[phy_y>(phy_y[htu]+ttl_richness[f1])] <- phy_y[phy_y>phy_y[htu]] + ttl_richness[f1];
# shift rest of the tree away from ancestral node
phy_pos[phy_pos>phy_pos[htu]] <- phy_pos[phy_pos>phy_pos[htu]] + ttl_richness[f1];
phy_pos[f1] <- phy_pos[htu] + ttl_richness[f1];
last_left <- "left";
} else {
# going right is negative, so shift everything below this down by this amount
# phy_y[phy_y<(phy_y[htu]-ttl_richness[f1])] <- phy_y[phy_y>ttl_richness[f1]] - ttl_richness[f1];
# shift rest of the tree away from ancestral node
phy_pos[phy_pos<phy_pos[htu]] <- phy_pos[phy_pos<phy_pos[htu]] - ttl_richness[f1];
phy_pos[f1] <- phy_pos[htu] - ttl_richness[f1];
last_left <- "right";
}
}
}
phy_pos <- phy_pos-phy_pos[notu+1]; # recenter around the base of the tree
# print(c(nd,phy_pos));
# now do species
accounted <- c(accounted,mtree[nd,mtree[nd,]>0]);
}
final_pos <- match(phy_pos,sort(unique(phy_pos)));
needed_edits <- hist(final_pos,breaks=0:max(final_pos),plot=F)$counts
too_many_here <- (1:length(needed_edits))[needed_edits>2];
while (length(too_many_here)>0) {
ttl_minions <- 1:(notu+nNode);
tmh <- 0;
while (tmh < length(too_many_here)) {
tmh <- tmh+1;
problems <- ttl_minions[final_pos==too_many_here[tmh]]; # taxa overlapping each other
these_nodes <- c();
for (pp in 1:length(problems))
these_nodes <- c(these_nodes,which(mtree==problems[pp],arr.ind = T)[1]); # get the nodes containing problem cases;
problem_ancestors <- problems[(notu+these_nodes) %in% problems]; # separate out sampled ancestors
problem_ancestors_htu <- notu+these_nodes[match(problem_ancestors,problems)]; # keep track of the htu to which they belong, however!
these_nodes <- these_nodes[!problems %in% problem_ancestors];
problems <- problems[!problems %in% problem_ancestors]; # remove sampled ancestors for now
starting_points <- final_pos[notu+these_nodes]; # positions of ancestral nodes/taxa
adjust2 <- adjust <- starting_points-too_many_here[tmh];
adjust2[adjust<0]<- -(length(adjust[adjust<0]):1);
adjust2[adjust>0]<- 1:length(adjust[adjust>0]);
final_pos[final_pos<too_many_here[tmh]] <- final_pos[final_pos<too_many_here[tmh]]+min(adjust2);
final_pos[final_pos>too_many_here[tmh]] <- final_pos[final_pos>too_many_here[tmh]]+max(adjust2);
phy_pos[phy_pos<phy_pos[problems[1]]] <- phy_pos[phy_pos<phy_pos[problems[1]]]+min(adjust2);
phy_pos[phy_pos>phy_pos[problems[1]]] <- phy_pos[phy_pos>phy_pos[problems[1]]]+max(adjust2);
final_pos[problems] <- final_pos[problems]+adjust;
phy_pos[problems] <- phy_pos[problems]+adjust2;
final_pos[problem_ancestors] <- final_pos[problem_ancestors_htu];
phy_pos[problem_ancestors] <- phy_pos[problem_ancestors_htu];
too_many_here <- too_many_here+max(adjust2);
}
final_pos <- match(phy_pos,sort(unique(phy_pos)));
needed_edits <- hist(final_pos,breaks=0:max(final_pos),plot=F)$counts
too_many_here <- (1:length(needed_edits))[needed_edits>2]
}
return(final_pos);
}
# otu_cols <- rep("blue",notu)
# vtree <- vector tree in which each number gives the node from which a species evolved
# strat_ranges <- first and last appearance times of taxa
# durations <- originations and extinctions of taxa
# apos <- rep(1,length(divergence_times_1))
draw_calibrated_phylogeny_vertical <- function(vtree,strat_ranges,durations,apos,oldest=NULL,youngest=NULL,taxon_labels="",otu_cols,lazarus_col="gray50",branching_col="black") {
# Draws a time-calibrated phylogeny onto an already configured plot, time on the y-axis.
# vtree: vector tree; vtree[i] gives the htu from which taxon/node i descends (-1 marks the root).
# strat_ranges: per-OTU matrix/data.frame of first (col 1) and last (col 2) appearances.
# durations: origination (col 1) and extinction (col 2) times for taxa (and nodes).
# apos: apomorphy information forwarded to accersi_poss_ancestors_for_nodes().
# oldest/youngest: optional time limits; oldest defaults to the earliest origination.
# taxon_labels: "", "numbers"/"Numbers", or a vector of names printed vertically.
# otu_cols / lazarus_col / branching_col: colors for observed ranges, range gaps, and ghost lineages.
# NOTE(review): mxy and mny (used for y_adj) are not defined in this function; they must exist
# as globals whenever taxon_labels holds real names -- confirm against callers.
# NOTE(review): min(youngest, ...) below errors if youngest is left NULL -- confirm callers always pass it.
notu <- nrow(strat_ranges)
svtree <- cbind(rank(vtree),vtree) # NOTE(review): svtree is computed but never used in this function
otu_order <- order(vtree[1:notu])
sampled_ancestors <- accersi_poss_ancestors_for_nodes(vtree,FA=strat_ranges[,1],apos)
if (is.null(oldest)) {
oldest <- min(durations)
}
phy_x <- vector(length=length(vtree))
# offsets used only when printing real taxon names
if ((taxon_labels[1]!="numbers" && taxon_labels[1]!="Numbers") && taxon_labels!="") {
y_adj <- (mxy-mny)/50
x_adj <- -(notu+1)/37.5
} else {
y_adj <- x_adj <- 0;
}
# draw each OTU: ghost lineage (when it is a sampled ancestor), range box or single-horizon tick, label
for (n in 1:notu) {
phy_x[n] <- match(n,otu_order)
if (!is.na(match(n,sampled_ancestors))) {
segments(phy_x[n],durations[n,2],phy_x[n],strat_ranges[n,1],col=branching_col,lwd=4)
segments(phy_x[n],durations[n,2],phy_x[n],strat_ranges[n,1],col=otu_cols[n],lwd=2)
}
segments(phy_x[n],durations[n,1],phy_x[n],durations[n,2],col=lazarus_col,lwd=4)
if (strat_ranges[n,1]!=strat_ranges[n,2]) {
rect((phy_x[n]-0.25),strat_ranges[n,1],(phy_x[n]+0.25),min(youngest,strat_ranges[n,2]),col=otu_cols[n])
} else {
# taxon known from a single horizon: draw a tick rather than a box
segments((phy_x[n]-0.25),strat_ranges[n,1],(phy_x[n]+0.25),strat_ranges[n,1],lwd=4)
segments((phy_x[n]-0.25),strat_ranges[n,1],(phy_x[n]+0.25),strat_ranges[n,1],lwd=2,col=otu_cols[n])
}
if (taxon_labels[1]=="numbers" || taxon_labels[1]=="Numbers") {
text(phy_x[n],strat_ranges[n,2],n,pos=3)
} else if (!is.na(taxon_labels[1])) {
text(phy_x[n]+x_adj,strat_ranges[n,2]+y_adj,taxon_labels[n],srt=90,pos=4)
}
}
Nnode <- max(vtree) - notu
mtree <- transform_vector_tree_to_matrix_tree(vtree)
# draw htus from the youngest node back toward the root
for (nn in Nnode:1) {
n <- notu+nn
if (sampled_ancestors[n]==0) {
# unsampled node: center over the daughters and join them with a horizontal bar
phy_x[n] <- mean(phy_x[mtree[nn,]])
segments(min(phy_x[mtree[nn,]]),durations[n,2],max(phy_x[mtree[nn,]]),durations[n,2],lwd=1)
segments(phy_x[n],durations[n,1],phy_x[n],durations[n,2],col=lazarus_col,lwd=4)
} else {
# node represented by a sampled ancestor: the node sits on the ancestor's column
phy_x[n] <- phy_x[sampled_ancestors[n]]
f1 <- mtree[nn,mtree[nn,]!=sampled_ancestors[nn]] # NOTE(review): indexes sampled_ancestors by nn here but by n above -- verify which is intended
for (f in 1:length(f1)) {
segments(phy_x[f1[f]],durations[f1[f],1],phy_x[sampled_ancestors[n]],durations[f1[f],1],lwd=1)
}
segments(phy_x[n],durations[n,2],phy_x[n],durations[n,1],lwd=4,col=lazarus_col)
}
# nn <- nn-1
}
}
#draw_calibrated_phylogeny_horizontal <- function(vtree,finds,durations,apomorphies,oldest=NA,youngest=NA,taxon_labels=NA,otu_cols,lazarus_col="gray50",branching_col="black",plot_stratigraphy="ranges",new_plot=F,xsize=4,ysize=6) {
draw_calibrated_phylogeny_horizontal <- function(vtree,finds,durations,apomorphies,taxon_labels=NA,otu_cols,lazarus_col="gray50",branching_col="black",plot_stratigraphy="ranges",new_plot=F,xsize=4,ysize=6) {
# Draws a time-calibrated phylogeny with time on the x-axis (ages plotted as negatives).
# vtree: vector tree (-1 at the root); finds: per-OTU matrix of individual find dates (0 = no find);
# durations: origination/extinction times; plot_stratigraphy: "ranges" or "points".
# NOTE(review): this routine references several names it never defines locally --
# sampled_ancestors, mtree, max_no, x_adj, y_adj, and the returned atu_order --
# so it only runs when those exist as globals; confirm before relying on it.
notu <- match(-1,vtree)-1;
durations <- -abs(durations); # times plotted as negative ages so "younger" is to the right
finds <- -abs(finds);
if (plot_stratigraphy=="ranges") {
# condense individual finds into first/last appearance dates per OTU
strat_ranges <- data.frame(FAD=as.numeric(rep(0,notu)),LAD=as.numeric(rep(0,notu)),stringsAsFactors = F);
for (n in 1:notu) {
if (sum(finds[n,]!=0)>0) {
strat_ranges$FAD[n] <- min(finds[n,]);
strat_ranges$LAD[n] <- max(finds[n,finds[n,]!=0]);
}
}
}
if (new_plot) {
mxx <- -abs(0.5*ceiling(max(durations)/0.5)); # plot limits rounded to the nearest 0.5
mnx <- -abs(0.5*ceiling(min(durations)/0.5));
par(pin=c(ysize,xsize));
plot(NA,type='n',axes=FALSE,main="",xlab="",ylab="",xlim=c(mnx,mxx),ylim=c(1,notu))
}
phy_y <- center_budding_phylogeny(vtree,durations,sampled_ancestors)
#for (n in 1:notu) {
# phy_y[n] <- match(n,atu_order);
# if (!is.na(match(n,sampled_ancestors))) {
# segments(durations[n,2],phy_y[n],strat_ranges[n,1],phy_y[n],col=branching_col,lwd=3)
# segments(durations[n,2],phy_y[n],strat_ranges[n,1],phy_y[n],col=otu_cols[n],lwd=1.5)
# }
adj_y <- (mxx-mnx)*0.005; # NOTE(review): mxx/mnx are undefined here when new_plot=F
nNode <- max(vtree) - notu;
# connect each daughter branch to its ancestral node
for (nn in nNode:1) {
n <- notu+nn;
f1 <- mtree[nn,!mtree[nn,] %in% c(0,sampled_ancestors[n])]; # daughters other than a sampled ancestor
for (f in 1:length(f1))
segments(durations[f1[f],1]-adj_y,phy_y[f1[f]],durations[f1[f],1]-adj_y,phy_y[n],lwd=1);
nn <- nn-1; # no effect: for() resets nn on each pass
}
n <- 0;
# draw each OTU's duration line plus its stratigraphic record
while (n < notu) {
n <- n+1;
segments(durations[n,1],phy_y[n],durations[n,2],phy_y[n],col=lazarus_col,lwd=3);
if (plot_stratigraphy=="ranges") {
if (strat_ranges[n,1]!=strat_ranges[n,2]) {
rect(strat_ranges[n,1],(phy_y[n]-0.25),min(max_no,strat_ranges[n,2]),(phy_y[n]+0.25),col=otu_cols[n])
} else if (strat_ranges[n,1]!=0) {
# segments(strat_ranges[n,1],(phy_y[n]-0.25),strat_ranges[n,1],(phy_y[n]+0.25),lwd=4)
# segments(strat_ranges[n,1],(phy_y[n]-0.25),strat_ranges[n,1],(phy_y[n]+0.25),lwd=2,col=otu_cols[n])
points(strat_ranges[n,1],phy_y[n],pch=22,cex=1,bg=otu_cols[n]);
}
} else if (plot_stratigraphy=="points") {
these_finds <- finds[n,finds[n,]!=0];
tf <- 0;
while (tf < length(these_finds)) {
tf <- 1+tf;
points(these_finds[tf],phy_y[n],pch=21,cex=1,bg=otu_cols[n])
}
}
if (!is.na(taxon_labels)) {
if (taxon_labels[1]=="numbers" || taxon_labels[1]=="Numbers") {
text(phy_y[n],durations[n,2],n,pos=3)
} else if (!is.na(taxon_labels[1])) {
text(phy_y[n]+x_adj,durations[n,2]+y_adj,taxon_labels[n],srt=90,pos=4)
}
}
}
return(atu_order); # NOTE(review): atu_order is never assigned in this function -- this will error
}
draw_calibrated_phylogeny_flex <- function(vtree,finds,durations,apos,orientation="vertical",oldest=NA,youngest=NA,taxon_labels=NA,otu_cols,lazarus_col="gray50",branching_col="black",plot_stratigraphy="ranges",new_plot=F,xsize=4,ysize=6) {
# Draws a time-calibrated phylogeny in either orientation ("vertical" or "horizontal").
# Ages are plotted as negative numbers; plot_stratigraphy is "ranges" or "points".
# NOTE(review): references names never defined locally -- sampled_ancestors, mtree,
# x_adj, y_adj, and the returned atu_order -- so it depends on globals; confirm before use.
# NOTE(review): mxz/mnz (used for adj_z) are only set when new_plot=T; the function
# errors otherwise. min(youngest, ...) with the default youngest=NA yields NA.
notu <- match(-1,vtree)-1;
durations <- -abs(durations);
finds <- -abs(finds);
if (plot_stratigraphy=="ranges") {
# condense finds into first/last appearance dates
strat_ranges <- data.frame(FAD=as.numeric(rep(0,notu)),LAD=as.numeric(rep(0,notu)),stringsAsFactors = F);
for (n in 1:notu) {
if (sum(finds[n,]!=0)>0) {
strat_ranges$FAD[n] <- min(finds[n,]);
strat_ranges$LAD[n] <- max(finds[n,finds[n,]!=0]);
}
}
}
phy_z <- center_budding_phylogeny(vtree,durations,sampled_ancestors);
if (new_plot) {
mxz <- -abs(0.5*ceiling(max(durations)/0.5));
mnz <- -abs(0.5*ceiling(min(durations)/0.5));
if (orientation=="vertical") {
par(pin=c(min(ysize,xsize),max(ysize,xsize)));
plot(NA,type='n',axes=FALSE,main="",xlab="",ylab="",ylim=c(mnz,mxz),xlim=c(1,max(phy_z)));
plot(NA,type='n',axes=T,main="",xlab="",ylab="",ylim=c(mnz,mxz),xlim=c(1,max(phy_z)));
} else {
par(pin=c(max(ysize,xsize),min(ysize,xsize)));
plot(NA,type='n',axes=FALSE,main="",xlab="",ylab="",xlim=c(mnz,mxz),ylim=c(1,max(phy_z)));
plot(NA,type='n',axes=T,main="",xlab="",ylab="",xlim=c(mnz,mxz),ylim=c(1,max(phy_z)));
}
}
adj_z <- (mxz-mnz)*0.0025; # small offset so branch connectors do not overplot lineages
nNode <- max(vtree) - notu;
# connect each daughter to its ancestral node, skipping sampled ancestors
for (nn in nNode:1) {
n <- notu+nn;
f1 <- mtree[nn,!mtree[nn,] %in% c(0,sampled_ancestors[n])];
for (f in 1:length(f1)) {
if (orientation=="horizontal") {
segments(durations[f1[f],1]-adj_z,phy_z[f1[f]],durations[f1[f],1]-adj_z,phy_z[n],lwd=1);
} else {
segments(phy_z[f1[f]],durations[f1[f],1]-adj_z,phy_z[n],durations[f1[f],1]-adj_z,lwd=1);
}
}
nn <- nn-1; # no effect: for() resets nn on each pass
}
n <- 0;
# per-OTU duration line and stratigraphic record
while (n < notu) {
n <- n+1;
if (orientation=="horizontal") {
segments(durations[n,1],phy_z[n],durations[n,2],phy_z[n],col=lazarus_col,lwd=3);
} else {
segments(phy_z[n],durations[n,1],phy_z[n],durations[n,2],col=lazarus_col,lwd=3);
}
if (plot_stratigraphy=="ranges") {
if (strat_ranges[n,1]!=strat_ranges[n,2]) {
rect(strat_ranges[n,1],(phy_z[n]-0.25),min(youngest,strat_ranges[n,2]),(phy_z[n]+0.25),col=otu_cols[n])
} else if (strat_ranges[n,1]!=0) {
# segments(strat_ranges[n,1],(phy_y[n]-0.25),strat_ranges[n,1],(phy_y[n]+0.25),lwd=4)
# segments(strat_ranges[n,1],(phy_y[n]-0.25),strat_ranges[n,1],(phy_y[n]+0.25),lwd=2,col=otu_cols[n])
points(strat_ranges[n,1],phy_z[n],pch=22,cex=1,bg=otu_cols[n]);
}
} else if (plot_stratigraphy=="points") {
these_finds <- finds[n,finds[n,]!=0];
tf <- 0;
while (tf < length(these_finds)) {
tf <- 1+tf;
points(these_finds[tf],phy_z[n],pch=21,cex=1,bg=otu_cols[n])
}
}
if (!is.na(taxon_labels)) {
if (taxon_labels[1]=="numbers" || taxon_labels[1]=="Numbers") {
text(phy_z[n],durations[n,2],n,pos=3)
} else if (!is.na(taxon_labels[1])) {
text(phy_z[n]+x_adj,durations[n,2]+y_adj,taxon_labels[n],srt=90,pos=4)
}
}
}
# if (sampled_ancestors[n]==0) {
## phy_y[n] <- mean(phy_y[mtree[nn,]])
# segments(durations[n,2],min(phy_y[mtree[nn,]]),durations[n,2],max(phy_y[mtree[nn,]]),lwd=1)
# segments(durations[n,1],phy_y[n],durations[n,2],phy_y[n],col=lazarus_col,lwd=4)
# } else {
# phy_y[n] <- phy_y[sampled_ancestors[n]];
# segments(durations[n,2],phy_y[n],durations[n,1],phy_y[n],lwd=4,col=lazarus_col)
return(atu_order); # NOTE(review): atu_order is never assigned in this function -- this will error
}
# modified 2020-05-11
draw_calibrated_phylogeny <- function(vector_tree,finds,durations,phylo_axis,apomorphies,orientation="vertical",taxon_labels="",otu_cols,lazarus_col="gray50",plot_stratigraphy="no",branching_col="black",lineage_lwd=4,branching_lwd=2,new_plot=F,height=4,width=6,taxon_cex=0.5) {
# working as of 2019-07-10
# Draws a time-calibrated phylogeny onto an already configured plot, or makes a new one (new_plot=T).
# vector_tree: vector giving the node (htu) number to which each taxon or node is attached; -1 signifies the base of the tree.
# phylo_axis: pre-computed plotting position for every OTU and htu (see get_phylo_axis_* helpers).
# plot_stratigraphy: default is "no"; if "range" or "ranges", observed ranges are tabulated from finds.
# NOTE(review): durations must be a data.frame -- the code uses both matrix-style durations[n,1]
# and column access durations$FAD/$LAD; confirm column names against callers.
# NOTE(review): mxz/mnz (and hence adj_z) are only defined when new_plot=T; the function errors otherwise.
notu <- match(-1,vector_tree)-1;
durations <- -abs(durations); # plot ages as negative numbers
## add routine to add nodal ranges if they are not present
finds <- -abs(finds);
if (plot_stratigraphy=="ranges" || plot_stratigraphy=="range") {
# condense finds into observed first/last appearances
strat_ranges <- data.frame(FAD=as.numeric(rep(0,notu)),LAD=as.numeric(rep(0,notu)),stringsAsFactors = F);
for (n in 1:notu) {
if (sum(finds[n,]!=0)>0) {
strat_ranges$FAD[n] <- min(finds[n,finds[n,]!=0]);
strat_ranges$LAD[n] <- max(finds[n,finds[n,]!=0]);
}
}
}
# get ancestral species that obviate "ghost taxon" nodes
sampled_ancestors <- accersi_poss_ancestors_for_nodes(vtree=vector_tree,FA=durations[,1],apos=apomorphies);
if (length(otu_cols)==1)
otu_cols <- rep(otu_cols,notu); # recycle a single color across all OTUs
if (new_plot) {
# round the time axis outward to the nearest 0.5 and pick tick spacing by span
mxz <- -abs(0.5*ceiling(min(abs(durations))/0.5));
mnz <- -abs(0.5*ceiling(max(abs(durations))/0.5));
if (abs(mxz-mnz) <= 25) {
maj_break <- 1;
} else if (abs(mxz-mnz) <= 50) {
maj_break <- 5;
} else if (abs(mxz-mnz) <= 200) {
maj_break <- 10;
} else {
maj_break <- 25;
}
med_break <- maj_break/2;
min_break <- maj_break/10;
if (orientation=="vertical") {
par(pin=c(width,height));
if (taxon_labels[1]=="") {
plot(NA,type='n',axes=FALSE,main="",xlab="",ylab="Time",ylim=c(mnz,mxz),xlim=c(1,max(phylo_axis)));
} else {
# leave headroom above the youngest point for the printed names
adj_y <- mxz + (mxz - mnz)/10;
plot(NA,type='n',axes=FALSE,main="",xlab="",ylab="Time",ylim=c(mnz,adj_y),xlim=c(1,max(phylo_axis)));
}
specified_axis(axe=2,max_val=mxz,min_val=mnz,maj_break,med_break,min_break,linewd=4/3,orient=2,print_label=TRUE);
} else {
par(pin=c(height,width));
plot(NA,type='n',axes=FALSE,main="",xlab="Time",ylab="",xlim=c(mnz,mxz),ylim=c(1,max(phylo_axis)));
specified_axis(axe=1,max_val=mxz,min_val=mnz,maj_break,med_break,min_break,linewd=4/3,orient=1,print_label=TRUE);
}
}
#if (nrow(durations)==notu)
# durations <- rbind(durations,durations[sampled_ancestors[(notu+1):length(ctree)],])
adj_z <- (mxz-mnz)*0.0025; # small offset so branch connectors do not overplot lineages
nNode <- max(vector_tree) - notu;
mtree <- transform_vector_tree_to_matrix_tree(vector_tree);
nn <- nNode+1;
gotcha <- vector(length=length(vector_tree)); # marks lineages already drawn, to avoid double-plotting
#for (nn in nNode:1) {
while (nn > 1) {
# draw lines from ancestor to descendants, youngest node first
nn <- nn-1;
n <- notu+nn;
fo <- mtree[nn,!mtree[nn,] %in% 0]; # NOTE(review): fo is computed but never used
f1 <- mtree[nn,!mtree[nn,] %in% c(0,sampled_ancestors[n])];
if (sampled_ancestors[n]!=0) {
# a sampled ancestor extends the node's duration to its own
durations$LAD[n] <- durations$LAD[sampled_ancestors[n]];
}
durations$LAD[n] <- max(c(durations$LAD[n],durations$FAD[f1])); # node must persist to its latest daughter's origin
# durations$LAD[sampled_ancestors[n]] <- durations$LAD[n];
if (orientation=="horizontal") {
segments(durations[n,1],phylo_axis[n],durations[n,2],phylo_axis[n],lwd=lineage_lwd,col=branching_col);
if (sampled_ancestors[n]==0) {
segments(durations[n,1],phylo_axis[n],durations[n,2],phylo_axis[n],lwd=0.75*lineage_lwd,col=lazarus_col);
gotcha[n] <- 1;
}
} else {
segments(phylo_axis[n],durations[n,1],phylo_axis[n],durations[n,2],lwd=lineage_lwd,col=branching_col);
if (sampled_ancestors[n]==0) {
segments(phylo_axis[n],durations[n,1],phylo_axis[n],durations[n,2],lwd=0.75*lineage_lwd,col=lazarus_col);
gotcha[n] <- 1;
}
}
for (f in 1:length(f1)) {
if (orientation=="horizontal") {
segments(durations[f1[f],1]-adj_z,phylo_axis[f1[f]],durations[f1[f],1]-adj_z,phylo_axis[n],lwd=branching_lwd,col=branching_col);
if (gotcha[f1[f]]==0)
segments(durations[f1[f],1],phylo_axis[f1[f]],durations[f1[f],2],phylo_axis[f1[f]],lwd=lineage_lwd,col=branching_col);
} else {
segments(phylo_axis[f1[f]],durations[f1[f],1]-adj_z,phylo_axis[n],durations[f1[f],1]-adj_z,lwd=branching_lwd,col=branching_col);
if (gotcha[f1[f]]==0)
segments(phylo_axis[f1[f]],durations[f1[f],1],phylo_axis[f1[f]],durations[f1[f],2],lwd=lineage_lwd,col=branching_col);
}
gotcha[f1[f]] <- 1;
}
# nn <- nn-1;
}
n <- 1;
# overplot OTU lineages in their own colors and add labels
for (n in 1:notu) {
if (orientation=="horizontal") {
segments(durations[n,1],phylo_axis[n],durations[n,2],phylo_axis[n],lwd=0.75*lineage_lwd,col=otu_cols[n]);
if (taxon_labels[1]!="") {
text(durations[n,2],phylo_axis[n],taxon_labels[n],pos=3);
}
} else {
segments(phylo_axis[n],durations[n,1],phylo_axis[n],durations[n,2],lwd=0.75*lineage_lwd,col=otu_cols[n]);
if (taxon_labels[1]!="") {
text(phylo_axis[n],durations[n,2],taxon_labels[n],pos=3,cex=taxon_cex);
}
}
# n <- 1+n;
}
}
draw_calibrated_phylogeny_new <- function(vtree,finds,durations,phylo_axis,apomorphies,orientation="vertical",taxon_labels="",otu_cols,lazarus_col="gray50",plot_stratigraphy=F,branching_col="black",lineage_lwd=4,branching_lwd=2,new_plot=F,height=4,width=6,max_age=NULL) {
# working as of 2019-07-10
# Variant of draw_calibrated_phylogeny: plot_stratigraphy is logical here, and max_age
# can pin the old end of the time axis.
# vtree: vector giving the node (htu) number to which each taxon or node is attached; -1 signifies the base of the tree.
# NOTE(review): strat_ranges$FA is read below even when plot_stratigraphy=F, in which case
# strat_ranges is never built -- this will error; confirm callers always pass plot_stratigraphy=T.
# NOTE(review): mxz/mnz (and hence adj_z) are only defined when new_plot=T.
notu <- match(-1,vtree)-1;
if (sum(durations$onset)>0)
finds <- -abs(finds); # NOTE(review): negation of finds is gated on durations$onset sign -- verify intent
#if (plot_stratigraphy=="ranges" || plot_stratigraphy=="range") {
if (plot_stratigraphy) {
# condense finds into observed first/last appearances
strat_ranges <- data.frame(FAD=as.numeric(rep(0,notu)),LAD=as.numeric(rep(0,notu)),stringsAsFactors = F);
for (n in 1:notu) {
if (sum(finds[n,]!=0)>0) {
strat_ranges$FAD[n] <- min(finds[n,finds[n,]!=0]);
strat_ranges$LAD[n] <- max(finds[n,finds[n,]!=0]);
}
}
}
# get ancestral species that obviate "ghost taxon" nodes
sampled_ancestors <- accersi_poss_ancestors_for_nodes(vtree=vtree,FA=strat_ranges$FA,apos=apomorphies);
if (length(otu_cols)==1)
otu_cols <- rep(otu_cols,notu); # recycle a single color across all OTUs
if (new_plot) {
mxz <- -abs(0.5*ceiling(min(abs(durations))/0.5));
if (is.null(max_age)) {
mnz <- -abs(0.5*ceiling(max(abs(durations))/0.5));
} else {
mnz <- -abs(0.5*ceiling(max_age)/0.5);
}
# tick spacing scales with the total time span
if (abs(mxz-mnz) <= 25) {
maj_break <- 1;
} else if (abs(mxz-mnz) <= 50) {
maj_break <- 5;
} else if (abs(mxz-mnz) <= 200) {
maj_break <- 10;
} else {
maj_break <- 25;
}
med_break <- maj_break/2;
min_break <- maj_break/10;
if (orientation=="vertical") {
par(pin=c(width,height));
plot(NA,type='n',axes=FALSE,main="",xlab="",ylab="Time",ylim=c(mnz,mxz),xlim=c(1,max(phylo_axis)));
specified_axis(axe=2,max_val=mxz,min_val=mnz,maj_break,med_break,min_break,linewd=4/3,orient=2,print_label=TRUE);
} else {
par(pin=c(height,width));
plot(NA,type='n',axes=FALSE,main="",xlab="Time",ylab="",xlim=c(mnz,mxz),ylim=c(1,max(phylo_axis)));
specified_axis(axe=1,max_val=mxz,min_val=mnz,maj_break,med_break,min_break,linewd=4/3,orient=1,print_label=TRUE);
}
}
#if (nrow(durations)==notu)
# durations <- rbind(durations,durations[sampled_ancestors[(notu+1):length(vtree)],])
adj_z <- (mxz-mnz)*0.0025; # small offset so branch connectors do not overplot lineages
nNode <- max(vtree) - notu;
mtree <- transform_vector_tree_to_matrix_tree(vtree);
nn <- nNode;
gotcha <- vector(length=length(vtree)); # marks lineages already drawn, to avoid double-plotting
for (nn in nNode:1) {
# draw lines from ancestor to descendants
n <- notu+nn;
if (orientation=="horizontal") {
segments(durations[n,1],phylo_axis[n],durations[n,2],phylo_axis[n],lwd=lineage_lwd,col=branching_col);
if (sampled_ancestors[n]==0) {
segments(durations[n,1],phylo_axis[n],durations[n,2],phylo_axis[n],lwd=0.75*lineage_lwd,col=lazarus_col);
gotcha[n] <- 1;
}
} else {
segments(phylo_axis[n],durations[n,1],phylo_axis[n],durations[n,2],lwd=lineage_lwd,col=branching_col);
if (sampled_ancestors[n]==0) {
segments(phylo_axis[n],durations[n,1],phylo_axis[n],durations[n,2],lwd=0.75*lineage_lwd,col=lazarus_col);
gotcha[n] <- 1;
}
}
f1 <- mtree[nn,!mtree[nn,] %in% c(0,sampled_ancestors[n])]; # daughters other than a sampled ancestor
for (f in 1:length(f1)) {
if (orientation=="horizontal") {
segments(durations[f1[f],1]-adj_z,phylo_axis[f1[f]],durations[f1[f],1]-adj_z,phylo_axis[n],lwd=branching_lwd,col=branching_col);
if (gotcha[f1[f]]==0)
segments(durations[f1[f],1],phylo_axis[f1[f]],durations[f1[f],2],phylo_axis[f1[f]],lwd=lineage_lwd,col=branching_col);
} else {
segments(phylo_axis[f1[f]],durations[f1[f],1]-adj_z,phylo_axis[n],durations[f1[f],1]-adj_z,lwd=branching_lwd,col=branching_col);
if (gotcha[f1[f]]==0)
segments(phylo_axis[f1[f]],durations[f1[f],1],phylo_axis[f1[f]],durations[f1[f],2],lwd=lineage_lwd,col=branching_col);
}
gotcha[f1[f]] <- 1;
}
nn <- nn-1; # no effect: for() resets nn on each pass
}
n <- 1;
# overplot OTU lineages in their own colors
for (n in 1:notu) {
if (orientation=="horizontal") {
segments(durations[n,1],phylo_axis[n],durations[n,2],phylo_axis[n],lwd=0.75*lineage_lwd,col=otu_cols[n]);
} else {
segments(phylo_axis[n],durations[n,1],phylo_axis[n],durations[n,2],lwd=0.75*lineage_lwd,col=otu_cols[n]);
}
# n <- 1+n;
}
}
#vtree_old <- vtree <- rangeomorph_vtree;
plot_calibrated_phylogeny <- function(vtree,strat_ranges,durations,apos,oldest=NULL,youngest=NULL,taxon_labels="",otu_cols,xsize=3.5,ysize=3.5) {
# Sets up a new plot (time on the y-axis, labeled "Ma") and draws a calibrated phylogeny on it.
# vtree: vector tree (-1 at the root); strat_ranges: first/last appearances per OTU;
# durations: origination/extinction times; apos: apomorphy info for sampled-ancestor detection.
# NOTE(review): the plot call uses ylim=c(oldest,youngest) directly, so the NULL defaults
# will error; callers appear expected to supply both limits.
# NOTE(review): x_adj/y_adj are only set when taxon_labels holds real names, but are read
# in the "numbers" branch's else -- safe only because that branch never reads them; verify.
#vtree <- ladderize_vector_tree(vtree);
venn_tree <- transform_vector_tree_to_venn_tree(vtree)
mtree <- transform_venn_tree_to_matrix_tree(venn_tree)
notu <- dim(strat_ranges)[1]
svtree <- cbind(rank(vtree),vtree) # NOTE(review): svtree is computed but never used
ordinate <- "Ma"
otu_order <- order(vtree[1:notu])
sampled_ancestors <- accersi_poss_ancestors_for_nodes(vtree,FA=strat_ranges[,1],apos)
# set up Y-axis (time) scale
if (is.null(oldest)) {
ax_min <- min(durations)
} else {
ax_min <- min(oldest,min(durations))
}
if (is.null(youngest)) {
ax_max <- max(strat_ranges)
} else {
ax_max <- max(youngest,max(strat_ranges))
}
if (taxon_labels[1]=="") {
y_add <- 1
} else {
y_add <- 0
}
#axis_info <- wagner_set_axes(ax_min,ax_max,y_add)
#tcs <- axis_info$Ticks
#lbl_prn <- axis_info$Labels
#tick_str <- axis_info$Tick_Strength
#mny <- min(tcs)
#mxy <- max(tcs[dim(tcs)[1],tcs[dim(tcs)[1],]!=0])
par(pin=c(xsize,ysize))
plot(NA,type='n',axes=FALSE,xlab="",ylab=ordinate,xlim=c(0,notu+1),ylim=c(oldest,youngest));
tick_tock <- set_axis_breaks(max_no = youngest,min_no=oldest);
specified_axis(axe=2,max_val=youngest,min_val=oldest,maj_break=tick_tock$maj_break,med_break=tick_tock$med_break,min_break=tick_tock$min_break,linewd=4/3,orient=1,print_label=TRUE);
#axis(2,at=seq(mny,mxy,by=(abs(mxy-mny))),tcl=-0.00,labels=FALSE,lwd=1.1,lwd.ticks=0.0,las=2)
phy_x <- vector(length=length(vtree));
# offsets used only when printing real taxon names
if ((taxon_labels[1]!="numbers" && taxon_labels[1]!="Numbers") && taxon_labels[1]!="") {
y_adj <- (youngest-oldest)/50;
x_adj <- -(notu+1)/37.5
}
# draw each OTU: ghost lineage (for sampled ancestors), range box or single-horizon tick, label
for (n in 1:notu) {
# n <- n+1;
phy_x[n] <- match(n,otu_order);
if (!is.na(match(n,sampled_ancestors))) {
segments(phy_x[n],durations[n,2],phy_x[n],strat_ranges[n,1],col="black",lwd=4);
ghost_col <- paste(otu_cols[n],"4",sep=""); # darker shade of the OTU color for the ghost lineage
segments(phy_x[n],durations[n,2],phy_x[n],strat_ranges[n,1],col=ghost_col,lwd=2);
}
segments(phy_x[n],durations[n,1],phy_x[n],durations[n,2],col="gray50",lwd=4)
if (strat_ranges[n,1]!=strat_ranges[n,2]) {
rect((phy_x[n]-0.25),strat_ranges[n,1],(phy_x[n]+0.25),strat_ranges[n,2],col=otu_cols[n])
} else {
# taxon known from one horizon: tick instead of box
segments((phy_x[n]-0.25),strat_ranges[n,1],(phy_x[n]+0.25),strat_ranges[n,1],lwd=4);
# ghost_col <- paste(otu_cols[n],"1",sep="");
segments((phy_x[n]-0.25),strat_ranges[n,1],(phy_x[n]+0.25),strat_ranges[n,1],lwd=2,col=otu_cols[n])
}
if (taxon_labels[1]=="numbers" || taxon_labels[1]=="Numbers") {
text(phy_x[n],strat_ranges[n,2],n,pos=3)
} else if (taxon_labels[1]!="") {
text(phy_x[n]+x_adj,strat_ranges[n,2]+y_adj,taxon_labels[n],srt=90,pos=4)
}
}
Nnode <- max(vtree) - notu;
### problem is here somewhere!!!!
nn <- Nnode+1;
# draw htus from the youngest node back toward the root
while (nn>1) {
#for (nn in Nnode:1) {
nn <- nn-1;
n <- notu+nn;
f1 <- mtree[nn,mtree[nn,]>0];
if (sampled_ancestors[n]==0) {
# unsampled node: center over daughters and join them with a bar
phy_x[n] <- mean(phy_x[f1])
segments(min(phy_x[f1]),durations$end[n],max(phy_x[f1]),durations$end[n],lwd=1) # NOTE(review): needs a durations$end column -- confirm
segments(phy_x[n],durations[n,1],phy_x[n],durations[n,2],col="gray50",lwd=4)
} else {
# sampled ancestor: node sits on the ancestor's column
phy_x[n] <- phy_x[sampled_ancestors[n]];
f1a <- f1[f1!=sampled_ancestors[n]];
for (f in 1:length(f1a)) {
segments(phy_x[f1a[f]],durations[f1a[f],1],phy_x[sampled_ancestors[n]],durations[f1a[f],1],lwd=1)
}
segments(phy_x[n],durations[n,2],phy_x[n],durations[n,1],lwd=4,col="gray50")
}
# nn <- nn-1
}
}
get_phylo_axis_from_newick_string <- function(newick_string,sampled_ancestors,root_low=T) {
# Derive a plotting position (phylo-axis value) for every OTU and HTU from the
# left-to-right order in which taxa appear in a Newick string.
# newick_string: Newick description of the tree using taxon numbers.
# sampled_ancestors: per-OTU 0/1 flags; a flagged daughter pins its node to that OTU's position.
# root_low: when TRUE, flip the tip order if the last-listed taxon attaches near the root.
# returns: vector with one axis coordinate per OTU followed by one per HTU.
# Tokenize the string: add a comma after every ")" so tips split cleanly on commas.
marked <- gsub(")","),",newick_string);
marked <- gsub(",,","),",marked);
tokens <- simplify2array(strsplit(marked,split=",")[1])[,1]
tokens <- gsub("\\(","",tokens);
tokens <- gsub("\\)","",tokens);
tokens <- tokens[!tokens %in% c("",";")];
tokens <- as.numeric(tokens);
# otu_order[i] = position of taxon i in the written order
otu_order <- match(1:max(tokens),tokens);
notu <- length(otu_order);
cladogram <- read_newick_string(newick_string);
mat_tree <- transform_vector_tree_to_matrix_tree(vector_tree=cladogram);
nNodes <- nrow(mat_tree);
if (root_low) {
# flip the ladder if the highest-positioned tip sits on an early (basal) node
max_tax <- (1:notu)[match(max(otu_order),otu_order)];
end_node <- which(mat_tree==max_tax,arr.ind=T)[,1];
if (end_node < (nNodes/2)) {
otu_order <- notu-otu_order
}
} else {
otu_order <- otu_order-min(otu_order)
}
phylo_axis <- c(otu_order,rep(0,nNodes));
relv_ancestors <- c(sampled_ancestors,rep(0,nNodes)); # pad so htu slots can be indexed
# fill node positions from the youngest htu back to the root
for (nd in nNodes:1) {
htu <- nd+notu;
daughters <- mat_tree[nd,mat_tree[nd,]>0];
if (sum(relv_ancestors[daughters])>0) {
# a sampled ancestor pins this node to its own column
pin_to <- daughters[relv_ancestors[daughters]==1][1];
phylo_axis[htu] <- otu_order[pin_to];
} else {
# otherwise center the node over its daughters
phylo_axis[htu] <- mean(phylo_axis[daughters])
}
}
return(phylo_axis);
}
get_phylo_axis_from_newick_string_w_anagenesis <- function(newick_string,sampled_ancestors,anagenetic_ancestors=0,root_low=T) {
# As get_phylo_axis_from_newick_string, but anagenetic ancestors share the plotting
# position of their descendants so ancestor-descendant chains stack on one column.
# anagenetic_ancestors: per-OTU 0/1 flags (scalar 0 means "none").
if (length(anagenetic_ancestors)==1)
anagenetic_ancestors <- rep(0,length(sampled_ancestors));
# tokenize the Newick string to recover the written (left-to-right) taxon order
newick_string_new <- gsub(")","),",newick_string);
newick_string_new <- gsub(",,","),",newick_string_new);
otu_order_string <- simplify2array(strsplit(newick_string_new,split=",")[1])[,1]
otu_order_string <- gsub("\\(","",otu_order_string);
otu_order_string <- gsub("\\)","",otu_order_string);
otu_order_string <- otu_order_string[!otu_order_string %in% c("",";")];
otu_order_string <- as.numeric(otu_order_string);
otu_order <- match(1:max(otu_order_string),otu_order_string);
notu <- length(otu_order);
cladogram <- read_newick_string(newick_string);
mat_tree <- transform_vector_tree_to_matrix_tree(vector_tree=cladogram);
nNodes <- nrow(mat_tree);
if (root_low) {
# flip the ladder if the highest-positioned tip sits on an early (basal) node
max_tax <- (1:notu)[match(max(otu_order),otu_order)];
end_node <- which(mat_tree==max_tax,arr.ind=T)[,1];
if (end_node < (nNodes/2)) {
otu_order <- notu-otu_order
}
} else {
otu_order <- otu_order-min(otu_order)
}
observed_nodes <- rep(0,nNodes); # observed_nodes[nd] = OTU anchoring node nd via anagenesis (0 = none)
if (sum(anagenetic_ancestors)>0) {
anas <- (1:notu)[anagenetic_ancestors==1];
names(anas) <- names(anagenetic_ancestors)[anas];
# for (an in 1:length(anas)) {
an <- 0;
# give each anagenetic ancestor's descendants the ancestor's axis position
while (an < length(anas)) {
an <- an+1;
acells <- which(mat_tree==anas[an],arr.ind = T);
f1 <- mat_tree[acells[1],mat_tree[acells[1],]!=anas[an]];
f1 <- f1[f1<=notu]; # reduce to just species
if (length(f1)>0) {
otu_order[f1] <- otu_order[anas[an]];
} else {
# descendants are nodes: anchor them (or the next sampled ancestor down) to this OTU
f1 <- mat_tree[acells[1],mat_tree[acells[1],]!=anas[an]];
observed_nodes[f1[1]-notu] <-anas[an];
for (ff in 1:length(f1)) {
f2 <- mat_tree[f1[ff]-notu,mat_tree[f1[ff]-notu,]>0];
if (sum(sampled_ancestors[f2[f2<=notu]])>0) {
next_anc <-f2[sampled_ancestors[f2]==1][1];
otu_order[next_anc] <- otu_order[anas[an]];
} else if (sum(f2<=notu)>0) {
# if ancestral to node
if (root_low) {
otu_order[anas[an]] <- min(otu_order[f2[f2<=notu]])+0.5;
} else {
otu_order[anas[an]] <- min(otu_order[f2[f2<=notu]])-0.5;
}
}
}
}
}
min_ord <- min(otu_order); # NOTE(review): min_ord is assigned but never used
otu_order <- match(otu_order,sort(unique(otu_order))); # compress shared/half positions back to consecutive ranks
otu_order <- otu_order-min(otu_order);
}
#hist(otu_order,breaks=-1:max(otu_order))
#hist(otu_order,breaks=sort(c(min(otu_order)-1,unique(otu_order))))
phylo_axis <- c(otu_order,rep(0,nNodes));
relv_ancestors <- c(sampled_ancestors,rep(0,nNodes));
# fill node positions from the youngest htu back to the root
for (nd in nNodes:1) {
htu <- nd+notu;
f1 <- mat_tree[nd,mat_tree[nd,]>0];
if (sum(relv_ancestors[f1])>0) {
# print(nd);
obs_anc <- f1[relv_ancestors[f1]==1][1];
# if (anagenetic_ancestors[obs_anc]==1) {
# }
phylo_axis[htu] <- otu_order[obs_anc]; # sampled ancestor pins the node
} else if (observed_nodes[nd]!=0) {
phylo_axis[htu] <- phylo_axis[observed_nodes[nd]]; # anagenetic anchor pins the node
} else {
phylo_axis[htu] <- mean(phylo_axis[mat_tree[nd,mat_tree[nd,]>0]])
}
}
return(phylo_axis);
}
### Derive plotting positions for all OTUs & HTUs directly from a vector tree.
get_phylo_axis_from_vector_tree <- function(vector_tree,sampled_ancestors,root_low=T) {
# vector_tree: vector giving the htu from which each taxon/node descends; -1 marks the root.
# sampled_ancestors: per-OTU 0/1 flags; a flagged daughter pins its node to that OTU's position.
# root_low: TRUE puts the first tip met in a root-to-tip traversal at position 1;
#   FALSE reverses the tip order.
# returns: numeric vector of length(vector_tree) with one axis coordinate per OTU and HTU.
# (The previous version was non-functional: it referenced 'newick_string', 'cladogram'
# and 'xx', none of which existed in this scope. The axis is now computed from
# vector_tree itself, matching the contract of the Newick-based sibling routine.)
notu <- match(-1,vector_tree)-1; # tips are 1:notu; root htu (notu+1) carries the -1 flag
nNodes <- length(vector_tree)-notu;
# Depth-first traversal from the root so members of a clade get contiguous positions.
tip_order <- integer(0);
stack <- notu+1;
while (length(stack) > 0) {
nd <- stack[1];
stack <- stack[-1];
kids <- which(vector_tree==nd);
tip_order <- c(tip_order,kids[kids<=notu]); # record tips in encounter order
stack <- c(kids[kids>notu],stack); # descend into daughter clades before siblings' clades
}
otu_order <- match(1:notu,tip_order); # position of each tip along the axis
if (!root_low)
otu_order <- (notu+1)-otu_order; # reverse the ladder
phylo_axis <- c(otu_order,rep(0,nNodes));
relv_ancestors <- c(sampled_ancestors,rep(0,nNodes)); # pad so htu slots can be indexed
# fill node positions from the youngest htu back to the root
for (nd in rev(seq_len(nNodes))) {
htu <- nd+notu;
f1 <- which(vector_tree==htu); # daughters of this node (tips and/or htus)
if (sum(relv_ancestors[f1])>0) {
# a sampled ancestor sits on this node: plot the node at the ancestor's position
obs_anc <- f1[relv_ancestors[f1]==1][1];
phylo_axis[htu] <- phylo_axis[obs_anc];
} else {
phylo_axis[htu] <- mean(phylo_axis[f1]); # otherwise center the node over its daughters
}
}
return(phylo_axis);
}
draw_cladogram_from_newick_string <- function(newick_string) {
### NOTE: This assumes that each numbered taxon is in the cladogram
### IF you have only SOME taxa in the string, then this needs to be rewritten
# Draws a simple triangular cladogram: tips sit at y=0 in written order, each node
# hangs below its span, and branches are drawn as diagonals from node to daughters.
# NOTE(review): the `[,1]` extraction below assumes strsplit/simplify2array returns a
# matrix; with a single Newick string simplify2array returns a plain vector -- verify
# against the inputs actually used.
newick_string_new <- gsub(")","),",newick_string);
newick_string_new <- gsub(",,","),",newick_string_new);
otu_order_string <- simplify2array(strsplit(newick_string_new,split=",")[1])[,1]
otu_order_string <- gsub("\\(","",otu_order_string);
otu_order_string <- gsub("\\)","",otu_order_string);
otu_order_string <- otu_order_string[!otu_order_string %in% c("",";")];
otu_order_string <- as.numeric(otu_order_string);
otu_order <- match(1:max(otu_order_string),otu_order_string);
cladogram <- read_newick_string(newick_string);
need_to_be_drawn <- sort(otu_order_string); # taxa/nodes whose positions are already fixed
notu <- length(need_to_be_drawn);
tttu <- length(cladogram);
nhtu <- tttu-notu;
otu_order <- c(otu_order,rep(0,nhtu));
undrawn <- 1:nhtu; # NOTE(review): undrawn is assigned but never used
mxy <- mxx <- notu+1;
mnx <- 0;
mny <- -mxy;
par(pin=c(4.5,4.5));
lineage_tops <- array(0,dim=tttu); # y at which each taxon/node plots (tips at 0, nodes below)
node_span <- array(0,dim=c(tttu,2)); # leftmost/rightmost x spanned by each taxon/node
node_span[1:notu,1] <- node_span[1:notu,2] <- otu_order[1:notu];
plot(NA,type='n',axes=FALSE,main="",xlab="",ylab="",xlim=c(mnx,mxx),ylim=c(mny,mxy));
# draw nodes youngest-first; a node is drawable once all of its daughters are placed
for (nn in nhtu:1) {
node <- nn+notu;
descendants <- (1:tttu)[cladogram==(notu+nn)];
if (sum(descendants %in% need_to_be_drawn)==length(descendants)) {
node_span[node,1] <- min(node_span[descendants,1])
node_span[node,2] <- max(node_span[descendants,2])
otu_order[node] <- mean(node_span[node,]);
lineage_tops[node] <- -(node_span[node,2]-node_span[node,1]); # node depth grows with span width
for (dd in 1:length(descendants)) {
segments(otu_order[descendants[dd]],lineage_tops[descendants[dd]],otu_order[node],lineage_tops[node],lwd=4);
}
need_to_be_drawn <- need_to_be_drawn[!need_to_be_drawn %in% descendants];
need_to_be_drawn <- sort(c(need_to_be_drawn,node));
}
nn <- nn-1; # no effect: for() resets nn on each pass
}
}
draw_cladogram_from_vector_tree <- function(vector_tree) {
### NOTE: This assumes that each numbered taxon is in the cladogram.
# Draws a simple triangular cladogram from a vector tree: tips at y=0 in a
# depth-first order (so clades are contiguous), nodes hanging below their spans.
# vector_tree: vector_tree[i] gives the htu from which taxon/node i descends; -1 marks the root.
# (The previous version was a verbatim copy of draw_cladogram_from_newick_string:
# it ignored vector_tree entirely and read the undefined 'newick_string', so it
# always errored. Everything is now derived from vector_tree itself.)
notu <- match(-1,vector_tree)-1; # tips are 1:notu; root htu (notu+1) carries the -1 flag
tttu <- length(vector_tree);
nhtu <- tttu-notu;
# depth-first traversal from the root so members of a clade plot side by side
tip_order <- integer(0);
stack <- notu+1;
while (length(stack) > 0) {
nd <- stack[1];
stack <- stack[-1];
kids <- which(vector_tree==nd);
tip_order <- c(tip_order,kids[kids<=notu]);
stack <- c(kids[kids>notu],stack);
}
otu_order <- c(match(1:notu,tip_order),rep(0,nhtu)); # x position per taxon; nodes filled in below
need_to_be_drawn <- 1:notu; # taxa/nodes whose positions are already fixed
mxy <- mxx <- notu+1;
mnx <- 0;
mny <- -mxy;
par(pin=c(4.5,4.5));
lineage_tops <- array(0,dim=tttu); # y at which each taxon/node plots (tips at 0, nodes below)
node_span <- array(0,dim=c(tttu,2)); # leftmost/rightmost x spanned by each taxon/node
node_span[1:notu,1] <- node_span[1:notu,2] <- otu_order[1:notu];
plot(NA,type='n',axes=FALSE,main="",xlab="",ylab="",xlim=c(mnx,mxx),ylim=c(mny,mxy));
# draw nodes youngest-first; a node is drawable once all of its daughters are placed
for (nn in nhtu:1) {
node <- nn+notu;
descendants <- which(vector_tree==node);
if (sum(descendants %in% need_to_be_drawn)==length(descendants)) {
node_span[node,1] <- min(node_span[descendants,1]);
node_span[node,2] <- max(node_span[descendants,2]);
otu_order[node] <- mean(node_span[node,]);
lineage_tops[node] <- -(node_span[node,2]-node_span[node,1]); # node depth grows with span width
for (dd in 1:length(descendants)) {
segments(otu_order[descendants[dd]],lineage_tops[descendants[dd]],otu_order[node],lineage_tops[node],lwd=4);
}
need_to_be_drawn <- need_to_be_drawn[!need_to_be_drawn %in% descendants];
need_to_be_drawn <- sort(c(need_to_be_drawn,node));
}
}
}
#### Add Image Files ####
add_png <- function(png_info, x = NULL,y = NULL,width = NULL,height = NULL,interpol = TRUE,x_cent=T,y_cent=T) {
  # Draw a bitmap onto the current plot, preserving the image's pixel aspect ratio.
  # png_info: image array (e.g. from png::readPNG or jpeg::readJPEG)
  # x, y: placement coordinates (user units); centered unless x_cent/y_cent is FALSE,
  #   in which case the image starts at that coordinate instead
  # width, height: desired size in user units; supply one and the other is derived
  #   from the pixel aspect ratio (width wins if both are given)
  # interpol: passed to graphics::rasterImage as 'interpolate'
  if (is.null(x) | is.null(y) | (is.null(width) && is.null(height))) {
    stop("Must provide args 'x', 'y', and/or 'width'")
  }
  usr <- par()$usr  # plot extremes: c(x1, x2, y1, y2) in user coordinates
  pin <- par()$pin  # plot region size in inches: c(width, height)
  px <- dim(png_info)  # image pixel dimensions (rows, cols, ...)
  if (!is.null(width)) {
    # width given: derive height so one user-unit inch ratio matches the pixel ratio
    aspect <- px[1]/px[2]
    w_inches <- width/(usr[2]-usr[1])*pin[1]
    h_inches <- w_inches * aspect
    height <- h_inches/pin[2]*(usr[4]-usr[3])
  } else {
    # height given: derive width the same way
    h_inches <- height/(usr[4]-usr[3])*pin[2]
    aspect <- px[2]/px[1]
    w_inches <- h_inches * aspect
    width <- w_inches/(pin[1])*(usr[2]-usr[1])
  }
  # shift from corner placement to center placement when requested
  if (!x_cent) x <- x+width/2
  if (!y_cent) y <- y+height/2
  rasterImage(image = png_info,
              xleft = x-(width/2), xright = x+(width/2),
              ybottom = y-(height/2), ytop = y+(height/2),
              interpolate = interpol)
}
add_jpeg = function(jpg_file,x,y,width=NULL,height=NULL) {
  # Read a JPEG from disk and draw it on the current plot with (x, y) as the
  # lower-left corner.
  # jpg_file: path to the JPEG file
  # width/height: size in user units; when only one is given, the other is derived
  #   from the image's pixel aspect ratio; when neither is given, the image spans
  #   its native pixel dimensions (the original's fallback behavior).
  # (Fixes: the original drew the image twice when 'height' was supplied -- once
  # scaled and then again unconditionally at native resolution -- and its scaled
  # corners were absolute values rather than offsets from x/y. It also computed
  # several values it never used.)
  require('jpeg');  # NOTE(review): non-stdlib dependency, kept from the original
  jpg <- readJPEG(jpg_file, native=T);
  res <- dim(jpg)[2:1];  # pixel resolution as (x, y)
  if (is.null(width) && is.null(height)) {
    # no size requested: span the native pixel dimensions from (x, y)
    rasterImage(jpg, x, y, x+res[1], y+res[2]);
  } else {
    # derive the missing dimension from the pixel aspect ratio
    if (is.null(height)) height <- width*res[2]/res[1];
    if (is.null(width)) width <- height*res[1]/res[2];
    rasterImage(jpg, x, y, x+width, y+height);
  }
}
# Draw a pre-loaded raster image (array from png::readPNG / jpeg::readJPEG)
# centred at (x, y), preserving the image's pixel aspect ratio.
#
# Args:
#   obj:      image array; dim(obj) is c(rows = y pixels, cols = x pixels, ...).
#   x, y:     centre of the image, in user coordinates.
#   width:    width in x user units; if given, height is derived from the
#             pixel aspect ratio.
#   height:   height in y user units; used (and width derived) only when
#             `width` is NULL.
#   interpol: passed to graphics::rasterImage(interpolate = ).
#
# x, y and one of width/height are required; a plot must already be open so
# par("usr") / par("pin") are meaningful.
add_Img <- function(obj, x = NULL, y = NULL, width = NULL, height = NULL, interpol = TRUE) {
  # Fix: scalar condition — use short-circuit || instead of the vectorised |.
  if (is.null(x) || is.null(y) || (is.null(width) && is.null(height))) {
    stop("Must provide args 'x', 'y', and 'width' or 'height'")
  }
  USR <- par()$usr  # plot region extremes c(x1, x2, y1, y2), user coords
  PIN <- par()$pin  # plot region size in inches, c(width, height)
  DIM <- dim(obj)   # pixel dimensions c(rows = y, cols = x, channels)
  if (!is.null(width)) {
    ARp <- DIM[1] / DIM[2]                       # pixel aspect ratio (y/x)
    WIDi <- width / (USR[2] - USR[1]) * PIN[1]   # width in inches
    HEIi <- WIDi * ARp                           # height in inches
    height <- HEIi / PIN[2] * (USR[4] - USR[3])  # height in user units
  } else {
    HEIi <- height / (USR[4] - USR[3]) * PIN[2]  # height in inches
    ARp <- DIM[2] / DIM[1]                       # pixel aspect ratio (x/y)
    WIDi <- HEIi * ARp                           # width in inches
    width <- WIDi / PIN[1] * (USR[2] - USR[1])   # width in user units
  }
  rasterImage(image = obj,
              xleft = x - width / 2, xright = x + width / 2,
              ybottom = y - height / 2, ytop = y + height / 2,
              interpolate = interpol)
}
|
library(tikzDevice)  # tikz() device: renders plots as TikZ for LaTeX
library(MASS)        # mvrnorm() for multivariate normal sampling
# Colour palette (hex RGB); presumably the Tango palette's dark shades -- TODO confirm.
darkPurple <- "#5c3566"
darkBlue <- "#204a87"
darkGreen <- "#4e9a06"
darkChocolate <- "#8f5902"
darkRed <- "#a40000"
darkOrange <- "#ce5c00"
darkButter <- "#c4a000"
# Brownian-motion covariance: k(x, y) = param * min(x, y), evaluated over
# all pairs of the input vectors; returns a length(x) x length(y) matrix.
kBrown <- function(x, y, param = 1) {
  outer(x, y, FUN = pmin) * param
}
# Exponential (Ornstein-Uhlenbeck) covariance:
# k(x, y) = param[1] * exp(-|x - y| / param[2]).
kExp <- function(x, y, param = c(1, .2)) {
  dist <- abs(outer(x, y, "-"))
  param[1] * exp(-dist / param[2])
}
# Squared-exponential (Gaussian) covariance:
# k(x, y) = param[1] * exp(-0.5 * ((x - y) / param[2])^2).
kGauss <- function(x, y, param = c(1, .2)) {
  scaled <- outer(x, y, "-") / param[2]
  param[1] * exp(-0.5 * scaled^2)
}
# Matern 3/2 covariance with variance param[1] and lengthscale param[2]:
# k(x, y) = param[1] * (1 + d) * exp(-d), d = sqrt(3)|x - y| / param[2].
kMat32 <- function(x, y, param = c(1, .2)) {
  d <- sqrt(3) * abs(outer(x, y, "-")) / param[2]
  param[1] * exp(-d) * (1 + d)
}
# Matern 5/2 covariance with variance param[1] and lengthscale param[2]:
# k(x, y) = param[1] * (1 + d + d^2/3) * exp(-d), d = sqrt(5)|x - y| / param[2].
kMat52 <- function(x, y, param = c(1, .2)) {
  d <- sqrt(5) * abs(outer(x, y, "-")) / param[2]
  param[1] * exp(-d) * (1 + d + d^2 / 3)
}
# White-noise covariance: k(x, y) = param when x == y, 0 otherwise.
# Fix: the original body `param*outer(x,y,"-")==0` parsed as
# `(param * outer(x, y, "-")) == 0`, so it returned a logical matrix and the
# variance `param` was silently ignored. Parenthesise the indicator so that
# `param` actually scales it.
# NOTE(review): exact `== 0` on float differences only matches bit-identical
# inputs, which is how this file uses it (the same grid passed twice).
kWhite <- function(x, y, param = 1) {
  param * (outer(x, y, "-") == 0)
}
#################################
## 1d
# Standard normal density on a grid, rendered to LaTeX via tikzDevice.
x <- seq(-5,5,length=101)
f <- dnorm(x,0,1)
tikz('MVN_dens1.tex', standAlone = TRUE, width=5, height=5)
par(mar=c(4.5,5.1,1.5,1.5))
plot(x,f,type='l',lwd=2,col=darkBlue,ylab="density",cex.axis=2,cex.lab=2)
dev.off()
# Compile the generated .tex to PDF (style note: prefer TRUE over T).
tools::texi2dvi('MVN_dens1.tex',pdf=T)
#################################
## dim d
# Multivariate normal density of point x under mean m and covariance K.
# Returns a 1x1 matrix (the quadratic form keeps matrix shape); callers
# assign it into a numeric vector, which drops the dimensions.
multdens <- function(x, m, K) {
  centered <- matrix(x - m, ncol = 1)
  quad_form <- t(centered) %*% solve(K) %*% centered
  norm_const <- sqrt((2 * pi)^length(m) * det(K))
  exp(-0.5 * quad_form) / norm_const
}
# Evaluate the bivariate density on a 31x31 grid and draw a perspective plot.
m <- c(0,0)
K <- matrix(c(3,2,2,3),2)
g <- seq(-5,5,length=31)
G <- as.matrix(expand.grid(g,g))
# NOTE(review): `F` masks base R's F/FALSE shorthand from here on.
F <- rep(0,dim(G)[1])
for(i in 1:dim(G)[1]){
F[i] <- multdens(G[i,],m,K)
}
tikz('MVN_dens2.tex', standAlone = TRUE, width=5, height=5)
par(mar=c(.5,2.1,.5,1.5))
persp(g,g,matrix(F,length(g)),xlab="$x_1$",ylab="$x_2$",zlab="density",cex.axis=2,cex.lab=2,theta = 20, phi = 25)
dev.off()
tools::texi2dvi('MVN_dens2.tex',pdf=T)
#################################
## samples
# Scatter plots of draws from three bivariate distributions (MASS::mvrnorm).
K <- matrix(c(1,2,2,7),2)
Y <- mvrnorm(700,c(0,2),K)
tikz('MVN_gaussvec1.tex', standAlone = TRUE, width=5, height=5)
par(mar=c(4.5,5.1,1.5,1.5))
plot(Y,xlab='$Y_1$',ylab='$Y_2$',asp=1,col=rgb(0,0,0,.5),cex.axis=2,cex.lab=2)
dev.off()
tools::texi2dvi('MVN_gaussvec1.tex',pdf=T)
# Independent components (identity covariance).
K <- matrix(c(1,0,0,1),2)
Y <- mvrnorm(1000,c(0,0),K)
tikz('MVN_gaussvec2.tex', standAlone = TRUE, width=5, height=5)
par(mar=c(4.5,5.1,1.5,1.5))
plot(Y,xlab='$Y_1$',ylab='$Y_2$',asp=1,col=rgb(0,0,0,.5),cex.axis=2,cex.lab=2)
dev.off()
tools::texi2dvi('MVN_gaussvec2.tex',pdf=T)
# Non-Gaussian example: flip the sign of Y1 with probability 0.3, so the
# marginals stay normal but the joint distribution is not Gaussian.
K <- matrix(c(4,-2,-2,1.5),2)
Y <- mvrnorm(1500,c(0,0),K)
for(i in 1:1500){
if(runif(1)>.7 ) Y[i,1] <- -Y[i,1]
}
tikz('MVN_gaussvec3.tex', standAlone = TRUE, width=5,height=5)
par(mar=c(4.5,5.1,1.5,1.5))
plot(Y,xlab='$Y_1$',ylab='$Y_2$',asp=1,col=rgb(0,0,0,.5),cex.axis=2,cex.lab=2)
dev.off()
tools::texi2dvi('MVN_gaussvec3.tex',pdf=T)
##################################################################"
### plot kernel
# Matern 5/2 kernel with a very large lengthscale (50) on [-5, 5], then
# sample paths from the zero-mean GP with that covariance.
x <- seq(from=-5, to=5, length=201)
K <- kMat52(x,x,c(1,50))
tikz('MVN_kern150.tex', standAlone = TRUE, width=5,height=5)
par(mar=c(4.5,5.1,1.5,1.5))
# Column 100 ~ k(x, 0): on this grid x[100] is -0.05, close to (not exactly) 0.
plot(x,K[,100],type='l',ylab='k(x,0)',cex.axis=1.5,cex.lab=2,ylim=c(0,3))
dev.off()
tools::texi2dvi('MVN_kern150.tex',pdf=T)
m <- 0*x
Z <- mvrnorm(200,m,K)
tikz('MVN_traj150.tex', standAlone = TRUE, width=5,height=5)
plot(x,Z[1,],ylim=c(-6,6),type='l',ylab="samples of Z",cex.axis=1.5,cex.lab=2)
# Overlay further paths; note only 100 of the 200 draws are plotted.
for(i in 2:100){
lines(x,Z[i,],col=i)
}
dev.off()
tools::texi2dvi('MVN_traj150.tex',pdf=T)
| /lectures/2_stat_models/figures/R/a_multivariate_normal.R | no_license | NicolasDurrande/Ecole_Chercheurs_Mexico | R | false | false | 3,244 | r | library(tikzDevice)
library(MASS)
darkPurple <- "#5c3566"
darkBlue <- "#204a87"
darkGreen <- "#4e9a06"
darkChocolate <- "#8f5902"
darkRed <- "#a40000"
darkOrange <- "#ce5c00"
darkButter <- "#c4a000"
kBrown <- function(x,y,param=1){
param*outer(x,y,"pmin")
}
kExp <- function(x,y,param=c(1,.2)){
param[1]*exp(-abs(outer(x,y,"-"))/param[2])
}
kGauss <- function(x,y,param=c(1,.2)){
param[1]*exp(-.5*(outer(x,y,"-")/param[2])^2)
}
kMat32 <- function(x,y,param=c(1,.2)){
d <- sqrt(3)*abs(outer(x,y,"-"))/param[2]
return(param[1]*(1 + d)*exp(-d))
}
kMat52 <- function(x,y,param=c(1,.2)){
d <- sqrt(5)*abs(outer(x,y,"-"))/param[2]
return(param[1]*(1 + d + 1/3*d^2)*exp(-d))
}
# White-noise covariance: k(x, y) = param when x == y, 0 otherwise.
# Fix: the original body `param*outer(x,y,"-")==0` parsed as
# `(param * outer(x, y, "-")) == 0`, so it returned a logical matrix and the
# variance `param` was silently ignored. Parenthesise the indicator so that
# `param` actually scales it.
# NOTE(review): exact `== 0` on float differences only matches bit-identical
# inputs, which is how this file uses it (the same grid passed twice).
kWhite <- function(x, y, param = 1) {
  param * (outer(x, y, "-") == 0)
}
#################################
## 1d
x <- seq(-5,5,length=101)
f <- dnorm(x,0,1)
tikz('MVN_dens1.tex', standAlone = TRUE, width=5, height=5)
par(mar=c(4.5,5.1,1.5,1.5))
plot(x,f,type='l',lwd=2,col=darkBlue,ylab="density",cex.axis=2,cex.lab=2)
dev.off()
tools::texi2dvi('MVN_dens1.tex',pdf=T)
#################################
## dim d
multdens <- function(x,m,K){
d <- length(m)
xc <- matrix(x-m,ncol=1)
return(1/sqrt((2*pi)^d*det(K)) * exp(-.5*t(xc)%*%solve(K)%*%xc))
}
m <- c(0,0)
K <- matrix(c(3,2,2,3),2)
g <- seq(-5,5,length=31)
G <- as.matrix(expand.grid(g,g))
F <- rep(0,dim(G)[1])
for(i in 1:dim(G)[1]){
F[i] <- multdens(G[i,],m,K)
}
tikz('MVN_dens2.tex', standAlone = TRUE, width=5, height=5)
par(mar=c(.5,2.1,.5,1.5))
persp(g,g,matrix(F,length(g)),xlab="$x_1$",ylab="$x_2$",zlab="density",cex.axis=2,cex.lab=2,theta = 20, phi = 25)
dev.off()
tools::texi2dvi('MVN_dens2.tex',pdf=T)
#################################
## samples
K <- matrix(c(1,2,2,7),2)
Y <- mvrnorm(700,c(0,2),K)
tikz('MVN_gaussvec1.tex', standAlone = TRUE, width=5, height=5)
par(mar=c(4.5,5.1,1.5,1.5))
plot(Y,xlab='$Y_1$',ylab='$Y_2$',asp=1,col=rgb(0,0,0,.5),cex.axis=2,cex.lab=2)
dev.off()
tools::texi2dvi('MVN_gaussvec1.tex',pdf=T)
K <- matrix(c(1,0,0,1),2)
Y <- mvrnorm(1000,c(0,0),K)
tikz('MVN_gaussvec2.tex', standAlone = TRUE, width=5, height=5)
par(mar=c(4.5,5.1,1.5,1.5))
plot(Y,xlab='$Y_1$',ylab='$Y_2$',asp=1,col=rgb(0,0,0,.5),cex.axis=2,cex.lab=2)
dev.off()
tools::texi2dvi('MVN_gaussvec2.tex',pdf=T)
K <- matrix(c(4,-2,-2,1.5),2)
Y <- mvrnorm(1500,c(0,0),K)
for(i in 1:1500){
if(runif(1)>.7 ) Y[i,1] <- -Y[i,1]
}
tikz('MVN_gaussvec3.tex', standAlone = TRUE, width=5,height=5)
par(mar=c(4.5,5.1,1.5,1.5))
plot(Y,xlab='$Y_1$',ylab='$Y_2$',asp=1,col=rgb(0,0,0,.5),cex.axis=2,cex.lab=2)
dev.off()
tools::texi2dvi('MVN_gaussvec3.tex',pdf=T)
##################################################################"
### plot kernel
x <- seq(from=-5, to=5, length=201)
K <- kMat52(x,x,c(1,50))
tikz('MVN_kern150.tex', standAlone = TRUE, width=5,height=5)
par(mar=c(4.5,5.1,1.5,1.5))
plot(x,K[,100],type='l',ylab='k(x,0)',cex.axis=1.5,cex.lab=2,ylim=c(0,3))
dev.off()
tools::texi2dvi('MVN_kern150.tex',pdf=T)
m <- 0*x
Z <- mvrnorm(200,m,K)
tikz('MVN_traj150.tex', standAlone = TRUE, width=5,height=5)
plot(x,Z[1,],ylim=c(-6,6),type='l',ylab="samples of Z",cex.axis=1.5,cex.lab=2)
for(i in 2:100){
lines(x,Z[i,],col=i)
}
dev.off()
tools::texi2dvi('MVN_traj150.tex',pdf=T)
|
library(ggplot2)
#' plotLines
#'
#' Lineplots comparing continuous variable scores across levels of another var
#'
#' Inputs:
#' df: Dataframe with vars of interest.
#' title: Title above chart
#' outDir: Directory to save plot to
# Line plot comparing the proportion of "successful" simulations across levels
# of the path coefficient, one line per Path level.
#
# Args:
#   df:     data frame with columns scd_slp_to_obj_slp, prop and Path (factor).
#   title:  title placed above the chart.
#   outDir: directory the .png and .svg files are written to.
plotLines <- function(df, title, outDir){
  # Create plot
  p <- ggplot(data=df, aes(x=scd_slp_to_obj_slp, y=prop, color=Path,
                           group=Path, fill=Path))
  # Add lines for each group
  # NOTE(review): ggplot2 >= 3.4 prefers linewidth= over size= for lines;
  # size= kept here for compatibility with older ggplot2 versions.
  p <- p + geom_line(aes(linetype=Path), stat='identity', size=.85, alpha=0.9)
  # Unique shape for each group placed at each data point
  p <- p + geom_point(aes(shape=Path), size=2.5, alpha=.45)
  # Proportions live in [0, 1]
  p <- p + scale_y_continuous(limits=c(0,1))
  # Reverse the x-axis (negative coefficients) and pad its ends.
  # Fix: the original added scale_x_continuous(expand=c(0.05, 0.05)) and then
  # scale_x_reverse(); the second x scale REPLACES the first (ggplot2 warns
  # "Scale for 'x' is already present"), so the expand padding was silently
  # dropped. Supply the padding to the reversed scale directly.
  p <- p + scale_x_reverse(expand=c(0.05, 0.05))
  # Remove gray background, format x-axis text, format legend
  p <- p + theme(panel.grid.major=element_blank(),
                 panel.grid.minor=element_blank(),
                 panel.background=element_blank(),
                 axis.line=element_line(colour="black"),
                 axis.text.x=element_text(angle=0, size=14),
                 axis.text.y=element_text(angle=0, size=14),
                 legend.key=element_rect(color=NA, fill=NA),
                 legend.background=element_blank(),
                 legend.key.width=unit(3, "line"),
                 legend.justification=c(1, 1),
                 legend.position=c(1, 1))
  # Add axis labels
  p <- p + labs(y='Proportion of Simulations with RMSEA<.05 and Path of Interest P<.05',
                x='Path Coefficient for Objective Performance Slope ON SCD Slope')
  # Remove any components to the legend that could relate to alpha or size
  p <- p + scale_alpha(guide='none')
  p <- p + scale_size(guide='none')
  # Add a title to the plot
  p <- p + ggtitle(title)
  # Save Plots
  pngOut <- paste(outDir, "/parallelGrowthPower-linePlot.png", sep="")
  svgOut <- paste(outDir, "/parallelGrowthPower-linePlot.svg", sep="")
  ggsave(pngOut, p)
  ggsave(svgOut, p)
}
# Path to your csv file
# NOTE(review): csv_path is an empty placeholder; read.csv("") errors until a
# real path is filled in.
csv_path <- ""
df <- read.csv(csv_path)
# Reshape wide results into long form: one row per (coefficient, proportion),
# with a Path label for each of the two monitored structural paths.
temp1 <- df[,c('scd_slp_to_obj_slp', 'rmsea05_si_path05_prop')]
temp2 <- df[,c('scd_slp_to_obj_slp', 'rmsea05_ss_path05_prop')]
colnames(temp1) <- c('scd_slp_to_obj_slp','prop')
temp1[,'Path'] <- "Objective Performance Slope ON SCD Intercept"
colnames(temp2) <- c('scd_slp_to_obj_slp','prop')
temp2[,'Path'] <- "Objective Performance Slope ON SCD Slope"
df <- rbind(temp1, temp2)
df$Path <- as.factor(df$Path)
# # Save data as csv
# write.csv(df, "./parallelGrowth.csv")
plotLines(df, 'Parallel Growth Power Analysis. 100 Control, 100 MCI', './') | /scripts/plot-parallel-growth-power-analysis.r | permissive | rviviano/data-tools | R | false | false | 2,941 | r | library(ggplot2)
#' plotLines
#'
#' Lineplots comparing continuous variable scores across levels of another var
#'
#' Inputs:
#' df: Dataframe with vars of interest.
#' title: Title above chart
#' outDir: Directory to save plot to
# Line plot comparing the proportion of "successful" simulations across levels
# of the path coefficient, one line per Path level.
#
# Args:
#   df:     data frame with columns scd_slp_to_obj_slp, prop and Path (factor).
#   title:  title placed above the chart.
#   outDir: directory the .png and .svg files are written to.
plotLines <- function(df, title, outDir){
  # Create plot
  p <- ggplot(data=df, aes(x=scd_slp_to_obj_slp, y=prop, color=Path,
                           group=Path, fill=Path))
  # Add lines for each group
  # NOTE(review): ggplot2 >= 3.4 prefers linewidth= over size= for lines;
  # size= kept here for compatibility with older ggplot2 versions.
  p <- p + geom_line(aes(linetype=Path), stat='identity', size=.85, alpha=0.9)
  # Unique shape for each group placed at each data point
  p <- p + geom_point(aes(shape=Path), size=2.5, alpha=.45)
  # Proportions live in [0, 1]
  p <- p + scale_y_continuous(limits=c(0,1))
  # Reverse the x-axis (negative coefficients) and pad its ends.
  # Fix: the original added scale_x_continuous(expand=c(0.05, 0.05)) and then
  # scale_x_reverse(); the second x scale REPLACES the first (ggplot2 warns
  # "Scale for 'x' is already present"), so the expand padding was silently
  # dropped. Supply the padding to the reversed scale directly.
  p <- p + scale_x_reverse(expand=c(0.05, 0.05))
  # Remove gray background, format x-axis text, format legend
  p <- p + theme(panel.grid.major=element_blank(),
                 panel.grid.minor=element_blank(),
                 panel.background=element_blank(),
                 axis.line=element_line(colour="black"),
                 axis.text.x=element_text(angle=0, size=14),
                 axis.text.y=element_text(angle=0, size=14),
                 legend.key=element_rect(color=NA, fill=NA),
                 legend.background=element_blank(),
                 legend.key.width=unit(3, "line"),
                 legend.justification=c(1, 1),
                 legend.position=c(1, 1))
  # Add axis labels
  p <- p + labs(y='Proportion of Simulations with RMSEA<.05 and Path of Interest P<.05',
                x='Path Coefficient for Objective Performance Slope ON SCD Slope')
  # Remove any components to the legend that could relate to alpha or size
  p <- p + scale_alpha(guide='none')
  p <- p + scale_size(guide='none')
  # Add a title to the plot
  p <- p + ggtitle(title)
  # Save Plots
  pngOut <- paste(outDir, "/parallelGrowthPower-linePlot.png", sep="")
  svgOut <- paste(outDir, "/parallelGrowthPower-linePlot.svg", sep="")
  ggsave(pngOut, p)
  ggsave(svgOut, p)
}
# Path to your csv file
csv_path <- ""
df <- read.csv(csv_path)
temp1 <- df[,c('scd_slp_to_obj_slp', 'rmsea05_si_path05_prop')]
temp2 <- df[,c('scd_slp_to_obj_slp', 'rmsea05_ss_path05_prop')]
colnames(temp1) <- c('scd_slp_to_obj_slp','prop')
temp1[,'Path'] <- "Objective Performance Slope ON SCD Intercept"
colnames(temp2) <- c('scd_slp_to_obj_slp','prop')
temp2[,'Path'] <- "Objective Performance Slope ON SCD Slope"
df <- rbind(temp1, temp2)
df$Path <- as.factor(df$Path)
# # Save data as csv
# write.csv(df, "./parallelGrowth.csv")
plotLines(df, 'Parallel Growth Power Analysis. 100 Control, 100 MCI', './') |
source('./AIR/sgg_separate.R', encoding='utf-8')
source('./AIR/packages_need.R', encoding='utf-8')
#***************************************#
# ๋ฐ์ดํฐ๋ก๋ #
#***************************************#
# AIRSEOUL <== ๋๊ธฐ๋ถ์ ์ด๊ฑฐ ์ธ
# NOTE(review): machine-specific absolute path (setwd in a script is fragile);
# the Korean comments throughout this file are mojibake and left untouched.
setwd("C:\\Users\\hanso\\Desktop\\๋น
ํต๋ถ\\gitgit")
# Load the Seoul air-quality table, dropping the first (index) column.
AIR <- read.csv('./data/airseoul.csv', header = T)[,-1]
AIR$SGG <- factor(AIR$SGG)
AIR$week <- as.Date(AIR$week)
#***************************************#
# ๊ธฐ๋ณธ ์๊ณ์ด ๋ถํด #
#***************************************#
#***************************************#
# forecast:: ๊ณ์ ๋ณ๋ ์๊ฐํ #
#***************************************#
# x1 <- AIR %>% group_by(SGG) %>% select(NO2)
# #x1 <- sgg1 %>% select(NO2)
# df_ts <- ts(x1, start = 2009, end = 2019, freq=52)
# df_ts_test <- ts(x1, start=2020, end = c(2020, 11), freq=52)
# seasonplot(df_ts, xlab="", main="",
# year.labels=TRUE, year.labels.left=TRUE, col=1:12, pch=20)
#***************************************#
# ์ ์ฒด ์๊ณ์ด ๊ทธ๋ฆผ #
# : ๋ถ์ ์ , ๊ตฌ๋ง๋ค ์๊ณ์ด ๊ทธ๋ฆผ #
#***************************************#
# One NO2 line plot per district on a 4x2 grid of base-graphics panels.
# NOTE(review): with more than 8 districts the 4x2 layout spills onto new pages.
par(mfrow = c(4,2))
kind_ssg <-unique(AIR$SGG)
for (i in kind_ssg) {
plot(AIR[AIR$SGG == i,]$NO2, type = "l")
}
#***************************************#
# Faceted weekly series per district, one block per pollutant
# (NO2, O3, CO, SO2, PM10), each followed by a loess trend overlay.
p=ggplot(AIR, aes(x=week,y=NO2))+
geom_line(mapping=aes(x=week,y=NO2,color=NO2))+
facet_wrap(~SGG)+
labs(title="๊ตฌ๋ณ ๋๊ธฐ์ง::NO2 ํํฉ")+
ggthemes::theme_hc()
#์ถ์ธ์ ๊ทธ๋ฆฌ๊ธฐ
p + stat_smooth(color = "yellow", method = "loess")
p=ggplot(AIR, aes(x=week,y=O3))+
geom_line(mapping=aes(x=week,y=O3,color=O3))+
facet_wrap(~SGG)+
labs(title="๊ตฌ๋ณ ๋๊ธฐ์ง::O3 ํํฉ")+
ggthemes::theme_hc()
#์ถ์ธ์ ๊ทธ๋ฆฌ๊ธฐ
p + stat_smooth(color = "yellow", method = "loess")
p=ggplot(AIR, aes(x=week,y=CO))+
geom_line(mapping=aes(x=week,y=CO,color=CO))+
facet_wrap(~SGG)+
labs(title="๊ตฌ๋ณ ๋๊ธฐ์ง::CO ํํฉ")+
ggthemes::theme_hc()
#์ถ์ธ์ ๊ทธ๋ฆฌ๊ธฐ
p + stat_smooth(color = "yellow", method = "loess")
p=ggplot(AIR, aes(x=week,y=SO2))+
geom_line(mapping=aes(x=week,y=SO2,color=SO2))+
facet_wrap(~SGG)+
labs(title="๊ตฌ๋ณ ๋๊ธฐ์ง::SO2 ํํฉ")+
ggthemes::theme_hc()
#์ถ์ธ์ ๊ทธ๋ฆฌ๊ธฐ
p + stat_smooth(color = "yellow", method = "loess")
p=ggplot(AIR, aes(x=week,y=PM10))+
geom_line(mapping=aes(x=week,y=PM10,color=PM10))+
facet_wrap(~SGG)+
labs(title="๊ตฌ๋ณ ๋๊ธฐ์ง::PM10 ํํฉ")+
ggthemes::theme_hc()
#์ถ์ธ์ ๊ทธ๋ฆฌ๊ธฐ
p + stat_smooth(color = "yellow", method = "loess")
#***************************************#
# ๋ถ์ #
# ๊ฐ๋จ๊ตฌ - NO2 #
#***************************************#
# Train/test split for district 1 at 2020-01-06.
# NOTE(review): `sgg1` comes from the sourced sgg_separate.R script.
sgg1.tr = sgg1 %>% filter(week < '2020-01-06')
sgg1.te = sgg1 %>% filter(week >= '2020-01-06')
# NOTE(review): both ts objects are built from sgg1.tr -- ts.test probably
# intended sgg1.te; confirm before relying on it.
ts.train <- ts(sgg1.tr, start = c(2009,12,28), frequency = 52)
ts.test <- ts(sgg1.tr, start = c(2020,1,6), frequency = 52)
plot(ts.train[,-c(1,2)])
#***************************************#
# Plot a district's table split into a 2010-2019 window and a 2020 window.
# The main= titles are mojibake Korean literals that contain embedded
# newlines; they are kept byte-identical.
f = function(sgg){
ts1 <- ts(sgg, start = c(2009,12,28), end = c(2020,1,6), freq=52)
ts1_test <- ts(sgg, start=c(2020,1,6),end = c(2020,11,25), freq=52)
ts1 <- ts1[,-c(1,2)]; ts1_test <- ts1_test[,-c(1,2)]
plot(ts1, type="l", main="2010๋
~ 2019๋
")
plot(ts1_test, type="o", main="2020๋
")
}
f(sgg1); #...# ; f(sgg39)
#***************************************#
#NO2 - ๋์๋จ๋ถ ๊ทธ๋ ค์ ์ผ๋ง๋ ๋ค๋ฅธ์ง ์์๋ด
#***************************************#
# As above, but for a single numeric column and with a caller-supplied ylim
# so different districts can be compared on the same vertical scale.
f = function(sgg, y){
ts1 <- ts(sgg, start = c(2009,12,28), end = c(2020,1,6), freq=52)
ts1_test <- ts(sgg, start=c(2020,1,6),end = c(2020,11,25), freq=52)
plot(ts1, type="l", main="2010๋
~ 2019๋
",ylim=y)
plot(ts1_test, type="o", main="2020๋
",ylim=y)
}
f(sgg1$NO2,c(0,0.08)); #...# ; f(sgg39)
#***************************************#
# Overlay one series on the current device in colour i; par(new = TRUE)
# makes the next plot() call draw on top of this one instead of clearing it.
f = function(sgg, y, i) {
  series <- ts(sgg, start = c(2009, 12, 28), freq = 52)
  plot(series, ylim = y, col = i, lwd = 1.5)
  par(new = TRUE)
}
# Overlay four districts on one panel; the trailing mojibake comments are
# presumably the compass directions east/west/south/north -- TODO confirm.
f(sgg3$NO2,c(0,0.08),3); #๋
f(sgg6$NO2,c(0,0.08),6); #์
f(sgg1$NO2,c(0,0.08),1); #๋จ
f(sgg5$NO2,c(0,0.08),5); #๋ถ
# legend
# ๊ฐ๋ถ์ด ๋๊ธฐ ์ค์ผ ๋๋๊ฐ ๋์ฒด์ ์ผ๋ก ๋ฎ์ ๊ฑธ ๋ณผ ์ ์์. (์ถ์์ ๋ฐ๋๋ถ๋?)
#***************************************#
# ggplot version: draw the series and mark extreme points via stat_peaks()/
# stat_valleys() (ggpmisc extension: red maxima, blue minima, "%Y-%m" labels).
f = function(sgg){
ts1 <- ts(sgg, start = c(2009,12,28), freq=52)
ggplot(ts1, as.numeric = FALSE) +
geom_line() +
stat_peaks(colour = "red") + #๊ทน๋์ ๋นจ๊ฐ์ ์ผ๋ก ํ์
stat_peaks(geom = "text", colour = "red", #๊ทน๋์ ๋ ์ง("%Y-%m") ํ์
vjust = -0.5, x.label.fmt = "%Y-%m") +
stat_valleys(colour = "blue") + #๊ทน์์ ํ๋์ ์ผ๋ก ํ์
stat_valleys(geom = "text", colour = "blue", angle = 45,
vjust = 1.5, hjust = 1, x.label.fmt = "%Y-%m") #๊ทน์์ ๋ ์ง("%Y-%m") ํ์
}
f(sgg3$NO2); #๋
f(sgg6$NO2); #์
f(sgg1$NO2); #๋จ
f(sgg5$NO2); #๋ถ
# legend
#***************************************#
# ๋๋ก
# Road-side comparison: same overlay helper as above, then twelve districts
# drawn onto a single panel via par(new = TRUE).
f = function(sgg, y, i){
ts1 <- ts(sgg, start = c(2009,12,28), freq=52)
plot(ts1,ylim=y, col=i,lwd=1.5)
par(new = TRUE)
}
f(sgg2$NO2,c(0,0.08),3);
f(sgg4$NO2,c(0,0.08),6);
f(sgg7$NO2,c(0,0.08),1);
f(sgg14$NO2,c(0,0.08),14);
f(sgg17$NO2,c(0,0.08),17);
f(sgg27$NO2,c(0,0.08),27);
f(sgg30$NO2,c(0,0.08),30);
f(sgg35$NO2,c(0,0.08),35);
f(sgg36$NO2,c(0,0.08),36);
f(sgg37$NO2,c(0,0.08),37);
f(sgg38$NO2,c(0,0.08),38);
f(sgg39$NO2,c(0,0.08),39);
# 3๊ฐ๋ฅผ ๋ณด๋ฉด, ํ๊ฐ๋๋ก๋ ๋๊ธฐ์ค์ผ ๋๋๊ฐ ๊ฐํ ๊ฒ์ ๋ณผ ์ ์์
# ์ฒญ๋ฆ๋ก๋ ๋๊ธฐ์ค์ผ ๋๋๊ฐ ๋ค๋ฅธ ๊ณณ์ ๋นํด ์ฝํจ.
# ๋๋ก๋ง๋ค ๋ค๋ฅธ ๊ฒ์ ๋ณผ ์ ์์.
# legend
#***************************************#ใ
ใ
# Schematic train/test timeline: an arrow with coloured year markers.
# NOTE(review): `train` and `test` are only assigned further below
# (train = AIR[...]), so run top-to-bottom this section errors at points();
# `train*0+0.5` also relies on data-frame arithmetic that fails on non-numeric
# columns -- the markers were probably meant to be `tr*0+0.5` / `te*0+0.5`.
tr = 10:19; te = 19:20
par(mar=c(0,0,0,0))
plot(0,0,xlim=c(0,25),ylim=c(0,2),xaxt="n",yaxt="n",bty="n",xlab="",ylab="",type="n")
arrows(2.5,0.5,22,0.5,0.05)
points(tr, train*0+0.5, pch=19, col="blue")
points(te, test*0+0.5, pch=19, col="red")
text(24,0.5,"Year")
text(10,0.6,"Train data",col="blue")
text(20,0.6,"Test data",col="red")
#***************************************#
# ํ๊ท ์ ์ธ ์๊ณ์ด(train dataset)
#***************************************#
# Split all districts at 2020-01-06, then plot the weekly NO2 mean across
# districts over the training period.
train = AIR[AIR$week < '2020-01-06',]
test = AIR[AIR$week >= '2020-01-06',]
#***************************************#
# NOTE(review): `c` (a data frame) shadows base::c as a variable name here;
# function calls like c(0, 0.07) still resolve to base::c, but renaming
# would be safer.
c <- train %>% group_by(week) %>% summarise(mean(NO2))
tts <- ts(c$`mean(NO2)`, start = c(2009,12,28), frequency = 52)
plot(tts, ylim = c(0,0.07))
#***************************************#
# Peaks/valleys plot (ggpmisc stat_peaks/stat_valleys) of the mean NO2
# series built just above.
f = function(sgg){
ts1 <- ts(sgg, start = c(2009,12,28), freq=52)
ggplot(ts1, as.numeric = FALSE) +
geom_line() +
stat_peaks(colour = "red") + #๊ทน๋์ ๋นจ๊ฐ์ ์ผ๋ก ํ์
stat_valleys(colour = "blue")+#๊ทน์์ ํ๋์ ์ผ๋ก ํ์
ggthemes::theme_hc()
}
f(c$`mean(NO2)`)
#***************************************#
#sgg1 == ๊ฐ๋จ๊ตฌ
#write.table(gangnam, file = "gangnam.txt", row.names = F, quote = F) #sas์์ ์ธ txtํ์ผ ์์ฑ
# District-1 NO2: raw series vs its lag-12 difference side by side, then a
# seasonal-trend decomposition via stl().
k <- ts(sgg1$NO2, start = c(2009,12), end = c(2020, 11), freq = 52)
diff.k <- diff(k, lag = 12) #์ฐจ๋ถ
par(mfrow = c(1,2))
plot(sgg1$NO2, type="l", main="sgg1::NO2 2010๋
~ 2020๋
", axes=F)
axis(2,las=2)
plot(diff.k, main="sgg1::NO2 ์ฐจ๋ถ", axes=F)
axis(2,las=2); axis(1,las=1)
#์๊ณ์ด ๋ถํด
# STL with a periodic seasonal window (seasonal component fixed per cycle).
stl.run <- stl(k, s.window = "periodic")
plot(stl.run)
| /AIR/original_EDA.R | no_license | S0-HYUN/Machine-Learning-Project | R | false | false | 7,486 | r | source('./AIR/sgg_separate.R', encoding='utf-8')
source('./AIR/packages_need.R', encoding='utf-8')
#***************************************#
# ๋ฐ์ดํฐ๋ก๋ #
#***************************************#
# AIRSEOUL <== ๋๊ธฐ๋ถ์ ์ด๊ฑฐ ์ธ
setwd("C:\\Users\\hanso\\Desktop\\๋น
ํต๋ถ\\gitgit")
AIR <- read.csv('./data/airseoul.csv', header = T)[,-1]
AIR$SGG <- factor(AIR$SGG)
AIR$week <- as.Date(AIR$week)
#***************************************#
# ๊ธฐ๋ณธ ์๊ณ์ด ๋ถํด #
#***************************************#
#***************************************#
# forecast:: ๊ณ์ ๋ณ๋ ์๊ฐํ #
#***************************************#
# x1 <- AIR %>% group_by(SGG) %>% select(NO2)
# #x1 <- sgg1 %>% select(NO2)
# df_ts <- ts(x1, start = 2009, end = 2019, freq=52)
# df_ts_test <- ts(x1, start=2020, end = c(2020, 11), freq=52)
# seasonplot(df_ts, xlab="", main="",
# year.labels=TRUE, year.labels.left=TRUE, col=1:12, pch=20)
#***************************************#
# ์ ์ฒด ์๊ณ์ด ๊ทธ๋ฆผ #
# : ๋ถ์ ์ , ๊ตฌ๋ง๋ค ์๊ณ์ด ๊ทธ๋ฆผ #
#***************************************#
par(mfrow = c(4,2))
kind_ssg <-unique(AIR$SGG)
for (i in kind_ssg) {
plot(AIR[AIR$SGG == i,]$NO2, type = "l")
}
#***************************************#
p=ggplot(AIR, aes(x=week,y=NO2))+
geom_line(mapping=aes(x=week,y=NO2,color=NO2))+
facet_wrap(~SGG)+
labs(title="๊ตฌ๋ณ ๋๊ธฐ์ง::NO2 ํํฉ")+
ggthemes::theme_hc()
#์ถ์ธ์ ๊ทธ๋ฆฌ๊ธฐ
p + stat_smooth(color = "yellow", method = "loess")
p=ggplot(AIR, aes(x=week,y=O3))+
geom_line(mapping=aes(x=week,y=O3,color=O3))+
facet_wrap(~SGG)+
labs(title="๊ตฌ๋ณ ๋๊ธฐ์ง::O3 ํํฉ")+
ggthemes::theme_hc()
#์ถ์ธ์ ๊ทธ๋ฆฌ๊ธฐ
p + stat_smooth(color = "yellow", method = "loess")
p=ggplot(AIR, aes(x=week,y=CO))+
geom_line(mapping=aes(x=week,y=CO,color=CO))+
facet_wrap(~SGG)+
labs(title="๊ตฌ๋ณ ๋๊ธฐ์ง::CO ํํฉ")+
ggthemes::theme_hc()
#์ถ์ธ์ ๊ทธ๋ฆฌ๊ธฐ
p + stat_smooth(color = "yellow", method = "loess")
p=ggplot(AIR, aes(x=week,y=SO2))+
geom_line(mapping=aes(x=week,y=SO2,color=SO2))+
facet_wrap(~SGG)+
labs(title="๊ตฌ๋ณ ๋๊ธฐ์ง::SO2 ํํฉ")+
ggthemes::theme_hc()
#์ถ์ธ์ ๊ทธ๋ฆฌ๊ธฐ
p + stat_smooth(color = "yellow", method = "loess")
p=ggplot(AIR, aes(x=week,y=PM10))+
geom_line(mapping=aes(x=week,y=PM10,color=PM10))+
facet_wrap(~SGG)+
labs(title="๊ตฌ๋ณ ๋๊ธฐ์ง::PM10 ํํฉ")+
ggthemes::theme_hc()
#์ถ์ธ์ ๊ทธ๋ฆฌ๊ธฐ
p + stat_smooth(color = "yellow", method = "loess")
#***************************************#
# ๋ถ์ #
# ๊ฐ๋จ๊ตฌ - NO2 #
#***************************************#
sgg1.tr = sgg1 %>% filter(week < '2020-01-06')
sgg1.te = sgg1 %>% filter(week >= '2020-01-06')
ts.train <- ts(sgg1.tr, start = c(2009,12,28), frequency = 52)
ts.test <- ts(sgg1.tr, start = c(2020,1,6), frequency = 52)
plot(ts.train[,-c(1,2)])
#***************************************#
f = function(sgg){
ts1 <- ts(sgg, start = c(2009,12,28), end = c(2020,1,6), freq=52)
ts1_test <- ts(sgg, start=c(2020,1,6),end = c(2020,11,25), freq=52)
ts1 <- ts1[,-c(1,2)]; ts1_test <- ts1_test[,-c(1,2)]
plot(ts1, type="l", main="2010๋
~ 2019๋
")
plot(ts1_test, type="o", main="2020๋
")
}
f(sgg1); #...# ; f(sgg39)
#***************************************#
#NO2 - ๋์๋จ๋ถ ๊ทธ๋ ค์ ์ผ๋ง๋ ๋ค๋ฅธ์ง ์์๋ด
#***************************************#
f = function(sgg, y){
ts1 <- ts(sgg, start = c(2009,12,28), end = c(2020,1,6), freq=52)
ts1_test <- ts(sgg, start=c(2020,1,6),end = c(2020,11,25), freq=52)
plot(ts1, type="l", main="2010๋
~ 2019๋
",ylim=y)
plot(ts1_test, type="o", main="2020๋
",ylim=y)
}
f(sgg1$NO2,c(0,0.08)); #...# ; f(sgg39)
#***************************************#
f = function(sgg, y, i){
ts1 <- ts(sgg, start = c(2009,12,28), freq=52)
plot(ts1,ylim=y, col=i,lwd=1.5)
par(new = TRUE)
}
f(sgg3$NO2,c(0,0.08),3); #๋
f(sgg6$NO2,c(0,0.08),6); #์
f(sgg1$NO2,c(0,0.08),1); #๋จ
f(sgg5$NO2,c(0,0.08),5); #๋ถ
# legend
# ๊ฐ๋ถ์ด ๋๊ธฐ ์ค์ผ ๋๋๊ฐ ๋์ฒด์ ์ผ๋ก ๋ฎ์ ๊ฑธ ๋ณผ ์ ์์. (์ถ์์ ๋ฐ๋๋ถ๋?)
#***************************************#
f = function(sgg){
ts1 <- ts(sgg, start = c(2009,12,28), freq=52)
ggplot(ts1, as.numeric = FALSE) +
geom_line() +
stat_peaks(colour = "red") + #๊ทน๋์ ๋นจ๊ฐ์ ์ผ๋ก ํ์
stat_peaks(geom = "text", colour = "red", #๊ทน๋์ ๋ ์ง("%Y-%m") ํ์
vjust = -0.5, x.label.fmt = "%Y-%m") +
stat_valleys(colour = "blue") + #๊ทน์์ ํ๋์ ์ผ๋ก ํ์
stat_valleys(geom = "text", colour = "blue", angle = 45,
vjust = 1.5, hjust = 1, x.label.fmt = "%Y-%m") #๊ทน์์ ๋ ์ง("%Y-%m") ํ์
}
f(sgg3$NO2); #๋
f(sgg6$NO2); #์
f(sgg1$NO2); #๋จ
f(sgg5$NO2); #๋ถ
# legend
#***************************************#
# ๋๋ก
f = function(sgg, y, i){
ts1 <- ts(sgg, start = c(2009,12,28), freq=52)
plot(ts1,ylim=y, col=i,lwd=1.5)
par(new = TRUE)
}
f(sgg2$NO2,c(0,0.08),3);
f(sgg4$NO2,c(0,0.08),6);
f(sgg7$NO2,c(0,0.08),1);
f(sgg14$NO2,c(0,0.08),14);
f(sgg17$NO2,c(0,0.08),17);
f(sgg27$NO2,c(0,0.08),27);
f(sgg30$NO2,c(0,0.08),30);
f(sgg35$NO2,c(0,0.08),35);
f(sgg36$NO2,c(0,0.08),36);
f(sgg37$NO2,c(0,0.08),37);
f(sgg38$NO2,c(0,0.08),38);
f(sgg39$NO2,c(0,0.08),39);
# 3๊ฐ๋ฅผ ๋ณด๋ฉด, ํ๊ฐ๋๋ก๋ ๋๊ธฐ์ค์ผ ๋๋๊ฐ ๊ฐํ ๊ฒ์ ๋ณผ ์ ์์
# ์ฒญ๋ฆ๋ก๋ ๋๊ธฐ์ค์ผ ๋๋๊ฐ ๋ค๋ฅธ ๊ณณ์ ๋นํด ์ฝํจ.
# ๋๋ก๋ง๋ค ๋ค๋ฅธ ๊ฒ์ ๋ณผ ์ ์์.
# legend
#***************************************#ใ
ใ
tr = 10:19; te = 19:20
par(mar=c(0,0,0,0))
plot(0,0,xlim=c(0,25),ylim=c(0,2),xaxt="n",yaxt="n",bty="n",xlab="",ylab="",type="n")
arrows(2.5,0.5,22,0.5,0.05)
points(tr, train*0+0.5, pch=19, col="blue")
points(te, test*0+0.5, pch=19, col="red")
text(24,0.5,"Year")
text(10,0.6,"Train data",col="blue")
text(20,0.6,"Test data",col="red")
#***************************************#
# ํ๊ท ์ ์ธ ์๊ณ์ด(train dataset)
#***************************************#
train = AIR[AIR$week < '2020-01-06',]
test = AIR[AIR$week >= '2020-01-06',]
#***************************************#
c <- train %>% group_by(week) %>% summarise(mean(NO2))
tts <- ts(c$`mean(NO2)`, start = c(2009,12,28), frequency = 52)
plot(tts, ylim = c(0,0.07))
#***************************************#
f = function(sgg){
ts1 <- ts(sgg, start = c(2009,12,28), freq=52)
ggplot(ts1, as.numeric = FALSE) +
geom_line() +
stat_peaks(colour = "red") + #๊ทน๋์ ๋นจ๊ฐ์ ์ผ๋ก ํ์
stat_valleys(colour = "blue")+#๊ทน์์ ํ๋์ ์ผ๋ก ํ์
ggthemes::theme_hc()
}
f(c$`mean(NO2)`)
#***************************************#
#sgg1 == ๊ฐ๋จ๊ตฌ
#write.table(gangnam, file = "gangnam.txt", row.names = F, quote = F) #sas์์ ์ธ txtํ์ผ ์์ฑ
k <- ts(sgg1$NO2, start = c(2009,12), end = c(2020, 11), freq = 52)
diff.k <- diff(k, lag = 12) #์ฐจ๋ถ
par(mfrow = c(1,2))
plot(sgg1$NO2, type="l", main="sgg1::NO2 2010๋
~ 2020๋
", axes=F)
axis(2,las=2)
plot(diff.k, main="sgg1::NO2 ์ฐจ๋ถ", axes=F)
axis(2,las=2); axis(1,las=1)
#์๊ณ์ด ๋ถํด
stl.run <- stl(k, s.window = "periodic")
plot(stl.run)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.