content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# This file is part of combination of R-files that contain code to perform a simulation study
# The code was used for a simulation study for which the results are presented in a currently unpublished manuscript
########################################
#### Functions to estimate models ####
#### Last modified: Feb 8 2018 ####
#### Author: M van Smeden ####
########################################
# BEGIN CODE ---------------------------------------
# Evaluate an expression while capturing both its value and any warning.
#
# Returns a list with two elements:
#   value   - the result of `expr`, or the error condition object if `expr`
#             signalled an error (errors are caught, never re-raised)
#   warning - the last warning condition raised during evaluation, or NULL
# Warnings are muffled so they do not propagate to the caller.
tryCatch.W.E <- function(expr) {
  last.warning <- NULL
  capture.warning <- function(cond) {
    last.warning <<- cond
    invokeRestart("muffleWarning")
  }
  result <- withCallingHandlers(
    tryCatch(expr, error = function(e) e),
    warning = capture.warning
  )
  list(value = result, warning = last.warning)
}
# Backward elimination for a fitted logistf (Firth) logistic regression.
#
# Repeatedly drops the least significant term until every remaining term's
# p-value falls below `sls` (the significance level to stay).
#
# object - a fitted logistf model; must carry its data in object$data
# sls    - significance level to stay; terms with p-value >= sls are
#          candidates for removal
# ...    - unused; kept so extra arguments from callers are tolerated
#
# Returns the reduced model fit.
#
# NOTE(review): assumes column 3 of drop1(working) holds p-values and
# column 2 holds degrees of freedom -- confirm this matches the drop1
# method of the model class actually passed in.
BackwardFR <- function (object, sls,...){
working <- object
istep <- 0
# terms of the original model that are eligible for removal
scope <- attr(terms(working), "term.labels")
while ( working$df >= 1) {
istep <- istep + 1
mat <- drop1(working)
# stop once all remaining terms are significant at level sls
if (all(mat[, 3] < sls))
break
# restrict candidates to terms still present in the original scope
inscope <- match(scope, rownames(mat))
inscope <- inscope[!is.na(inscope)]
# drop the term with the largest (least significant) p-value
removal <- rownames(mat)[mat[, 3] == max(mat[inscope, 3])]
newform = as.formula(paste("~.-", removal))
# pl = FALSE skips profile-likelihood CIs on the boundary refit; both
# branches refit on the original data carried in object$data
if (working$df == 1 | working$df == mat[mat[, 3] == max(mat[,3]), 2])
working <- update(working, formula = newform, pl = FALSE,data=object$data)
else working <- update(working, formula = newform,data=object$data)
}
return(working)
}
# Cross-validated ridge and lasso logistic regression via glmnet.
#
# SIMxy         - data frame with binary outcome column "y" and predictors
# nfolds        - number of CV folds (default nrow(SIMxy): leave-one-out)
# type.measure  - CV loss passed to cv.glmnet (default "deviance")
# nlambda.ridge - length of the extended ridge lambda grid
#
# Returns a list with $lasso and $ridge components, each holding the CV
# object, the refit at lambda.min, the default lambda-grid summary, and
# flags recording whether lambda.min landed on a grid boundary (which
# would indicate the grid should be extended further).
penal.cv.lrm2 <- function(SIMxy,nfolds = nrow(SIMxy),type.measure="deviance",nlambda.ridge=200){
OUT <- list("lasso"=list(),"ridge"=list())
# glmnet's internally chosen (decreasing) lambda path for ridge
ridge.defaultlambda <- glmnet(x=as.matrix(SIMxy[,-which(colnames(SIMxy)=="y")]),y=factor(SIMxy[,"y"]),family="binomial",alpha=0)$lambda
# log-step of the default path; negative because the path decreases, so
# the sequence built below extends downward from the largest lambda
steps <- log(ridge.defaultlambda[2])-log(ridge.defaultlambda[1])
OUT$ridge$default.lambda <- c("min"=min(ridge.defaultlambda),"max"=max( ridge.defaultlambda),"length"=length(ridge.defaultlambda))
# custom ridge grid: same log-spacing, but nlambda.ridge values, to reach
# smaller penalties than glmnet's default path
OUT$ridge$lambda.sequence <- exp(seq(from=log(max(ridge.defaultlambda)),by=steps, length.out=nlambda.ridge))
OUT$ridge$cv <- tryCatch.W.E(cv.glmnet(x=as.matrix(SIMxy[,-which(colnames(SIMxy)=="y")]),
y=factor(SIMxy[,"y"]),family="binomial",
nfolds = nfolds, type.measure=type.measure,
alpha = 0,lambda=OUT$ridge$lambda.sequence))
# lasso uses glmnet's own default lambda path
OUT$lasso$cv <- tryCatch.W.E(cv.glmnet(x=as.matrix(SIMxy[,-which(colnames(SIMxy)=="y")]),
y=factor(SIMxy[,"y"]),family="binomial",
nfolds = nfolds, type.measure=type.measure,
alpha = 1))
OUT$lasso$default.lambda <- c("min"=min(OUT$lasso$cv$value$lambda),"max"=max(OUT$lasso$cv$value$lambda),"length"=length(OUT$lasso$cv$value$lambda))
# NOTE(review): the refits below select predictors by position
# (1:(ncol-1)), assuming "y" is the LAST column, whereas the CV calls
# above remove "y" by name -- confirm the two selections agree for the
# data frames used in this simulation.
OUT$lasso$fit <- tryCatch.W.E(glmnet(x=as.matrix(SIMxy[,1:(ncol(SIMxy)-1)]),y=factor(SIMxy$y),
family="binomial",lambda=OUT$lasso$cv$value$lambda.min, alpha = 1))
OUT$ridge$fit <- tryCatch.W.E(glmnet(x=as.matrix(SIMxy[,1:(ncol(SIMxy)-1)]),y=factor(SIMxy$y),
family="binomial",lambda=OUT$ridge$cv$value$lambda.min, alpha = 0))
# boundary diagnostics: TRUE means lambda.min sits on (or past) an edge
# of the searched grid
OUT$lasso$boundary.lambda <- c("default.min"=as.numeric(OUT$lasso$default.lambda["min"])==OUT$lasso$cv$value$lambda.min,
"default.max"=as.numeric(OUT$lasso$default.lambda["max"])==OUT$lasso$cv$value$lambda.min)
OUT$ridge$boundary.lambda <- c("default.min"=as.numeric(OUT$ridge$default.lambda["min"])>=OUT$ridge$cv$value$lambda.min,
"default.max"=as.numeric(OUT$ridge$default.lambda["max"])==OUT$ridge$cv$value$lambda.min,
"new.min" = as.numeric(min(OUT$ridge$lambda.sequence))==OUT$ridge$cv$value$lambda.min)
OUT
}
# Fit full-model logistic regressions with and without Firth correction,
# optionally followed by backward elimination at one or more thresholds.
#
# SIMxy   - data frame with outcome column "y" and predictor columns
# simlist - simulation settings; uses $alpha (CI level for logistf) and
#           $bw.p (vector of backward-elimination significance levels, or
#           NULL to skip elimination)
#
# Returns a named list of tryCatch.W.E results: "FirthTRUE", "FirthFALSE",
# and, for each p in simlist$bw.p, "FirthTRUEbw<p>" / "FirthFALSEbw<p>".
firth.lrm.plusbw <- function(SIMxy, simlist){
  # full-model formula: y ~ all columns except "y"
  predictors <- colnames(SIMxy)[-which(colnames(SIMxy) == "y")]
  formu <- as.formula(paste0("y~", paste(predictors, collapse = "+")))
  TEMPF <- list(
    FirthTRUE  = tryCatch.W.E(logistf(formu, data = SIMxy, firth = TRUE,
                                      alpha = simlist$alpha, dataout = TRUE)),
    FirthFALSE = tryCatch.W.E(logistf(formu, data = SIMxy, firth = FALSE,
                                      alpha = simlist$alpha, dataout = TRUE)))
  if(!is.null(simlist$bw.p)){
    # seq_along() is safe for length-0 bw.p (1:length(x) would run the
    # loop with the bogus indices 1 and 0)
    for(bwiter in seq_along(simlist$bw.p)){
      sls <- simlist$bw.p[bwiter]
      TEMPF[[paste0("FirthTRUEbw", sls)]] <-
        tryCatch.W.E(BackwardFR(TEMPF$FirthTRUE$value, sls = sls,
                                simlist = simlist, firth = TRUE))
      TEMPF[[paste0("FirthFALSEbw", sls)]] <-
        tryCatch.W.E(BackwardFR(TEMPF$FirthFALSE$value, sls = sls,
                                simlist = simlist, firth = FALSE))
    }
  }
  TEMPF
}
# Heuristic (likelihood-ratio based) shrinkage of ML logistic coefficients,
# presumably following the Van Houwelingen-Le Cessie heuristic -- confirm
# against the manuscript.  Slopes are multiplied by the shrinkage factor
# s = (LR chi-square - df) / LR chi-square, and the intercept is then
# corrected in two ways: re-estimated by offset regression ($int.re.est)
# and approximated in closed form ($int.approx).
#
# SIMxy - data frame whose LAST column must be the binary outcome "y"
#         (SIMx/y below are split by position, not by name).
#
# Returns list(OUT = <fits with shrunken coefficients>, hs.lambda = s).
#
# NOTE(review): `simlist` is read from the calling environment, not passed
# as an argument -- this silently depends on a global and should be made a
# parameter.  Also, when chisq == 0 the factor s is -Inf/NaN but is still
# returned as hs.lambda.
heuristicshrink.lrm <- function(SIMxy){
formu <- as.formula(paste("y~", paste(colnames(SIMxy)[-which(colnames(SIMxy)=="y")],collapse="+"),sep=""))
SIMx <- SIMxy[,-ncol(SIMxy)]
y <- SIMxy[,ncol(SIMxy)]
TEMP.heur <- list()
# unpenalized ML fit, used only for the likelihood-ratio chi-square
unpen.fit <- glm(formu,data=SIMxy,family=binomial(link = "logit"))
# firth = FALSE: plain ML fit via logistf (gives df used in s)
TEMP.heur$int.re.est <- tryCatch.W.E(logistf(formu,data=SIMxy,firth = FALSE,alpha = simlist$alpha, dataout = T))
chisq <-unpen.fit$null.deviance-unpen.fit$deviance
# heuristic shrinkage factor
s <- (chisq-TEMP.heur$int.re.est$value$df)/chisq
if(chisq!=0){
A <- TEMP.heur$int.re.est$value$coefficients["(Intercept)"]
B <- TEMP.heur$int.re.est$value$coefficients[-which(names(TEMP.heur$int.re.est$value$coefficients)=="(Intercept)")]
# shrink all slopes toward zero by factor s
TEMP.heur$int.re.est$value$coefficients[-which(names(TEMP.heur$int.re.est$value$coefficients)=="(Intercept)")] <- B*s
TEMP.heur$int.approx <- TEMP.heur$int.re.est
# closed-form intercept approximation: blend of outcome mean and old intercept
TEMP.heur$int.approx$value$coefficients["(Intercept)"] <- (1-s)*mean(SIMxy$y)+s*A
# re-estimated intercept: refit with the shrunken linear predictor as offset
off <- data.matrix(SIMx)%*%data.matrix(B*s)
TEMP.heur$int.re.est$value$coefficients["(Intercept)"] <- coefficients(glm(SIMxy$y~offset(off), family = binomial(link = "logit")))["(Intercept)"]
}else{TEMP.heur$int.approx <- TEMP.heur$int.re.est}
list(OUT=TEMP.heur,hs.lambda=s)
}
# END CODE ---------------------------------------------
| /sim_LRMs.R | no_license | PyramidAGI/Beyond-EPV | R | false | false | 6,362 | r | # This file is part of combination of R-files that contain code to perform a simulation study
# The code was used for a simulation study for which the results are presented in a currently unpublished manuscript
########################################
#### Functions to estimate models ####
#### Last modified: Feb 8 2018 ####
#### Author: M van Smeden ####
########################################
# BEGIN CODE ---------------------------------------
tryCatch.W.E <- function(expr){
W <- NULL
w.handler <- function(w){
W <<- w
invokeRestart("muffleWarning")}
list(value = withCallingHandlers(tryCatch(expr, error = function(e) e), warning = w.handler),warning = W)
}
BackwardFR <- function (object, sls,...){
working <- object
istep <- 0
scope <- attr(terms(working), "term.labels")
while ( working$df >= 1) {
istep <- istep + 1
mat <- drop1(working)
if (all(mat[, 3] < sls))
break
inscope <- match(scope, rownames(mat))
inscope <- inscope[!is.na(inscope)]
removal <- rownames(mat)[mat[, 3] == max(mat[inscope, 3])]
newform = as.formula(paste("~.-", removal))
if (working$df == 1 | working$df == mat[mat[, 3] == max(mat[,3]), 2])
working <- update(working, formula = newform, pl = FALSE,data=object$data)
else working <- update(working, formula = newform,data=object$data)
}
return(working)
}
penal.cv.lrm2 <- function(SIMxy,nfolds = nrow(SIMxy),type.measure="deviance",nlambda.ridge=200){
OUT <- list("lasso"=list(),"ridge"=list())
ridge.defaultlambda <- glmnet(x=as.matrix(SIMxy[,-which(colnames(SIMxy)=="y")]),y=factor(SIMxy[,"y"]),family="binomial",alpha=0)$lambda
steps <- log(ridge.defaultlambda[2])-log(ridge.defaultlambda[1])
OUT$ridge$default.lambda <- c("min"=min(ridge.defaultlambda),"max"=max( ridge.defaultlambda),"length"=length(ridge.defaultlambda))
OUT$ridge$lambda.sequence <- exp(seq(from=log(max(ridge.defaultlambda)),by=steps, length.out=nlambda.ridge))
OUT$ridge$cv <- tryCatch.W.E(cv.glmnet(x=as.matrix(SIMxy[,-which(colnames(SIMxy)=="y")]),
y=factor(SIMxy[,"y"]),family="binomial",
nfolds = nfolds, type.measure=type.measure,
alpha = 0,lambda=OUT$ridge$lambda.sequence))
OUT$lasso$cv <- tryCatch.W.E(cv.glmnet(x=as.matrix(SIMxy[,-which(colnames(SIMxy)=="y")]),
y=factor(SIMxy[,"y"]),family="binomial",
nfolds = nfolds, type.measure=type.measure,
alpha = 1))
OUT$lasso$default.lambda <- c("min"=min(OUT$lasso$cv$value$lambda),"max"=max(OUT$lasso$cv$value$lambda),"length"=length(OUT$lasso$cv$value$lambda))
OUT$lasso$fit <- tryCatch.W.E(glmnet(x=as.matrix(SIMxy[,1:(ncol(SIMxy)-1)]),y=factor(SIMxy$y),
family="binomial",lambda=OUT$lasso$cv$value$lambda.min, alpha = 1))
OUT$ridge$fit <- tryCatch.W.E(glmnet(x=as.matrix(SIMxy[,1:(ncol(SIMxy)-1)]),y=factor(SIMxy$y),
family="binomial",lambda=OUT$ridge$cv$value$lambda.min, alpha = 0))
OUT$lasso$boundary.lambda <- c("default.min"=as.numeric(OUT$lasso$default.lambda["min"])==OUT$lasso$cv$value$lambda.min,
"default.max"=as.numeric(OUT$lasso$default.lambda["max"])==OUT$lasso$cv$value$lambda.min)
OUT$ridge$boundary.lambda <- c("default.min"=as.numeric(OUT$ridge$default.lambda["min"])>=OUT$ridge$cv$value$lambda.min,
"default.max"=as.numeric(OUT$ridge$default.lambda["max"])==OUT$ridge$cv$value$lambda.min,
"new.min" = as.numeric(min(OUT$ridge$lambda.sequence))==OUT$ridge$cv$value$lambda.min)
OUT
}
firth.lrm.plusbw <- function(SIMxy, simlist){
formu <- as.formula(paste("y~", paste(colnames(SIMxy)[-which(colnames(SIMxy)=="y")],collapse="+"),sep=""))
TEMPF <- list(FirthTRUE=tryCatch.W.E(logistf(formu,data=SIMxy,firth = TRUE,alpha = simlist$alpha, dataout = T)),
FirthFALSE=tryCatch.W.E(logistf(formu,data=SIMxy,firth = FALSE,alpha = simlist$alpha, dataout = T)))
if(!is.null(simlist$bw.p)){
for(bwiter in 1:length(simlist$bw.p)){
TEMPF[[paste("FirthTRUEbw",simlist$bw.p[bwiter],sep="")]] <- tryCatch.W.E(BackwardFR(TEMPF$FirthTRUE$value,sls=simlist$bw.p[bwiter], simlist=simlist,firth=TRUE))
TEMPF[[paste("FirthFALSEbw",simlist$bw.p[bwiter],sep="")]] <- tryCatch.W.E(BackwardFR(TEMPF$FirthFALSE$value,sls=simlist$bw.p[bwiter], simlist=simlist,firth=FALSE))
}
}
TEMPF
}
heuristicshrink.lrm <- function(SIMxy){
formu <- as.formula(paste("y~", paste(colnames(SIMxy)[-which(colnames(SIMxy)=="y")],collapse="+"),sep=""))
SIMx <- SIMxy[,-ncol(SIMxy)]
y <- SIMxy[,ncol(SIMxy)]
TEMP.heur <- list()
unpen.fit <- glm(formu,data=SIMxy,family=binomial(link = "logit"))
TEMP.heur$int.re.est <- tryCatch.W.E(logistf(formu,data=SIMxy,firth = FALSE,alpha = simlist$alpha, dataout = T))
chisq <-unpen.fit$null.deviance-unpen.fit$deviance
s <- (chisq-TEMP.heur$int.re.est$value$df)/chisq
if(chisq!=0){
A <- TEMP.heur$int.re.est$value$coefficients["(Intercept)"]
B <- TEMP.heur$int.re.est$value$coefficients[-which(names(TEMP.heur$int.re.est$value$coefficients)=="(Intercept)")]
TEMP.heur$int.re.est$value$coefficients[-which(names(TEMP.heur$int.re.est$value$coefficients)=="(Intercept)")] <- B*s
TEMP.heur$int.approx <- TEMP.heur$int.re.est
TEMP.heur$int.approx$value$coefficients["(Intercept)"] <- (1-s)*mean(SIMxy$y)+s*A
off <- data.matrix(SIMx)%*%data.matrix(B*s)
TEMP.heur$int.re.est$value$coefficients["(Intercept)"] <- coefficients(glm(SIMxy$y~offset(off), family = binomial(link = "logit")))["(Intercept)"]
}else{TEMP.heur$int.approx <- TEMP.heur$int.re.est}
list(OUT=TEMP.heur,hs.lambda=s)
}
# END CODE ---------------------------------------------
|
###Creating code for larvae (I want someestimate of mortaltiy for the entire cohort)
##What data do we have for growth?
##Look at the script on "modeling zoea growth" for details but the function is as below
# Carapace length (mm) of a larva at age t (days post-hatch).
# Piecewise-constant sizes per zoeal stage (see the "modeling zoea growth"
# script); the commented runif() calls record the original sampling ranges.
# For ages past the 15.5-day larval window the function prints (and
# returns) 'not larva', exactly as the original did.
larvae.size <- function(t) {
  if (t < 3.5)   return(0.49)   # runif(1, 0.44, 0.54)
  if (t < 7.5)   return(0.75)   # runif(1, 0.72, 0.77)
  if (t < 9.5)   return(0.83)   # runif(1, 0.79, 0.87)
  if (t < 12)    return(1.02)   # runif(1, 0.98, 1.06)
  if (t <= 15.5) return(1.75)   # runif(1, 1.69, 1.81)
  print('not larva')
}
# Visual check of the growth curve: evaluate larvae.size() on a daily grid.
t <- seq(from = 0, to = 15.5, by = 1)
# vapply replaces the original preallocate-and-index loop (1:length(t));
# it is type-stable and safe for zero-length input
cl <- vapply(t, larvae.size, numeric(1))
plot(cl ~ t)
##Add in mortality function
# Baseline daily mortality rate, estimated from data (see the
# 'Calculating mortality rates for larvae' script).
mr=0.12
# Reference length (mm) at which the mortality rate equals mr.
lr=1
# Size-dependent daily mortality: rate scales inversely with body length l,
# so smaller larvae die faster.  The rate and reference length default to
# the globals mr/lr so existing calls mortality(l) are unchanged, but they
# can now be supplied explicitly for sensitivity analyses.
mortality=function(l, rate = mr, ref.length = lr){
  rate * (ref.length / l)
}
###Let's create the actual model forloop for total mortality
# One step of exponential survival decay: number (proportion) surviving at
# the next time point, given n[i] survivors now and the size-dependent
# mortality rate at age t.
# Depends on globals defined elsewhere in this script: n (survival
# vector), delta.t (step size, days), larvae.size() and mortality().
# t - current age in days; i - index of the current time step in n.
numbers=function(t, i) {
# exp(-M * dt) survival over one interval; the trailing note records that
# a fishing-mortality term was removed from an earlier version
num=n[i]*exp(-(mortality(larvae.size(t)))*delta.t) ###Removed fishing mortrality
}
##Hmm.. for larvae, time span is limited to 15.5 days and in this case, maybe want higher resolution time point (like 0.5 days?)
delta.t=0.5 ##Resolution between time points. In case you're wondering, your final estimates do not change whether you use 0.5 or 1 for delta.t (as long as those intervals are in the same unit as mortality (per day))
t=seq(from=0, to=15.5, by=delta.t)
# preallocate trajectories: n = proportion surviving, m = mortality rate,
# s = body size
n=rep(0, length(t))
m=rep(0, length(t))
s=rep(0, length(t))
n[1]=1 ##Starting population size
# NOTE(review): writing to index i+1 grows each vector to length(t)+1,
# and s[1]/m[1] keep their placeholder 0 -- t.1 below adds one extra time
# point to compensate when plotting.  Worth tidying so indices line up.
for(i in 1:length(t))
{
n[i+1]=numbers(t=t[i], i=i)
s[i+1]=larvae.size(t[i])
m[i+1]=mortality(larvae.size(t[i]))
}
# extended time axis matching the (length(t)+1)-long trajectories
t.1=c(t, max(t)+1)
# three diagnostic panels: size, mortality rate, and cohort survival
par(mfrow=(c(2,2)))
plot(s~t.1, ylab='Crab length (mm)', xlab='Days', cex.lab=1.5, cex.axis=1.5)
text(1, 1.6, labels=c('a'), cex=2 )
plot(m~t.1, ylab='Mortality rate', xlab='Days', cex.lab=1.5, cex.axis=1.5)
text(1, 0.22, labels=c('b'), cex=2 )
plot(n~t.1, ylab='Cohort proportion survival', xlab='Days', cex.lab=1.5, cex.axis=1.5)
text(1, 0.9, labels=c('c'), cex=2 )
# collect trajectories for downstream use
larval.data=data.frame(n,s,m)
#n~0.09 after larvae (9% survival is decent) | /SFP working draft code for larvae.R | no_license | gng-ucdavis/SFP-Fisheries-Project | R | false | false | 2,073 | r | ###Creating code for larvae (I want someestimate of mortaltiy for the entire cohort)
##What data do we have for growth?
##Look at the script on "modeling zoea growth" for details but the function is as below
larvae.size=function(t){
if(t<3.5){
cl=0.49 #runif(1, 0.44, 0.54)
}
else if(t<7.5 & t>=3.5){
cl= 0.75 #runif(1, 0.72, 0.77)
}
else if(t<9.5 & t>=7.5){
cl=0.83 #runif(1, 0.79, 0.87)
}
else if(t<12 & t>=9.5){
cl=1.02 #runif(1, 0.98, 1.06)
}
else if(t<=15.5 & t>=12){
cl=1.75 #runif(1, 1.69, 1.81)
}
else print('not larva')
}
t=seq(from =0, to =15.5, by =1)
cl=rep(0, length(t))
for(i in 1:length(t))
{
cl[i]=larvae.size(t[i])
}
plot(cl~t)
##Add in mortality function
mr=0.12 #Got mortality rates from data using 'Calculating mortality rates for larvae' script
lr=1
mortality=function(l){
m=mr*(lr/l)
}
###Let's create the actual model forloop for total mortality
numbers=function(t, i) {
num=n[i]*exp(-(mortality(larvae.size(t)))*delta.t) ###Removed fishing mortrality
}
##Hmm.. for larvae, time span is limited to 15.5 days and in this case, maybe want higher resolution time point (like 0.5 days?)
delta.t=0.5 ##Resolution between time points. In case you're wondering, your final estimates do not change whether you use 0.5 or 1 for delta.t (as long as those intervals are in the same unit as mortality (per day))
t=seq(from=0, to=15.5, by=delta.t)
n=rep(0, length(t))
m=rep(0, length(t))
s=rep(0, length(t))
n[1]=1 ##Starting population size
for(i in 1:length(t))
{
n[i+1]=numbers(t=t[i], i=i)
s[i+1]=larvae.size(t[i])
m[i+1]=mortality(larvae.size(t[i]))
}
t.1=c(t, max(t)+1)
par(mfrow=(c(2,2)))
plot(s~t.1, ylab='Crab length (mm)', xlab='Days', cex.lab=1.5, cex.axis=1.5)
text(1, 1.6, labels=c('a'), cex=2 )
plot(m~t.1, ylab='Mortality rate', xlab='Days', cex.lab=1.5, cex.axis=1.5)
text(1, 0.22, labels=c('b'), cex=2 )
plot(n~t.1, ylab='Cohort proportion survival', xlab='Days', cex.lab=1.5, cex.axis=1.5)
text(1, 0.9, labels=c('c'), cex=2 )
larval.data=data.frame(n,s,m)
#n~0.09 after larvae (9% survival is decent) |
library(arules)
library(lme4)
library(mtk)
# R Data Type
##### R objects
# If you overwrite something, there is no warning or error!
# Be careful of what you have named your objects,
# as it might be overwritten inadvertently
#-------------------- Numeric--------------------
a<-7^3
class(a)
# Should get the answer as 'numeric'
mode(a)
# same answer!
?mode
?class
## R basic data types
# 1. Numeric
# is the basic data type in R
# Let us 'c'
# c() or concatenate is a very powerful operator in R
num_var <-c(1,2,3,4,3.5,7.82)
num_var <-c(1,2,3,4,3.5,7.82,"asda")
class(num_var)
mode(num_var)
n1 <-10
n2 <-22
n3 <- c(n1,n2)
n3
mode(num_var)
class(num_var)
# ----------------- 2. Integer -----------------
1:5
intvar <-c(1:100)
mode(intvar)
class(intvar)
intar <- seq(1,100,by=3)
help(seq)
head(intar,10)
tail(intar)
?head
help(head)
# ---------------- 3. Character ---------------------
# 3. Character: character is the third data type
mystring<-"Logistic"
mystring<-'Logistic'
Mystring
# extending the same array
mystring1<-c(mystring, "Multinomial","MOnday")
?rep
rep(2,20)
mystring<-rep(mystring,3)
mynum.rep <- rep(33,5)
num.rep <- rep(seq(2,10, by=3),5)
mode(mystring)
class(mystring)
d <- 1:10
# Categorical variables
cities<-c('Bangalore')
class(cities)
cities<-c(cities,'Mumbai')
cities<-c(cities,'Delhi')
cities <- c("Bangalore","Mumbai","Delhi","Chennai")
cities<-rep(cities, 20)
cities
?rep
factor_cities<-as.factor(x=cities)
mode(factor_cities)
levels(factor_cities)
as.numeric(factor_cities)
class(factor_cities)
mode(factor_cities)
mode(cities)
gender <- c("F","M","M","F","F","M","F")
class(gender)
mode(gender)
gender1 <- as.factor(gender)
gender2 <- as.numeric(gender1)
gender3 <- as.numeric(gender)
# Just change it to numeric
x<-as.numeric(factor_cities)
x
class(x)
x1 <- as.character(factor_cities)
x1
class(x1)
## and for the sake of completion
#---------------------- 4.logical -----------------------------
# 4. Logical
y <- 5>6
y
y1 <-5>2
y1
logic_var <- as.logical(c(1, 0, 2, 5, 0, 0))
## try it yourself
class(logic_var)
mode(logic_var)
data<-c("M","M","F","F","M","M","M")
a <- c("Ram",1,3,1)
a
class(a)
mode(a)
a1 <- as.numeric(a)
is.na(a)
is.na(a1)
d1 <- c("High",3,5)
d2 <- as.numeric(d1)
a[2]
a2 <- a1[!is.na(a1)]
#------------------------- 5.complex ---------------
# 5. Complex: numbers with a real and an imaginary part
cmplx_var <- 2 + 6i
class(cmplx_var)
# sqrt() of a negative number needs complex input to return i rather than
# NaN; the minus sign here had been mangled into "???" by a bad character
# encoding, which made the line a syntax error.
sqrt(-1+0i)
## ------------------------- R Objects --------------------------------------
## ----- R objects -Vector, matrix, array,list, and data frame
# --------------------- vector ---
v.num <- 50:80 # create
length(v.num)
v.num #print
v.num[5] #access
ind <-10:15
v.num[ind]# access
v.num[10:15]
v.num1 <- v.num *2
v.num[10]
v.num1[10]
v.num1[2] <-100000
class(v.num1)
char <- LETTERS
char1 <- letters
letters[c(12,13)]
v.char <- LETTERS # create character vectors using existing vectors of english alphabets
v.char1 <- letters[2:7] # small alphabets with selection
c1 <- c("TT","MEL","Sd")
print(v.char1) #print
logi <- c(T,T,F,F)
logi[3]
# ------------------- matrix ---------------------------
## define a matrix
v <-seq(1,100,by=5) # vector
v[10]
length(v)
m1<-matrix(v,nrow=10)
m1[,1]
m1[,2]
m1[5,]
m1 #print
m1[6,1]# access elements
m1[4,2]
m11<-matrix(v,nrow=10,byrow = TRUE)
m1<-matrix(v,nrow=5,byrow = TRUE)
m2<-matrix(v,ncol =5)
m2<-matrix(ncol =5,v)
m2
class(m2)
mode(m2)
#character column
letters
LETTERS
letters <- c("a","b")
let = letters
letters <- let
letters[1:20]
m.char <- matrix(letters[1:20],5,byrow = TRUE)
m.char[,3]
m.char[5,]
# logical matrix
v.logic <- c(T,T,T,F,F,T,F,F,T,T)
v2 <- matrix(v.logic,nrow = 2,byrow = T)
m2<-matrix(v,nrow=5,byrow=T) # assign value first by rows
m2[,1] # first column
m2[5,] # select full rows
# Matrix Operations
m2 <-m2*100
m2
m2.tran <- t(m2)
matrix(v.logic,byrow = T)
class(x)
dim(m2)
?dim
v
# convert a vector into Matrix using dim function
dim(v) <- c(10,2)
v
### ------------------- Array --------------------
## Array: multi dimensional object
my.array <- array(1:48, dim=c(4,4,3))
my.array[2,,]
my.array[,3,]
my.array[,,3]
my.array[,,2] # access second matrix of 3,4 dimension
my.array[2,,] # second row of both the matrices
my.array[,2,] # second column of both the matrices
my.array[,2,]
letters[-c(16,18)]#To remove particular elements
### ------------------List-----------------------------
## define a list
list_var <- list(name="Fred", mynumbers=v, my=my.array, age=5.3)
## access member of a list
list_var[2] # copy of a member
list_var[[2]]
list_var[[1]] # directly the elements
list_var[[2]]
list_var$my # access member
list_var[1]
list_var[[1]]
list_var$age
list_var$age
mm <-list_var[[2]][,2]
list_var$name
list_var$
vv <-list_var$my
list_var$mynumbers[,2]
list_var$my[,2,]
class(list_var)
mode(list_var)
###----------------- Data Frame -----------------------------
## define a data frame
# Data frame is equivalent to SAS dataset.
n = c(2, 3, 5)
s = c("aa", "bb", "cc")
b = c(TRUE, FALSE, TRUE)
# create data frame from 3 vectors
df = data.frame(n, s, b) # df is a data frame
View(df)
df$s
class(df$s)
mode(df$s)
df1 = data.frame(n, s, b,stringsAsFactors =F)
class(df1$s)
class(df$b)
df1[3,]
df[,2]
df.mat <-as.matrix(df)
df.mat.2 <- data.frame(df.mat)
class(df.mat.2$b)
class(df$b)
class(df.mat.2$b)
# Convert Factor to numeric
df.mat.2$b1 <- as.numeric(df.mat.2$b)
"
- Interger
- numeric
- Character
- Logical
- Complex
"
View(df) # view data frame
names(df) # column names
nrow(df) # number of rows
ncol(df) # number of columns
names(df) <- c("Num","Char","logical")
names(df)
View(df) # view data frame
class(df$Char)
mode(df$Char)
df$logical
df$Char
d <- as.Date("2013-01-10")
class(d)
mode(d)
install.packages("datasets")
library(datasets)
require(datasets)
library(help=datasets)
mdf <-mtcars
?head
head(mdf)
head(mdf,3)
nrow(mtcars)
mtcars[2,c(3,4)]
mtcars[c(21,31,11),c(2,3)]
mtcars[c(21,31,11),c("disp","hp")]
# built in data frame
summary(mtcars)
str(mtcars)
# print of data frame
head(mtcars) # defalut first 6 rows
tail(mtcars) # last 6 rows
head(mtcars,12) # custom number of rows
m.df <- mtcars[c(10,15),c(1,3)]
mtcars[12,c(2,5,7)] # by index
# Excluding column 3
mat.car<- mtcars[, -c(3)]
View(mtcars)
var1 <-mtcars$mpg
mtcars[10:15,c("wt","am")]
## -------------------Functions---------------------------
| /Class 2 DataTypesAndObjects.R | no_license | ishaanhumble/AMMA-Class | R | false | false | 6,434 | r |
library(arules)
library(lme4)
library(mtk)
# R Data Type
##### R objects
# If you overwrite something, there is no warning or error!
# Be careful of what you have named your objects,
# as it might be overwritten inadvertently
#-------------------- Numeric--------------------
a<-7^3
class(a)
# Should get the answer as 'numeric'
mode(a)
# same answer!
?mode
?class
## R basic data types
# 1. Numeric
# is the basic data type in R
# Let us 'c'
# c() or concatenate is a very powerful operator in R
num_var <-c(1,2,3,4,3.5,7.82)
num_var <-c(1,2,3,4,3.5,7.82,"asda")
class(num_var)
mode(num_var)
n1 <-10
n2 <-22
n3 <- c(n1,n2)
n3
mode(num_var)
class(num_var)
# ----------------- 2. Integer -----------------
1:5
intvar <-c(1:100)
mode(intvar)
class(intvar)
intar <- seq(1,100,by=3)
help(seq)
head(intar,10)
tail(intar)
?head
help(head)
# ---------------- 3. Character ---------------------
# 3. Character: character is the third data type
mystring<-"Logistic"
mystring<-'Logistic'
Mystring
# extending the same array
mystring1<-c(mystring, "Multinomial","MOnday")
?rep
rep(2,20)
mystring<-rep(mystring,3)
mynum.rep <- rep(33,5)
num.rep <- rep(seq(2,10, by=3),5)
mode(mystring)
class(mystring)
d <- 1:10
# Categorical variables
cities<-c('Bangalore')
class(cities)
cities<-c(cities,'Mumbai')
cities<-c(cities,'Delhi')
cities <- c("Bangalore","Mumbai","Delhi","Chennai")
cities<-rep(cities, 20)
cities
?rep
factor_cities<-as.factor(x=cities)
mode(factor_cities)
levels(factor_cities)
as.numeric(factor_cities)
class(factor_cities)
mode(factor_cities)
mode(cities)
gender <- c("F","M","M","F","F","M","F")
class(gender)
mode(gender)
gender1 <- as.factor(gender)
gender2 <- as.numeric(gender1)
gender3 <- as.numeric(gender)
# Just change it to numeric
x<-as.numeric(factor_cities)
x
class(x)
x1 <- as.character(factor_cities)
x1
class(x1)
## and for the sake of completion
#---------------------- 4.logical -----------------------------
# 4. Logical
y <- 5>6
y
y1 <-5>2
y1
logic_var <- as.logical(c(1, 0, 2, 5, 0, 0))
## try it yourself
class(logic_var)
mode(logic_var)
data<-c("M","M","F","F","M","M","M")
a <- c("Ram",1,3,1)
a
class(a)
mode(a)
a1 <- as.numeric(a)
is.na(a)
is.na(a1)
d1 <- c("High",3,5)
d2 <- as.numeric(d1)
a[2]
a2 <- a1[!is.na(a1)]
#------------------------- 5.complex ---------------
# 5. Complex
cmplx_var <- 2 + 6i
class(cmplx_var)
sqrt(???1+0i)
## ------------------------- R Objects --------------------------------------
## ----- R objects -Vector, matrix, array,list, and data frame
# --------------------- vector ---
v.num <- 50:80 # create
length(v.num)
v.num #print
v.num[5] #access
ind <-10:15
v.num[ind]# access
v.num[10:15]
v.num1 <- v.num *2
v.num[10]
v.num1[10]
v.num1[2] <-100000
class(v.num1)
char <- LETTERS
char1 <- letters
letters[c(12,13)]
v.char <- LETTERS # create character vectors using existing vectors of english alphabets
v.char1 <- letters[2:7] # small alphabets with selection
c1 <- c("TT","MEL","Sd")
print(v.char1) #print
logi <- c(T,T,F,F)
logi[3]
# ------------------- matrix ---------------------------
## define a matrix
v <-seq(1,100,by=5) # vector
v[10]
length(v)
m1<-matrix(v,nrow=10)
m1[,1]
m1[,2]
m1[5,]
m1 #print
m1[6,1]# access elements
m1[4,2]
m11<-matrix(v,nrow=10,byrow = TRUE)
m1<-matrix(v,nrow=5,byrow = TRUE)
m2<-matrix(v,ncol =5)
m2<-matrix(ncol =5,v)
m2
class(m2)
mode(m2)
#character column
letters
LETTERS
letters <- c("a","b")
let = letters
letters <- let
letters[1:20]
m.char <- matrix(letters[1:20],5,byrow = TRUE)
m.char[,3]
m.char[5,]
# logical matrix
v.logic <- c(T,T,T,F,F,T,F,F,T,T)
v2 <- matrix(v.logic,nrow = 2,byrow = T)
m2<-matrix(v,nrow=5,byrow=T) # assign value first by rows
m2[,1] # first column
m2[5,] # select full rows
# Matrix Operations
m2 <-m2*100
m2
m2.tran <- t(m2)
matrix(v.logic,byrow = T)
class(x)
dim(m2)
?dim
v
# convert a vector into Matrix using dim function
dim(v) <- c(10,2)
v
### ------------------- Array --------------------
## Array: multi dimensional object
my.array <- array(1:48, dim=c(4,4,3))
my.array[2,,]
my.array[,3,]
my.array[,,3]
my.array[,,2] # access second matrix of 3,4 dimension
my.array[2,,] # second row of both the matrices
my.array[,2,] # second column of both the matrices
my.array[,2,]
letters[-c(16,18)]#To remove particular elements
### ------------------List-----------------------------
## define a list
list_var <- list(name="Fred", mynumbers=v, my=my.array, age=5.3)
## access member of a list
list_var[2] # copy of a member
list_var[[2]]
list_var[[1]] # directly the elements
list_var[[2]]
list_var$my # access member
list_var[1]
list_var[[1]]
list_var$age
list_var$age
mm <-list_var[[2]][,2]
list_var$name
list_var$
vv <-list_var$my
list_var$mynumbers[,2]
list_var$my[,2,]
class(list_var)
mode(list_var)
###----------------- Data Frame -----------------------------
## define a data frame
# Data frame is equivalent to SAS dataset.
n = c(2, 3, 5)
s = c("aa", "bb", "cc")
b = c(TRUE, FALSE, TRUE)
# create data frame from 3 vectors
df = data.frame(n, s, b) # df is a data frame
View(df)
df$s
class(df$s)
mode(df$s)
df1 = data.frame(n, s, b,stringsAsFactors =F)
class(df1$s)
class(df$b)
df1[3,]
df[,2]
df.mat <-as.matrix(df)
df.mat.2 <- data.frame(df.mat)
class(df.mat.2$b)
class(df$b)
class(df.mat.2$b)
# Convert Factor to numeric
df.mat.2$b1 <- as.numeric(df.mat.2$b)
"
- Interger
- numeric
- Character
- Logical
- Complex
"
View(df) # view data frame
names(df) # column names
nrow(df) # number of rows
ncol(df) # number of columns
names(df) <- c("Num","Char","logical")
names(df)
View(df) # view data frame
class(df$Char)
mode(df$Char)
df$logical
df$Char
d <- as.Date("2013-01-10")
class(d)
mode(d)
install.packages("datasets")
library(datasets)
require(datasets)
library(help=datasets)
mdf <-mtcars
?head
head(mdf)
head(mdf,3)
nrow(mtcars)
mtcars[2,c(3,4)]
mtcars[c(21,31,11),c(2,3)]
mtcars[c(21,31,11),c("disp","hp")]
# built in data frame
summary(mtcars)
str(mtcars)
# print of data frame
head(mtcars) # defalut first 6 rows
tail(mtcars) # last 6 rows
head(mtcars,12) # custom number of rows
m.df <- mtcars[c(10,15),c(1,3)]
mtcars[12,c(2,5,7)] # by index
# Excluding column 3
mat.car<- mtcars[, -c(3)]
View(mtcars)
var1 <-mtcars$mpg
mtcars[10:15,c("wt","am")]
## -------------------Functions---------------------------
|
# Auto-generated fuzz-test fixture (RcppDeepState-style): feeds extreme /
# degenerate numeric matrices (near-overflow doubles, an all-but-one-zero
# 5x7 matrix) into the internal match_rows() helper of the `multivariance`
# package and prints the structure of the result to detect crashes.
testlist <- list(A = structure(c(2.7402879724605e+176, 9.53818252179844e+295 ), .Dim = 1:2), B = structure(c(2.19477802979261e+294, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
#' DeclareDesign package
#'
#' The four main types of functions are to declare a step, combine steps into designs,
#' and manipulate designs and design templates.
#'
#' @section Design Steps:
#' \describe{
#' \item{\code{\link{declare_population}}}{Population step}
#' \item{\code{\link{declare_potential_outcomes}}}{Potential Outcomes step}
#' \item{\code{\link{declare_sampling}}}{Sampling step}
#' \item{\code{\link{declare_assignment}}}{Assignment step}
#' \item{\code{\link{declare_reveal}}}{Reveal Outcomes step}
#' \item{\code{\link{declare_estimand}}}{Estimand step}
#' \item{\code{\link{declare_estimator}}}{Estimator step}
#' \item{\code{\link{declare_citation}}}{Citation step}
#' }
#'
#' @section Design Objects:
#' \describe{
#' \item{\code{\link{declare_design}}}{Declare a design from steps}
#' \item{\code{\link{draw_data}}}{Simulate the DGP}
#' \item{\code{\link{run_design}}}{Simulate the DGP with estimands/estimators}
#' \item{\code{\link{diagnose_design}}}{Diagnose a design}
#' \item{\code{\link{cite_design}}}{Cite a design}
#' }
#'
#'
#' @section Design Editing:
#' \describe{
#' \item{\code{\link{modify_design}}}{Add, delete or replace a step}
#' \item{\code{\link{redesign}}}{Redeclare local variables within a design (advanced)}
#' }
#'
#'
#' @section Design Templates:
#' \describe{
#' \item{\code{\link{expand_design}}}{Generate Designs from a Template}
#' \item{designs}{See also the \code{designs} package for templates to use}
#' }
#'
#'
#' @docType package
#' @importFrom stats glm lm var vcov sd aggregate anova aov as.formula confint coef df.residual pt qt rbinom rnorm rmultinom update.formula
#' @importFrom utils data capture.output
#' @name DeclareDesign
NULL
# Package load hook: register the DeclareDesign repository in the "repos"
# option so companion packages can be installed with install.packages().
# libname/pkgname are the standard .onLoad arguments (unused here).
# Returns the updated repos vector, invisibly.
.onLoad <- function(libname, pkgname) {
  repos <- getOption("repos")
  repos["declaredesign"] <- "https://declaredesign.github.io"
  options(repos = repos)
  invisible(repos)
}
utils::globalVariables(c("Y", "Z", "N"))
| /R/DeclareDesign.R | no_license | amirmasoudabdol/DeclareDesign | R | false | false | 1,954 | r | #' DeclareDesign package
#'
#' The four main types of functions are to declare a step, combine steps into designs,
#' and manipulate designs and design templates.
#'
#' @section Design Steps:
#' \describe{
#' \item{\code{\link{declare_population}}}{Population step}
#' \item{\code{\link{declare_potential_outcomes}}}{Potential Outcomes step}
#' \item{\code{\link{declare_sampling}}}{Sampling step}
#' \item{\code{\link{declare_assignment}}}{Assignment step}
#' \item{\code{\link{declare_reveal}}}{Reveal Outcomes step}
#' \item{\code{\link{declare_estimand}}}{Estimand step}
#' \item{\code{\link{declare_estimator}}}{Estimator step}
#' \item{\code{\link{declare_citation}}}{Citation step}
#' }
#'
#' @section Design Objects:
#' \describe{
#' \item{\code{\link{declare_design}}}{Declare a design from steps}
#' \item{\code{\link{draw_data}}}{Simulate the DGP}
#' \item{\code{\link{run_design}}}{Simulate the DGP with estimands/estimators}
#' \item{\code{\link{diagnose_design}}}{Diagnose a design}
#' \item{\code{\link{cite_design}}}{Cite a design}
#' }
#'
#'
#' @section Design Editing:
#' \describe{
#' \item{\code{\link{modify_design}}}{Add, delete or replace a step}
#' \item{\code{\link{redesign}}}{Redeclare local variables within a design (advanced)}
#' }
#'
#'
#' @section Design Templates:
#' \describe{
#' \item{\code{\link{expand_design}}}{Generate Designs from a Template}
#' \item{designs}{See also the \code{designs} package for templates to use}
#' }
#'
#'
#' @docType package
#' @importFrom stats glm lm var vcov sd aggregate anova aov as.formula confint coef df.residual pt qt rbinom rnorm rmultinom update.formula
#' @importFrom utils data capture.output
#' @name DeclareDesign
NULL
.onLoad <- function(libname, pkgname) {
# Package load hook: register the DeclareDesign package repository in the
# user's "repos" option so companion packages can be installed via
# install.packages(). Returns the updated repos vector invisibly.
# NOTE(review): CRAN policy discourages modifying user options in .onLoad
# without restoring them -- confirm this is intentional.
repos = getOption("repos")
repos["declaredesign"] = "https://declaredesign.github.io"
options(repos = repos)
invisible(repos)
}
# Declare variable names used via non-standard evaluation inside package code
# so R CMD check does not flag them as undefined globals.
utils::globalVariables(c("Y", "Z", "N"))
|
\name{ctree_control}
\alias{ctree_control}
\title{ Control for Conditional Inference Trees }
\description{
Various parameters that control aspects of the `ctree' fit.
}
\usage{
ctree_control(teststat = c("quadratic", "maximum"),
splitstat = c("quadratic", "maximum"),
splittest = FALSE,
testtype = c("Bonferroni", "MonteCarlo", "Univariate", "Teststatistic"),
pargs = GenzBretz(),
nmax = Inf, alpha = 0.05, mincriterion = 1 - alpha,
logmincriterion = log(mincriterion), minsplit = 20L, minbucket = 7L,
minprob = 0.01, stump = FALSE, lookahead = FALSE, nresample = 9999L,
MIA = FALSE, maxsurrogate = 0L, numsurrogate = FALSE, mtry = Inf, maxdepth = Inf,
multiway = FALSE, splittry = 2L, intersplit = FALSE, majority = FALSE,
caseweights = TRUE, applyfun = NULL, cores = NULL, saveinfo = TRUE)
}
\arguments{
\item{teststat}{ a character specifying the type of the test statistic
to be applied for variable selection. }
\item{splitstat}{ a character specifying the type of the test statistic
to be applied for splitpoint selection. Prior to
version 2.0-0, \code{maximum} was implemented only.}
\item{splittest}{ a logical changing linear (the default \code{FALSE}) to
maximally selected statistics for
variable selection. Currently needs \code{testtype = "MonteCarlo"}.}
\item{testtype}{ a character specifying how to compute the distribution of
the test statistic. The first three options refer to
p-values as criterion, \code{Teststatistic} uses the raw
statistic as criterion. \code{Bonferroni} and
\code{Univariate} relate to p-values from the asymptotic
distribution (adjusted or unadjusted).
Bonferroni-adjusted Monte-Carlo p-values are computed
when both \code{Bonferroni} and \code{MonteCarlo} are
given.}
\item{pargs}{ control parameters for the computation of multivariate
normal probabilities, see \code{\link[mvtnorm]{GenzBretz}}.}
\item{nmax}{ an integer defining the number of bins each variable
is divided into prior to tree building. The default \code{Inf}
does not apply any binning. Highly experimental, use at your own
risk.}
\item{alpha}{ a double, the significance level for variable selection.}
\item{mincriterion}{ the value of the test statistic or 1 - p-value that
must be exceeded in order to implement a split. }
\item{logmincriterion}{ the value of the test statistic or 1 - p-value that
must be exceeded in order to implement a split on
the log-scale. }
\item{minsplit}{ the minimum sum of weights in a node in order to be considered
for splitting. }
\item{minbucket}{ the minimum sum of weights in a terminal node. }
\item{minprob}{ proportion of observations needed to establish a terminal node.}
\item{stump}{ a logical determining whether a stump (a tree with a maximum of three
nodes only) is to be computed. }
\item{lookahead}{ a logical determining whether a split is implemented only
after checking if tests in both daughter nodes can be performed.}
\item{nresample}{ number of permutations for \code{testtype = "MonteCarlo"}.}
\item{MIA}{ a logical determining the treatment of \code{NA} as a category in split,
see Twala et al. (2008).}
\item{maxsurrogate}{ number of surrogate splits to evaluate.}
\item{numsurrogate}{ a logical for backward-compatibility with party. If
\code{TRUE}, only at least ordered variables are considered for surrogate splits.}
\item{mtry}{ number of input variables randomly sampled as candidates
at each node for random forest like algorithms. The default
\code{mtry = Inf} means that no random selection takes place.}
\item{maxdepth}{ maximum depth of the tree. The default \code{maxdepth = Inf}
means that no restrictions are applied to tree sizes.}
\item{multiway}{ a logical indicating if multiway splits for all factor levels
are implemented for unordered factors.}
\item{splittry}{ number of variables that are inspected for admissible splits
if the best split doesn't meet the sample size constraints.}
\item{intersplit}{ a logical indicating if splits in numeric variables
are simply \code{x <= a} (the default) or interpolated
\code{x <= (a + b) / 2}. The latter feature is experimental, see
Galili and Meilijson (2016).}
\item{majority}{ if \code{FALSE}, observations which can't be classified to a
daughter node because of missing information are randomly
assigned (following the node distribution). If \code{TRUE},
they go with the majority (the default in \code{\link[party]{ctree}}).}
\item{caseweights}{ a logical interpreting \code{weights} as case weights.}
\item{applyfun}{an optional \code{\link[base]{lapply}}-style function with arguments
\code{function(X, FUN, \dots)}. It is used for computing the variable selection criterion.
The default is to use the basic \code{lapply}
function unless the \code{cores} argument is specified (see below).}
\item{cores}{numeric. If set to an integer the \code{applyfun} is set to
\code{\link[parallel]{mclapply}} with the desired number of \code{cores}.}
\item{saveinfo}{logical. Store information about variable selection
procedure in \code{info} slot of each \code{partynode}.}
}
\details{
The arguments \code{teststat}, \code{testtype} and \code{mincriterion}
determine how the global null hypothesis of independence between all input
variables and the response is tested (see \code{\link{ctree}}).
The variable with most extreme p-value or test statistic is selected
for splitting. If this isn't possible due to sample size constraints
explained in the next paragraph, up to \code{splittry} other variables
are inspected for possible splits.
A split is established when all of the following criteria are met:
1) the sum of the weights in the current node
is larger than \code{minsplit}, 2) a fraction of the sum of weights of more than
\code{minprob} will be contained in all daughter nodes, 3) the sum of
the weights in all daughter nodes exceeds \code{minbucket}, and 4)
the depth of the tree is smaller than \code{maxdepth}.
This avoids pathological splits deep down the tree.
When \code{stump = TRUE}, a tree with at most two terminal nodes is computed.
The argument \code{mtry > 0} means that a random forest like `variable
selection', i.e., a random selection of \code{mtry} input variables, is
performed in each node.
In each inner node, \code{maxsurrogate} surrogate splits are computed
(regardless of any missing values in the learning sample). Factors
in test samples whose levels were empty in the learning sample
are treated as missing when computing predictions (in contrast
to \code{\link[party]{ctree}}). Note also the different behaviour of
\code{majority} in the two implementations.
}
\value{
A list.
}
\references{
B. E. T. H. Twala, M. C. Jones, and D. J. Hand (2008),
Good Methods for Coping with Missing Data in Decision Trees,
\emph{Pattern Recognition Letters}, \bold{29}(7), 950--956.
Tal Galili, Isaac Meilijson (2016), Splitting Matters: How
Monotone Transformation of Predictor Variables May Improve the
Predictions of Decision Tree Models, \url{https://arxiv.org/abs/1611.04561}.
}
\keyword{misc}
| /pkg/partykit/man/ctree_control.Rd | no_license | roberthroseroo/partykit | R | false | false | 7,920 | rd | \name{ctree_control}
\alias{ctree_control}
\title{ Control for Conditional Inference Trees }
\description{
Various parameters that control aspects of the `ctree' fit.
}
\usage{
ctree_control(teststat = c("quadratic", "maximum"),
splitstat = c("quadratic", "maximum"),
splittest = FALSE,
testtype = c("Bonferroni", "MonteCarlo", "Univariate", "Teststatistic"),
pargs = GenzBretz(),
nmax = Inf, alpha = 0.05, mincriterion = 1 - alpha,
logmincriterion = log(mincriterion), minsplit = 20L, minbucket = 7L,
minprob = 0.01, stump = FALSE, lookahead = FALSE, nresample = 9999L,
MIA = FALSE, maxsurrogate = 0L, numsurrogate = FALSE, mtry = Inf, maxdepth = Inf,
multiway = FALSE, splittry = 2L, intersplit = FALSE, majority = FALSE,
caseweights = TRUE, applyfun = NULL, cores = NULL, saveinfo = TRUE)
}
\arguments{
\item{teststat}{ a character specifying the type of the test statistic
to be applied for variable selection. }
\item{splitstat}{ a character specifying the type of the test statistic
to be applied for splitpoint selection. Prior to
version 2.0-0, \code{maximum} was implemented only.}
\item{splittest}{ a logical changing linear (the default \code{FALSE}) to
maximally selected statistics for
variable selection. Currently needs \code{testtype = "MonteCarlo"}.}
\item{testtype}{ a character specifying how to compute the distribution of
the test statistic. The first three options refer to
p-values as criterion, \code{Teststatistic} uses the raw
statistic as criterion. \code{Bonferroni} and
\code{Univariate} relate to p-values from the asymptotic
distribution (adjusted or unadjusted).
Bonferroni-adjusted Monte-Carlo p-values are computed
when both \code{Bonferroni} and \code{MonteCarlo} are
given.}
\item{pargs}{ control parameters for the computation of multivariate
normal probabilities, see \code{\link[mvtnorm]{GenzBretz}}.}
\item{nmax}{ an integer defining the number of bins each variable
is divided into prior to tree building. The default \code{Inf}
does not apply any binning. Highly experimental, use at your own
risk.}
\item{alpha}{ a double, the significance level for variable selection.}
\item{mincriterion}{ the value of the test statistic or 1 - p-value that
must be exceeded in order to implement a split. }
\item{logmincriterion}{ the value of the test statistic or 1 - p-value that
must be exceeded in order to implement a split on
the log-scale. }
\item{minsplit}{ the minimum sum of weights in a node in order to be considered
for splitting. }
\item{minbucket}{ the minimum sum of weights in a terminal node. }
\item{minprob}{ proportion of observations needed to establish a terminal node.}
\item{stump}{ a logical determining whether a stump (a tree with a maximum of three
nodes only) is to be computed. }
\item{lookahead}{ a logical determining whether a split is implemented only
after checking if tests in both daughter nodes can be performed.}
\item{nresample}{ number of permutations for \code{testtype = "MonteCarlo"}.}
\item{MIA}{ a logical determining the treatment of \code{NA} as a category in split,
see Twala et al. (2008).}
\item{maxsurrogate}{ number of surrogate splits to evaluate.}
\item{numsurrogate}{ a logical for backward-compatibility with party. If
\code{TRUE}, only at least ordered variables are considered for surrogate splits.}
\item{mtry}{ number of input variables randomly sampled as candidates
at each node for random forest like algorithms. The default
\code{mtry = Inf} means that no random selection takes place.}
\item{maxdepth}{ maximum depth of the tree. The default \code{maxdepth = Inf}
means that no restrictions are applied to tree sizes.}
\item{multiway}{ a logical indicating if multiway splits for all factor levels
are implemented for unordered factors.}
\item{splittry}{ number of variables that are inspected for admissible splits
if the best split doesn't meet the sample size constraints.}
\item{intersplit}{ a logical indicating if splits in numeric variables
are simply \code{x <= a} (the default) or interpolated
\code{x <= (a + b) / 2}. The latter feature is experimental, see
Galili and Meilijson (2016).}
\item{majority}{ if \code{FALSE}, observations which can't be classified to a
daughter node because of missing information are randomly
assigned (following the node distribution). If \code{TRUE},
they go with the majority (the default in \code{\link[party]{ctree}}).}
\item{caseweights}{ a logical interpreting \code{weights} as case weights.}
\item{applyfun}{an optional \code{\link[base]{lapply}}-style function with arguments
\code{function(X, FUN, \dots)}. It is used for computing the variable selection criterion.
The default is to use the basic \code{lapply}
function unless the \code{cores} argument is specified (see below).}
\item{cores}{numeric. If set to an integer the \code{applyfun} is set to
\code{\link[parallel]{mclapply}} with the desired number of \code{cores}.}
\item{saveinfo}{logical. Store information about variable selection
procedure in \code{info} slot of each \code{partynode}.}
}
\details{
The arguments \code{teststat}, \code{testtype} and \code{mincriterion}
determine how the global null hypothesis of independence between all input
variables and the response is tested (see \code{\link{ctree}}).
The variable with most extreme p-value or test statistic is selected
for splitting. If this isn't possible due to sample size constraints
explained in the next paragraph, up to \code{splittry} other variables
are inspected for possible splits.
A split is established when all of the following criteria are met:
1) the sum of the weights in the current node
is larger than \code{minsplit}, 2) a fraction of the sum of weights of more than
\code{minprob} will be contained in all daughter nodes, 3) the sum of
the weights in all daughter nodes exceeds \code{minbucket}, and 4)
the depth of the tree is smaller than \code{maxdepth}.
This avoids pathological splits deep down the tree.
When \code{stump = TRUE}, a tree with at most two terminal nodes is computed.
The argument \code{mtry > 0} means that a random forest like `variable
selection', i.e., a random selection of \code{mtry} input variables, is
performed in each node.
In each inner node, \code{maxsurrogate} surrogate splits are computed
(regardless of any missing values in the learning sample). Factors
in test samples whose levels were empty in the learning sample
are treated as missing when computing predictions (in contrast
to \code{\link[party]{ctree}}). Note also the different behaviour of
\code{majority} in the two implementations.
}
\value{
A list.
}
\references{
B. E. T. H. Twala, M. C. Jones, and D. J. Hand (2008),
Good Methods for Coping with Missing Data in Decision Trees,
\emph{Pattern Recognition Letters}, \bold{29}(7), 950--956.
Tal Galili, Isaac Meilijson (2016), Splitting Matters: How
Monotone Transformation of Predictor Variables May Improve the
Predictions of Decision Tree Models, \url{https://arxiv.org/abs/1611.04561}.
}
\keyword{misc}
|
GenerateDataYuanEtAl <- function(dataOptionsStructure){
#----------------------------------------------------------------------------------------------------------#
# K Lloyd 2016_09_16
#----------------------------------------------------------------------------------------------------------#
# Extracts data from Synapse
#----------------------------------------------------------------------------------------------------------#
# Builds nReps train/test data splits for survival modelling on one TCGA
# cancer type, combining an optional molecular-platform matrix with optional
# (numerically recoded) clinical covariates.
#
# Argument:
#   dataOptionsStructure - list of options; fields read here are nReps,
#     cancer, molPlatform, clinicalFlag and the Synapse IDs exp.ID,
#     clinical.ID, surv.ID, train.ID and test.ID.
#
# Value: a list of length nReps; element i holds the training/test design
#   matrices, survival targets and event indicators for replicate i.
#
# NOTE(review): depends on helpers myRead(), myRead.simple(), synapse.read()
# and on Surv() (presumably survival::Surv) defined/loaded elsewhere --
# confirm they are available before calling.
#
# NOTE(review): if molPlatform is not a molecular platform AND clinicalFlag
# is FALSE, trainingData is never assigned and the dim() calls below will
# error -- confirm callers never pass that combination.
#----------------------------------------------------------------------------------------------------------#
# Unpack the option structure into local variables.
nReps <- dataOptionsStructure$nReps
cancer <- dataOptionsStructure$cancer # c('KIRC','OV','GBM','LUSC')
molPlatform <- dataOptionsStructure$molPlatform # c('SCNA','methyl','mRNA','miRNA','protein','None')
clinicalFlag <- dataOptionsStructure$clinicalFlag # c(TRUE,FALSE)
exp.ID <- dataOptionsStructure$exp.ID
clinical.ID <- dataOptionsStructure$clinical.ID
surv.ID <- dataOptionsStructure$surv.ID
train.ID <- dataOptionsStructure$train.ID
test.ID <- dataOptionsStructure$test.ID
# Survival outcomes are always loaded; expression data only for a real
# molecular platform.
surv.data <- myRead(surv.ID)
if(molPlatform%in%c('SCNA','methyl','mRNA','miRNA','protein')){
exp.data <- myRead(exp.ID)
} else {
exp.data <- NULL
}
if(clinicalFlag){
clinical.data <- myRead.simple(clinical.ID)
# Recode gender as numeric 0/1 (FEMALE = 1).
if('gender'%in%colnames(clinical.data)){
clinical.data$gender <- ifelse(clinical.data$gender=="FEMALE",1, 0)
clinical.data$gender <- as.numeric(clinical.data$gender)
}
# Recode tumour grade to a numeric scale; GB and GX are mapped between
# integer grades (0.5 and 2.5 respectively).
if('grade'%in%colnames(clinical.data)){
clinical.data$grade <- as.character(clinical.data$grade)
clinical.data$grade[clinical.data$grade=='GB'] <- 0.5
clinical.data$grade[clinical.data$grade=='G1'] <- 1
clinical.data$grade[clinical.data$grade=='G2'] <- 2
clinical.data$grade[clinical.data$grade=='G3'] <- 3
clinical.data$grade[clinical.data$grade=='G4'] <- 4
clinical.data$grade[clinical.data$grade=='GX'] <- 2.5
clinical.data$grade <- as.numeric(clinical.data$grade)
}
# Stage coding differs per cancer type, so each cancer has its own mapping.
# KIRC: plain Stage I-IV labels mapped to 1-4.
if('stage'%in%colnames(clinical.data)&cancer=='KIRC'){
clinical.data$stage <- as.character(clinical.data$stage)
clinical.data$stage[clinical.data$stage=="Stage I"] <- 1
clinical.data$stage[clinical.data$stage=="Stage II"] <- 2
clinical.data$stage[clinical.data$stage=="Stage III"] <- 3
clinical.data$stage[clinical.data$stage=="Stage IV"] <- 4
clinical.data$stage <- as.numeric(clinical.data$stage)
}
# OV: FIGO sub-stages A/B/C mapped onto fractional offsets (.0/.3/.6).
if('stage'%in%colnames(clinical.data)&cancer=='OV'){
clinical.data$stage <- as.character(clinical.data$stage)
clinical.data$stage[clinical.data$stage=='IA'] <- 1.0
clinical.data$stage[clinical.data$stage=='IB'] <- 1.3
clinical.data$stage[clinical.data$stage=='IC'] <- 1.6
clinical.data$stage[clinical.data$stage=='IIA'] <- 2.0
clinical.data$stage[clinical.data$stage=='IIB'] <- 2.3
clinical.data$stage[clinical.data$stage=='IIC'] <- 2.6
clinical.data$stage[clinical.data$stage=='IIIA'] <- 3.0
clinical.data$stage[clinical.data$stage=='IIIB'] <- 3.3
clinical.data$stage[clinical.data$stage=='IIIC'] <- 3.6
clinical.data$stage[clinical.data$stage=='IV'] <- 4
clinical.data$stage <- as.numeric(clinical.data$stage)
}
# LUSC: "Stage" prefix plus sub-stage letters, same fractional scheme.
if('stage'%in%colnames(clinical.data)&cancer=='LUSC'){
clinical.data$stage <- as.character(clinical.data$stage)
clinical.data$stage[clinical.data$stage=='Stage IA'] <- 1.0
clinical.data$stage[clinical.data$stage=='Stage IB'] <- 1.3
clinical.data$stage[clinical.data$stage=='Stage II'] <- 2.0
clinical.data$stage[clinical.data$stage=='Stage IIA'] <- 2.0
clinical.data$stage[clinical.data$stage=='Stage IIB'] <- 2.3
clinical.data$stage[clinical.data$stage=='Stage IIIA'] <- 3.0
clinical.data$stage[clinical.data$stage=='Stage IIIB'] <- 3.3
clinical.data$stage <- as.numeric(clinical.data$stage)
}
} else {
clinical.data <- NULL
}
# Each column of these tables holds the sample names for one train/test
# replicate; column i is used on iteration i of the loop below.
train.all <- read.table(synapse.read(train.ID),header=F, stringsAsFactors=F)
test.all <- read.table(synapse.read(test.ID),header=F, stringsAsFactors=F)
# Right-censored survival object: time = OS_OS, event = OS_vital_status.
mySurv <- Surv(surv.data$OS_OS, surv.data$OS_vital_status,type='right')
toReturn <- list()
for(i in 1:nReps){
train.samples <- train.all[,i]
test.samples <- test.all[,i]
# Sample names are taken from whichever data source is in use; clinical
# data wins when both are present.
if(clinicalFlag){
sampleNames <- rownames(clinical.data)
} else if(molPlatform%in%c('SCNA','methyl','mRNA','miRNA','protein')){
sampleNames <- rownames(exp.data)
}
# Map replicate sample names to row indices of the loaded data.
train.row <- match(train.samples, sampleNames)
test.row <- match(test.samples, sampleNames)
y.train <- mySurv[train.row]
y.test <- mySurv[test.row]
if(molPlatform%in%c('SCNA','methyl','mRNA','miRNA','protein')){
x.train <- exp.data[train.row, ]
x.test <- exp.data[test.row, ]
} else {
x.train <- NULL
x.test <- NULL
}
if(clinicalFlag){
clinical.train <- clinical.data[train.row, ]
clinical.test <- clinical.data[test.row,]
} else {
clinical.train <- NULL
clinical.test <- NULL
}
# NOTE(review): as.matrix() ignores the ncol argument; the input is a
# vector so a one-column matrix results anyway. matrix(x, ncol = 1) was
# probably intended.
trainingTargets <- as.matrix(surv.data$OS_OS[train.row],ncol=1)
testTargets <- as.matrix(surv.data$OS_OS[test.row],ncol=1)
trainingEvents <- as.matrix(surv.data$OS_vital_status[train.row],ncol=1)
testEvents <- as.matrix(surv.data$OS_vital_status[test.row],ncol=1)
# Design matrix = clinical covariates, molecular features, or both
# side by side.
if(molPlatform%in%c('SCNA','methyl','mRNA','miRNA','protein') & clinicalFlag){
trainingData <- cbind(clinical.train,x.train)
testData <- cbind(clinical.test,x.test)
} else if(!(molPlatform%in%c('SCNA','methyl','mRNA','miRNA','protein')) & clinicalFlag){
trainingData <- clinical.train
testData <- clinical.test
} else if(molPlatform%in%c('SCNA','methyl','mRNA','miRNA','protein') & !clinicalFlag){
trainingData <- x.train
testData <- x.test
}
dimension <- dim(trainingData)[2]
nTraining <- dim(trainingData)[1]
nTest <- dim(testData)[1]
nSamples <- nTraining + nTest
# Bundle everything for this replicate into one named list.
toReturn[[i]] <- list('nReps'=nReps,'cancer'=cancer,'molPlatform'=molPlatform,'clinicalFlag'=clinicalFlag,'x.train'=x.train,'y.train'=y.train,
'x.test'=x.test,'y.test'=y.test,'clinical.train'=clinical.train,'clinical.test'=clinical.test,
'trainingTargets'=trainingTargets,'trainingData'=trainingData,'events'=trainingEvents,'testTargets'=testTargets,
'testData'=testData,'testEvents'=testEvents,'dimension'=dimension,'nSamples'=nSamples,'nTraining'=nTraining,'nTest'=nTest)
}
return(toReturn)
} | /toSource/GenerateDataYuanEtAl.R | permissive | kllloyd/Thesis | R | false | false | 6,306 | r | GenerateDataYuanEtAl <- function(dataOptionsStructure){
#----------------------------------------------------------------------------------------------------------#
# K Lloyd 2016_09_16
#----------------------------------------------------------------------------------------------------------#
# Extracts data from Synapse
#----------------------------------------------------------------------------------------------------------#
nReps <- dataOptionsStructure$nReps
cancer <- dataOptionsStructure$cancer # c('KIRC','OV','GBM','LUSC')
molPlatform <- dataOptionsStructure$molPlatform # c('SCNA','methyl','mRNA','miRNA','protein','None')
clinicalFlag <- dataOptionsStructure$clinicalFlag # c(TRUE,FALSE)
exp.ID <- dataOptionsStructure$exp.ID
clinical.ID <- dataOptionsStructure$clinical.ID
surv.ID <- dataOptionsStructure$surv.ID
train.ID <- dataOptionsStructure$train.ID
test.ID <- dataOptionsStructure$test.ID
surv.data <- myRead(surv.ID)
if(molPlatform%in%c('SCNA','methyl','mRNA','miRNA','protein')){
exp.data <- myRead(exp.ID)
} else {
exp.data <- NULL
}
if(clinicalFlag){
clinical.data <- myRead.simple(clinical.ID)
if('gender'%in%colnames(clinical.data)){
clinical.data$gender <- ifelse(clinical.data$gender=="FEMALE",1, 0)
clinical.data$gender <- as.numeric(clinical.data$gender)
}
if('grade'%in%colnames(clinical.data)){
clinical.data$grade <- as.character(clinical.data$grade)
clinical.data$grade[clinical.data$grade=='GB'] <- 0.5
clinical.data$grade[clinical.data$grade=='G1'] <- 1
clinical.data$grade[clinical.data$grade=='G2'] <- 2
clinical.data$grade[clinical.data$grade=='G3'] <- 3
clinical.data$grade[clinical.data$grade=='G4'] <- 4
clinical.data$grade[clinical.data$grade=='GX'] <- 2.5
clinical.data$grade <- as.numeric(clinical.data$grade)
}
if('stage'%in%colnames(clinical.data)&cancer=='KIRC'){
clinical.data$stage <- as.character(clinical.data$stage)
clinical.data$stage[clinical.data$stage=="Stage I"] <- 1
clinical.data$stage[clinical.data$stage=="Stage II"] <- 2
clinical.data$stage[clinical.data$stage=="Stage III"] <- 3
clinical.data$stage[clinical.data$stage=="Stage IV"] <- 4
clinical.data$stage <- as.numeric(clinical.data$stage)
}
if('stage'%in%colnames(clinical.data)&cancer=='OV'){
clinical.data$stage <- as.character(clinical.data$stage)
clinical.data$stage[clinical.data$stage=='IA'] <- 1.0
clinical.data$stage[clinical.data$stage=='IB'] <- 1.3
clinical.data$stage[clinical.data$stage=='IC'] <- 1.6
clinical.data$stage[clinical.data$stage=='IIA'] <- 2.0
clinical.data$stage[clinical.data$stage=='IIB'] <- 2.3
clinical.data$stage[clinical.data$stage=='IIC'] <- 2.6
clinical.data$stage[clinical.data$stage=='IIIA'] <- 3.0
clinical.data$stage[clinical.data$stage=='IIIB'] <- 3.3
clinical.data$stage[clinical.data$stage=='IIIC'] <- 3.6
clinical.data$stage[clinical.data$stage=='IV'] <- 4
clinical.data$stage <- as.numeric(clinical.data$stage)
}
if('stage'%in%colnames(clinical.data)&cancer=='LUSC'){
clinical.data$stage <- as.character(clinical.data$stage)
clinical.data$stage[clinical.data$stage=='Stage IA'] <- 1.0
clinical.data$stage[clinical.data$stage=='Stage IB'] <- 1.3
clinical.data$stage[clinical.data$stage=='Stage II'] <- 2.0
clinical.data$stage[clinical.data$stage=='Stage IIA'] <- 2.0
clinical.data$stage[clinical.data$stage=='Stage IIB'] <- 2.3
clinical.data$stage[clinical.data$stage=='Stage IIIA'] <- 3.0
clinical.data$stage[clinical.data$stage=='Stage IIIB'] <- 3.3
clinical.data$stage <- as.numeric(clinical.data$stage)
}
} else {
clinical.data <- NULL
}
train.all <- read.table(synapse.read(train.ID),header=F, stringsAsFactors=F)
test.all <- read.table(synapse.read(test.ID),header=F, stringsAsFactors=F)
mySurv <- Surv(surv.data$OS_OS, surv.data$OS_vital_status,type='right')
toReturn <- list()
for(i in 1:nReps){
train.samples <- train.all[,i]
test.samples <- test.all[,i]
if(clinicalFlag){
sampleNames <- rownames(clinical.data)
} else if(molPlatform%in%c('SCNA','methyl','mRNA','miRNA','protein')){
sampleNames <- rownames(exp.data)
}
train.row <- match(train.samples, sampleNames)
test.row <- match(test.samples, sampleNames)
y.train <- mySurv[train.row]
y.test <- mySurv[test.row]
if(molPlatform%in%c('SCNA','methyl','mRNA','miRNA','protein')){
x.train <- exp.data[train.row, ]
x.test <- exp.data[test.row, ]
} else {
x.train <- NULL
x.test <- NULL
}
if(clinicalFlag){
clinical.train <- clinical.data[train.row, ]
clinical.test <- clinical.data[test.row,]
} else {
clinical.train <- NULL
clinical.test <- NULL
}
trainingTargets <- as.matrix(surv.data$OS_OS[train.row],ncol=1)
testTargets <- as.matrix(surv.data$OS_OS[test.row],ncol=1)
trainingEvents <- as.matrix(surv.data$OS_vital_status[train.row],ncol=1)
testEvents <- as.matrix(surv.data$OS_vital_status[test.row],ncol=1)
if(molPlatform%in%c('SCNA','methyl','mRNA','miRNA','protein') & clinicalFlag){
trainingData <- cbind(clinical.train,x.train)
testData <- cbind(clinical.test,x.test)
} else if(!(molPlatform%in%c('SCNA','methyl','mRNA','miRNA','protein')) & clinicalFlag){
trainingData <- clinical.train
testData <- clinical.test
} else if(molPlatform%in%c('SCNA','methyl','mRNA','miRNA','protein') & !clinicalFlag){
trainingData <- x.train
testData <- x.test
}
dimension <- dim(trainingData)[2]
nTraining <- dim(trainingData)[1]
nTest <- dim(testData)[1]
nSamples <- nTraining + nTest
toReturn[[i]] <- list('nReps'=nReps,'cancer'=cancer,'molPlatform'=molPlatform,'clinicalFlag'=clinicalFlag,'x.train'=x.train,'y.train'=y.train,
'x.test'=x.test,'y.test'=y.test,'clinical.train'=clinical.train,'clinical.test'=clinical.test,
'trainingTargets'=trainingTargets,'trainingData'=trainingData,'events'=trainingEvents,'testTargets'=testTargets,
'testData'=testData,'testEvents'=testEvents,'dimension'=dimension,'nSamples'=nSamples,'nTraining'=nTraining,'nTest'=nTest)
}
return(toReturn)
} |
## Backtest: trade next-day mean reversion of the 25y-15y Treasury curve
## whenever the one-day change exceeds a 3-standard-deviation trigger.
## NOTE(review): absolute path assumed; the original "Users/..." had no
## leading slash and would only resolve when run from the filesystem root --
## confirm the intended location.
setwd("/Users/rolandjacobus/Documents/Thesis")

## Read in data
yields <- read.csv("Futures.csv")

## Set up yield data: curve = 25y - 15y, diff = scaled one-day change
names(yields) <- c("DATE", "T25", "T15")
yields$Curve <- yields[, 2] - yields[, 3]
Diff <- diff(yields$Curve, lag = 1) * 100
yields <- yields[-1, ]  # drop first row so each diff aligns with its own date
yields$diff <- Diff

## What does the data look like?
summary(yields$diff)
hist(yields$diff)

## Trade trigger: three standard deviations of the daily change
trigger <- 3 * sd(yields$diff)

days <- 1                    # holding period in rows (trading days)
end <- length(yields$diff)

## Scan for triggers. Fixes two bugs in the original: `results` was indexed
## before it was ever created, and yields[i + days, ] read one row past the
## end of the data on the last iteration. Rows are collected in a list and
## bound once at the end instead of growing a data frame inside the loop.
rows <- list()
k <- 1  # counter for how many times we hit the trigger
for (i in seq_len(end - days)) {
  if (abs(yields[i, 5]) > trigger) {
    move <- yields[i, 5]
    ## Fade the move: short the curve after a widening, long after a narrowing
    pl <- if (move > 0) {
      (yields[i, 4] - yields[i + days, 4]) * 100
    } else {
      (yields[i + days, 4] - yields[i, 4]) * 100
    }
    rows[[k]] <- data.frame(
      dates  = yields[i, 1],          # signal date
      curve1 = yields[i, 4],          # curve level at signal
      diff   = move,                  # daily change that fired the trigger
      curve2 = yields[i + days, 4],   # curve level after holding period
      PL     = pl,
      Record = if (pl > 0) "W" else "L",
      stringsAsFactors = FALSE
    )
    k <- k + 1
  }
}
results <- do.call(rbind, rows)

sapply(results, mode)
head(results)
hist(results$PL)
| /Loop Backtest.R | no_license | randyjacobus/Thesis | R | false | false | 1,262 | r | setwd("Users/rolandjacobus/Documents/Thesis")
## Read in Data
yields<-read.csv("Futures.csv")
## set up yield data
names(yields)<-c("DATE","T25","T15")
yields$Curve<-yields[,2]-yields[,3]
Diff<-diff(yields$Curve,lag=1)*100
yields<-yields[-1,]
yields$diff<-Diff
## what does data look like
summary(yields$diff)
hist(yields$diff)
## set trigger for trade
trigger<-3*sd(yields$diff)
## define variables
results<-as.data.frame(results)
class(results)
colnames(results)<-c('dates','curve1','diff',"curve2","PL","Record")
results[,1:5]<-sapply(results[,1:5],as.numeric)
days<-1
end<-length(yields$diff)
k=1 ## counter for how many times we hit trigger
## Loop to find triggers
for (i in 1:end) {
if(abs(yields[i,5])>trigger) {
results[k,1]<-yields[i,1] ## set date
results[k,2]<-yields[i,4] ## set curve
results[k,3]<-yields[i,5] ## set diff
results[k,4]<-yields[i+days,4] ## next day curve
results[k,5]<-if(results[k,3]>0) { ## determine pl.
(yields[i,4]-yields[i+days,4])*100
} else {
(yields[i+days,4]-yields[i,4])*100
}
results[k,6]<-if(results[k,5]>0) {
'W'
} else {
"L"
}
k=k+1 ## counter for results data frame
}
}
sapply(results,mode)
head(results)
hist(results$PL)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/priors.R
\name{get_prior}
\alias{get_prior}
\title{Overview on Priors for \pkg{brms} Models}
\usage{
get_prior(formula, data = NULL, family = gaussian(), autocor = NULL,
partial = NULL, threshold = c("flexible", "equidistant"),
internal = FALSE)
}
\arguments{
\item{formula}{An object of class "formula" (or one that can be coerced to that class):
a symbolic description of the model to be fitted.
The details of model specification are given under 'Details'.}
\item{data}{An optional data frame, list or environment (or object coercible by
\code{as.data.frame} to a data frame) containing the variables in the model.
If not found in data, the variables are taken from \code{environment(formula)},
typically the environment from which \code{brm} is called.
Although it is optional, we strongly recommend to supply a data.frame.}
\item{family}{A description of the error distribution and link function
to be used in the model. This can be a family function,
a call to a family function or a character string naming the family.
Currently, the following families are supported:
\code{gaussian}, \code{student}, \code{cauchy}, \code{binomial},
\code{bernoulli}, \code{Beta}, \code{categorical}, \code{poisson},
\code{negbinomial}, \code{geometric}, \code{Gamma}, \code{inverse.gaussian},
\code{exponential}, \code{weibull}, \code{cumulative}, \code{cratio},
\code{sratio}, \code{acat}, \code{hurdle_poisson}, \code{hurdle_negbinomial},
\code{hurdle_gamma}, \code{zero_inflated_binomial},
\code{zero_inflated_poisson}, and \code{zero_inflated_negbinomial}.
Every family function has a \code{link} argument allowing to specify
the link function to be applied on the response variable.
If not specified, default links are used.
See \code{\link[stats:family]{family}} for help on standard family functions
and \code{\link[brms:brmsfamily]{brmsfamily}} for family functions
specific to the \pkg{brms} package.
For backwards compatibility, \code{family} may also be a vector of two
character strings, the first naming the family and the second naming the link.
Further information is provided under 'Details'.}
\item{autocor}{An optional \code{\link{cor_brms}} object describing
the correlation structure
within the response variable (i.e. the 'autocorrelation').
See the documentation of \code{\link{cor_brms}} for a description
of the available correlation structures. Defaults to NULL,
corresponding to no correlations.}
\item{partial}{A one sided formula of the form \code{~expression}
allowing to specify predictors with category specific effects
in non-cumulative ordinal models
(i.e. in families \code{cratio}, \code{sratio}, or \code{acat}).}
\item{threshold}{A character string indicating the type of thresholds
(i.e. intercepts) used in an ordinal model.
\code{"flexible"} provides the standard unstructured thresholds and
\code{"equidistant"} restricts the distance between
consecutive thresholds to the same value.}
\item{internal}{A flag indicating if the names of additional internal parameters should be displayed.
Setting priors on these parameters is not recommended.}
}
\value{
A data.frame with columns \code{prior}, \code{class}, \code{coef}, and \code{group}
and several rows, each providing information on a parameter (or parameter class) on which
priors can be specified. The prior column is empty except for internal default priors.
}
\description{
Get information on all parameters (and parameter classes) for which priors
may be specified including default priors.
}
\examples{
## get all parameters and parameters classes to define priors on
(prior <- get_prior(count ~ log_Age_c + log_Base4_c * Trt_c
+ (1|patient) + (1|visit),
data = epilepsy, family = poisson()))
## define a prior on all fixed effects at once
prior$prior[1] <- "normal(0,10)"
## define a specific prior on the fixed effect of Trt_c
prior$prior[5] <- "student_t(10, 0, 5)"
## verify that the priors indeed found their way into Stan's model code
make_stancode(count ~ log_Age_c + log_Base4_c * Trt_c
+ (1|patient) + (1|visit),
data = epilepsy, family = poisson(),
prior = prior)
}
\seealso{
\code{\link[brms:set_prior]{set_prior}}
}
| /man/get_prior.Rd | no_license | paulhendricks/brms | R | false | true | 4,345 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/priors.R
\name{get_prior}
\alias{get_prior}
\title{Overview on Priors for \pkg{brms} Models}
\usage{
get_prior(formula, data = NULL, family = gaussian(), autocor = NULL,
partial = NULL, threshold = c("flexible", "equidistant"),
internal = FALSE)
}
\arguments{
\item{formula}{An object of class "formula" (or one that can be coerced to that class):
a symbolic description of the model to be fitted.
The details of model specification are given under 'Details'.}
\item{data}{An optional data frame, list or environment (or object coercible by
\code{as.data.frame} to a data frame) containing the variables in the model.
If not found in data, the variables are taken from \code{environment(formula)},
typically the environment from which \code{brm} is called.
Although it is optional, we strongly recommend to supply a data.frame.}
\item{family}{A description of the error distribution and link function
to be used in the model. This can be a family function,
a call to a family function or a character string naming the family.
Currently, the following families are supported:
\code{gaussian}, \code{student}, \code{cauchy}, \code{binomial},
\code{bernoulli}, \code{Beta}, \code{categorical}, \code{poisson},
\code{negbinomial}, \code{geometric}, \code{Gamma}, \code{inverse.gaussian},
\code{exponential}, \code{weibull}, \code{cumulative}, \code{cratio},
\code{sratio}, \code{acat}, \code{hurdle_poisson}, \code{hurdle_negbinomial},
\code{hurdle_gamma}, \code{zero_inflated_binomial},
\code{zero_inflated_poisson}, and \code{zero_inflated_negbinomial}.
Every family function has a \code{link} argument allowing to specify
the link function to be applied on the response variable.
If not specified, default links are used.
See \code{\link[stats:family]{family}} for help on standard family functions
and \code{\link[brms:brmsfamily]{brmsfamily}} for family functions
specific to the \pkg{brms} package.
For backwards compatibility, \code{family} may also be a vector of two
character strings, the first naming the family and the second naming the link.
Further information is provided under 'Details'.}
\item{autocor}{An optional \code{\link{cor_brms}} object describing
the correlation structure
within the response variable (i.e. the 'autocorrelation').
See the documentation of \code{\link{cor_brms}} for a description
of the available correlation structures. Defaults to NULL,
corresponding to no correlations.}
\item{partial}{A one sided formula of the form \code{~expression}
allowing to specify predictors with category specific effects
in non-cumulative ordinal models
(i.e. in families \code{cratio}, \code{sratio}, or \code{acat}).}
\item{threshold}{A character string indicating the type of thresholds
(i.e. intercepts) used in an ordinal model.
\code{"flexible"} provides the standard unstructured thresholds and
\code{"equidistant"} restricts the distance between
consecutive thresholds to the same value.}
\item{internal}{A flag indicating if the names of additional internal parameters should be displayed.
Setting priors on these parameters is not recommended}
}
\value{
A data.frame with columns \code{prior}, \code{class}, \code{coef}, and \code{group}
and several rows, each providing information on a parameter (or parameter class) on which
priors can be specified. The prior column is empty except for internal default priors.
}
\description{
Get information on all parameters (and parameter classes) for which priors
may be specified including default priors.
}
\examples{
## get all parameters and parameters classes to define priors on
(prior <- get_prior(count ~ log_Age_c + log_Base4_c * Trt_c
+ (1|patient) + (1|visit),
data = epilepsy, family = poisson()))
## define a prior on all fixed effects at once
prior$prior[1] <- "normal(0,10)"
## define a specific prior on the fixed effect of Trt_c
prior$prior[5] <- "student_t(10, 0, 5)"
## verify that the priors indeed found their way into Stan's model code
make_stancode(count ~ log_Age_c + log_Base4_c * Trt_c
+ (1|patient) + (1|visit),
data = epilepsy, family = poisson(),
prior = prior)
}
\seealso{
\code{\link[brms:set_prior]{set_prior}}
}
|
# Unit test (RUnit style) validating the shipped microRNAome dataset:
# checks the expected dimensions of the expression object and its column
# metadata, spot-checks one assay value, and verifies that the assay's
# column names match the SRA accession numbers recorded in colData.
# Relies on RUnit (checkEquals*) and SummarizedExperiment (assay, colData)
# being attached by the test harness.
test_data <- function(){
    data(microRNAome)
    checkEqualsNumeric(dim(microRNAome), c(2131, 528))
    checkEqualsNumeric(dim(colData(microRNAome)), c(528, 14))
    checkEqualsNumeric(assay(microRNAome)[5,5], 246)
    checkEquals(colnames(assay(microRNAome)), colData(microRNAome)$SRA_number)
}
| /inst/unitTests/test_data.R | no_license | mccallm/microRNAome_original | R | false | false | 298 | r | test_data <- function(){
data(microRNAome)
checkEqualsNumeric(dim(microRNAome), c(2131, 528))
checkEqualsNumeric(dim(colData(microRNAome)), c(528, 14))
checkEqualsNumeric(assay(microRNAome)[5,5], 246)
checkEquals(colnames(assay(microRNAome)), colData(microRNAome)$SRA_number)
}
|
AVAILABLE_TRAINING_PROTOCOLS <- c('sgd', 'batch')
# Train a multilayer perceptron via backpropagation.
#
# X:            input matrix, one sample per row.
# targets:      matrix of target outputs, one row per sample.
# layout:       integer vector of layer sizes (hidden layers first, output
#               layer last).
# learningRate: scale factor applied to each weight correction.
# maxError:     stop early once the epoch cost drops to this value or below.
# maxNumEpochs: maximum number of passes over the data.
# protocol:     'sgd' or 'batch' (see AVAILABLE_TRAINING_PROTOCOLS).
# hiddenActivation / outputActivation and their *Derivative companions:
#               activation functions per layer kind. NOTE(review): the
#               defaults tanhDerivative/sigmoidDerivative are not base R;
#               they must be defined elsewhere in the project.
#
# Returns a list with the trained `weights` and the per-epoch `costHistory`.
fitMlp <- function(X, targets, layout, learningRate, maxError=0.001, maxNumEpochs=1000, protocol='sgd',
                   hiddenActivation=tanh, hiddenActivationDerivative=tanhDerivative, verbose=FALSE,
                   outputActivation=sigmoid, outputActivationDerivative=sigmoidDerivative) {
    if (!(protocol %in% AVAILABLE_TRAINING_PROTOCOLS)) {
        stop(paste('Invalid training protocol:', protocol))
    }
    state <- list()
    netDepth <- length(layout)
    state$weights <- initializeRandomWeights(X, layout)
    state$J <- c()  # cost history, one entry per epoch
    for (epoch in 1:maxNumEpochs) {
        # Forward pass once per epoch; activations are cached in `state` for
        # use during backpropagation. NOTE(review): under 'sgd' the cached
        # activations are not refreshed after each within-epoch weight
        # update, so later samples backpropagate against slightly stale
        # activations -- presumably intentional; confirm.
        feedForwardResults <- feedForward(X, state$weights, hiddenActivation, outputActivation)
        state$netActivations <- feedForwardResults$netActivations
        state$layerOutputs <- feedForwardResults$layerOutputs
        predictions <- state$layerOutputs[[netDepth]]
        state$J <- c(state$J, computeCost(targets, predictions))
        if (verbose)
            print(state$J[epoch])
        state$errors <- targets - predictions
        deltaWeights <- initializeWeightsCorrection(state$weights)  # used by 'batch' only
        nSamples <- nrow(X)
        for (sample in 1:nSamples) {
            state$currentSample <- sample
            state$sensitivities <- list()
            # Walk layers from the output back to the input (backprop order).
            for (layer in netDepth:1) {
                state$currentLayer <- layer
                state$sensitivities[[layer]] <- calcSensitivity(state, layout, hiddenActivationDerivative, outputActivationDerivative)
                dW <- calcWeightsCorrection(state, X, learningRate)
                if (protocol == 'sgd') {
                    state$weights[[layer]] <- state$weights[[layer]] + dW
                } else if (protocol == 'batch') {
                    deltaWeights[[layer]] <- deltaWeights[[layer]] + dW
                }
            }
        }
        if (protocol == 'batch') {
            # Apply the corrections accumulated over all samples in one step.
            for (layer in netDepth:1) {
                state$weights[[layer]] <- state$weights[[layer]] + deltaWeights[[layer]]
            }
        }
        if (state$J[epoch] <= maxError)
            break
    }
    list(
        weights = state$weights,
        costHistory = state$J
    )
}
# Run a forward pass through the trained network and return the activated
# output of the final layer (the network's prediction for each row of X).
predictMlp <- function(X, weights, hiddenActivation, outputActivation) {
    forwardPass <- feedForward(X, weights, hiddenActivation, outputActivation)
    forwardPass$layerOutputs[[length(weights)]]
}
# Build one random weight matrix per layer, sized to connect the previous
# layer's outputs (plus a bias input) to the current layer's units.
#
# X:      training input matrix; only ncol(X) is used (input dimensionality).
# layout: integer vector giving the number of units in each layer.
# Returns a list of weight matrices, one per entry in `layout`.
initializeRandomWeights <- function(X, layout) {
    weights <- list()
    prevLayerSize <- ncol(X)
    # seq_along() (rather than 1:length(layout)) correctly yields an empty
    # loop for an empty `layout`, instead of iterating over c(1, 0).
    for (layer in seq_along(layout)) {
        # +1 accounts for the bias term appended by addBias() during the
        # forward pass.
        weights[[layer]] <- initializeRandomLayerWeights(prevLayerSize + 1, layout[layer])
        prevLayerSize <- layout[layer]
    }
    weights
}
# Draw uniform(0, 1) weights for a single layer: one row per output unit,
# one column per input (including the bias input).
initializeRandomLayerWeights <- function(inputSize, outputSize) {
    matrix(runif(inputSize * outputSize), nrow = outputSize, ncol = inputSize)
}
# Create zero matrices matching the shape of each layer's weight matrix;
# used to accumulate per-layer weight deltas under the 'batch' protocol.
initializeWeightsCorrection <- function(weights) {
    # lapply avoids growing a list in a loop and, unlike 1:length(weights),
    # handles an empty `weights` list correctly (returns an empty list).
    lapply(weights, function(w) matrix(0, nrow = nrow(w), ncol = ncol(w)))
}
# Propagate input X through all layers, caching both the raw net
# activations and the activated outputs of every layer (both are needed
# later for backpropagation).
#
# Returns list(netActivations = <list>, layerOutputs = <list>), one entry
# per layer; layerOutputs[[length(weights)]] is the network prediction.
feedForward <- function(X, weights, hiddenActivation, outputActivation) {
    netDepth <- length(weights)
    # Preallocate instead of growing lists inside the loop; seq_len() also
    # makes the loop a safe no-op for a degenerate empty network, where
    # 1:netDepth would iterate over c(1, 0).
    netActivations <- vector("list", netDepth)
    layerOutputs <- vector("list", netDepth)
    layerInput <- X
    for (layer in seq_len(netDepth)) {
        netActivations[[layer]] <- passForward(layerInput, weights[[layer]])
        # The last layer uses the output activation; all others the hidden one.
        activation <- if (layer == netDepth) outputActivation else hiddenActivation
        layerOutputs[[layer]] <- activation(netActivations[[layer]])
        layerInput <- layerOutputs[[layer]]
    }
    list(
        netActivations = netActivations,
        layerOutputs = layerOutputs
    )
}
# Compute a layer's net activation: append the bias input to X, then apply
# the layer's weight matrix (W has one row per output unit).
passForward <- function(X, W) {
    addBias(X) %*% t(W)
}
# Prepend a constant bias column of 1s to the input matrix.
addBias <- function(X) {
    cbind(1, X)
}
# Drop the first (bias) column from a weight matrix, preserving matrix shape.
removeBias <- function(W) {
    # Negative indexing with drop = FALSE keeps a matrix even when a single
    # column remains, and -- unlike 2:ncol(W), which expands to c(2, 1) --
    # behaves correctly (0-column matrix) when W has only the bias column.
    W[, -1, drop = FALSE]
}
# Sum-of-squared-errors cost, halved so its derivative w.r.t. the output is
# simply (z - targets).
computeCost <- function(targets, z) {
    residuals <- targets - z
    0.5 * sum(residuals ^ 2)
}
# Compute the sensitivity (delta) vector for the layer/sample currently
# recorded in `state`, following standard backpropagation:
#   output layer: delta = f'(net) * (target - output)
#   hidden layer: delta = f'(net) * t(W_next without bias col) %*% delta_next
# Returns a column matrix with one row per unit in the current layer.
calcSensitivity <- function(state, layout, hiddenActivationDerivative, outputActivationDerivative) {
    layer <- state$currentLayer
    sample <- state$currentSample
    if (layer == length(layout)) {
        # Output layer: scale the prediction error by the activation slope.
        dOutputdNetActivation <- outputActivationDerivative(state$netActivations[[layer]][sample, ])
        error <- state$errors[sample, ]
        sensitivity <- dOutputdNetActivation * error
    } else {
        # Hidden layer: propagate the next layer's sensitivities backwards
        # through its weights; the bias column is removed because the bias
        # input feeds no unit in the current layer.
        dOutputdNetActivation <- hiddenActivationDerivative(state$netActivations[[layer]][sample, ])
        weightedNextLayerSensitivitiesSum <- t(removeBias(state$weights[[layer + 1]])) %*% state$sensitivities[[layer + 1]]
        sensitivity <- dOutputdNetActivation * weightedNextLayerSensitivitiesSum
    }
    matrix(sensitivity, nrow=layout[layer], ncol=1)
}
# Weight correction for the current layer/sample: the outer product of the
# layer's sensitivity vector with the bias-augmented output of the previous
# layer (the raw input X for layer 1), scaled by the learning rate.
calcWeightsCorrection <- function(state, X, learningRate) {
    layer <- state$currentLayer
    prevLayerOutput <- if (layer == 1) X else state$layerOutputs[[layer - 1]]
    biasedOutput <- addBias(prevLayerOutput)
    sampleRow <- biasedOutput[state$currentSample, ]
    learningRate * (state$sensitivities[[layer]] %*% t(sampleRow))
}
| /R/ann/mlp.R | no_license | alvarolemos/machinelearning | R | false | false | 4,973 | r | AVAILABLE_TRAINING_PROTOCOLS <- c('sgd', 'batch')
fitMlp <- function(X, targets, layout, learningRate, maxError=0.001, maxNumEpochs=1000, protocol='sgd',
hiddenActivation=tanh, hiddenActivationDerivative=tanhDerivative, verbose=FALSE,
outputActivation=sigmoid, outputActivationDerivative=sigmoidDerivative) {
if (!(protocol %in% AVAILABLE_TRAINING_PROTOCOLS)) {
stop(paste('Invalid training protocol:', protocol))
}
state <- list()
netDepth <- length(layout)
state$weights <- initializeRandomWeights(X, layout)
state$J <- c()
for (epoch in 1:maxNumEpochs) {
feedForwardResults <- feedForward(X, state$weights, hiddenActivation, outputActivation)
state$netActivations <- feedForwardResults$netActivations
state$layerOutputs <- feedForwardResults$layerOutputs
predictions <- state$layerOutputs[[netDepth]]
state$J <- c(state$J, computeCost(targets, predictions))
if (verbose)
print(state$J[epoch])
state$errors <- targets - predictions
deltaWeights <- initializeWeightsCorrection(state$weights)
nSamples <- nrow(X)
for (sample in 1:nSamples) {
state$currentSample <- sample
state$sensitivities <- list()
for (layer in netDepth:1) {
state$currentLayer <- layer
state$sensitivities[[layer]] <- calcSensitivity(state, layout, hiddenActivationDerivative, outputActivationDerivative)
dW <- calcWeightsCorrection(state, X, learningRate)
if (protocol == 'sgd') {
state$weights[[layer]] <- state$weights[[layer]] + dW
} else if (protocol == 'batch') {
deltaWeights[[layer]] <- deltaWeights[[layer]] + dW
}
}
}
if (protocol == 'batch') {
for (layer in netDepth:1) {
state$weights[[layer]] <- state$weights[[layer]] + deltaWeights[[layer]]
}
}
if (state$J[epoch] <= maxError)
break
}
list(
weights = state$weights,
costHistory = state$J
)
}
predictMlp <- function(X, weights, hiddenActivation, outputActivation) {
feedForwardResults <- feedForward(X, weights, hiddenActivation, outputActivation)
layerOutputs <- feedForwardResults$layerOutputs
netDepth <- length(weights)
layerOutputs[[netDepth]]
}
initializeRandomWeights <- function(X, layout) {
weights <- list()
prevLayerSize <- ncol(X)
for (layer in 1:length(layout)) {
weights[[layer]] <- initializeRandomLayerWeights(prevLayerSize + 1, layout[layer])
prevLayerSize <- layout[layer]
}
weights
}
initializeRandomLayerWeights <- function(inputSize, outputSize) {
randomValues <- runif(inputSize * outputSize)
return(matrix(randomValues, nrow=outputSize, ncol=inputSize))
}
initializeWeightsCorrection <- function(weights) {
deltaWeights <- list()
for (i in 1:length(weights)) {
deltaWeights[[i]] <- matrix(0, nrow = nrow(weights[[i]]), ncol = ncol(weights[[i]]))
}
deltaWeights
}
feedForward <- function(X, weights, hiddenActivation, outputActivation) {
netActivations <- list()
layerOutputs <- list()
layerInput <- X
netDepth <- length(weights)
for (layer in 1:netDepth) {
netActivations[[layer]] <- passForward(layerInput, weights[[layer]])
if (layer == netDepth) {
layerOutputs[[layer]] <- outputActivation(netActivations[[layer]])
} else {
layerOutputs[[layer]] <- hiddenActivation(netActivations[[layer]])
}
layerInput <- layerOutputs[[layer]]
}
list(
netActivations = netActivations,
layerOutputs = layerOutputs
)
}
passForward <- function(X, W) {
Xbiased <- addBias(X)
net <- Xbiased %*% t(W)
return(net)
}
addBias <- function(X) {
return(cbind(1, X))
}
removeBias <- function(W) {
matrix(W[, 2:ncol(W)], nrow=nrow(W), ncol=ncol(W)-1)
}
computeCost <- function(targets, z) {
return(0.5 * sum((targets - z) ^ 2))
}
calcSensitivity <- function(state, layout, hiddenActivationDerivative, outputActivationDerivative) {
layer <- state$currentLayer
sample <- state$currentSample
if (layer == length(layout)) {
dOutputdNetActivation <- outputActivationDerivative(state$netActivations[[layer]][sample, ])
error <- state$errors[sample, ]
sensitivity <- dOutputdNetActivation * error
} else {
dOutputdNetActivation <- hiddenActivationDerivative(state$netActivations[[layer]][sample, ])
weightedNextLayerSensitivitiesSum <- t(removeBias(state$weights[[layer + 1]])) %*% state$sensitivities[[layer + 1]]
sensitivity <- dOutputdNetActivation * weightedNextLayerSensitivitiesSum
}
matrix(sensitivity, nrow=layout[layer], ncol=1)
}
calcWeightsCorrection <- function(state, X, learningRate) {
layer <- state$currentLayer
sample <- state$currentSample
if (layer == 1) {
prevLayerOutput <- X
} else {
prevLayerOutput <- state$layerOutputs[[layer - 1]]
}
prevLayerOutput <- addBias(prevLayerOutput)
learningRate * (state$sensitivities[[layer]] %*% t(prevLayerOutput[sample, ]))
}
|
# Semi-partial association of X with Y controlling for C: the association
# of (X, C) with Y minus the association of C alone with Y, floored at 0.
# `ma()` is this package's (matie) association estimator; its warnings are
# suppressed deliberately. NOTE(review): $A is presumably ma()'s association
# score -- confirm against ma()'s documentation.
spa <- function(Y,X,C){
  both <- suppressWarnings(ma(cbind(Y=Y,X=X,C=C),partition=list(c(1),c(2,3)))$A)
  one <- suppressWarnings(ma(cbind(Y=Y,C=C),partition=list(c(1),c(2)))$A)
  return (max(both-one,0))
}
| /matie/R/spa.R | no_license | ingted/R-Examples | R | false | false | 208 | r | spa <- function(Y,X,C){
both <- suppressWarnings(ma(cbind(Y=Y,X=X,C=C),partition=list(c(1),c(2,3)))$A)
one <- suppressWarnings(ma(cbind(Y=Y,C=C),partition=list(c(1),c(2)))$A)
return (max(both-one,0))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kinfitr_srtm_v.R
\name{srtm_v}
\alias{srtm_v}
\title{Simplified Reference Tissue Model with Blood Volumes}
\usage{
srtm_v(t_tac, reftac, roitac, bloodtac, weights, vBr_fixed, frameStartEnd,
R1.start = 1, R1.lower = 1e-04, R1.upper = 10, k2.start = 0.1,
k2.lower = 1e-04, k2.upper = 1, bp.start = 1.5, bp.lower = -10,
bp.upper = 15, vBr.start = 0.05, vBr.lower = 1e-04, vBr.upper = 0.15,
vBt.start = 0.05, vBt.lower = 1e-04, vBt.upper = 0.15,
multstart_iter = 1, multstart_lower, multstart_upper, printvals = F)
}
\arguments{
\item{t_tac}{Numeric vector of times for each frame in minutes. We use the
time halfway through the frame as well as a zero. If a time zero frame is
not included, it will be added.}
\item{reftac}{Numeric vector of radioactivity concentrations in the reference
tissue for each frame. We include zero at time zero: if not included, it is
added.}
\item{roitac}{Numeric vector of radioactivity concentrations in the target
tissue for each frame. We include zero at time zero: if not included, it is
added.}
\item{bloodtac}{Numeric vector of radioactivity concentrations in the blood
for each frame. We include zero at time zero: if not included, it is
added.}
\item{weights}{Optional. Numeric vector of the weights assigned to each frame
in the fitting. We include zero at time zero: if not included, it is added.
If not specified, uniform weights will be used.}
\item{vBr_fixed}{Optional. The blood volume fraction of the reference region. If not
specified, this will be fitted. This parameter was fixed in the original article.}
\item{frameStartEnd}{Optional. This allows one to specify the beginning and
final frame to use for modelling, e.g. c(1,20). This is to assess time
stability.}
\item{R1.start}{Optional. Starting parameter for fitting of R1. Default is 1.}
\item{R1.lower}{Optional. Lower bound for the fitting of R1. Default is 0.0001.}
\item{R1.upper}{Optional. Upper bound for the fitting of R1. Default is 10.}
\item{k2.start}{Optional. Starting parameter for fitting of k2. Default is
0.1.}
\item{k2.lower}{Optional. Lower bound for the fitting of k2. Default is 0.0001.}
\item{k2.upper}{Optional. Upper bound for the fitting of k2. Default is 1.}
\item{bp.start}{Optional. Starting parameter for fitting of bp. Default is
1.5.}
\item{bp.lower}{Optional. Lower bound for the fitting of bp. Default is -10.}
\item{bp.upper}{Optional. Upper bound for the fitting of bp. Default is 15.}
\item{vBr.start}{Optional. Starting parameter for fitting of vBr. Default is
0.05.}
\item{vBr.lower}{Optional. Lower bound for the fitting of vBr. Default is 0.0001.}
\item{vBr.upper}{Optional. Upper bound for the fitting of vBr. Default is
0.15.}
\item{vBt.start}{Optional. Starting parameter for fitting of vBt. Default is
0.05.}
\item{vBt.lower}{Optional. Lower bound for the fitting of vBt. Default is 0.0001.}
\item{vBt.upper}{Optional. Upper bound for the fitting of vBt. Default is
0.15.}
\item{multstart_iter}{Number of iterations for starting parameters. Default is 1.
For more information, see \code{\link[nls.multstart]{nls_multstart}}. If
specified as 1 for any parameters, the original starting value will be
used, and the multstart_lower and multstart_upper values ignored.}
\item{multstart_lower}{Optional. Lower bounds for starting parameters. Defaults
to the lower bounds. Named list of whichever parameters' starting bounds should
be altered.}
\item{multstart_upper}{Optional. Upper bounds for starting parameters. Defaults
to the upper bounds. Named list of whichever parameters' starting bounds should
be altered.}
\item{printvals}{Optional. This displays the parameter values for each
iteration of the model. This is useful for debugging and changing starting
values and upper and lower bounds for parameters.}
}
\value{
A list with a data frame of the fitted parameters \code{out$par}, the
model fit object \code{out$fit}, the model weights \code{out$weights}, and
a dataframe containing the TACs both of the data and the fitted values
\code{out$tacs}.
}
\description{
Function to fit the SRTM_V model of Tomasi et al (2008) to data.
}
\examples{
srtm_v(t_tac, reftac, roitac, bloodtac)
srtm_v(t_tac, reftac, roitac, bloodtac, weights, frameStartEnd = c(1,11), bp.upper=1)
}
\references{
Tomasi, G., Edison, P., ... & Turkheimer, F. E. (2008). Novel
reference region model reveals increased microglial and reduced vascular
binding of 11C-(R)-PK11195 in patients with Alzheimer's disease. Journal of
Nuclear Medicine, 49(8), 1249-1256.
}
\author{
Granville J Matheson, \email{mathesong@gmail.com}
}
| /man/srtm_v.Rd | no_license | eebrown/kinfitr | R | false | true | 4,649 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kinfitr_srtm_v.R
\name{srtm_v}
\alias{srtm_v}
\title{Simplified Reference Tissue Model with Blood Volumes}
\usage{
srtm_v(t_tac, reftac, roitac, bloodtac, weights, vBr_fixed, frameStartEnd,
R1.start = 1, R1.lower = 1e-04, R1.upper = 10, k2.start = 0.1,
k2.lower = 1e-04, k2.upper = 1, bp.start = 1.5, bp.lower = -10,
bp.upper = 15, vBr.start = 0.05, vBr.lower = 1e-04, vBr.upper = 0.15,
vBt.start = 0.05, vBt.lower = 1e-04, vBt.upper = 0.15,
multstart_iter = 1, multstart_lower, multstart_upper, printvals = F)
}
\arguments{
\item{t_tac}{Numeric vector of times for each frame in minutes. We use the
time halfway through the frame as well as a zero. If a time zero frame is
not included, it will be added.}
\item{reftac}{Numeric vector of radioactivity concentrations in the reference
tissue for each frame. We include zero at time zero: if not included, it is
added.}
\item{roitac}{Numeric vector of radioactivity concentrations in the target
tissue for each frame. We include zero at time zero: if not included, it is
added.}
\item{bloodtac}{Numeric vector of radioactivity concentrations in the blood
for each frame. We include zero at time zero: if not included, it is
added.}
\item{weights}{Optional. Numeric vector of the weights assigned to each frame
in the fitting. We include zero at time zero: if not included, it is added.
If not specified, uniform weights will be used.}
\item{vBr_fixed}{Optional. The blood volume fraction of the reference region. If not
specified, this will be fitted. This parameter was fixed in the original article.}
\item{frameStartEnd}{Optional. This allows one to specify the beginning and
final frame to use for modelling, e.g. c(1,20). This is to assess time
stability.}
\item{R1.start}{Optional. Starting parameter for fitting of R1. Default is 1.}
\item{R1.lower}{Optional. Lower bound for the fitting of R1. Default is 0.0001.}
\item{R1.upper}{Optional. Upper bound for the fitting of R1. Default is 10.}
\item{k2.start}{Optional. Starting parameter for fitting of k2. Default is
0.1.}
\item{k2.lower}{Optional. Lower bound for the fitting of k2. Default is 0.0001.}
\item{k2.upper}{Optional. Upper bound for the fitting of k2. Default is 1.}
\item{bp.start}{Optional. Starting parameter for fitting of bp. Default is
1.5.}
\item{bp.lower}{Optional. Lower bound for the fitting of bp. Default is -10.}
\item{bp.upper}{Optional. Upper bound for the fitting of bp. Default is 15.}
\item{vBr.start}{Optional. Starting parameter for fitting of vBr. Default is
0.05.}
\item{vBr.lower}{Optional. Lower bound for the fitting of vBr. Default is 0.0001.}
\item{vBr.upper}{Optional. Upper bound for the fitting of vBr. Default is
0.15.}
\item{vBt.start}{Optional. Starting parameter for fitting of vBt. Default is
0.05.}
\item{vBt.lower}{Optional. Lower bound for the fitting of vBt. Default is 0.0001.}
\item{vBt.upper}{Optional. Upper bound for the fitting of vBt. Default is
0.15.}
\item{multstart_iter}{Number of iterations for starting parameters. Default is 1.
For more information, see \code{\link[nls.multstart]{nls_multstart}}. If
specified as 1 for any parameters, the original starting value will be
used, and the multstart_lower and multstart_upper values ignored.}
\item{multstart_lower}{Optional. Lower bounds for starting parameters. Defaults
to the lower bounds. Named list of whichever parameters' starting bounds should
be altered.}
\item{multstart_upper}{Optional. Upper bounds for starting parameters. Defaults
to the upper bounds. Named list of whichever parameters' starting bounds should
be altered.}
\item{printvals}{Optional. This displays the parameter values for each
iteration of the model. This is useful for debugging and changing starting
values and upper and lower bounds for parameters.}
}
\value{
A list with a data frame of the fitted parameters \code{out$par}, the
model fit object \code{out$fit}, the model weights \code{out$weights}, and
a dataframe containing the TACs both of the data and the fitted values
\code{out$tacs}.
}
\description{
Function to fit the SRTM_V model of Tomasi et al (2008) to data.
}
\examples{
srtm_v(t_tac, reftac, roitac, bloodtac)
srtm_v(t_tac, reftac, roitac, bloodtac, weights, frameStartEnd = c(1,11), bp.upper=1)
}
\references{
Tomasi, G., Edison, P., ... & Turkheimer, F. E. (2008). Novel
reference region model reveals increased microglial and reduced vascular
binding of 11C-(R)-PK11195 in patients with Alzheimer's disease. Journal of
Nuclear Medicine, 49(8), 1249-1256.
}
\author{
Granville J Matheson, \email{mathesong@gmail.com}
}
|
#' Aggregate the slot data.
#'
#' \code{processSlots} gets slot data from a rdf list and aggregates it as
#' specified.
#'
#' @param slotsAnnualize A string vector with three entries.
#' `slotsAnnualize[1]` is the slot to process. `slotsAnnualize[2]` is the
#' aggregation method to use. `slotsAnnualize[3]` is the threshold or scaling
#' factor to use. `slotsAnnualize[4]` is the variable name to use. If
#' `slotsAnnualize[4]` is `NA`, then the variable is constructed as
#' `slotsAnnualize[1]_slotsAnnualize[2]_slotsAnnualize[3]`.
#'
#' @param rdf The rdf list returned by [read.rdf()] to get the slot data from.
#'
#' @param rdfName String of the rdf name.
#'
#' @return A data frame table with the aggregated slot data.
#'
#' @keywords internal
#' @noRd
processSlots <- function(slotsAnnualize, rdf, rdfName, findAllSlots)
{
  ann <- slotsAnnualize[2]  # aggregation method keyword, e.g. 'EOCY'
  thresh <- as.numeric(slotsAnnualize[3])
  # can use thresh as a scale also. If it is not specified, then multiply by 1.
  thresh[is.na(thresh)] <- 1
  slot <- slotsAnnualize[1]
  if (!(slot %in% rdf_slot_names(rdf))) {
    if (findAllSlots) {
      stop(paste("slot:", slot, "not found in rdf:", rdfName))
    } else {
      # Trace Year Variable Value
      # construct a df indicating the slot couldn't be found, and return it
      # (-99 acts as the "slot missing" sentinel for downstream code)
      zz <- data.frame(
        Trace = -99,
        Year = -99,
        Variable = ifelse(
          is.na(slotsAnnualize[4]),
          paste(slotsAnnualize[1],ann,thresh,sep = '_'),
          slotsAnnualize[4]
        ),
        Value = -99
      )
      return(zz)
    }
  }
  slot <- rdf_get_slot(rdf, slot)  # matrix: rows = timesteps, cols = traces
  startData <- strsplit(rdf$runs[[1]]$start, '-')[[1]] # start year
  endData <- strsplit(rdf$runs[[1]]$end, '-')[[1]] # end year
  yy <- seq(as.numeric(startData[1]), as.numeric(endData[1]), 1)
  tsUnit <- rdf$runs[[1]]$time_step_unit # should either be 'year' or 'month'
  if (!(tsUnit %in% c('month','year'))) {
    stop(
      'rdf: ', rdfName,
      ' contains data that is on a timestep other than year or month.\n',
      'Currently, RWDataPlyr can only handle monthly and annual rdf data.',
      call. = FALSE
    )
  }
  # NOTE(review): scalar condition -- `&&` would be more idiomatic than `&`.
  if (tsUnit == 'year' & ann != 'AnnualRaw') {
    # data is annual, so none of the aggregation methods besides annualRaw
    # make sense
    warning(
      "rdf contains annual data, but the aggregation method is not 'AnnualRaw'.\n",
      "Processing using 'AnnualRaw' instead.\n",
      "Edit the slotAggList and call `getDataForAllScens()` again, if necessary.",
      call. = FALSE
    )
    ann <- "AnnualRaw"
  }
  # XXX
  # Need to add other summarization methods to this area
  # XXX
  # now summarize in some way
  if(ann == 'AnnMin'){
    slot <- apply(slot, 2, trace_min_ann) # minimum annual value
    rownames(slot) <- yy
  } else if(ann == 'EOWY'){
    # end-of-water-year value (September) for each year, scaled by thresh
    slot <- slot[seq(9, nrow(slot), 12),,drop = FALSE] # 9 is september
    slot[is.nan(slot)] <- 0
    slot <- slot * thresh
    rownames(slot) <- yy
  } else if(ann == 'EOCY'){
    # end-of-calendar-year (December) value for each year
    slot <- slot[seq(12, nrow(slot), 12),,drop = FALSE]
    slot[is.nan(slot)] <- 0
    slot <- slot * thresh
    rownames(slot) <- yy
  } else if(ann == 'BOCY'){
    # beginning-of-calendar-year (January) value for each year
    slot <- slot[seq(1, nrow(slot), 12),,drop = FALSE]
    slot[is.nan(slot)] <- 0
    slot <- slot * thresh
    rownames(slot) <- yy
  } else if(ann == 'AnnMax'){
    slot <- apply(slot, 2, trace_max_ann) # maximum annual value
    slot <- slot * thresh
    rownames(slot) <- yy
  } else if(ann == 'AnnualSum'){
    slot <- rwslot_annual_sum(slot,thresh)
    rownames(slot) <- yy
  } else if(ann == 'AnnMinLTE'){
    # 1 if the annual minimum is <= the threshold, else 0
    slot <- apply(slot, 2, trace_min_ann) # minimum annual value
    slot <- (slot <= thresh) * 1 # convert to numeric
    rownames(slot) <- yy
  } else if(ann == 'Monthly'){
    # keep monthly resolution; label rows "Mon YYYY" via zoo::as.yearmon
    rownames(slot) <- as.character(
      zoo::as.yearmon(yy[1] + seq(0, (length(yy) * 12) - 1) / 12)
    )
    slot <- slot*thresh
  } else if(ann == 'WYMinLTE'){
    # water-year minimum <= threshold: pad three copies of the first month
    # and drop the last three, so each Oct-Sep water year lines up with a
    # 12-row calendar block before taking the annual minimum
    slot <- rbind(slot[1,],slot[1,],slot[1,],slot)
    slot <- slot[1:(nrow(slot)-3),, drop = FALSE]
    slot <- apply(slot, 2, trace_min_ann) # minimum annual value
    slot <- (slot <= thresh) * 1 # convert to numeric
    rownames(slot) <- yy
  } else if(ann == 'WYMaxLTE'){
    # water-year maximum <= threshold (same three-month shift as WYMinLTE)
    slot <- rbind(slot[1,],slot[1,],slot[1,],slot)
    slot <- slot[1:(nrow(slot)-3),, drop = FALSE]
    slot <- apply(slot, 2, trace_max_ann) # maximum annual value
    slot <- (slot <= thresh) * 1 # convert to numeric
    rownames(slot) <- yy
  } else if(ann == 'EOCYLTE'){
    slot <- slot[seq(12, nrow(slot), 12),,drop = FALSE]
    slot[is.nan(slot)] <- 0
    slot <- (slot <= thresh) * 1 # convert to numeric
    rownames(slot) <- yy
  } else if(ann == 'EOCYGTE'){
    slot <- slot[seq(12, nrow(slot), 12),, drop = FALSE]
    slot[is.nan(slot)] <- 0
    slot <- (slot >= thresh) * 1 # convert to numeric
    rownames(slot) <- yy
  } else if(ann == 'AnnualRaw'){
    if(tsUnit == 'month'){
      # data is monthly, so will use EOCY
      warning(
        "User specified aggregation is 'AnnualRaw', but the rdf contains monthly data.\n",
        "Will use 'EOCY' aggregation instead.\n",
        "If other aggregation method is desired, edit the slotAggList and call `getDataForAllScens()` again.",
        call. = FALSE
      )
      slot <- slot[seq(12, nrow(slot), 12),, drop = FALSE]
      slot[is.nan(slot)] <- 0
      slot <- slot * thresh
      rownames(slot) <- yy
    } else{
      # data is annual
      rownames(slot) <- yy
      slot <- slot*thresh
    }
  } else{
    stop(paste0("'",ann, "'", " is an invalid aggregation method.\n",
                " Fix the slot aggregation list and try again."))
  }
  # reshape the aggregated matrix into a long data frame: one row per
  # (Trace, Year[, Month]) combination, labelled with the Variable name
  colnames(slot) <- seq_len(ncol(slot))
  if(ann != 'Monthly'){
    slot <- tidyr::gather(
      tibble::rownames_to_column(as.data.frame(slot), var = "Year"),
      Trace,
      Value,
      -Year
    ) %>%
      dplyr::mutate(
        Year = as.numeric(Year),
        Trace = as.integer(Trace),
        Variable = dplyr::if_else(
          is.na(slotsAnnualize[4]),
          paste(slotsAnnualize[1],ann,thresh,sep = '_'),
          slotsAnnualize[4]
        )
      ) %>%
      dplyr::select(Trace, Year, Variable, Value)
  } else{
    slot <- tidyr::gather(
      tibble::rownames_to_column(as.data.frame(slot), var = "Month"),
      Trace,
      Value,
      -Month
    ) %>%
      dplyr::mutate(
        # rownames are "Mon YYYY"; split into full month name + numeric year
        Year = as.numeric(simplify2array(strsplit(Month, ' '))[2,]),
        Month = month.name[match(
          simplify2array(strsplit(Month, " "))[1,],
          month.abb
        )],
        Trace = as.integer(Trace),
        Variable = dplyr::if_else(
          is.na(slotsAnnualize[4]),
          paste(slotsAnnualize[1],ann,thresh,sep = '_'),
          slotsAnnualize[4]
        )
      ) %>%
      dplyr::select(Trace, Month, Year, Variable, Value)
  }
  slot
}
#' Get and aggregate data from a single rdf file.
#'
#' `getSlots()` gets all of the slots contained in a single rdf file and
#' aggregates them as specified by the summary functions in `slotAggList`.
#'
#' @param slotAggList The slot aggregation list. A list containing the slots
#' that will be imported and aggregated, the aggregation method(s) to use,
#' and the rdf files that contain the slots. See [slot_agg_list()].
#' @param scenPath A relative or absolute path to the scenario folder.
#'
#' @keywords internal
#' @noRd
getSlots <- function(slotAggList, scenPath, findAllSlots)
{
  rdf <- slotAggList$rdf
  rdf <- read.rdf(paste(scenPath,'/',rdf,sep = ''))  # read the rdf file itself
  if(slotAggList$slots[1] == 'all'){
    # if slots is all, then need to create the slotAggList
    # after reading in all the slot names
    slots <- rdf_slot_names(rdf)
    nSlots <- length(slots)
    # pick the default aggregation matching the rdf's native timestep
    if(rdf$runs[[1]]$time_step_unit == 'month'){
      aggMeth <- 'Monthly'
    } else if (rdf$runs[[1]]$time_step_unit == 'year'){
      aggMeth <- 'AnnualRaw'
    } else{
      stop(
        paste('The', slotAggList$rdf,
              'contains data of an unexpected timestep.'),
        call. = FALSE
      )
    }
    slotAggList <- slot_agg_list(cbind(
      rep(slotAggList$rdf,nSlots), slots, rep(aggMeth, nSlots), rep(NA, nSlots)
    ))
    # go in one level into the list as that is what happens when
    # this function is called if using the normal slotAggList structure
    slotAggList <- slotAggList[[1]]
  }
  # one column per slot. NOTE(review): processSlots() indexes 4 entries per
  # column (slot, method, threshold, variable name), so $annualize
  # presumably contributes two rows here -- confirm against slot_agg_list().
  slotsAnnualize <- rbind(
    slotAggList$slots,
    slotAggList$annualize,
    slotAggList$varNames
  )
  # aggregate each slot independently, then stack the per-slot data frames
  allSlots <- apply(
    slotsAnnualize,
    2,
    processSlots,
    rdf,
    slotAggList$rdf,
    findAllSlots
  )
  allSlots <- do.call(rbind, lapply(allSlots, function(X) X))
  allSlots
}
#' Get and aggregate data from rdf file(s) for one scenario.
#'
#' `getAndProcessAllSlots()` gets data for a single scenario. The slots
#' from each rdf are processed and aggregated together.
#'
#' @param scenPath A relative or absolute path to the scenario folder.
#'
#' @inheritParams getDataForAllScens
#'
#' @seealso \code{\link{getDataForAllScens}}
#'
#' @keywords internal
#' @noRd
getAndProcessAllSlots <- function(scenPath, slotAggList, findAllSlots)
{
  # scenPath arrives as c(path, name) from the row-wise apply() in
  # getDataForAllScens()
  sPath <- scenPath[1]
  sName <- scenPath[2]
  # aggregate every rdf file listed for this scenario
  zz <- lapply(slotAggList, getSlots, sPath, findAllSlots)
  # stack the per-rdf results (previously wrapped in a redundant identity
  # lapply)
  allRes <- do.call(rbind, zz)
  nn <- colnames(allRes)
  # tag every row with the scenario name and move it to the first column
  allRes$Scenario <- rep(sName, nrow(allRes))
  allRes <- subset(allRes, select=c('Scenario', nn))
  allRes
}
#' Get and aggregate data from an rdf file(s)
#'
#' `getDataForAllScens()` gets slot data from multiple rdf files and/or multiple
#' scenarios, aggregates it, and saves it as a data.frame. The slot data can be
#' aggregated in multiple ways (see [slot_agg_list]).
#'
#' @param scenFolders A string vector containing the folder names (scenarios)
#' that the rdf files are saved in.
#'
#' @param scenNames A string vector containing the scenario names. This should
#' be the same length as `scenFolders`. The scenario names are used as
#' attributes to the data in the `Scenario` column.
#'
#' @param slotAggList The slot aggregation list. Either an object of class
#' [slot_agg_list] or a "special" list with the keyword `"all"`. If, it is
#' a [slot_agg_list], see that documentation for how to control the
#' aggregation methods used in this function. If all of the slots in an
#' entire rdf are desired, use a list of lists with each entry containing an
#' rdf file and the keyword `"all"` for the slots, e.g.,
#' `list(list(rdf = 'KeySlots.rdf',slots = 'all'))`. If this option is used,
#' the function will return raw monthly, or annual data, i.e., no aggregation
#' methods will be applied to the data in the rdf file.
#'
#' @param scenPath An absolute or relative path to the folder containing
#' `scenFolders`.
#'
#' @param oFile If not `NULL`, then an absolute or relative path with the file
#' name of the location the table will be saved to. Valid file types are
#' .csv, .txt, or .feather.
#'
#' @param findAllSlots Boolean; if `TRUE` (default), then the function will
#' abort if it cannot find a particular slot. If \code{FALSE}, then the
#' function will continue, even if a slot cannot be found. If a slot is not
#' found, then the function will return `-99` for the Trace, Year, and Value.
#'
#' @param retFile Deprecated. Data are always returned invisibly.
#'
#' @return A data.frame returned invisibly.
#'
#' @examples
#' # get a specified set of slots and apply some aggregation method to them
#' # get the data from two scenarios
#' scenFolders <- c('ISM1988_2014,2007Dems,IG,Most',
#' 'ISM1988_2014,2007Dems,IG,2002')
#' # slotAggTable.csv lists the slots to obtain, and the aggregation method to
#' # apply to them
#' slotAggList <- slot_agg_list(
#' system.file('extdata','SlotAggTable.csv',package = 'RWDataPlyr')
#' )
#' scenPath <- system.file('extdata','Scenario/',package = 'RWDataPlyr')
#' # expect Deprecated warning
#' testthat::expect_warning(
#' keyData <- getDataForAllScens(
#' scenFolders,
#' scenNames = scenFolders,
#' slotAggList = slotAggList,
#' scenPath = scenPath
#' )
#' )
#'
#' # get all of the data from the KeySlots rdf file
#' scenFolders <- scenFolders[1] # only one scenario
#' slotAggList <- list(list(rdf = 'KeySlots.rdf', slots = 'all'))
#' # will return monthly data for all slots in KeySlots.rdf
#' # expect Deprecated warning
#' testthat::expect_warning(
#' allData <- getDataForAllScens(
#' scenFolders,
#' scenNames = scenFolders,
#' slotAggList = slotAggList,
#' scenPath = scenPath
#' )
#' )
#'
#' @seealso [slot_agg_list()]
#'
#' @export
#'
getDataForAllScens <- function(scenFolders, scenNames, slotAggList, scenPath, 
                               oFile = NULL, retFile = NULL, findAllSlots = TRUE)
{
  .Deprecated(
    "`rw_scen_aggregate()`",
    msg = paste(
      "`getDataForAllScens()` is deprecated.",
      "Use `rw_scen_aggregate()` instead.",
      "`rw_scen_aggregate()` provides a more user friendly way of specifying",
      "and customizing the aggregation of RiverWare data.",
      "`getDataForAllScens()` will be removed in a future release.",
      sep = "\n"
    )
  )
  # folders and names must pair up one-to-one; cbind() below would otherwise
  # silently recycle the shorter vector
  if (length(scenFolders) != length(scenNames)) {
    stop("`scenFolders` and `scenNames` must be the same length.",
         call. = FALSE)
  }
  # determine file type to save data as:
  if (!is.null(oFile)) {
    fExt <- tools::file_ext(oFile)
    if (!(fExt %in% c('txt', 'csv', 'feather'))) {
      stop(paste0('oFile has an invalid file extension.\n',
                  'getDataForAllScens does not know how to handle ".', fExt,
                  '" extensions.'))
    }
  }
  if (!missing(retFile)) {
    warning(
      "In `getDataForAllScens()`, `retFile` is deprecated.\n",
      "Data are always invisibly returned.",
      call. = FALSE
    )
  }
  scenPath <- file.path(scenPath, scenFolders)
  scen <- cbind(scenPath, scenNames)
  # one row per scenario: c(path, name) is passed to getAndProcessAllSlots()
  zz <- apply(scen, 1, getAndProcessAllSlots, slotAggList, findAllSlots)
  zz <- do.call(rbind, zz)
  if (!is.null(oFile))
    write_rw_data(zz, oFile)
  invisible(zz)
}
#' Write out csv, txt, or a feather file.
#' @noRd
write_rw_data <- function(zz, oFile)
{
  # dispatch on file extension; getDataForAllScens() validates before calling,
  # but fail loudly here too rather than silently writing nothing
  fExt <- tools::file_ext(oFile)
  if(fExt == 'txt'){
    data.table::fwrite(zz, file = oFile, row.names = FALSE, sep = '\t')
  } else if(fExt == 'csv'){
    data.table::fwrite(zz, oFile, row.names = FALSE, sep = ",")
  } else if(fExt == 'feather'){
    feather::write_feather(zz, oFile)
  } else {
    stop('write_rw_data() cannot handle ".', fExt, '" extensions.',
         call. = FALSE)
  }
  invisible(zz)
}
| /fuzzedpackages/RWDataPlyr/R/getDataFromRdf.R | permissive | akhikolla/testpackages | R | false | false | 14,592 | r |
#' Aggregate the slot data.
#'
#' \code{processSlots} gets slot data from a rdf list and aggregates it as
#' specified.
#'
#' @param slotsAnnualize A string vector with four entries.
#' `slotsAnnualize[1]` is the slot to process. `slotsAnnualize[2]` is the
#' aggregation method to use. `slotsAnnualize[3]` is the threshold or scaling
#' factor to use. `slotsAnnualize[4]` is the variable name to use. If
#' `slotsAnnualize[4]` is `NA`, then the variable is constructed as
#' `slotsAnnualize[1]_slotsAnnualize[2]_slotsAnnualize[3]`.
#'
#' @param rdf The rdf list returned by [read.rdf()] to get the slot data from.
#'
#' @param rdfName String of the rdf name.
#'
#' @return A data frame table with the aggregated slot data.
#'
#' @keywords internal
#' @noRd
processSlots <- function(slotsAnnualize, rdf, rdfName, findAllSlots)
{
  # unpack the per-slot specification (one column of the matrix built in
  # getSlots()): [1] slot name, [2] aggregation method, [3] threshold/scale,
  # [4] optional output variable name
  ann <- slotsAnnualize[2]
  thresh <- as.numeric(slotsAnnualize[3])
  # can use thresh as a scale also. If it is not specified, then multiply by 1.
  thresh[is.na(thresh)] <- 1
  slot <- slotsAnnualize[1]
  if (!(slot %in% rdf_slot_names(rdf))) {
    if (findAllSlots) {
      stop(paste("slot:", slot, "not found in rdf:", rdfName))
    } else {
      # Trace Year Variable Value
      # construct a df indicating the slot couldn't be found, and return it
      # (-99 is the sentinel documented for getDataForAllScens())
      zz <- data.frame(
        Trace = -99,
        Year = -99,
        Variable = ifelse(
          is.na(slotsAnnualize[4]),
          paste(slotsAnnualize[1],ann,thresh,sep = '_'),
          slotsAnnualize[4]
        ),
        Value = -99
      )
      return(zz)
    }
  }
  # slot now becomes the matrix of values (timesteps x traces)
  slot <- rdf_get_slot(rdf, slot)
  startData <- strsplit(rdf$runs[[1]]$start, '-')[[1]] # start year
  endData <- strsplit(rdf$runs[[1]]$end, '-')[[1]] # end year
  yy <- seq(as.numeric(startData[1]), as.numeric(endData[1]), 1)
  tsUnit <- rdf$runs[[1]]$time_step_unit # should either be 'year' or 'month'
  if (!(tsUnit %in% c('month','year'))) {
    stop(
      'rdf: ', rdfName,
      ' contains data that is on a timestep other than year or month.\n',
      'Currently, RWDataPlyr can only handle monthly and annual rdf data.',
      call. = FALSE
    )
  }
  if (tsUnit == 'year' & ann != 'AnnualRaw') {
    # data is annual, so none of the aggregation methods besides annualRaw
    # make sense
    warning(
      "rdf contains annual data, but the aggregation method is not 'AnnualRaw'.\n",
      "Processing using 'AnnualRaw' instead.\n",
      "Edit the slotAggList and call `getDataForAllScens()` again, if necessary.",
      call. = FALSE
    )
    ann <- "AnnualRaw"
  }
  # XXX
  # Need to add other summarization methods to this area
  # XXX
  # now summarize in some way
  if(ann == 'AnnMin'){
    # NOTE(review): unlike most other methods, AnnMin does not multiply by
    # thresh -- confirm whether that is intentional
    slot <- apply(slot, 2, trace_min_ann) # minimum annual value
    rownames(slot) <- yy
  } else if(ann == 'EOWY'){
    # end-of-water-year: keep every September value
    slot <- slot[seq(9, nrow(slot), 12),,drop = FALSE] # 9 is september
    slot[is.nan(slot)] <- 0
    slot <- slot * thresh
    rownames(slot) <- yy
  } else if(ann == 'EOCY'){
    # end-of-calendar-year: keep every December value
    slot <- slot[seq(12, nrow(slot), 12),,drop = FALSE]
    slot[is.nan(slot)] <- 0
    slot <- slot * thresh
    rownames(slot) <- yy
  } else if(ann == 'BOCY'){
    # beginning-of-calendar-year: keep every January value
    slot <- slot[seq(1, nrow(slot), 12),,drop = FALSE]
    slot[is.nan(slot)] <- 0
    slot <- slot * thresh
    rownames(slot) <- yy
  } else if(ann == 'AnnMax'){
    slot <- apply(slot, 2, trace_max_ann) # maximum annual value
    slot <- slot * thresh
    rownames(slot) <- yy
  } else if(ann == 'AnnualSum'){
    slot <- rwslot_annual_sum(slot,thresh)
    rownames(slot) <- yy
  } else if(ann == 'AnnMinLTE'){
    # flag (0/1) years whose annual minimum is <= thresh
    slot <- apply(slot, 2, trace_min_ann) # minimum annual value
    slot <- (slot <= thresh) * 1 # convert to numeric
    rownames(slot) <- yy
  } else if(ann == 'Monthly'){
    # keep monthly values; rows are labeled "Mon YYYY" for the gather below
    rownames(slot) <- as.character(
      zoo::as.yearmon(yy[1] + seq(0, (length(yy) * 12) - 1) / 12)
    )
    slot <- slot*thresh
  } else if(ann == 'WYMinLTE'){
    # shift the series back three months so 12-month blocks approximate water
    # years; NOTE(review): the first three months are padded with the first
    # value -- confirm this matches the intended water-year definition
    slot <- rbind(slot[1,],slot[1,],slot[1,],slot)
    slot <- slot[1:(nrow(slot)-3),, drop = FALSE]
    slot <- apply(slot, 2, trace_min_ann) # minimum annual value
    slot <- (slot <= thresh) * 1 # convert to numeric
    rownames(slot) <- yy
  } else if(ann == 'WYMaxLTE'){
    # same three-month shift as WYMinLTE, then flag years whose maximum is
    # <= thresh
    slot <- rbind(slot[1,],slot[1,],slot[1,],slot)
    slot <- slot[1:(nrow(slot)-3),, drop = FALSE]
    slot <- apply(slot, 2, trace_max_ann) # maximum annual value
    slot <- (slot <= thresh) * 1 # convert to numeric
    rownames(slot) <- yy
  } else if(ann == 'EOCYLTE'){
    # flag years whose December value is <= thresh
    slot <- slot[seq(12, nrow(slot), 12),,drop = FALSE]
    slot[is.nan(slot)] <- 0
    slot <- (slot <= thresh) * 1 # convert to numeric
    rownames(slot) <- yy
  } else if(ann == 'EOCYGTE'){
    # flag years whose December value is >= thresh
    slot <- slot[seq(12, nrow(slot), 12),, drop = FALSE]
    slot[is.nan(slot)] <- 0
    slot <- (slot >= thresh) * 1 # convert to numeric
    rownames(slot) <- yy
  } else if(ann == 'AnnualRaw'){
    if(tsUnit == 'month'){
      # data is monthly, so will use EOCY
      warning(
        "User specified aggregation is 'AnnualRaw', but the rdf contains monthly data.\n",
        "Will use 'EOCY' aggregation instead.\n",
        "If other aggregation method is desired, edit the slotAggList and call `getDataForAllScens()` again.",
        call. = FALSE
      )
      slot <- slot[seq(12, nrow(slot), 12),, drop = FALSE]
      slot[is.nan(slot)] <- 0
      slot <- slot * thresh
      rownames(slot) <- yy
    } else{
      # data is annual
      rownames(slot) <- yy
      slot <- slot*thresh
    }
  } else{
    stop(paste0("'",ann, "'", " is an invalid aggregation method.\n",
                "  Fix the slot aggregation list and try again."))
  }
  # one column per trace, numbered 1..nTraces
  colnames(slot) <- seq_len(ncol(slot))
  if(ann != 'Monthly'){
    # reshape wide (Year x Trace) to long and attach the variable name
    slot <- tidyr::gather(
      tibble::rownames_to_column(as.data.frame(slot), var = "Year"),
      Trace,
      Value,
      -Year
    ) %>%
      dplyr::mutate(
        Year = as.numeric(Year),
        Trace = as.integer(Trace),
        Variable = dplyr::if_else(
          is.na(slotsAnnualize[4]),
          paste(slotsAnnualize[1],ann,thresh,sep = '_'),
          slotsAnnualize[4]
        )
      ) %>%
      dplyr::select(Trace, Year, Variable, Value)
  } else{
    # monthly data: rownames are "Mon YYYY"; split them into Month and Year
    slot <- tidyr::gather(
      tibble::rownames_to_column(as.data.frame(slot), var = "Month"),
      Trace,
      Value,
      -Month
    ) %>%
      dplyr::mutate(
        Year = as.numeric(simplify2array(strsplit(Month, ' '))[2,]),
        Month = month.name[match(
          simplify2array(strsplit(Month, " "))[1,],
          month.abb
        )],
        Trace = as.integer(Trace),
        Variable = dplyr::if_else(
          is.na(slotsAnnualize[4]),
          paste(slotsAnnualize[1],ann,thresh,sep = '_'),
          slotsAnnualize[4]
        )
      ) %>%
      dplyr::select(Trace, Month, Year, Variable, Value)
  }
  slot
}
#' Get and aggregate data from a single rdf file.
#'
#' `getSlots()` gets all of the slots contained in a single rdf file and
#' aggregates them as specified by the summary functions in `slotAggList`.
#'
#' @param slotAggList The slot aggregation list. A list containing the slots
#' that will be imported and aggregated, the aggregation method(s) to use,
#' and the rdf files that contain the slots. See [slot_agg_list()].
#' @param scenPath A relative or absolute path to the scenario folder.
#'
#' @keywords internal
#' @noRd
getSlots <- function(slotAggList, scenPath, findAllSlots)
{
  # read the rdf file this entry of the slot aggregation list points to
  rdf <- slotAggList$rdf
  rdf <- read.rdf(file.path(scenPath, rdf))
  if(slotAggList$slots[1] == 'all'){
    # if slots is all, then need to create the slotAggList
    # after reading in all the slot names; raw data are returned at the
    # rdf's native timestep
    slots <- rdf_slot_names(rdf)
    nSlots <- length(slots)
    if(rdf$runs[[1]]$time_step_unit == 'month'){
      aggMeth <- 'Monthly'
    } else if (rdf$runs[[1]]$time_step_unit == 'year'){
      aggMeth <- 'AnnualRaw'
    } else{
      stop(
        paste('The', slotAggList$rdf,
              'contains data of an unexpected timestep.'),
        call. = FALSE
      )
    }
    slotAggList <- slot_agg_list(cbind(
      rep(slotAggList$rdf,nSlots), slots, rep(aggMeth, nSlots), rep(NA, nSlots)
    ))
    # go in one level into the list as that is what happens when
    # this function is called if using the normal slotAggList structure
    slotAggList <- slotAggList[[1]]
  }
  # one column per slot: slot name, aggregation method/threshold, and
  # optional output variable name
  slotsAnnualize <- rbind(
    slotAggList$slots,
    slotAggList$annualize,
    slotAggList$varNames
  )
  # process each slot; apply() over columns returns a list of data frames
  allSlots <- apply(
    slotsAnnualize,
    2,
    processSlots,
    rdf,
    slotAggList$rdf,
    findAllSlots
  )
  # stack the per-slot data frames (previously wrapped in a redundant
  # identity lapply)
  do.call(rbind, allSlots)
}
#' Get and aggregate data from rdf file(s) for one scenario.
#'
#' `getAndProcessAllSlots()` gets data for a single scenario. The slots
#' from each rdf are processed and aggregated together.
#'
#' @param scenPath A relative or absolute path to the scenario folder.
#'
#' @inheritParams getDataForAllScens
#'
#' @seealso \code{\link{getDataForAllScens}}
#'
#' @keywords internal
#' @noRd
getAndProcessAllSlots <- function(scenPath, slotAggList, findAllSlots)
{
  # scenPath arrives as c(path, name) from the row-wise apply() in
  # getDataForAllScens()
  sPath <- scenPath[1]
  sName <- scenPath[2]
  # aggregate every rdf file listed for this scenario
  zz <- lapply(slotAggList, getSlots, sPath, findAllSlots)
  # stack the per-rdf results (previously wrapped in a redundant identity
  # lapply)
  allRes <- do.call(rbind, zz)
  nn <- colnames(allRes)
  # tag every row with the scenario name and move it to the first column
  allRes$Scenario <- rep(sName, nrow(allRes))
  allRes <- subset(allRes, select=c('Scenario', nn))
  allRes
}
#' Get and aggregate data from an rdf file(s)
#'
#' `getDataForAllScens()` gets slot data from multiple rdf files and/or multiple
#' scenarios, aggregates it, and saves it as a data.frame. The slot data can be
#' aggregated in multiple ways (see [slot_agg_list]).
#'
#' @param scenFolders A string vector containing the folder names (scenarios)
#' that the rdf files are saved in.
#'
#' @param scenNames A string vector containing the scenario names. This should
#' be the same length as `scenFolders`. The scenario names are used as
#' attributes to the data in the `Scenario` column.
#'
#' @param slotAggList The slot aggregation list. Either an object of class
#' [slot_agg_list] or a "special" list with the keyword `"all"`. If, it is
#' a [slot_agg_list], see that documentation for how to control the
#' aggregation methods used in this function. If all of the slots in an
#' entire rdf are desired, use a list of lists with each entry containing an
#' rdf file and the keyword `"all"` for the slots, e.g.,
#' `list(list(rdf = 'KeySlots.rdf',slots = 'all'))`. If this option is used,
#' the function will return raw monthly, or annual data, i.e., no aggregation
#' methods will be applied to the data in the rdf file.
#'
#' @param scenPath An absolute or relative path to the folder containing
#' `scenFolders`.
#'
#' @param oFile If not `NULL`, then an absolute or relative path with the file
#' name of the location the table will be saved to. Valid file types are
#' .csv, .txt, or .feather.
#'
#' @param findAllSlots Boolean; if `TRUE` (default), then the function will
#' abort if it cannot find a particular slot. If \code{FALSE}, then the
#' function will continue, even if a slot cannot be found. If a slot is not
#' found, then the function will return `-99` for the Trace, Year, and Value.
#'
#' @param retFile Deprecated. Data are always returned invisibly.
#'
#' @return A data.frame returned invisibly.
#'
#' @examples
#' # get a specified set of slots and apply some aggregation method to them
#' # get the data from two scenarios
#' scenFolders <- c('ISM1988_2014,2007Dems,IG,Most',
#' 'ISM1988_2014,2007Dems,IG,2002')
#' # slotAggTable.csv lists the slots to obtain, and the aggregation method to
#' # apply to them
#' slotAggList <- slot_agg_list(
#' system.file('extdata','SlotAggTable.csv',package = 'RWDataPlyr')
#' )
#' scenPath <- system.file('extdata','Scenario/',package = 'RWDataPlyr')
#' # expect Deprecated warning
#' testthat::expect_warning(
#' keyData <- getDataForAllScens(
#' scenFolders,
#' scenNames = scenFolders,
#' slotAggList = slotAggList,
#' scenPath = scenPath
#' )
#' )
#'
#' # get all of the data from the KeySlots rdf file
#' scenFolders <- scenFolders[1] # only one scenario
#' slotAggList <- list(list(rdf = 'KeySlots.rdf', slots = 'all'))
#' # will return monthly data for all slots in KeySlots.rdf
#' # expect Deprecated warning
#' testthat::expect_warning(
#' allData <- getDataForAllScens(
#' scenFolders,
#' scenNames = scenFolders,
#' slotAggList = slotAggList,
#' scenPath = scenPath
#' )
#' )
#'
#' @seealso [slot_agg_list()]
#'
#' @export
#'
getDataForAllScens <- function(scenFolders, scenNames, slotAggList, scenPath, 
                               oFile = NULL, retFile = NULL, findAllSlots = TRUE)
{
  .Deprecated(
    "`rw_scen_aggregate()`",
    msg = paste(
      "`getDataForAllScens()` is deprecated.",
      "Use `rw_scen_aggregate()` instead.",
      "`rw_scen_aggregate()` provides a more user friendly way of specifying",
      "and customizing the aggregation of RiverWare data.",
      "`getDataForAllScens()` will be removed in a future release.",
      sep = "\n"
    )
  )
  # folders and names must pair up one-to-one; cbind() below would otherwise
  # silently recycle the shorter vector
  if (length(scenFolders) != length(scenNames)) {
    stop("`scenFolders` and `scenNames` must be the same length.",
         call. = FALSE)
  }
  # determine file type to save data as:
  if (!is.null(oFile)) {
    fExt <- tools::file_ext(oFile)
    if (!(fExt %in% c('txt', 'csv', 'feather'))) {
      stop(paste0('oFile has an invalid file extension.\n',
                  'getDataForAllScens does not know how to handle ".', fExt,
                  '" extensions.'))
    }
  }
  if (!missing(retFile)) {
    warning(
      "In `getDataForAllScens()`, `retFile` is deprecated.\n",
      "Data are always invisibly returned.",
      call. = FALSE
    )
  }
  scenPath <- file.path(scenPath, scenFolders)
  scen <- cbind(scenPath, scenNames)
  # one row per scenario: c(path, name) is passed to getAndProcessAllSlots()
  zz <- apply(scen, 1, getAndProcessAllSlots, slotAggList, findAllSlots)
  zz <- do.call(rbind, zz)
  if (!is.null(oFile))
    write_rw_data(zz, oFile)
  invisible(zz)
}
#' Write out csv, txt, or a feather file.
#' @noRd
write_rw_data <- function(zz, oFile)
{
  # dispatch on file extension; getDataForAllScens() validates before calling,
  # but fail loudly here too rather than silently writing nothing
  fExt <- tools::file_ext(oFile)
  if(fExt == 'txt'){
    data.table::fwrite(zz, file = oFile, row.names = FALSE, sep = '\t')
  } else if(fExt == 'csv'){
    data.table::fwrite(zz, oFile, row.names = FALSE, sep = ",")
  } else if(fExt == 'feather'){
    feather::write_feather(zz, oFile)
  } else {
    stop('write_rw_data() cannot handle ".', fExt, '" extensions.',
         call. = FALSE)
  }
  invisible(zz)
}
|
## GET NEW CANDIDATE SOLUTION THAT MEETS TABOOLIST CRITERIA
getnewcandidate <- function(grid, taboo, taboolistlength, uniquepreprocessors, copyofcurrentbest){
  ## Propose a neighbour of the current best solution: pick one phase at
  ## random, swap in a different preprocessor for that phase, and keep
  ## drawing until the candidate is not on the (tail of the) taboo list,
  ## giving up after 100 attempts.
  ##
  ## grid                - object whose @grid slot holds the phase/preprocessor grid
  ## taboo               - list of previously visited solutions
  ## taboolistlength     - number of most-recent taboo entries to compare against
  ## uniquepreprocessors - list, one entry per phase, of available preprocessors
  ## copyofcurrentbest   - current best solution, one column per phase
  repeatcounter <- 0
  # NOTE(review): seeding from the wall clock inside the function clobbers the
  # caller's RNG state and defeats reproducibility -- consider removing
  set.seed(as.numeric(Sys.time()))
  repeat{
    # sample a random phase (phase number) and a random preproccessor (preprocessor number)
    candidate_phase <- sample(1:ncol(grid@grid),1)
    #candidate_preprocessor <- sample(1:length(uniquepreprocessors[[candidate_phase]]), 1)
    ## Experimental
    currentpreprocessorinphase <- unlist(copyofcurrentbest[,candidate_phase])
    allpreprocessorsinphase <- as.character(unlist(uniquepreprocessors[[candidate_phase]]))
    # drop the preprocessor currently used in this phase so the candidate is
    # guaranteed to differ from the current best in the chosen phase
    leftpreprocessorsinphase <- allpreprocessorsinphase[-match(currentpreprocessorinphase, allpreprocessorsinphase)]
    # sampling from a character vector, so sample()'s numeric-scalar
    # expansion (sample(n, ...) == sample(1:n, ...)) does not apply here
    candidate_preprocessor <- sample(leftpreprocessorsinphase,1)
    #candidate_preprocessor <- unlist(grid@grid[candidate_preprocessor, candidate_phase])
    candidate_new <- copyofcurrentbest
    # place new random preprocessing technique to the random phase of the current best solution
    candidate_new[,candidate_phase] <- candidate_preprocessor
    # test that the new candidate is NOT in the taboolist
    # at minimum it can not be the current best solution, corresponding to taboo list length 1
    condition1 <- lapply(utils::tail(taboo,taboolistlength), function(x) identical(unname(unlist(candidate_new)),unname(unlist(x))))
    condition2 <- all(unlist(condition1)==FALSE)
    repeatcounter <- repeatcounter +1
    if (repeatcounter > 100 ) {stop("The system was not able to find a solution candidate that is not in the taboo list")}
    if(condition2==TRUE) {
      break}
  }
  return(candidate_new)
}
| /R/04ModificationComponent.R | no_license | mvattulainen/metaheur | R | false | false | 1,661 | r |
## GET NEW CANDIDATE SOLUTION THAT MEETS TABOOLIST CRITERIA
getnewcandidate <- function(grid, taboo, taboolistlength, uniquepreprocessors, copyofcurrentbest){
  ## Propose a neighbour of the current best solution: pick one phase at
  ## random, swap in a different preprocessor for that phase, and keep
  ## drawing until the candidate is not on the (tail of the) taboo list,
  ## giving up after 100 attempts.
  ##
  ## grid                - object whose @grid slot holds the phase/preprocessor grid
  ## taboo               - list of previously visited solutions
  ## taboolistlength     - number of most-recent taboo entries to compare against
  ## uniquepreprocessors - list, one entry per phase, of available preprocessors
  ## copyofcurrentbest   - current best solution, one column per phase
  repeatcounter <- 0
  # NOTE(review): seeding from the wall clock inside the function clobbers the
  # caller's RNG state and defeats reproducibility -- consider removing
  set.seed(as.numeric(Sys.time()))
  repeat{
    # sample a random phase (phase number) and a random preproccessor (preprocessor number)
    candidate_phase <- sample(1:ncol(grid@grid),1)
    #candidate_preprocessor <- sample(1:length(uniquepreprocessors[[candidate_phase]]), 1)
    ## Experimental
    currentpreprocessorinphase <- unlist(copyofcurrentbest[,candidate_phase])
    allpreprocessorsinphase <- as.character(unlist(uniquepreprocessors[[candidate_phase]]))
    # drop the preprocessor currently used in this phase so the candidate is
    # guaranteed to differ from the current best in the chosen phase
    leftpreprocessorsinphase <- allpreprocessorsinphase[-match(currentpreprocessorinphase, allpreprocessorsinphase)]
    # sampling from a character vector, so sample()'s numeric-scalar
    # expansion (sample(n, ...) == sample(1:n, ...)) does not apply here
    candidate_preprocessor <- sample(leftpreprocessorsinphase,1)
    #candidate_preprocessor <- unlist(grid@grid[candidate_preprocessor, candidate_phase])
    candidate_new <- copyofcurrentbest
    # place new random preprocessing technique to the random phase of the current best solution
    candidate_new[,candidate_phase] <- candidate_preprocessor
    # test that the new candidate is NOT in the taboolist
    # at minimum it can not be the current best solution, corresponding to taboo list length 1
    condition1 <- lapply(utils::tail(taboo,taboolistlength), function(x) identical(unname(unlist(candidate_new)),unname(unlist(x))))
    condition2 <- all(unlist(condition1)==FALSE)
    repeatcounter <- repeatcounter +1
    if (repeatcounter > 100 ) {stop("The system was not able to find a solution candidate that is not in the taboo list")}
    if(condition2==TRUE) {
      break}
  }
  return(candidate_new)
}
|
# Monthly unemployment series (all, males, females) starting January 2000,
# read from local CSVs and converted to ts objects.
# NOTE(review): paths are relative to the working directory -- confirm the
# script is always run from the project root.
unemployment_all<-ts(read.csv("./Data/unemployment_all.csv"), freq=12, start=c(2000, 1))
unemployment_males<-ts(read.csv("./Data/unemployment_males.csv"), freq=12, start=c(2000, 1))
unemployment_females<-ts(read.csv("./Data/unemployment_females.csv"), freq=12, start=c(2000, 1))
| /R files/unemployment.R | no_license | palatej/ESTP | R | false | false | 279 | r | unemployment_all<-ts(read.csv("./Data/unemployment_all.csv"), freq=12, start=c(2000, 1))
# Monthly unemployment series starting January 2000, read from local CSVs
# and converted to ts objects.
unemployment_males<-ts(read.csv("./Data/unemployment_males.csv"), freq=12, start=c(2000, 1))
unemployment_females<-ts(read.csv("./Data/unemployment_females.csv"), freq=12, start=c(2000, 1))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FuncionesAcceso.R
\name{vForecast}
\alias{vForecast}
\title{Extender series con proyecciones de auto.arima (paquete Forecast)}
\usage{
vForecast(SERIE, N = 6, ...)
}
\arguments{
\item{SERIE}{XTS a extender}
\item{N}{Cantidad de períodos a extender (detecta automáticamente la frecuencia de la serie)}
\item{...}{Otros parámetros para \code{auto.arima}}
}
\value{
XTS con la series expandidas, acepta xts con muchas series
}
\description{
Recomendado sólo para estimaciones rápidas. A diferencia de \code{Forecast}, no devuelve intervalos de confianza, pero acepta
como input un XTS con múltiples series de tiempo.
}
\examples{
\donttest{
# Forecast de 12 meses del tipo de cambio
TCN <- vForecast(Get("120.1_PCE_1993_0_24,120.1_ED1_1993_0_26", start_date = 2010),12)
}
}
| /man/vForecast.Rd | no_license | fmgarciadiaz/PortalHacienda-CRAN | R | false | true | 860 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FuncionesAcceso.R
\name{vForecast}
\alias{vForecast}
\title{Extender series con proyecciones de auto.arima (paquete Forecast)}
\usage{
vForecast(SERIE, N = 6, ...)
}
\arguments{
\item{SERIE}{XTS a extender}
\item{N}{Cantidad de períodos a extender (detecta automáticamente la frecuencia de la serie)}
\item{...}{Otros parámetros para \code{auto.arima}}
}
\value{
XTS con la series expandidas, acepta xts con muchas series
}
\description{
Recomendado sólo para estimaciones rápidas. A diferencia de \code{Forecast}, no devuelve intervalos de confianza, pero acepta
como input un XTS con múltiples series de tiempo.
}
\examples{
\donttest{
# Forecast de 12 meses del tipo de cambio
TCN <- vForecast(Get("120.1_PCE_1993_0_24,120.1_ED1_1993_0_26", start_date = 2010),12)
}
}
|
library(liquidSVM)
### Name: liquidSVM-package
### Title: liquidSVM for R
### Aliases: liquidSVM-package liquidSVM
### Keywords: SVM
### ** Examples
# fix the RNG so the example results are reproducible
set.seed(123)
## Multiclass classification
modelIris <- svm(Species ~ ., iris)
y <- predict(modelIris, iris)
## Least Squares
modelTrees <- svm(Height ~ Girth + Volume, trees)
y <- predict(modelTrees, trees)
# visual check of observed vs. predicted heights
plot(trees$Height, y)
test(modelTrees, trees)
## Quantile regression
modelTrees <- qtSVM(Height ~ Girth + Volume, trees, scale=TRUE)
y <- predict(modelTrees, trees)
## ROC curve
modelWarpbreaks <- rocSVM(wool ~ ., warpbreaks, scale=TRUE)
y <- test(modelWarpbreaks, warpbreaks)
plotROC(y,warpbreaks$wool)
| /data/genthat_extracted_code/liquidSVM/examples/liquidSVM-package.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 671 | r | library(liquidSVM)
### Name: liquidSVM-package
### Title: liquidSVM for R
### Aliases: liquidSVM-package liquidSVM
### Keywords: SVM
### ** Examples
# fix the RNG so the example results are reproducible
set.seed(123)
## Multiclass classification
modelIris <- svm(Species ~ ., iris)
y <- predict(modelIris, iris)
## Least Squares
modelTrees <- svm(Height ~ Girth + Volume, trees)
y <- predict(modelTrees, trees)
# visual check of observed vs. predicted heights
plot(trees$Height, y)
test(modelTrees, trees)
## Quantile regression
modelTrees <- qtSVM(Height ~ Girth + Volume, trees, scale=TRUE)
y <- predict(modelTrees, trees)
## ROC curve
modelWarpbreaks <- rocSVM(wool ~ ., warpbreaks, scale=TRUE)
y <- test(modelWarpbreaks, warpbreaks)
plotROC(y,warpbreaks$wool)
### Sampling Schemes
library(PsyFuns)
# The sampling schemes are modeled after the Wichmann & Hill papers/book
# Sampling schemes are defined in ordinal values for inner function
# They are generated by the INNER WEIBULL FUNCTION: F = 1 - e^(-(x/a)^b)
# with parameters: a=8.85/((log(2)^(1/3))), b=3, so that F(8.85) = 0.5
pars <- c(8.85/((log(2)^(1/3))), 3)
# Map ordinate values (probabilities on the inner function) to absolute
# stimulus values.
# NOTE(review): relies on PsyFuns internal (:::) functions, which are not
# part of the package's public API and may change without notice.
OrdToAbs <- function(ordinates){
  PsyFuns:::polynom.inverse_x.cdf(
    PsyFuns:::exponential.inverse.cdf(
      ordinates),
    pars)
}
#midpoint <- function(x){(x[1] + x[length(x)])/2}
# performance threshold is set to 8.85
perf_th <- OrdToAbs(0.5)
perf_th
#
#Interquartile range is set to 4.5
iqr <- OrdToAbs(0.75) - OrdToAbs(0.25)
iqr
#
# Width range is set to 10.7
width <- OrdToAbs(0.95) - OrdToAbs(0.05)
width
#s1 - symmetric, close to the threshold
# Ordinal values of s1 (0.3,0.4,0.48,0.52,0.6,0.7)
o1 <- c(0.3,0.4,0.48,0.52,0.6,0.7)
x1 <- OrdToAbs(o1)
x1 <- round(x1,2)
x1
#s2 - symmetric, spread apart
# Ordinal values of s2 (0.1,0.3,0.40,0.6,0.7,0.9)
o2 <- c(0.1,0.3,0.40,0.6,0.7,0.9)
x2 <- OrdToAbs(o2)
x2 <- round(x2,2)
x2
#s3 - non-symmetric, skewed towards high performance
# Ordinal values of s3 (0.3,0.44,0.7,0.8,0.9,0.98)
o3 <- c(0.3,0.44,0.7,0.8,0.9,0.98)
x3 <- OrdToAbs(o3)
x3 <- round(x3,2)
x3
#s4 - non-symmetric, skewed towards low performance
# Ordinal values of s4 (0.1,0.2,0.3,0.4,0.5,0.6)
o4 <- c(0.1,0.2,0.3,0.4,0.5,0.6)
x4 <- OrdToAbs(o4)
x4 <- round(x4,2)
x4
#s5 - non-symmetric, skewed towards high performance
# Ordinal values of s5 (0.08,0.18,0.28,0.70,0.85,0.99)
o5 <- c(0.08,0.18,0.28,0.70,0.85,0.99)
x5 <- OrdToAbs(o5)
x5 <- round(x5,2)
x5
#s6 - non-symmetric, closely dispersed with one outlier
# Ordinal values of s6 (0.3,0.4,0.5,0.6,0.7,0.99)
o6 <- c(0.3,0.4,0.5,0.6,0.7,0.99)
x6 <- OrdToAbs(o6)
x6 <- round(x6,2)
x6
#s7 - non-symmetric, skewed towards high performance
# Ordinal values of s7 (0.34,0.44,0.54,0.8,0.9,0.98)
o7 <- c(0.34,0.44,0.54,0.8,0.9,0.98)
x7 <- OrdToAbs(o7)
x7 <- round(x7,2)
x7
| /SamplingSchemes_AFC.R | no_license | LuchTiarna/ThesisScripts | R | false | false | 1,927 | r | ### Sampling Schemes
library(PsyFuns)
# The sampling schemes are modeled after the Wichmann & Hill papers/book
# Sampling schemes are defined in ordinal values for inner function
# They are generated by the INNER WEIBULL FUNCTION: F = 1 - e^(-(x/a)^b)
# with parameters: a=8.85/((log(2)^(1/3))), b=3, so that F(8.85) = 0.5
pars <- c(8.85/((log(2)^(1/3))), 3)
# Map ordinate values (probabilities on the inner function) to absolute
# stimulus values.
# NOTE(review): relies on PsyFuns internal (:::) functions, which are not
# part of the package's public API and may change without notice.
OrdToAbs <- function(ordinates){
  PsyFuns:::polynom.inverse_x.cdf(
    PsyFuns:::exponential.inverse.cdf(
      ordinates),
    pars)
}
#midpoint <- function(x){(x[1] + x[length(x)])/2}
# performance threshold is set to 8.85
perf_th <- OrdToAbs(0.5)
perf_th
#
#Interquartile range is set to 4.5
iqr <- OrdToAbs(0.75) - OrdToAbs(0.25)
iqr
#
# Width range is set to 10.7
width <- OrdToAbs(0.95) - OrdToAbs(0.05)
width
#s1 - symmetric, close to the threshold
# Ordinal values of s1 (0.3,0.4,0.48,0.52,0.6,0.7)
o1 <- c(0.3,0.4,0.48,0.52,0.6,0.7)
x1 <- OrdToAbs(o1)
x1 <- round(x1,2)
x1
#s2 - symmetric, spread apart
# Ordinal values of s2 (0.1,0.3,0.40,0.6,0.7,0.9)
o2 <- c(0.1,0.3,0.40,0.6,0.7,0.9)
x2 <- OrdToAbs(o2)
x2 <- round(x2,2)
x2
#s3 - non-symmetric, skewed towards high performance
# Ordinal values of s3 (0.3,0.44,0.7,0.8,0.9,0.98)
o3 <- c(0.3,0.44,0.7,0.8,0.9,0.98)
x3 <- OrdToAbs(o3)
x3 <- round(x3,2)
x3
#s4 - non-symmetric, skewed towards low performance
# Ordinal values of s4 (0.1,0.2,0.3,0.4,0.5,0.6)
o4 <- c(0.1,0.2,0.3,0.4,0.5,0.6)
x4 <- OrdToAbs(o4)
x4 <- round(x4,2)
x4
#s5 - non-symmetric, skewed towards high performance
# Ordinal values of s5 (0.08,0.18,0.28,0.70,0.85,0.99)
o5 <- c(0.08,0.18,0.28,0.70,0.85,0.99)
x5 <- OrdToAbs(o5)
x5 <- round(x5,2)
x5
#s6 - non-symmetric, closely dispersed with one outlier
# Ordinal values of s6 (0.3,0.4,0.5,0.6,0.7,0.99)
o6 <- c(0.3,0.4,0.5,0.6,0.7,0.99)
x6 <- OrdToAbs(o6)
x6 <- round(x6,2)
x6
#s7 - non-symmetric, skewed towards high performance
# Ordinal values of s7 (0.34,0.44,0.54,0.8,0.9,0.98)
o7 <- c(0.34,0.44,0.54,0.8,0.9,0.98)
x7 <- OrdToAbs(o7)
x7 <- round(x7,2)
x7
|
## Functions that implement a matrix container able to cache its own
## inverse, so that the (potentially costly) inversion is performed at
## most once.
## makeCacheMatrix: build the special "matrix" -- a list of four closures
## (set/get the matrix, set/get the cached inverse) that share state
## through the enclosing environment.
makeCacheMatrix <- function(x = matrix()) {
  inverseMatrix <- NULL
  set <- function(y) {
    # Storing a new matrix invalidates any previously cached inverse.
    x <<- y
    inverseMatrix <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    inverseMatrix <<- inverse
  }
  getinverse <- function() {
    inverseMatrix
  }
  list(
    set = set,
    get = get,
    getinverse = getinverse,
    setinverse = setinverse
  )
}
## cacheSolve: return the inverse of the special "matrix" created by
## makeCacheMatrix().  The inverse is computed with solve() at most once:
## a cache hit returns the stored value immediately, otherwise the result
## is computed, stored back into the cache, and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inverseMatrix <- x$getinverse()
  if (!is.null(inverseMatrix)) {
    message("getting cached inverse matrix")
    return(inverseMatrix)
  }
  matr <- x$get()
  ## Bug fix: forward '...' to solve() -- the original signature accepted
  ## extra arguments (e.g. tol) but silently dropped them.
  inverseMatrix <- solve(matr, ...)
  x$setinverse(inverseMatrix)
  inverseMatrix
}
| /cachematrix.R | no_license | Nishara97/ProgrammingAssignment2 | R | false | false | 1,004 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix()) {
inverseMatrix <- NULL
set <- function(y){
x <<- y
inverseMatrix <<- NULL
}
get <- function() x
setinverse <- function(inverse) inverseMatrix <<- inverse
getinverse <- function() inverseMatrix
list(set = set, get = get,
getinverse = getinverse,
setinverse = setinverse)
}
## cacheSolve: return the inverse of the special "matrix" created by
## makeCacheMatrix().  The inverse is computed with solve() at most once:
## a cache hit returns the stored value immediately, otherwise the result
## is computed, stored back into the cache, and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inverseMatrix <- x$getinverse()
  if (!is.null(inverseMatrix)) {
    message("getting cached inverse matrix")
    return(inverseMatrix)
  }
  matr <- x$get()
  ## Bug fix: forward '...' to solve() -- the original signature accepted
  ## extra arguments (e.g. tol) but silently dropped them.
  inverseMatrix <- solve(matr, ...)
  x$setinverse(inverseMatrix)
  inverseMatrix
}
|
#' @title Compute the Transcriptome Divergence Index (TDI)
#' @description This function computes the sequence distance based transcriptome divergence index (TDI) introduced by
#' Quint et al., 2012.
#' @param DivergenceExpressionSet a standard PhyloExpressionSet or DivergenceExpressionSet object.
#' @details
#'
#' The TDI measure represents the weighted arithmetic mean (expression levels as
#' weights for the divergence-stratum value) over all gene divergence categories denoted as \emph{divergence-strata}.
#'
#'
#' \deqn{TDI_s = \sum (e_is * ds_i) / \sum e_is}
#'
#' where TDI_s denotes the TDI value in developmental stage s, e_is denotes the gene expression level of gene i in stage s, and ds_i denotes the corresponding divergence-stratum of gene i, \eqn{i = 1,...,N} and N = total number of genes.
#'
#' Internally the function is written in C++ to speed up TDI computations.
#' @return a numeric vector containing the TDI values for all given developmental stages.
#' @references
#' Quint M et al. (2012). \emph{A transcriptomic hourglass in plant embryogenesis}. Nature (490): 98-101.
#'
#' Drost HG et al. (2015). \emph{Evidence for Active Maintenance of Phylotranscriptomic Hourglass Patterns in Animal and Plant Embryogenesis}. Mol Biol Evol. 32 (5): 1221-1231 doi:10.1093/molbev/msv012.
#'
#' @author Hajk-Georg Drost
#' @seealso \code{\link{TAI}}, \code{\link{PlotPattern}}, \code{\link{FlatLineTest}}, \code{\link{ReductiveHourglassTest}}
#' @examples
#'
#' # reading a standard DivergenceExpressionSet
#' data(DivergenceExpressionSetExample)
#'
#' # computing the TDI profile of a given DivergenceExpressionSet object
#' TDIs <- TDI(DivergenceExpressionSetExample)
#'
#'
#' @export
TDI <- function(DivergenceExpressionSet)
{
        # Basic input check (project helper; presumably stops on a
        # malformed ExpressionSet -- behavior defined elsewhere).
        is.ExpressionSet(DivergenceExpressionSet)
        # Expression levels live in columns 3..ncol; column 1 holds the
        # divergence strata used as weights.
        ExpressionMatrix <- dplyr::select(DivergenceExpressionSet, 3:ncol(DivergenceExpressionSet))
        Divergencestratum <- unlist(dplyr::select(DivergenceExpressionSet, 1))
        # cpp_TAI() computes the expression-weighted mean stratum per stage.
        # (Cleanup: removed the dead pre-allocation of TDIProfile and the
        # unused nCols -- both were immediately overwritten / never read.)
        TDIProfile <- cpp_TAI(as.matrix(ExpressionMatrix), as.vector(Divergencestratum))
        names(TDIProfile) <- names(ExpressionMatrix)
        return(TDIProfile)
}
| /R/TDI.R | no_license | cnyuanh/myTAI | R | false | false | 2,325 | r | #' @title Compute the Transcriptome Divergence Index (TDI)
#' @description This function computes the sequence distance based transcriptome divergence index (TDI) introduced by
#' Quint et al., 2012.
#' @param DivergenceExpressionSet a standard PhyloExpressionSet or DivergenceExpressionSet object.
#' @details
#'
#' The TDI measure represents the weighted arithmetic mean (expression levels as
#' weights for the divergence-stratum value) over all gene divergence categories denoted as \emph{divergence-strata}.
#'
#'
#' \deqn{TDI_s = \sum (e_is * ds_i) / \sum e_is}
#'
#' where TDI_s denotes the TDI value in developmental stage s, e_is denotes the gene expression level of gene i in stage s, and ds_i denotes the corresponding divergence-stratum of gene i, \eqn{i = 1,...,N} and N = total number of genes.
#'
#' Internally the function is written in C++ to speed up TDI computations.
#' @return a numeric vector containing the TDI values for all given developmental stages.
#' @references
#' Quint M et al. (2012). \emph{A transcriptomic hourglass in plant embryogenesis}. Nature (490): 98-101.
#'
#' Drost HG et al. (2015). \emph{Evidence for Active Maintenance of Phylotranscriptomic Hourglass Patterns in Animal and Plant Embryogenesis}. Mol Biol Evol. 32 (5): 1221-1231 doi:10.1093/molbev/msv012.
#'
#' @author Hajk-Georg Drost
#' @seealso \code{\link{TAI}}, \code{\link{PlotPattern}}, \code{\link{FlatLineTest}}, \code{\link{ReductiveHourglassTest}}
#' @examples
#'
#' # reading a standard DivergenceExpressionSet
#' data(DivergenceExpressionSetExample)
#'
#' # computing the TDI profile of a given DivergenceExpressionSet object
#' TDIs <- TDI(DivergenceExpressionSetExample)
#'
#'
#' @export
TDI <- function(DivergenceExpressionSet)
{
        # Basic input check (project helper; presumably stops on a
        # malformed ExpressionSet -- behavior defined elsewhere).
        is.ExpressionSet(DivergenceExpressionSet)
        # Expression levels live in columns 3..ncol; column 1 holds the
        # divergence strata used as weights.
        ExpressionMatrix <- dplyr::select(DivergenceExpressionSet, 3:ncol(DivergenceExpressionSet))
        Divergencestratum <- unlist(dplyr::select(DivergenceExpressionSet, 1))
        # cpp_TAI() computes the expression-weighted mean stratum per stage.
        # (Cleanup: removed the dead pre-allocation of TDIProfile and the
        # unused nCols -- both were immediately overwritten / never read.)
        TDIProfile <- cpp_TAI(as.matrix(ExpressionMatrix), as.vector(Divergencestratum))
        names(TDIProfile) <- names(ExpressionMatrix)
        return(TDIProfile)
}
|
#' Display the abstract syntax tree
#'
#' This is a useful alternative to `str()` for expression objects.
#'
#' @param x A language object to display.
#' @export
#' @examples
#' # Leaves
#' ast(1)
#' ast(x)
#'
#' # Simple calls
#' ast(f())
#' ast(f(x, 1, g(), h(i())))
#' ast(f()())
#' ast(f(x)(y))
#'
#' ast((x + 1))
#'
#' # All operations have this same structure
#' ast(if (TRUE) 3 else 4)
#' ast(y <- x * 10)
#' ast(function(x = 1, y = 2) { x + y } )
#'
#' # Operator precedence
#' ast(1 * 2 + 3)
#' ast(!1 + !1)
ast <- function(x) {
  # Capture the caller's unevaluated expression, render it as a character
  # tree with ast_tree(), and wrap the lines for printing.
  new_raw(ast_tree(enexpr(x)))
}
# Recursively render the AST of `x` as a character vector of display lines,
# using the box-drawing glyphs in `layout` to draw the tree connectors.
ast_tree <- function(x, layout = box_chars()) {
  if (is_quosure(x)) {
    # Strip the quosure wrapper and work on the bare expression.
    x <- quo_expr(x)
  }
  # base cases: literals and symbols render as a single leaf line
  if (rlang::is_syntactic_literal(x)) {
    return(ast_leaf_constant(x))
  } else if (is_symbol(x)) {
    return(ast_leaf_symbol(x))
  } else if (!is.pairlist(x) && !is.call(x)) {
    # Inlined objects (neither call nor pairlist) get a class placeholder.
    return(paste0("<inline ", paste0(class(x), collapse = "/"), ">"))
  }
  # recursive case: render every child, then attach "name = " labels
  subtrees <- lapply(x, ast_tree, layout = layout)
  subtrees <- name_subtree(subtrees)
  n <- length(x)
  if (n == 0) {
    character()
  } else if (n == 1) {
    # Single child: no vertical connector is needed below it.
    str_indent(subtrees[[1]],
      paste0(layout$n, layout$h),
      "  "
    )
  } else {
    # First, middle and last children use different connector glyphs so
    # the vertical tree lines join up correctly.
    c(
      str_indent(subtrees[[1]],
        paste0(layout$n, layout$h),
        paste0(layout$v, " ")
      ),
      unlist(lapply(subtrees[-c(1, n)],
        str_indent,
        paste0(layout$j, layout$h),
        paste0(layout$v, " ")
      )),
      str_indent(subtrees[[n]],
        paste0(layout$l, layout$h),
        "  "
      )
    )
  }
}
# Prefix each named subtree with an italic grey "name = " label and pad its
# continuation lines so they align under the label; unnamed subtrees pass
# through unchanged.
name_subtree <- function(x) {
  nm <- names(x)
  if (is.null(nm)) {
    return(x)
  }
  labels <- paste0(crayon::italic(grey(nm)), " = ")
  # Pad by the width of the *unstyled* label: nchar(name) + nchar(" = ").
  padding <- str_dup(" ", nchar(nm) + 3)
  for (i in which(nm != "")) {
    x[[i]] <- str_indent(x[[i]], labels[[i]], padding[[i]])
  }
  x
}
# Format a symbol leaf: backtick-quote names that are not syntactically
# valid, then render the result in bold magenta.
ast_leaf_symbol <- function(x) {
  label <- as.character(x)
  if (!is.syntactic(label)) {
    label <- encodeString(label, quote = "`")
  }
  crayon::bold(crayon::magenta(label))
}
# Format a literal leaf: character literals are shown double-quoted,
# every other literal is rendered via as.character().
ast_leaf_constant <- function(x) {
  if (!is.character(x)) {
    return(as.character(x))
  }
  encodeString(x, quote = "\"")
}
# TRUE for each element of `x` that is already a syntactically valid R
# name, i.e. one that make.names() leaves unchanged (vectorised).
is.syntactic <- function(x) {
  make.names(x) == x
}
| /R/ast.R | no_license | grearte/lobstr | R | false | false | 2,204 | r | #' Display the abstract syntax tree
#'
#' This is a useful alterantive to `str()` for expression objects.
#'
#' @param x A language object to display.
#' @export
#' @examples
#' # Leaves
#' ast(1)
#' ast(x)
#'
#' # Simple calls
#' ast(f())
#' ast(f(x, 1, g(), h(i())))
#' ast(f()())
#' ast(f(x)(y))
#'
#' ast((x + 1))
#'
#' # All operations have this same structure
#' ast(if (TRUE) 3 else 4)
#' ast(y <- x * 10)
#' ast(function(x = 1, y = 2) { x + y } )
#'
#' # Operator precedence
#' ast(1 * 2 + 3)
#' ast(!1 + !1)
ast <- function(x) {
  # Capture the caller's unevaluated expression, render it as a character
  # tree with ast_tree(), and wrap the lines for printing.
  new_raw(ast_tree(enexpr(x)))
}
# Recursively render the AST of `x` as a character vector of display lines,
# using the box-drawing glyphs in `layout` to draw the tree connectors.
ast_tree <- function(x, layout = box_chars()) {
  if (is_quosure(x)) {
    # Strip the quosure wrapper and work on the bare expression.
    x <- quo_expr(x)
  }
  # base cases: literals and symbols render as a single leaf line
  if (rlang::is_syntactic_literal(x)) {
    return(ast_leaf_constant(x))
  } else if (is_symbol(x)) {
    return(ast_leaf_symbol(x))
  } else if (!is.pairlist(x) && !is.call(x)) {
    # Inlined objects (neither call nor pairlist) get a class placeholder.
    return(paste0("<inline ", paste0(class(x), collapse = "/"), ">"))
  }
  # recursive case: render every child, then attach "name = " labels
  subtrees <- lapply(x, ast_tree, layout = layout)
  subtrees <- name_subtree(subtrees)
  n <- length(x)
  if (n == 0) {
    character()
  } else if (n == 1) {
    # Single child: no vertical connector is needed below it.
    str_indent(subtrees[[1]],
      paste0(layout$n, layout$h),
      "  "
    )
  } else {
    # First, middle and last children use different connector glyphs so
    # the vertical tree lines join up correctly.
    c(
      str_indent(subtrees[[1]],
        paste0(layout$n, layout$h),
        paste0(layout$v, " ")
      ),
      unlist(lapply(subtrees[-c(1, n)],
        str_indent,
        paste0(layout$j, layout$h),
        paste0(layout$v, " ")
      )),
      str_indent(subtrees[[n]],
        paste0(layout$l, layout$h),
        "  "
      )
    )
  }
}
# Prefix each named subtree with an italic grey "name = " label and pad its
# continuation lines so they align under the label; unnamed subtrees pass
# through unchanged.
name_subtree <- function(x) {
  nm <- names(x)
  if (is.null(nm)) {
    return(x)
  }
  labels <- paste0(crayon::italic(grey(nm)), " = ")
  # Pad by the width of the *unstyled* label: nchar(name) + nchar(" = ").
  padding <- str_dup(" ", nchar(nm) + 3)
  for (i in which(nm != "")) {
    x[[i]] <- str_indent(x[[i]], labels[[i]], padding[[i]])
  }
  x
}
# Format a symbol leaf: backtick-quote names that are not syntactically
# valid, then render the result in bold magenta.
ast_leaf_symbol <- function(x) {
  label <- as.character(x)
  if (!is.syntactic(label)) {
    label <- encodeString(label, quote = "`")
  }
  crayon::bold(crayon::magenta(label))
}
# Format a literal leaf: character literals are shown double-quoted,
# every other literal is rendered via as.character().
ast_leaf_constant <- function(x) {
  if (!is.character(x)) {
    return(as.character(x))
  }
  encodeString(x, quote = "\"")
}
# TRUE for each element of `x` that is already a syntactically valid R
# name, i.e. one that make.names() leaves unchanged (vectorised).
is.syntactic <- function(x) {
  make.names(x) == x
}
|
# In this script we do an MRMC analysis of the auc for each scanner
# (OR method: Obuchowski and Rockette, Obuchowski1995_Commun-Stat-Simulat_v24p285).
# Since the data is binary, auc is the average of sensitivity and specificity
# or half of (Youden's index + 1). Sensitivity is defined as the number of
# MFs detected by an observer divided by the number of true MFs.
# Specificity is defined as one minus the false-positive fraction,
# where the false-positive fraction is the number of false MFs that were positively marked,
# divided by the total number of false MFs.
# Furthermore, we account for the fact that there are multiple observations per case
# (multiple ROIs per WSI, clustered data: Obuchowski1997_Biometrics_v53p567)
# when calculating the reader by modality covariances that are used in the OR method.
#
# The results of this script yield:
# Table 4: Accuracy for all readers and observation methods
# Figure 3: Accuracy (average of sensitivity and specificity) for each viewing mode
# averaged over all the readers with 95% confidence intervals. The asterisks indicate
# that the difference in accuracy of the viewing mode compared to that of microscopy
# is statistically significant. All analyses account for the correlations and variability
# from the readers reading the same ROIs.
# Initialize functions ####
library(mitoticFigureCounts)
# Multi-reader multi-case Obuchowski-Rockette (OR) analysis of the AUC,
# with reader-by-modality covariances estimated while accounting for
# clustered observations (multiple ROIs per case).
#
# df: list-mode data frame with factors readerID and modalityID (the final
#     reshape hard-codes 2 modality levels), plus targetID, caseID, score
#     and truth columns consumed by doAUCcluster().
# Returns (invisibly, matching the original's implicit return via print())
# the mrmcAnalysisOR() result augmented with 95% CI bounds per reader.
# Cleanup vs original: removed unused locals `modalities`/`readers` and the
# leftover debug print(i)/print(j) calls inside the loops.
doMRMCaucORcluster <- function(df) {
  nModalities <- nlevels(df$modalityID)
  nReaders <- nlevels(df$readerID)
  nUnits <- nModalities * nReaders
  # Split the data frame by readers and modalities
  df.byModalityReader <- split(df, list(df$readerID, df$modalityID))
  # Calculate covariances for each reader/modality combination ####
  auc <- vector(mode = "numeric", nUnits)
  cov <- matrix(-1.0, nrow = nUnits, ncol = nUnits)
  for (i in seq_len(nUnits)) {
    for (j in i:nUnits) {
      # Pair observations of units i and j on the same target (ROI).
      df.merge <- merge(df.byModalityReader[[i]],
                        df.byModalityReader[[j]],
                        by = "targetID", all = TRUE)
      result <- doAUCcluster(
        predictor1 = df.merge$score.x,
        predictor2 = df.merge$score.y,
        response = df.merge$truth.x,
        clusterID = df.merge$caseID.x,
        alpha = 0.05)
      # cov(A, B) from the variance identity:
      # var(A - B) = var(A) + var(B) - 2 cov(A, B)
      cov[i, j] <- (result$auc.var.A + result$auc.var.B
                    - result$auc.var.AminusB) / 2
      cov[j, i] <- cov[i, j]
    }
    # auc.A always refers to predictor1, i.e. unit i, so the value kept
    # from the last inner iteration is indeed the AUC of unit i.
    auc[i] <- result$auc.A
  }
  # mrmcAnalysisOR ####
  # NOTE: nrow = 2 assumes exactly two modalities in df.
  auc <- matrix(auc, nrow = 2, ncol = nReaders, byrow = TRUE)
  aucMTG.OR.new <- mrmcAnalysisOR(auc, cov)
  # 95% confidence bounds using the single-modality degrees of freedom.
  aucMTG.OR.new$botCI <- aucMTG.OR.new$theta.i -
    qt(0.975, df = aucMTG.OR.new$df.sgl) * aucMTG.OR.new$se.i
  aucMTG.OR.new$topCI <- aucMTG.OR.new$theta.i +
    qt(0.975, df = aucMTG.OR.new$df.sgl) * aucMTG.OR.new$se.i
  print(aucMTG.OR.new)
  # print() returns its argument invisibly; make that explicit so callers
  # can assign the result.
  invisible(aucMTG.OR.new)
}
# Initialize data ####
df.orig <- mitoticFigureCounts::dfClassify20180627
# Convert data to list-mode
readers <- c(
  "observer.1",
  "observer.2",
  "observer.3",
  "observer.4",
  "observer.5"
)
# NOTE(review): `nameTruth` is not defined anywhere in this script; it is
# presumably exported by the mitoticFigureCounts package -- verify before
# running standalone.
df.convert <- convertDF(df.orig, "matrixWithTruth", "listWithTruth", readers, nameTruth)
df.convert$caseID <- df.convert$targetID
df.convert$readerID <- factor(df.convert$readerID)
df.convert$locationID <- df.convert$roiID
df.convert$modalityID <- factor(df.convert$modalityID)
# Split the data by modality
df.convert <- split(df.convert, df.convert$modalityID)
# Analyze modality.A ####
# Each analysis pairs the shared microscope arm with one scanner arm and
# prints the elapsed wall-clock time of the MRMC analysis.
start <- proc.time()
df.A <- rbind(df.convert$microscope, df.convert$scanner.A)
df.A$modalityID <- factor(df.A$modalityID)
result.A <- doMRMCaucORcluster(df.A)
finish <- proc.time()
print(finish - start)
# Analyze modality.B ####
start <- proc.time()
df.B <- rbind(df.convert$microscope, df.convert$scanner.B)
df.B$modalityID <- factor(df.B$modalityID)
result.B <- doMRMCaucORcluster(df.B)
finish <- proc.time()
print(finish - start)
# Analyze modality.C ####
start <- proc.time()
df.C <- rbind(df.convert$microscope, df.convert$scanner.C)
df.C$modalityID <- factor(df.C$modalityID)
result.C <- doMRMCaucORcluster(df.C)
finish <- proc.time()
print(finish - start)
# Analyze modality.D ####
start <- proc.time()
df.D <- rbind(df.convert$microscope, df.convert$scanner.D)
df.D$modalityID <- factor(df.D$modalityID)
result.D <- doMRMCaucORcluster(df.D)
finish <- proc.time()
print(finish - start)
# Bundle the four scanner analyses and save them as package data.
aucMRMCcluster <- list(result.A, result.B, result.C, result.D)
names(aucMRMCcluster) <- c("ScannerA", "ScannerB", "ScannerC", "ScannerD")
usethis::use_data(aucMRMCcluster, overwrite = TRUE)
| /inst/extra/docs/05_doMRMCaucORcluster.R | permissive | DIDSR/mitoticFigureCounts | R | false | false | 4,763 | r |
# In this script we do an MRMC analysis of the auc for each scanner
# (OR method: Obuchowski and Rockette, Obuchowski1995_Commun-Stat-Simulat_v24p285).
# Since the data is binary, auc is the average of sensitivity and specificity
# or half of (Youden's index + 1). Sensitivity is defined as the number of
# MFs detected by an observer divided by the number of true MFs.
# Specificity is defined as one minus the false-positive fraction,
# where the false-positive fraction is the number of false MFs that were positively marked,
# divided by the total number of false MFs.
# Furthermore, we account for the fact that there are multiple observations per case
# (multiple ROIs per WSI, clustered data: Obuchowski1997_Biometrics_v53p567)
# when calculating the reader by modality covariances that are used in the OR method.
#
# The results of this script yield:
# Table 4: Accuracy for all readers and observation methods
# Figure 3: Accuracy (average of sensitivity and specificity) for each viewing mode
# averaged over all the readers with 95% confidence intervals. The asterisks indicate
# that the difference in accuracy of the viewing mode compared to that of microscopy
# is statistically significant. All analyses account for the correlations and variability
# from the readers reading the same ROIs.
# Initialize functions ####
library(mitoticFigureCounts)
# Multi-reader multi-case Obuchowski-Rockette (OR) analysis of the AUC,
# with reader-by-modality covariances estimated while accounting for
# clustered observations (multiple ROIs per case).
#
# df: list-mode data frame with factors readerID and modalityID (the final
#     reshape hard-codes 2 modality levels), plus targetID, caseID, score
#     and truth columns consumed by doAUCcluster().
# Returns (invisibly, matching the original's implicit return via print())
# the mrmcAnalysisOR() result augmented with 95% CI bounds per reader.
# Cleanup vs original: removed unused locals `modalities`/`readers` and the
# leftover debug print(i)/print(j) calls inside the loops.
doMRMCaucORcluster <- function(df) {
  nModalities <- nlevels(df$modalityID)
  nReaders <- nlevels(df$readerID)
  nUnits <- nModalities * nReaders
  # Split the data frame by readers and modalities
  df.byModalityReader <- split(df, list(df$readerID, df$modalityID))
  # Calculate covariances for each reader/modality combination ####
  auc <- vector(mode = "numeric", nUnits)
  cov <- matrix(-1.0, nrow = nUnits, ncol = nUnits)
  for (i in seq_len(nUnits)) {
    for (j in i:nUnits) {
      # Pair observations of units i and j on the same target (ROI).
      df.merge <- merge(df.byModalityReader[[i]],
                        df.byModalityReader[[j]],
                        by = "targetID", all = TRUE)
      result <- doAUCcluster(
        predictor1 = df.merge$score.x,
        predictor2 = df.merge$score.y,
        response = df.merge$truth.x,
        clusterID = df.merge$caseID.x,
        alpha = 0.05)
      # cov(A, B) from the variance identity:
      # var(A - B) = var(A) + var(B) - 2 cov(A, B)
      cov[i, j] <- (result$auc.var.A + result$auc.var.B
                    - result$auc.var.AminusB) / 2
      cov[j, i] <- cov[i, j]
    }
    # auc.A always refers to predictor1, i.e. unit i, so the value kept
    # from the last inner iteration is indeed the AUC of unit i.
    auc[i] <- result$auc.A
  }
  # mrmcAnalysisOR ####
  # NOTE: nrow = 2 assumes exactly two modalities in df.
  auc <- matrix(auc, nrow = 2, ncol = nReaders, byrow = TRUE)
  aucMTG.OR.new <- mrmcAnalysisOR(auc, cov)
  # 95% confidence bounds using the single-modality degrees of freedom.
  aucMTG.OR.new$botCI <- aucMTG.OR.new$theta.i -
    qt(0.975, df = aucMTG.OR.new$df.sgl) * aucMTG.OR.new$se.i
  aucMTG.OR.new$topCI <- aucMTG.OR.new$theta.i +
    qt(0.975, df = aucMTG.OR.new$df.sgl) * aucMTG.OR.new$se.i
  print(aucMTG.OR.new)
  # print() returns its argument invisibly; make that explicit so callers
  # can assign the result.
  invisible(aucMTG.OR.new)
}
# Initialize data ####
df.orig <- mitoticFigureCounts::dfClassify20180627
# Convert data to list-mode
readers <- c(
  "observer.1",
  "observer.2",
  "observer.3",
  "observer.4",
  "observer.5"
)
# NOTE(review): `nameTruth` is not defined anywhere in this script; it is
# presumably exported by the mitoticFigureCounts package -- verify before
# running standalone.
df.convert <- convertDF(df.orig, "matrixWithTruth", "listWithTruth", readers, nameTruth)
df.convert$caseID <- df.convert$targetID
df.convert$readerID <- factor(df.convert$readerID)
df.convert$locationID <- df.convert$roiID
df.convert$modalityID <- factor(df.convert$modalityID)
# Split the data by modality
df.convert <- split(df.convert, df.convert$modalityID)
# Analyze modality.A ####
# Each analysis pairs the shared microscope arm with one scanner arm and
# prints the elapsed wall-clock time of the MRMC analysis.
start <- proc.time()
df.A <- rbind(df.convert$microscope, df.convert$scanner.A)
df.A$modalityID <- factor(df.A$modalityID)
result.A <- doMRMCaucORcluster(df.A)
finish <- proc.time()
print(finish - start)
# Analyze modality.B ####
start <- proc.time()
df.B <- rbind(df.convert$microscope, df.convert$scanner.B)
df.B$modalityID <- factor(df.B$modalityID)
result.B <- doMRMCaucORcluster(df.B)
finish <- proc.time()
print(finish - start)
# Analyze modality.C ####
start <- proc.time()
df.C <- rbind(df.convert$microscope, df.convert$scanner.C)
df.C$modalityID <- factor(df.C$modalityID)
result.C <- doMRMCaucORcluster(df.C)
finish <- proc.time()
print(finish - start)
# Analyze modality.D ####
start <- proc.time()
df.D <- rbind(df.convert$microscope, df.convert$scanner.D)
df.D$modalityID <- factor(df.D$modalityID)
result.D <- doMRMCaucORcluster(df.D)
finish <- proc.time()
print(finish - start)
# Bundle the four scanner analyses and save them as package data.
aucMRMCcluster <- list(result.A, result.B, result.C, result.D)
names(aucMRMCcluster) <- c("ScannerA", "ScannerB", "ScannerC", "ScannerD")
usethis::use_data(aucMRMCcluster, overwrite = TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/indices.R
\name{ineq_cov}
\alias{ineq_cov}
\title{ineq_cov}
\usage{
ineq_cov(age, dx, ex, ax, distribution_type = c("aad", "rl"), check = TRUE)
}
\arguments{
\item{age}{numeric. vector of lower age bounds.}
\item{dx}{numeric. vector of the lifetable death distribution.}
\item{ex}{numeric. vector of remaining life expectancy.}
\item{ax}{numeric. vector of the average time spent in the age interval of those dying within the interval.}
\item{distribution_type}{character. Either \code{"aad"} (age at death) or \code{"rl"} (remaining life)}
\item{check}{logical. Shall we perform basic checks on input vectors? Default TRUE}
}
\description{
Calculate a lifetable column for the conditional coefficient of variation in lifetable ages at death
}
\details{
All input vectors must be the same length. Also, we recommend using input data from a life table by single year of age with a highest age group of at least age 110. If your data have a lower upper age bound, consider extrapolation methods, for instance a parametric Kannisto model (implemented in \code{MortalityLaws::MortalityLaw}). If your data are abridged, consider first smoothing over age, and calculating a life table by single year of age (for instance by smoothing with a pclm model in package \code{ungroup} or with a penalized B-spline approach in package \code{MortalitySmooth}).
}
\examples{
data(LT)
# A vector containing the conditional coefficient of variation in age at death
CoV = ineq_cov(age=LT$Age,dx=LT$dx,ex=LT$ex,ax=LT$ax)
# The coefficient of variation in age at death from birth
CoV[1]
# The coefficient of variation in age at death conditional upon survival to age 10
CoV[11]
}
\seealso{
\code{MortalityLaws::\link[MortalityLaws]{MortalityLaw}}
\code{ungroup::\link[ungroup]{pclm}}
\code{MortalitySmooth::\link[MortalitySmooth]{Mort1Dsmooth}}
}
| /man/ineq_cov.Rd | no_license | alysonvanraalte/LifeIneq | R | false | true | 1,913 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/indices.R
\name{ineq_cov}
\alias{ineq_cov}
\title{ineq_cov}
\usage{
ineq_cov(age, dx, ex, ax, distribution_type = c("aad", "rl"), check = TRUE)
}
\arguments{
\item{age}{numeric. vector of lower age bounds.}
\item{dx}{numeric. vector of the lifetable death distribution.}
\item{ex}{numeric. vector of remaining life expectancy.}
\item{ax}{numeric. vector of the average time spent in the age interval of those dying within the interval.}
\item{distribution_type}{character. Either \code{"aad"} (age at death) or \code{"rl"} (remaining life)}
\item{check}{logical. Shall we perform basic checks on input vectors? Default TRUE}
}
\description{
Calculate a lifetable column for the conditional coefficient of variation in lifetable ages at death
}
\details{
All input vectors must be the same length. Also, we recommend using input data from a life table by single year of age with a highest age group of at least age 110. If your data have a lower upper age bound, consider extrapolation methods, for instance a parametric Kannisto model (implemented in \code{MortalityLaws::MortalityLaw}). If your data are abridged, consider first smoothing over age, and calculating a life table by single year of age (for instance by smoothing with a pclm model in package \code{ungroup} or with a penalized B-spline approach in package \code{MortalitySmooth}).
}
\examples{
data(LT)
# A vector containing the conditional coefficient of variation in age at death
CoV = ineq_cov(age=LT$Age,dx=LT$dx,ex=LT$ex,ax=LT$ax)
# The coefficient of variation in age at death from birth
CoV[1]
# The coefficient of variation in age at death conditional upon survival to age 10
CoV[11]
}
\seealso{
\code{MortalityLaws::\link[MortalityLaws]{MortalityLaw}}
\code{ungroup::\link[ungroup]{pclm}}
\code{MortalitySmooth::\link[MortalitySmooth]{Mort1Dsmooth}}
}
|
#### Creating ASF list of options for ASF() version 0.15.1
#### Includes the following functions:
####
#### ASFoptions
####
#### The output is a list of parameters.
#### The output of the model is produced by the summary function.
ASFoptions<-function(...){
args<-list(...)
## List of default values:
defaults<-list(
############################################################
n=1, # Number of simulated epidemics
runID=NULL, # ID used for output and temp. files
# using a random number if NULL
stepInFile=NULL, # File used to step into an outbreak
# and start all iterations from
# same setup.
maxTime=365, # Maximum length of each outbreak
##DF=3,
Tstoch=FALSE, # Number of infected animals in newly
# infected herd: stochastic (T) or
# exactly 1 (F)
RFstoch=FALSE, # Number of intraherd disease
# transmissions:
# binomial chain model (T) or
# Reed-Frost model (F)
seed=NULL, # Seed for random number generator.
# If negative the seed is found as:
# set.seed(iteration+abs(seed))
pause=0, # Time in seconds to pause between
# new graphs.
delaySteps=1, # Vector of starting times for time
# dependent diagnosis delays
delayTimes=expression(round(rpert(n,1,2,4))), # Vector of
# diagnosis delays for starting times
# in delaySteps. These should be
# expressions or text that can be
# parsed to expressions
ignoreStatus=TRUE, # T/F value for ingoring disease
# status information in input file
# (all reset to 1 if True)
indexHerdFunction="selectIndexHerd", # Name of function used
# to select index herd for each
# simulated epidemic (iteration)
indexHerdSelect=list(herdType=1:18), # Argument to function
#basicScenario=TRUE,
# used to select index herd for each
# simulated epidemic (iteration)
indexDirect=FALSE, # Index herd infected by direct (T)
# or indirect (F) contact
depopTeams=Inf, # Number of locations that can be
# depoped at a time. not used anymore
Capacity=c(4800), # The culling capacity per day
# 4800 swine
TracePeriod=30, # Tracing will go back to the defined number of days; default 30 days
#CapSurvay=c(450), # Surveillance capacity; number of herds/day
RepVisSurvZone=14, # How often the visit within the surveillance zone be repeated
traceBack=FALSE, # Should traceback information be saved
traceBackDelay=1,
rateBefD=1, # Abattoir rate before detection first
# infected herd for swine when there is
# a movement from swine herds to slaughter
rateAftD=2, # Abattoir rate after detection first
# infected herd when there is
# a movement from the herds to slaughter (less frequent visits despite of a higher value for rate
# because the Exp function will compress it more ;-))
FirstDetPara=0.0255, # proportion of sick and dead animals
ProbSelDiag=1, # The probability of diagnosing a selected herd for diagnosis
Detailed=FALSE, # should detailed surveillance output be printed
DumpData=1, # Number of data lines that can be reached before
# data about survyed herds can be dumpped in the output file
ProbSelPV1=0, # proportion of herds that will be tested (PCR) during first protection zone visit
ProbSelSV1=0, # proportion of herds that will be tested (PCR) during first surveillance zone visit
ProbSelSV2=0, # proportion of herds that will be tested during second surveillance zone visit
ProbSelTIDC=0.1, # proportion of traced herds from indirect contacts that will be tested (PCR) visit
SecSurVisitOLSZ=0, # number of days. herds in overlapping surveillance zones will get a new visit every SecSurVisitOLSZ days.
DelayStartVisitOLPZ=0, # number of days. herds in overlapping protection zone will get a new visit every DelayStartVisitOLPZ once the
# they continue in the protection zone and the time of second PZ visit has passed
SecSurVisit=40, # surveillance visit. called second here because the first is not mandatory.
SecProtVisit=45, # second visit in protection zone
firstSurvVisit=FALSE, # Allow first surveillance visit(Yes/No)
DelayStartVisit=2, # Number of days before the visiting of herds for surveillance would start
DelayVisit=7, # delay for the extra visits for herds in overlapping zones.
MortalityIncrease=2, # level of increase in mortality before potential detection.
MortalityIncreaseZone=1.5, # level of increase in mortality before potential detection.
InfPropSub=0.1, # risk of infection from subclinical animals (before clinical signs appeared)
PerDeadAnim=0.95, # percentage of animals that die following infection
DaysDead=5, # number of past days to be used to determine infectiousness of leftovers of dead animals
#ReqSampSiz=30, # sample size when herds are tested.
DeadImpact=1, # parameter to address the impact of leftovers on disease spread
ImpDeadTime=1, # parameter to address uncertainty of survivability of virus in leftovers
PZoneDuration=50, # protection Zones duration should be 50 days at the start of each iteration
SZoneDuration=45, # Surveillance Zones duration should be 45 days at the start of each iteration
SerologyTesting=c(1,5,6), # The type of visit (1=PV2, 2=PV1, 3=SV1, 4=SV2, 5=trace IDC, 6=Trace DC) where serology testing will be applied.
PCRTesting=c(5,6), # The type of visit (1=PV2, 2=PV1, 3=SV1, 4=SV2, 5=trace IDC, 6=Trace DC) where PCR testing will be applied.
NumDeadAnimFirst=5, # Number of dead animals in the herd for first detection
NumDeadAnimAftFirst=1, # Number of dead animals in the herds for detection after first detection occured
NumDeadAnimSurv=1, # Number of dead animals in the herd for detection through surveillance.
DaysSurDead=7, # Number of past days to be used to survay dead animals
numTestDead=5, # Number of dead animals tested
DelaySubDeadSamp=1, # Delay on the submited samples to arrive to the laboratory
ToTracedIDC=2:4,
probSelectTIDC=c(0,0.838,0.2,0.125), # the probability that a movement will not be forgetten and it will be traced and visited,
LocSpLim=c(0.1,0.5,1,2) # cutoffs in km for local spread distance probabilities
,
DistList=list( ## a list that includes the probability of infection (distProb) through local spread given the distance in km (distCat)
distCat=c(1,2,3,4,5), ## from the infectious herd
distProb=c(0.1,0.006,0.002,0.000015,0) ## these probabilities are based on Boklund et al. (2009) for CSF after reduction by 50% to include
) ## the lower infectivity of ASF as carried out by Nigsch et al., 2013.
,
probList=list( ## Default distributions
DistCat=c(0,1,3,10,15,20,30,40,50,60,70,80,90,100,110,120,130,140,150,160,170,180,190,200,210,220,230,240,250,300), ## distance categories in km
distcat=c(0,1,3,10,15,20,30,40,50,60,70,80,90,100,110,120,130,140,150,160,170,180,190,200,210,220,230,240,250,300)
),
LambdaWB=c(0.10824,0.055,0)# the categories represent distances of highRisk, lowRisk and noRisk herds
,
LocalSPWB=c(0.0006,0.0002,0.0000015,0),# the categories represent distances of highRisk (within 0.5km), medium Risk (from 0.5 to 1km)
# low risk (from 1 to 2km) and no Risk herds (>2km)
# these values are from Boklund et al. 2008 and reduced 20 fold, due to
# lower spread of ASF than CSF (Nigsch et al., 2013) and smaller herd sizes
# of wild boar
newInfFunctions=c( # Vector of functions used to make new infections (including parameters).
"DIRinf3('LamAll',MovSwProb,'pMatAll','RiskDC',MovMatAll,restMovedSize=35,label=1)",
"DIRinf3('LambdaWeaners',MovWeProb,'pMatWea','RiskDC',MovMatWean,restMovedSize=10,label=1)",
"INDflex('LamAb',SwMovAbProb,'relDC','pMatMovAb','RiskAb',probMatrix=MovAb,Reduction=0.5,Abattoir=TRUE,label=2)",
"INDflex('LamMRC',MedRiskMovProb,'relIMC','pMatMRC','RiskMRC',Reduction=1,label=3)",
"INDflex('LamLRC',LowRiskMovProb,'relILC','pMatLRC','RiskLRC',Reduction=1,label=4)",
"LASinf(localsize=2,label=5)",
"WildBoar(relCont='relWB',RiskVar='RiskCatToWB2',ProbCont='RiskWB',label=6)",
"LSWildBoar(RiskVar='LocSpWB',label=7)"),
controlFunctions=c( # Vector of functions used for movement controls, tracing and surveillance
"controlAll(effectDC='rpert(n,0.95,0.98,1)',label='SS')",
"controlDiag(effectDC=1,effectIMC='rpert(n,0.7,0.8,0.95)',effectILC='rpert(n,0.95,0.98,1)',label='CD')",
"SurvZone(size=10,effectDC='rpert(n,0.95,0.98,1)',effectIMC='rpert(n,0.7,0.8,0.95)',effectILC='rpert(n,0.2,0.3,0.5)',label='SZ')",
"ProtZone(size=3,effectDC='rpert(n,0.95,0.98,1)',effectIMC='rpert(n,0.7,0.8,0.95)',effectILC='rpert(n,0.2,0.3,0.5)',label='PZ')",
"traceDC(prob=0.99,probdetect=0.95,delay='round(rpert(n,1,2,3))',tracetime='round(runif(n,0,2))',duration=30,label='traceDirect')",
"traceIDC(timetotrace='round(runif(n,0,4))',delayvisitMed='round(rpert(n,0,1,2))',delayvisitLow='round(rpert(n,0,2,4))',duration=30,label='traceInDirect')",
"SurvZonesHerds()"),
# "SurvDead()"),
############################################################
## Files
infofile="DataDADSASFWB.csv", # File with herd locations and type
typesfile="typesfile.csv", # Definitions of type parameters
runfile="", # File used for additional output
fileMovMatAll="MovMatAll.csv", # bla bla
fileMovMatWean="MovMatWean.csv",
fileMovAb="MovAb.csv",
fileMovSwProb="MovSwProb.csv",
fileMovWeProb="MovWeProb.csv",
fileSwMovAbProb="SwMovAbProb.csv",
fileMedRiskMovProb="MedRiskMovProb.csv",
fileLowRiskMovProb="LowRiskMovProb.csv",
chroniclefile=FALSE, # File name or FALSE to use runID.
############################################################
## Output, Graphs and text
hideMap=NULL, # Hide map (T) or show map (F)
# while running
itupdate=10, # Update period (number of iterations)
# for summary graphs
tornCol=NULL, # Typesfile column to reduce for
# tornado plot (default NULL)
tornMult=0.9, # Multiplier for tornCol column of
# typesfile
hidePlots=FALSE, # Hide (T) or show (F) summary plots,
# including final risk map
summaryFunction="sumTh", # Name of function used for
# summaries. It is called with
# arguments: "init", "day", "iter",
# and "final"
verbose=FALSE, # Make verbose output while running
############################################################
## movement control
## this is done in the initialization function
#gDaysUntilBaseline=eval(parse('round(rpert(n,18,21,23))')), # Number of days until baseline
# controls go into effect or NA to
# get random detection using a
# distribution in the types file.
############################################################
interventionFunctions=c( # Vector of functions used to make
# interventions (including parameters)
"DummyInter()"
), # Format: strings or expressions.
#DayStartcull=14, # default day to start pre-emptive culling
cullTypes=c(1:18), # herd types to be culled
CullDays=1:365, # which days culling should be considered
###########################################################
## Economic Values. Costs in DKK
CostAnimPCR= 530, # costs of PCR testing per animal
CostAnimSer= 68, # costs of serology testing per animal
CosIndsendelse=179, # costs of sending a package of max 1kg. from postDanmark
CostsSamTesSer=137, # costs for sampling (traveling time, sampling time, sending the samples and materials) and testing (serology) per sample from a herd. time used 3 hours
CostsSamTesPCR=599, # costs for sampling (traveling time, sampling time, sending the samples and materials) and testing (serology) per sample from a herd. time used 3 hours
#Vet/hour = 800kr., technician/hour=400kr., 3 hours for 60 animals and 2 hours for 30 animals (including traveling), materials 9kr/sample
#CostsSamTesSer60=8220, # costs for sampling (traveling time, sampling time, sending the samples and materials) and testing (serology) 60 animals from a herd. time used 3 hours
#CostsSamTesPCR60=35940, # costs for sampling (traveling time, sampling time, sending the samples and materials) and testing (serology) 60 animals from a herd. time used 3 hours
#CostsSamTesSer30=4710, # costs for sampling (traveling time, sampling time, sending the samples and materials) and testing (serology) 30 animals from a herd. time used 2 hours
#CostsSamTesPCR30=18570, # costs for sampling (traveling time, sampling time, sending the samples and materials) and testing (serology) 30 animals from a herd. time used 2 hours
CostsVisFarSw=1565, # costs of clinical surveillence swine farms
logCosSw=92956, # logistic costs per herd for swine herds
clCosSw=935.77, # cleaning and disinfection costs per animal for swine herds
clCosFin=230.5, # cleaning and disinfection costs per animal for swine herds
clCosPig=1000000, # cleaning and disinfection costs per herd swine herd
CESPSow=9.99, # costs of empty sow stable per day
CESPFin=0.55, # costs of empty finisher stable per day
CosWSFinishers=750, # costs of welfare slaughter of finishers
FactWSFinishers=0.011, # factor for finishers welfare slaughter per sow
CosWSWeaners=375, # costs of welfare salughter of weaners
FactWSWeaners=0.067, # factor for weaners welfare slaughter per sow
comSows=3365.34, # compensation per sow
comFin=521.91, # compensation per finisher pig
WeanValue=4.2, # value of wanears for compensation
expBanAftCEU=0, # export ban after culling the last infected herd and lifting the zone in relation to EU countries
expBanAftCNEU=90, # export ban after culling the last infected herd and lifting the zone to non EU countries
totExpSwNEU=28286093, # total returns on export of live swine and pork products to non-EU countries per day
totExpLivSwEU=15662784, # total returns on export of live swine to EU countries per day
totExpSwProdEU=55500143, # total returns on export of pork products to EU countries per day
reducedPrice=0.25 # reduction in the price for products to non-EU
)
if (length(args)>0){
## Check which values to change
changes<-match(names(args),names(defaults))
## Stop if trying to change nonexisting parameter
if (any(is.na(changes)))
stop(paste( paste(names(args)[is.na(changes)],collapse=", ") ,
" is/are not valid parameter names. \n See help file."))
## Change the requested values and return
for (i in 1:length(changes)){
defaults[[changes[i]]]<-args[[i]]
}
}##EndOf if (length(args))
## Converting strings to functions
defaults$indexHerdFunction<-match.fun(defaults$indexHerdFunction)
defaults$summaryFunction<-match.fun(defaults$summaryFunction)
if (defaults$verbose) cat("Leaving ASFoptions. ")
return(defaults)
}##EndOf ASFoptions
| /ASF-WB/ASFoptions.R | no_license | ChBrei/DTU-DADS-ASF | R | false | false | 19,474 | r | #### Creating ASF list of options for ASF() version 0.15.1
#### Includes the following functions:
####
#### ASFoptions
####
#### The output is a list of parameters.
#### The output of the model is produced by the summary function.

## Build the list of simulation options for ASF().
##
## Every argument passed through '...' must be named after one of the
## defaults below; any unknown name raises an error.  After merging, the
## two function-name options ('indexHerdFunction' and 'summaryFunction')
## are converted from character strings to the corresponding functions
## via match.fun(), so those functions must exist when ASFoptions() is
## called.  Returns the completed option list.
ASFoptions <- function(...){
  args <- list(...)
  ## List of default values:
  defaults <- list(
    ############################################################
    n=1,                     # Number of simulated epidemics
    runID=NULL,              # ID used for output and temp. files
                             # using a random number if NULL
    stepInFile=NULL,         # File used to step into an outbreak
                             # and start all iterations from
                             # same setup.
    maxTime=365,             # Maximum length of each outbreak
    ##DF=3,
    Tstoch=FALSE,            # Number of infected animals in newly
                             # infected herd: stochastic (T) or
                             # exactly 1 (F)
    RFstoch=FALSE,           # Number of intraherd disease
                             # transmissions:
                             # binomial chain model (T) or
                             # Reed-Frost model (F)
    seed=NULL,               # Seed for random number generator.
                             # If negative the seed is found as:
                             # set.seed(iteration+abs(seed))
    pause=0,                 # Time in seconds to pause between
                             # new graphs.
    delaySteps=1,            # Vector of starting times for time
                             # dependent diagnosis delays
    delayTimes=expression(round(rpert(n,1,2,4))), # Vector of
                             # diagnosis delays for starting times
                             # in delaySteps. These should be
                             # expressions or text that can be
                             # parsed to expressions
    ignoreStatus=TRUE,       # T/F value for ignoring disease
                             # status information in input file
                             # (all reset to 1 if True)
    indexHerdFunction="selectIndexHerd", # Name of function used
                             # to select index herd for each
                             # simulated epidemic (iteration)
    indexHerdSelect=list(herdType=1:18), # Argument to function
    #basicScenario=TRUE,
                             # used to select index herd for each
                             # simulated epidemic (iteration)
    indexDirect=FALSE,       # Index herd infected by direct (T)
                             # or indirect (F) contact
    depopTeams=Inf,          # Number of locations that can be
                             # depoped at a time. not used anymore
    Capacity=c(4800),        # The culling capacity per day
                             # 4800 swine
    TracePeriod=30,          # Tracing will go back to the defined number of days; default 30 days
    #CapSurvay=c(450),       # Surveillance capacity; number of herds/day
    RepVisSurvZone=14,       # How often the visit within the surveillance zone be repeated
    traceBack=FALSE,         # Should traceback information be saved
    traceBackDelay=1,
    rateBefD=1,              # Abattoir rate before detection first
                             # infected herd for swine when there is
                             # a movement from swine herds to slaughter
    rateAftD=2,              # Abattoir rate after detection first
                             # infected herd when there is
                             # a movement from the herds to slaughter (less frequent visits despite of a higher value for rate
                             # because the Exp function will compress it more ;-))
    FirstDetPara=0.0255,     # proportion of sick and dead animals
    ProbSelDiag=1,           # The probability of diagnosing a selected herd for diagnosis
    Detailed=FALSE,          # should detailed surveillance output be printed
    DumpData=1,              # Number of data lines that can be reached before
                             # data about surveyed herds can be dumped in the output file
    ProbSelPV1=0,            # proportion of herds that will be tested (PCR) during first protection zone visit
    ProbSelSV1=0,            # proportion of herds that will be tested (PCR) during first surveillance zone visit
    ProbSelSV2=0,            # proportion of herds that will be tested during second surveillance zone visit
    ProbSelTIDC=0.1,         # proportion of traced herds from indirect contacts that will be tested (PCR) visit
    SecSurVisitOLSZ=0,       # number of days. herds in overlapping surveillance zones will get a new visit every SecSurVisitOLSZ days.
    DelayStartVisitOLPZ=0,   # number of days. herds in overlapping protection zone will get a new visit every DelayStartVisitOLPZ once
                             # they continue in the protection zone and the time of second PZ visit has passed
    SecSurVisit=40,          # surveillance visit. called second here because the first is not mandatory.
    SecProtVisit=45,         # second visit in protection zone
    firstSurvVisit=FALSE,    # Allow first surveillance visit (Yes/No)
    DelayStartVisit=2,       # Number of days before the visiting of herds for surveillance would start
    DelayVisit=7,            # delay for the extra visits for herds in overlapping zones.
    MortalityIncrease=2,     # level of increase in mortality before potential detection.
    MortalityIncreaseZone=1.5, # level of increase in mortality before potential detection.
    InfPropSub=0.1,          # risk of infection from subclinical animals (before clinical signs appeared)
    PerDeadAnim=0.95,        # percentage of animals that die following infection
    DaysDead=5,              # number of past days to be used to determine infectiousness of leftovers of dead animals
    #ReqSampSiz=30,          # sample size when herds are tested.
    DeadImpact=1,            # parameter to address the impact of leftovers on disease spread
    ImpDeadTime=1,           # parameter to address uncertainty of survivability of virus in leftovers
    PZoneDuration=50,        # protection Zones duration should be 50 days at the start of each iteration
    SZoneDuration=45,        # Surveillance Zones duration should be 45 days at the start of each iteration
    SerologyTesting=c(1,5,6), # The type of visit (1=PV2, 2=PV1, 3=SV1, 4=SV2, 5=trace IDC, 6=Trace DC) where serology testing will be applied.
    PCRTesting=c(5,6),       # The type of visit (1=PV2, 2=PV1, 3=SV1, 4=SV2, 5=trace IDC, 6=Trace DC) where PCR testing will be applied.
    NumDeadAnimFirst=5,      # Number of dead animals in the herd for first detection
    NumDeadAnimAftFirst=1,   # Number of dead animals in the herds for detection after first detection occurred
    NumDeadAnimSurv=1,       # Number of dead animals in the herd for detection through surveillance.
    DaysSurDead=7,           # Number of past days to be used to survey dead animals
    numTestDead=5,           # Number of dead animals tested
    DelaySubDeadSamp=1,      # Delay on the submitted samples to arrive to the laboratory
    ToTracedIDC=2:4,
    probSelectTIDC=c(0,0.838,0.2,0.125), # the probability that a movement will not be forgotten and it will be traced and visited
    LocSpLim=c(0.1,0.5,1,2), # cutoffs in km for local spread distance probabilities
    DistList=list(           ## probability of infection (distProb) through local spread given the distance in km (distCat)
      distCat=c(1,2,3,4,5),  ## from the infectious herd
      distProb=c(0.1,0.006,0.002,0.000015,0) ## these probabilities are based on Boklund et al. (2009) for CSF after reduction by 50% to include
    ),                       ## the lower infectivity of ASF as carried out by Nigsch et al., 2013.
    probList=list(           ## Default distributions
      DistCat=c(0,1,3,10,15,20,30,40,50,60,70,80,90,100,110,120,130,140,150,160,170,180,190,200,210,220,230,240,250,300), ## distance categories in km
      distcat=c(0,1,3,10,15,20,30,40,50,60,70,80,90,100,110,120,130,140,150,160,170,180,190,200,210,220,230,240,250,300)
    ),
    LambdaWB=c(0.10824,0.055,0), # the categories represent distances of highRisk, lowRisk and noRisk herds
    LocalSPWB=c(0.0006,0.0002,0.0000015,0), # the categories represent distances of highRisk (within 0.5km), medium Risk (from 0.5 to 1km)
                             # low risk (from 1 to 2km) and no Risk herds (>2km)
                             # these values are from Boklund et al. 2008 and reduced 20 fold, due to
                             # lower spread of ASF than CSF (Nigsch et al., 2013) and smaller herd sizes
                             # of wild boar
    newInfFunctions=c(       # Vector of functions used to make new infections (including parameters).
      "DIRinf3('LamAll',MovSwProb,'pMatAll','RiskDC',MovMatAll,restMovedSize=35,label=1)",
      "DIRinf3('LambdaWeaners',MovWeProb,'pMatWea','RiskDC',MovMatWean,restMovedSize=10,label=1)",
      "INDflex('LamAb',SwMovAbProb,'relDC','pMatMovAb','RiskAb',probMatrix=MovAb,Reduction=0.5,Abattoir=TRUE,label=2)",
      "INDflex('LamMRC',MedRiskMovProb,'relIMC','pMatMRC','RiskMRC',Reduction=1,label=3)",
      "INDflex('LamLRC',LowRiskMovProb,'relILC','pMatLRC','RiskLRC',Reduction=1,label=4)",
      "LASinf(localsize=2,label=5)",
      "WildBoar(relCont='relWB',RiskVar='RiskCatToWB2',ProbCont='RiskWB',label=6)",
      "LSWildBoar(RiskVar='LocSpWB',label=7)"),
    controlFunctions=c(      # Vector of functions used for movement controls, tracing and surveillance
      "controlAll(effectDC='rpert(n,0.95,0.98,1)',label='SS')",
      "controlDiag(effectDC=1,effectIMC='rpert(n,0.7,0.8,0.95)',effectILC='rpert(n,0.95,0.98,1)',label='CD')",
      "SurvZone(size=10,effectDC='rpert(n,0.95,0.98,1)',effectIMC='rpert(n,0.7,0.8,0.95)',effectILC='rpert(n,0.2,0.3,0.5)',label='SZ')",
      "ProtZone(size=3,effectDC='rpert(n,0.95,0.98,1)',effectIMC='rpert(n,0.7,0.8,0.95)',effectILC='rpert(n,0.2,0.3,0.5)',label='PZ')",
      "traceDC(prob=0.99,probdetect=0.95,delay='round(rpert(n,1,2,3))',tracetime='round(runif(n,0,2))',duration=30,label='traceDirect')",
      "traceIDC(timetotrace='round(runif(n,0,4))',delayvisitMed='round(rpert(n,0,1,2))',delayvisitLow='round(rpert(n,0,2,4))',duration=30,label='traceInDirect')",
      "SurvZonesHerds()"),
    # "SurvDead()"),
    ############################################################
    ## Files
    infofile="DataDADSASFWB.csv",  # File with herd locations and type
    typesfile="typesfile.csv",     # Definitions of type parameters
    runfile="",                    # File used for additional output
    fileMovMatAll="MovMatAll.csv", # Movement matrix, all animals
    fileMovMatWean="MovMatWean.csv",
    fileMovAb="MovAb.csv",
    fileMovSwProb="MovSwProb.csv",
    fileMovWeProb="MovWeProb.csv",
    fileSwMovAbProb="SwMovAbProb.csv",
    fileMedRiskMovProb="MedRiskMovProb.csv",
    fileLowRiskMovProb="LowRiskMovProb.csv",
    chroniclefile=FALSE,     # File name or FALSE to use runID.
    ############################################################
    ## Output, Graphs and text
    hideMap=NULL,            # Hide map (T) or show map (F)
                             # while running
    itupdate=10,             # Update period (number of iterations)
                             # for summary graphs
    tornCol=NULL,            # Typesfile column to reduce for
                             # tornado plot (default NULL)
    tornMult=0.9,            # Multiplier for tornCol column of
                             # typesfile
    hidePlots=FALSE,         # Hide (T) or show (F) summary plots,
                             # including final risk map
    summaryFunction="sumTh", # Name of function used for
                             # summaries. It is called with
                             # arguments: "init", "day", "iter",
                             # and "final"
    verbose=FALSE,           # Make verbose output while running
    ############################################################
    ## movement control
    ## this is done in the initialization function
    #gDaysUntilBaseline=eval(parse('round(rpert(n,18,21,23))')), # Number of days until baseline
                             # controls go into effect or NA to
                             # get random detection using a
                             # distribution in the types file.
    ############################################################
    interventionFunctions=c( # Vector of functions used to make
                             # interventions (including parameters)
      "DummyInter()"
    ),                       # Format: strings or expressions.
    #DayStartcull=14,        # default day to start pre-emptive culling
    cullTypes=c(1:18),       # herd types to be culled
    CullDays=1:365,          # which days culling should be considered
    ###########################################################
    ## Economic Values. Costs in DKK
    CostAnimPCR= 530,        # costs of PCR testing per animal
    CostAnimSer= 68,         # costs of serology testing per animal
    CosIndsendelse=179,      # costs of sending a package of max 1kg. from postDanmark
    CostsSamTesSer=137,      # costs for sampling (traveling time, sampling time, sending the samples and materials) and testing (serology) per sample from a herd. time used 3 hours
    CostsSamTesPCR=599,      # costs for sampling (traveling time, sampling time, sending the samples and materials) and testing (PCR) per sample from a herd. time used 3 hours
    #Vet/hour = 800kr., technician/hour=400kr., 3 hours for 60 animals and 2 hours for 30 animals (including traveling), materials 9kr/sample
    #CostsSamTesSer60=8220,  # costs for sampling and testing (serology) 60 animals from a herd. time used 3 hours
    #CostsSamTesPCR60=35940, # costs for sampling and testing (PCR) 60 animals from a herd. time used 3 hours
    #CostsSamTesSer30=4710,  # costs for sampling and testing (serology) 30 animals from a herd. time used 2 hours
    #CostsSamTesPCR30=18570, # costs for sampling and testing (PCR) 30 animals from a herd. time used 2 hours
    CostsVisFarSw=1565,      # costs of clinical surveillance swine farms
    logCosSw=92956,          # logistic costs per herd for swine herds
    clCosSw=935.77,          # cleaning and disinfection costs per animal for swine herds
    clCosFin=230.5,          # cleaning and disinfection costs per animal for swine herds
    clCosPig=1000000,        # cleaning and disinfection costs per herd swine herd
    CESPSow=9.99,            # costs of empty sow stable per day
    CESPFin=0.55,            # costs of empty finisher stable per day
    CosWSFinishers=750,      # costs of welfare slaughter of finishers
    FactWSFinishers=0.011,   # factor for finishers welfare slaughter per sow
    CosWSWeaners=375,        # costs of welfare slaughter of weaners
    FactWSWeaners=0.067,     # factor for weaners welfare slaughter per sow
    comSows=3365.34,         # compensation per sow
    comFin=521.91,           # compensation per finisher pig
    WeanValue=4.2,           # value of weaners for compensation
    expBanAftCEU=0,          # export ban after culling the last infected herd and lifting the zone in relation to EU countries
    expBanAftCNEU=90,        # export ban after culling the last infected herd and lifting the zone to non EU countries
    totExpSwNEU=28286093,    # total returns on export of live swine and pork products to non-EU countries per day
    totExpLivSwEU=15662784,  # total returns on export of live swine to EU countries per day
    totExpSwProdEU=55500143, # total returns on export of pork products to EU countries per day
    reducedPrice=0.25        # reduction in the price for products to non-EU
  )
  if (length(args) > 0){
    ## Check which values to change
    changes <- match(names(args), names(defaults))
    ## Stop if trying to change nonexisting parameter
    if (any(is.na(changes)))
      stop(paste( paste(names(args)[is.na(changes)],collapse=", ") ,
                  " is/are not valid parameter names. \n See help file."))
    ## Change the requested values and return.
    ## seq_along() instead of 1:length() -- safe even for zero-length input
    for (i in seq_along(changes)){
      defaults[[changes[i]]] <- args[[i]]
    }
  }##EndOf if (length(args))
  ## Converting strings to functions
  defaults$indexHerdFunction <- match.fun(defaults$indexHerdFunction)
  defaults$summaryFunction <- match.fun(defaults$summaryFunction)
  if (defaults$verbose) cat("Leaving ASFoptions. ")
  return(defaults)
}##EndOf ASFoptions
|
source("data_reading.R")
#mydata <- read_yield("yield_csvs/N_Japan_yield.csv")
## Read every yield CSV in the folder, keeping only data sets with at
## least 30 rows.
myfiles <- list.files("yield_csvs")
myfiles
mydf <- list()
for(i in seq_along(myfiles)){       # seq_along() is safe if the folder is empty
  f <- myfiles[i]
  if(!endsWith(f, ".csv")) next     # filter non-CSV files BEFORE trying to read them
  thisdf <- read_yield(file.path("yield_csvs", f))
  if(nrow(thisdf) < 30) next        # skip this data frame
  mydf[[length(mydf) + 1]] <- thisdf  # append, so skipped files leave no NULL holes
  # print(mean(mydata$Biomass.yield, na.rm = TRUE))
}
## Report the mean biomass yield of each data set that was kept.
for(df in mydf){
  print(mean(df$Biomass.yield, na.rm = TRUE))
}
## Count from 1 to 9 with a while loop.
i <- 1
while(i < 10){
  print(i)
  i <- i + 1   # was 'i - 1', which keeps i < 10 forever (infinite loop)
}
## Same counting with a for loop.
for(i in 1:10){
  print(i)
}
| /Week 2/Lecture_2/reading_script.R | permissive | lvclark/cpsc499_fall_2018 | R | false | false | 554 | r | source("data_reading.R")
#mydata <- read_yield("yield_csvs/N_Japan_yield.csv")
## Read every yield CSV in the folder, keeping only data sets with at
## least 30 rows.
myfiles <- list.files("yield_csvs")
myfiles
mydf <- list()
for(i in seq_along(myfiles)){       # seq_along() is safe if the folder is empty
  f <- myfiles[i]
  if(!endsWith(f, ".csv")) next     # filter non-CSV files BEFORE trying to read them
  thisdf <- read_yield(file.path("yield_csvs", f))
  if(nrow(thisdf) < 30) next        # skip this data frame
  mydf[[length(mydf) + 1]] <- thisdf  # append, so skipped files leave no NULL holes
  # print(mean(mydata$Biomass.yield, na.rm = TRUE))
}
## Report the mean biomass yield of each data set that was kept.
for(df in mydf){
  print(mean(df$Biomass.yield, na.rm = TRUE))
}
## Count from 1 to 9 with a while loop.
i <- 1
while(i < 10){
  print(i)
  i <- i + 1   # was 'i - 1', which keeps i < 10 forever (infinite loop)
}
## Same counting with a for loop.
for(i in 1:10){
  print(i)
}
|
#' Write citation pairs
#'
#' Looks up full bibliographic metadata for each article DOI via Crossref,
#' attaches the corresponding dataset identifier, and writes the combined
#' records to a JSON file in the shape expected for database ingest.
#'
#' @param citation_list (data.frame) data.frame of citation pairs containing variables article_id and dataset_id
#' @param path (char) path to write JSON citation pairs to
#' @return Invisibly, the value of \code{jsonlite::write_json} (called for its side effect).
#' @import dplyr
#' @export
#'
#' @examples
#' \dontrun{
#' pairs <- data.frame(article_id = "10.1371/journal.pone.0213037",
#'                     dataset_id = "10.18739/A22274")
#' write_citation_pairs(citation_list = pairs, path = "citation_pairs.json")
#' }
write_citation_pairs <- function(citation_list, path) {
  if (any(!(c("article_id", "dataset_id") %in% names(citation_list)))){
    # The argument is 'call.' -- the original '.call = FALSE' was passed
    # into '...' and pasted into the error message instead.
    stop("citations_list data.frame does not contain variables article_id and/or dataset_id",
         call. = FALSE)
  }
  # write list of citations to bib format
  bib <- rcrossref::cr_cn(dois = citation_list$article_id, format = "bibtex")
  t <- tempfile()
  on.exit(unlink(t), add = TRUE)  # remove the temporary bibtex file on exit
  writeLines(unlist(bib), t)
  # import as a dataframe
  df <- bib2df::bib2df(t)
  # assign dataset_id to data.frame
  # NOTE(review): assumes bib2df() returns one row per DOI in the same
  # order as citation_list$article_id -- verify for DOIs that fail to resolve
  df$dataset_id <- citation_list$dataset_id
  # rename for database ingest
  cit_full <- df %>%
    dplyr::rename(target_id = .data$dataset_id,
                  source_id = .data$DOI,
                  source_url = .data$URL,
                  origin = .data$AUTHOR,
                  title = .data$TITLE,
                  publisher = .data$PUBLISHER,
                  journal = .data$JOURNAL,
                  volume = .data$VOLUME,
                  page = .data$PAGES,
                  year_of_publishing = .data$YEAR) %>%
    dplyr::select(.data$target_id, .data$source_id, .data$source_url, .data$origin, .data$title, .data$publisher, .data$journal, .data$volume, .data$page, .data$year_of_publishing) %>%
    dplyr::mutate(id = NA, report = NA, metadata = NA, link_publication_date = Sys.Date()) #%>%
    #dplyr::mutate(publisher = ifelse(.data$publisher == "Elsevier {BV", "Elsevier", "Copernicus"))
  jsonlite::write_json(cit_full, path)
}
| /R/write_citation_pairs.R | permissive | DataONEorg/scythe | R | false | false | 1,938 | r | #' Write citation pairs
#'
#' Looks up full bibliographic metadata for each article DOI via Crossref,
#' attaches the corresponding dataset identifier, and writes the combined
#' records to a JSON file in the shape expected for database ingest.
#'
#' @param citation_list (data.frame) data.frame of citation pairs containing variables article_id and dataset_id
#' @param path (char) path to write JSON citation pairs to
#' @return Invisibly, the value of \code{jsonlite::write_json} (called for its side effect).
#' @import dplyr
#' @export
#'
#' @examples
#' \dontrun{
#' pairs <- data.frame(article_id = "10.1371/journal.pone.0213037",
#'                     dataset_id = "10.18739/A22274")
#' write_citation_pairs(citation_list = pairs, path = "citation_pairs.json")
#' }
write_citation_pairs <- function(citation_list, path) {
  if (any(!(c("article_id", "dataset_id") %in% names(citation_list)))){
    # The argument is 'call.' -- the original '.call = FALSE' was passed
    # into '...' and pasted into the error message instead.
    stop("citations_list data.frame does not contain variables article_id and/or dataset_id",
         call. = FALSE)
  }
  # write list of citations to bib format
  bib <- rcrossref::cr_cn(dois = citation_list$article_id, format = "bibtex")
  t <- tempfile()
  on.exit(unlink(t), add = TRUE)  # remove the temporary bibtex file on exit
  writeLines(unlist(bib), t)
  # import as a dataframe
  df <- bib2df::bib2df(t)
  # assign dataset_id to data.frame
  # NOTE(review): assumes bib2df() returns one row per DOI in the same
  # order as citation_list$article_id -- verify for DOIs that fail to resolve
  df$dataset_id <- citation_list$dataset_id
  # rename for database ingest
  cit_full <- df %>%
    dplyr::rename(target_id = .data$dataset_id,
                  source_id = .data$DOI,
                  source_url = .data$URL,
                  origin = .data$AUTHOR,
                  title = .data$TITLE,
                  publisher = .data$PUBLISHER,
                  journal = .data$JOURNAL,
                  volume = .data$VOLUME,
                  page = .data$PAGES,
                  year_of_publishing = .data$YEAR) %>%
    dplyr::select(.data$target_id, .data$source_id, .data$source_url, .data$origin, .data$title, .data$publisher, .data$journal, .data$volume, .data$page, .data$year_of_publishing) %>%
    dplyr::mutate(id = NA, report = NA, metadata = NA, link_publication_date = Sys.Date()) #%>%
    #dplyr::mutate(publisher = ifelse(.data$publisher == "Elsevier {BV", "Elsevier", "Copernicus"))
  jsonlite::write_json(cit_full, path)
}
|
#############################################################
# loggammarob function
# Author: C. Agostinelli, A. Marazzi,
#         V.J. Yohai and A. Randriamiharisoa
# Maintainer e-mail: claudio@unive.it
# Date: February, 5, 2015
# Version: 0.2
# Copyright (C) 2015 C. Agostinelli A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
#############################################################

## Robust estimation for the log-gamma model.
##
## Drops NA values from 'x' (keeping 'weights' aligned), sorts 'x'
## (with a warning if it was not already sorted), then delegates the
## fitting to one of the loggammarob.* workhorses selected by
## control$method.  An explicit 'method' argument overrides the value
## stored in 'control' (with a warning on mismatch).
loggammarob <- function(x, start=NULL, weights=rep(1, length(x)), method=c("oneWL", "WQTau", "WL", "QTau", "ML"), control, ...) {
  method <- match.arg(method)
  x <- na.omit(x)
  if (!is.null(na <- attr(x, "na.action")))
    weights <- weights[-na]               # drop the weights of removed observations
  or <- order(x)
  if (any(or != seq_along(x))) {          # seq_along() avoids 1:0 on empty input
    x <- sort(x)
    warning("data 'x' are sorted")
  }
  weights <- weights[or]
  if (missing(control))
    control <- if (missing(method))
      loggammarob.control(...)
    else
      loggammarob.control(method = method, ...)
  cl <- match.call()
  if (!missing(control) && !missing(method) && method != control$method) {
    warning("Methods argument set by method is different from method in control\n", "Using method = ", method)
    control$method <- method
  }
  ## x.orig <- x
  ## mx <- median(x)
  ## sx <- mad(x)
  ## x <- (x - mx)/sx
  ## Dispatch on control$method throughout: it is the authoritative
  ## value (it already reflects an explicitly supplied 'method').
  ## The original tested 'method' in all but the first branch, so a
  ## method supplied only through 'control' was silently ignored.
  if (control$method == "QTau")
    result <- loggammarob.QTau(x, weights, control)
  else if (control$method == "WQTau")
    result <- loggammarob.WQTau(x, weights, control)
  else if (control$method == "WL")
    result <- loggammarob.WL(x, start, weights, control)
  else if (control$method == "oneWL")
    result <- loggammarob.oneWL(x, start, weights, control)
  else if (control$method == "ML")
    result <- loggammarob.ML(x, start, weights, control)
  else
    stop("unknown method: ", control$method)
  ## result$mu <- result$mu*sx+mx
  ## result$sigma <- result$sigma*sx
  result$eta <- Exp.response(result$mu, result$sigma, result$lambda)
  ## result$data <- x.orig
  result$data <- x
  result$method <- control$method         # report the method actually used
  result$call <- cl
  class(result) <- "loggammarob"
  return(result)
}
#############################################################
# loggammarob.control function
# Author: C. Agostinelli, A. Marazzi,
#         V.J. Yohai and A. Randriamiharisoa
# Maintainer e-mail: claudio@unive.it
# Date: January, 1, 2013
# Version: 0.1
# Copyright (C) 2013 C. Agostinelli A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
#############################################################

## Build and validate the control list consumed by the loggammarob.*
## fitting routines.  Each tuning argument is checked and (where it is
## a count) rounded and clamped to be at least 1.
loggammarob.control <- function(method="oneWL", tuning.rho=1.547647, tuning.psi=6.08, lower=-7, upper=7, n=201, max.it=750, refine.tol=1e-6, nResample=100, bw=0.3, smooth=NULL, raf=c("NED","GKL","PWD","HD","SCHI2"), tau=1, subdivisions=1000, lambda.step=TRUE, sigma.step=TRUE, step=1, minw=0.04, nexp=1000, reparam=NULL, bootstrap=FALSE, bootstrap.lambda=NULL) {
  ## tuning.rho, tuning.psi  # c1, c2
  ## lower, upper            # optimization interval
  ## n                       # optimization grid
  ## max.it                  # maxit in refinement
  ## refine.tol              # relative tolerance in refinement
  ## nResample               # number of subsamples
  ## Parameters for Fully Iterated Weighted Likelihood and for One Step
  ## Weighted Likelihood:
  ## bw                      # bandwidth
  ## smooth                  # smooth parameter, if not NULL then bw = smooth*\hat{sd}
  ## raf                     # residual adjustment function ("SCHI2", "NED", "HD", "GKL", "PWD")
  ## tau                     # parameter for the GKL and PD family
  ## subdivisions, lambda.step, sigma.step
  ## step                    # step length in the oneWL function
  ## Used only in the oneWL function:
  ## minw                    # below this value the weights are set to zero
  ## nexp                    # number of quantile points used to approximate the Expected Jacobian
  ## reparam                 # list for reparametrization of the sigma parameter
  ## bootstrap, bootstrap.lambda  # to bootstrap the oneWL
  raf <- match.arg(raf)
  if (lower >= upper)
    stop("'lower' must be smaller than 'upper'")
  if (length(step) != 1 && length(step) != 3)   # scalar condition: use && (not &)
    stop("'step' must be a scalar or a vector of length 3")
  if (length(n) > 1 || n < 0)
    stop("'n' must be a positive scalar")
  n <- max(1, round(n))                         # counts are rounded and clamped to >= 1
  if (length(max.it) > 1 || max.it < 0)
    stop("'max.it' must be a positive scalar")
  max.it <- max(1, round(max.it))
  if (length(refine.tol) > 1 || refine.tol < 0)
    stop("'refine.tol' must be a positive scalar")
  if (length(bw) > 1 || bw < 0)
    stop("'bw' must be a positive scalar")
  ## Bug fix: the original re-checked 'bw' here, so an invalid 'smooth'
  ## was never caught.
  if (!is.null(smooth) && (length(smooth) > 1 || smooth < 0))
    stop("'smooth' must be NULL or a positive scalar")
  if (length(subdivisions) > 1 || subdivisions < 0)
    stop("'subdivisions' must be a positive scalar")
  subdivisions <- max(1, round(subdivisions))
  if (!is.logical(lambda.step))
    stop("'lambda.step' must be a logical")
  if (!is.logical(sigma.step))
    stop("'sigma.step' must be a logical")
  if (length(minw) > 1 || minw > 1 || minw < 0) # scalar condition: use || (not |)
    stop("'minw' must be a scalar in [0,1]")
  if (length(nexp) > 1 || nexp < 0)
    stop("'nexp' must be a positive scalar")
  nexp <- max(1, round(nexp))
  ############ EXAMPLE OF REPARAM ################################
  #reparam.loggamma <- list(gam=function(sigma) sqrt(sigma),
  #                         gaminv=function(gam) gam^2,
  #                         delta=function(sigma) 2*sqrt(sigma))
  if (!is.null(reparam)) {
    badreparam <- !is.list(reparam) || !is.function(reparam$gam) || !is.function(reparam$gaminv) || !is.function(reparam$delta)
    if (badreparam)
      stop("'reparam' must be a list of 3 functions named 'gam', 'gaminv' and 'delta'")
  }
  if (!is.logical(bootstrap))
    stop("'bootstrap' must be a logical")
  res <- list(method=method, tuning.rho=tuning.rho, tuning.psi=tuning.psi, lower=lower, upper=upper, n=n, max.it=max.it, refine.tol=refine.tol, nResample=nResample, bw=bw, smooth=smooth, raf=raf, tau=tau, subdivisions=subdivisions, lambda.step=lambda.step, sigma.step=sigma.step, step=step, minw=minw, nexp=nexp, reparam=reparam, bootstrap=bootstrap, bootstrap.lambda=bootstrap.lambda)
  return(res)
}
#############################################################
# loggammarob.QTau function
# Author: C. Agostinelli, A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
# Maintainer e-mail: claudio@unive.it
# Date: January, 1, 2013
# Version: 0.1
# Copyright (C) 2013 C. Agostinelli A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
#############################################################
# Q-Tau estimator of the loggamma parameters (mu, sigma, lambda).
# Performs a tau-scale grid search over the shape parameter; when
# control$bootstrap is TRUE the bootstrap variant WQtauboot is used instead.
# Returns the second-stage estimates together with the (unchanged) weights.
loggammarob.QTau <- function(x, w=rep(1, length(x)), control=loggammarob.control()) {
  # Candidate shape values over which the tau-scale is minimized.
  lambda.grid <- seq(control$lower, control$upper, length.out = control$n)
  fit <- if (control$bootstrap) {
    WQtauboot(ri = x, w = w, lambda = control$bootstrap.lambda, step = 0.25,
              lgrid = lambda.grid, c1 = control$tuning.rho,
              c2 = control$tuning.psi, N = control$nResample,
              maxit = control$max.it, tolr = control$refine.tol)
  } else {
    WQtau(ri = x, w = w, lambda.grid, c1 = control$tuning.rho,
          c2 = control$tuning.psi, N = control$nResample,
          maxit = control$max.it, tolr = control$refine.tol)
  }
  list(mu = fit$mu2, sigma = fit$sig2, lambda = fit$lam2,
       weights = w, iterations = NULL, error = NULL)
}
#############################################################
# loggammarob.WQTau function
# Author: C. Agostinelli, A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
# Maintainer e-mail: claudio@unive.it
# Date: January, 1, 2013
# Version: 0.1
# Copyright (C) 2013 C. Agostinelli A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
#############################################################
# Weighted Q-Tau estimator of the loggamma parameters.
# A first Q-Tau pass yields a preliminary shape (lam1) from which
# quantile-based weights are built; a second Q-Tau pass is then run with
# those weights. The plain Q-Tau fit is returned in the 'QTau' component.
loggammarob.WQTau <- function(x, w=rep(1, length(x)), control=loggammarob.control()) {
  lambda.grid <- seq(control$lower, control$upper, length.out = control$n)
  # Step 1: plain Q-Tau fit used only to obtain the preliminary shape lam1.
  fit.q <- WQtau(ri = x, w = w, lgrid = lambda.grid, c1 = control$tuning.rho,
                 c2 = control$tuning.psi, N = control$nResample,
                 maxit = control$max.it, tolr = control$refine.tol)
  # Step 2: weights = 1/sd of the sample quantiles under the fitted shape.
  pp <- ppoints(length(x))
  quant <- qloggamma(p = pp, lambda = fit.q$lam1)
  dens <- dloggamma(x = quant, lambda = fit.q$lam1)
  avar <- pp * (1 - pp) / dens^2
  new.w <- 1 / sqrt(avar)
  # Step 3: Q-Tau re-run with the data-driven weights.
  fit.wq <- WQtau(ri = x, w = new.w, lgrid = lambda.grid,
                  c1 = control$tuning.rho, c2 = control$tuning.psi,
                  N = control$nResample, maxit = control$max.it,
                  tolr = control$refine.tol)
  list(mu = fit.wq$mu2, sigma = fit.wq$sig2, lambda = fit.wq$lam2,
       weights = new.w, iterations = NULL, error = NULL,
       QTau = list(mu = fit.q$mu2, sigma = fit.q$sig2, lambda = fit.q$lam2,
                   weights = w, iterations = NULL, error = NULL))
}
#############################################################
# loggammarob.WL function
# Author: C. Agostinelli, A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
# Maintainer e-mail: claudio@unive.it
# Date: January, 1, 2013
# Version: 0.1
# Copyright (C) 2013 C. Agostinelli A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
#############################################################
# Fully iterated weighted likelihood (WL) estimator of the loggamma
# parameters (mu, sigma, lambda).
#
# Args:
#   x: data vector (assumed sorted by the caller).
#   start: optional numeric starting values c(mu, sigma, lambda); when NULL
#     a starting point is built from a Q-Tau fit followed by a reweighted
#     Q-Tau fit.
#   w: observation weights.
#   control: list of tuning options, see loggammarob.control().
#
# Returns a list with the WL estimates, the final weights, the iteration
# count and the intermediate QTau/WQTau fits (NULL when 'start' was given).
loggammarob.WL <- function(x, start=NULL, w=rep(1, length(x)), control=loggammarob.control()) {
  if (is.null(start)) {
    # Grid of candidate lambda values for the tau-scale grid search.
    lgrid <- seq(control$lower, control$upper, length.out=control$n)
    resQ <- WQtau(ri=x,w=w,lgrid=lgrid,c1=control$tuning.rho,c2=control$tuning.psi,N=control$nResample,maxit=control$max.it,tolr=control$refine.tol)
    # Weights proportional to 1/sd of the sample quantiles under the
    # preliminary shape estimate lam1.
    pp <- ppoints(length(x))
    ql <- qloggamma(p=pp, lambda=resQ$lam1)
    dl <- dloggamma(x=ql, lambda=resQ$lam1)
    vl <- pp*(1-pp)/dl^2
    wl <- 1/sqrt(vl)
    # Reweighted Q-Tau fit supplies the starting point of the WL iterations.
    resWQ <- WQtau(ri=x,w=wl,lgrid=lgrid,c1=control$tuning.rho,c2=control$tuning.psi,N=control$nResample,maxit=control$max.it,tolr=control$refine.tol)
    start <- c(resWQ$mu2, resWQ$sig2, resWQ$lam2)
    QTau <- list(mu=resQ$mu2, sigma=resQ$sig2, lambda=resQ$lam2, weights=w, iterations=NULL, error=NULL)
    WQTau <- list(mu=resWQ$mu2, sigma=resWQ$sig2, lambda=resWQ$lam2, weights=wl, iterations=NULL, error=NULL)
  } else {
    if (!is.numeric(start))
      stop("'start' must be a numeric vector")
    if (length(start)!=3)
      stop("'start' must be a vector of length 3: mu, sigma2, lambda")
    QTau <- NULL
    WQTau <- NULL
  }
  # Bandwidth proportional to the current scale estimate when 'smooth' is set.
  if (!is.null(control$smooth))
    control$bw <- control$smooth*sqrt(start[2])
  resWL <- Disparity.WML.loggamma(y=x,mu0=start[1],sig0=start[2],lam0=start[3],lam.low=control$lower,lam.sup=control$upper,tol=control$refine.tol,maxit=control$max.it,lstep=control$lambda.step,sigstep=control$sigma.step,bw=control$bw,raf=control$raf,tau=control$tau,nmod=control$subdivisions,minw=control$minw)
  result <- list(mu=resWL$mu, sigma=resWL$sig, lambda=resWL$lam, weights=resWL$weights, iterations=resWL$nit, error=NULL, QTau=QTau, WQTau=WQTau)
  return(result)
}
#############################################################
# loggammarob.ML function
# Author: C. Agostinelli, A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
# Maintainer e-mail: claudio@unive.it
# Date: January, 1, 2013
# Version: 0.1
# Copyright (C) 2013 C. Agostinelli A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
#############################################################
# (Weighted) maximum likelihood estimator of the loggamma parameters.
# When 'start' is NULL a Q-Tau fit provides the starting point; otherwise
# 'start' must be a numeric vector c(mu, sigma, lambda).
loggammarob.ML <- function(x, start=NULL, w=rep(1, length(x)), control=loggammarob.control()) {
  # Validate a user-supplied start, or build one from a Q-Tau fit.
  if (!is.null(start)) {
    if (!is.numeric(start))
      stop("'start' must be a numeric vector")
    if (length(start) != 3)
      stop("'start' must be a vector of length 3: mu, sigma2, lambda")
  } else {
    lambda.grid <- seq(control$lower, control$upper, length.out = control$n)
    init <- WQtau(ri = x, w = w, lgrid = lambda.grid, c1 = control$tuning.rho,
                  c2 = control$tuning.psi, N = control$nResample,
                  maxit = control$max.it, tolr = control$refine.tol)
    start <- c(init$mu2, init$sig2, init$lam2)
  }
  # Run the weighted ML iterations from the chosen starting point.
  fit <- WML.loggamma(y = x, wi = w, mu0 = start[1], sig0 = start[2],
                      lam0 = start[3], lam.low = control$lower,
                      lam.sup = control$upper, tol = control$refine.tol,
                      maxit = control$max.it, lstep = control$lambda.step,
                      sigstep = control$sigma.step)
  list(mu = fit$mu, sigma = fit$sig, lambda = fit$lam, weights = w,
       iterations = fit$nit, error = NULL)
}
#############################################################
# loggammarob.oneWL function
# Author: C. Agostinelli, A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
# Maintainer e-mail: claudio@unive.it
# Date: January, 1, 2013
# Version: 0.1
# Copyright (C) 2013 C. Agostinelli A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
#############################################################
# One-step weighted likelihood (oneWL) estimator of the loggamma parameters.
#
# Starting values: either user supplied c(mu, sigma, lambda), or built from a
# Q-Tau fit (bootstrapped when control$bootstrap is TRUE) followed by a
# reweighted Q-Tau fit. A single weighted-likelihood step is then taken,
# optionally under a reparametrization of sigma (control$reparam).
loggammarob.oneWL <- function(x, start=NULL, w=rep(1, length(x)), control=loggammarob.control()) {
  if (is.null(start)) {
    lgrid <- seq(control$lower, control$upper, length.out=control$n)
    if (!control$bootstrap) {
      # Preliminary Q-Tau fit; quantile-based weights from its shape lam1.
      resQ <- WQtau(ri=x,w=w,lgrid=lgrid,c1=control$tuning.rho,c2=control$tuning.psi,N=control$nResample,maxit=control$max.it,tolr=control$refine.tol)
      pp <- ppoints(length(x))
      ql <- qloggamma(p=pp, lambda=resQ$lam1)
      dl <- dloggamma(x=ql, lambda=resQ$lam1)
      vl <- pp*(1-pp)/dl^2
      wl <- 1/sqrt(vl)
    } else {
      # Bootstrap variant: the same fit serves as both the QTau and WQTau
      # result, and the input weights are kept unchanged.
      resQ <- resWQ <- WQtauboot(ri=x,w=w,lambda=control$bootstrap.lambda,step=0.25,lgrid=lgrid,c1=control$tuning.rho,c2=control$tuning.psi,N=control$nResample,maxit=control$max.it,tolr=control$refine.tol)
      wl <- w
### cat(resQ$mu1, resQ$sig1, resQ$lam1, '\n')
### resQ <- list(); resQ$lam1 <- control$bootstrap.lambda
    }
    # Non-bootstrap path: reweighted Q-Tau with the quantile-based weights.
    if (!control$bootstrap)
      resWQ <- WQtau(ri=x,w=wl,lgrid=lgrid,c1=control$tuning.rho,c2=control$tuning.psi,N=control$nResample,maxit=control$max.it,tolr=control$refine.tol)
## else
##   resWQ <- WQtauboot(ri=x,w=wl,lambda=control$bootstrap.lambda,step=0.25,lgrid=lgrid,c1=control$tuning.rho,c2=control$tuning.psi,N=control$nResample,maxit=control$max.it,tolr=control$refine.tol)
#### cat(resWQ$mu2, resWQ$sig2, resWQ$lam2, '\n')
    start <- c(resWQ$mu2, resWQ$sig2, resWQ$lam2)
    QTau <- list(mu=resQ$mu2, sigma=resQ$sig2, lambda=resQ$lam2, weights=w, iterations=NULL, error=NULL)
    WQTau <- list(mu=resWQ$mu2, sigma=resWQ$sig2, lambda=resWQ$lam2, weights=wl, iterations=NULL, error=NULL)
  } else {
    if (!is.numeric(start))
      stop("'start' must be a numeric vector")
    if (length(start)!=3)
      stop("'start' must be a vector of length 3: mu, sigma2, lambda")
    QTau <- NULL
    WQTau <- NULL
  }
  # Bandwidth proportional to the current scale estimate when 'smooth' is set.
  if (!is.null(control$smooth))
    control$bw <- control$smooth*sqrt(start[2])
  # Take a single weighted-likelihood step, plain or reparametrized.
  if (is.null(control$reparam))
    resOWL <- WMLone.loggamma(yi.sorted=x,mu0=start[1],sig0=start[2],lam0=start[3],bw=control$bw,raf=control$raf,tau=control$tau,nmod=control$subdivisions,step=control$step,minw=control$minw,nexp=control$nexp)
  else
    resOWL <- WMLone.reparam.loggamma(yi.sorted=x,mu0=start[1],sig0=start[2],lam0=start[3],bw=control$bw,raf=control$raf,tau=control$tau,nmod=control$subdivisions,step=control$step,minw=control$minw,nexp=control$nexp,reparam=control$reparam)
  result <- list(mu=resOWL$mu, sigma=resOWL$sig, lambda=resOWL$lam, weights=resOWL$weights, iterations=1, error=resOWL$error, step=control$step, QTau=QTau, WQTau=WQTau)
  return(result)
}
#############################################################
# loggammarob.test function
# Author: C. Agostinelli, A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
# Maintainer e-mail: claudio@unive.it
# Date: January, 1, 2013
# Version: 0.1
# Copyright (C) 2013 C. Agostinelli A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
#############################################################
# Wald-type tests and confidence intervals for a fitted loggamma model.
#
# Args:
#   x: an object returned by loggammarob(); inference requires method
#      "WL", "oneWL" or "ML".
#   mu, sigma, lambda: null values of location/scale/shape; only the
#      non-NULL ones enter the (possibly joint) test.
#   eta: null value for E(exp(X)); when given, only the test on 'eta' is
#      performed (other parameters are ignored with a warning).
#   type: test type; only "Wald" is currently supported.
#   conf.level: confidence level for the univariate confidence interval.
#   prob: tail probability forwarded to the asymptotic covariance routines.
#
# Returns an object of class "htest".
loggammarob.test <- function(x, mu=NULL, sigma=NULL, lambda=NULL, eta=NULL, type="Wald", conf.level = 0.95, prob=0.00001) {
  ## x an object from loggammarob
  if (!(x$method %in% c("WL", "oneWL", "ML")))
    stop("Inference is available only for 'WL', 'oneWL' and 'ML'")
  # Default: joint test of (mu, sigma, lambda) = (0, 1, 0).
  if (is.null(mu) & is.null(sigma) & is.null(lambda) & is.null(eta)) {
    mu <- 0
    sigma <- 1
    lambda <- 0
  }
  alternative <- "two.sided"
  dname <- names(x$data)
  type <- match.arg(type)
  # Flags marking which of (mu, sigma, lambda) are under test.
  components <- c(!is.null(mu), !is.null(sigma), !is.null(lambda))
  if (is.null(eta)) {
    estimate <- c(x$mu, x$sigma, x$lambda)[components]
    null.value <- c(mu, sigma, lambda)
    if (type=="Wald") {
      param <- estimate - null.value
      df <- length(param)
      acov <- ACOV.ML.loggamma(sigma=x$sigma,lambda=x$lambda, prob=prob)
      if (df==1) {
        # Univariate test: reciprocal of the asymptotic variance.
        hatJ <- 1/diag(acov$cov)[components]
      } else {
        # Joint test: relevant sub-matrix of the Fisher information.
        hatJ <- acov$Fisher.Info
        hatJ <- matrix(hatJ[as.logical(components%*%t(components))], nrow=length(param), byrow=TRUE)
      }
      # Wald statistic; sum of weights plays the role of the sample size.
      wstat <- drop(sum(x$weights)*t(param)%*%hatJ%*%param)
      pval <- pchisq(q=wstat, df=df, ncp=0, lower.tail = FALSE, log.p = FALSE)
      estcov <- diag(acov$cov)[components]
      if (df==1)
        cint <- estimate+c(-1,1)*qnorm(1-(1-conf.level)/2)*sqrt(estcov)/sqrt(sum(x$weights))
      else
        cint <- c(NA,NA)  # no joint confidence region is reported
    }
    names(wstat) <- "ww"
    names(df) <- "df"
    names(null.value) <- c("mean", "scale", "shape")[components]
  } else {
    if (any(components))
      warning("test with 'eta' together with other parameters is not implemented, only the test on 'eta' is performed")
    eps <- 0.0001
    estimate <- x$eta
    null.value <- eta
    if (type=="Wald") {
      param <- estimate - null.value
      if (x$lambda < eps)
        warning("test and confidence intervals for 'eta' is not implemented for negative lambda")
      hatvar <- AVAR.ML.eta.loggamma(mu=x$mu, sigma=x$sigma, lambda=x$lambda, prob=prob, eps=eps)
      wstat <- sum(x$weights)*param^2/hatvar
      df <- length(param)
      pval <- pchisq(q=wstat, df=df, ncp=0, lower.tail = FALSE, log.p = FALSE)
      cint <- estimate+c(-1,1)*qnorm(1-(1-conf.level)/2)*sqrt(hatvar)/sqrt(sum(x$weights))
    }
    names(wstat) <- "ww"
    names(df) <- "df"
    names(null.value) <- "mean(exp(X))"
  }
  # Label the test after the estimation method used in the fit.
  type <- paste(ifelse(x$method=="ML","Classic","Weighted"), type, "Test based on", x$method, sep=" ")
  attr(cint,"conf.level") <- conf.level
  rval <- list(statistic=wstat, parameter=df, p.value=pval,
               conf.int=cint, estimate=estimate, null.value=null.value,
               alternative=alternative,
               method=type, data.name=dname)
  class(rval) <- "htest"
  return(rval)
}
#############################################################
# print.loggammarob function
# Author: C. Agostinelli, A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
# Maintainer e-mail: claudio@unive.it
# Date: January, 1, 2013
# Version: 0.1
# Copyright (C) 2013 C. Agostinelli A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
#############################################################
######## PRINT
# Print method for "loggammarob" objects: shows the originating call and the
# four point estimates on a single line. Returns 'x' invisibly.
print.loggammarob <- function(x, digits = max(3, getOption("digits") - 3), ...) {
  cat("\nCall:\n", deparse(x$call), "\n\n", sep = "")
  labels <- c("Location: ", " Scale: ", " Shape: ", " E(exp(X)): ")
  values <- list(x$mu, x$sigma, x$lambda, x$eta)
  # cat() with two arguments inserts a single space between label and value,
  # exactly as the original one-cat-per-estimate layout did.
  for (i in seq_along(labels))
    cat(labels[i], format(values[[i]], digits = digits))
  cat("\n")
  invisible(x)
}
#############################################################
# summary.loggammarob function
# Author: C. Agostinelli, A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
# Maintainer e-mail: claudio@unive.it
# Date: January, 1, 2013
# Version: 0.1
# Copyright (C) 2013 C. Agostinelli A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
#############################################################
########### SUMMARY
# Summary method for "loggammarob" objects: augments the fit with standard
# errors and asymptotic confidence intervals for (mu, sigma, lambda, eta)
# and, optionally, for the quantiles of orders 'p'.
# Inference is only computed for the "WL", "oneWL" and "ML" methods.
#
# Args:
#   object: a "loggammarob" fit.
#   p: optional vector of quantile orders; values outside (0, 1) are dropped.
#   conf.level: scalar, or vector of length 4 + length(p) (one level per
#     parameter and per quantile).
#   prob: tail probability forwarded to the asymptotic covariance routines.
summary.loggammarob <- function(object, p=NULL, conf.level=0.95, prob=0.00001, ...) {
  if ((object$method %in% c("WL", "oneWL", "ML"))) {
    # Keep only quantile orders strictly inside (0, 1).
    p <- p[ifelse(p > 0 & p < 1, TRUE, FALSE)]
    nrep <- 4+length(p)   # mu, sigma, lambda, eta + one slot per quantile
    if (length(conf.level) == 1)
      conf.level <- rep(conf.level, nrep)
    if (length(conf.level) != nrep)
      stop("'conf.level' must have length equal to 1 or 4+length(p)")
    z <- qnorm(1-(1-conf.level)/2)   # normal quantiles for two-sided CIs
    if (is.finite(object$eta)) {
      # Asymptotic sd of (mu, sigma, lambda) and of eta = E(exp(X)).
      asd <- sqrt(diag(ACOV.ML.loggamma(sigma=object$sigma,lambda=object$lambda,prob=prob)$cov))
      asdeta <- sqrt(AVAR.ML.eta.loggamma(mu=object$mu, sigma=object$sigma, lambda=object$lambda, prob=prob, eps=0.0001, npoints=100000))
    } else {
      # No finite eta: report NA standard errors and intervals.
      asd <- rep(NA,3)
      asdeta <- NA
    }
    # Wald confidence intervals; sum of weights acts as the sample size.
    object$muconf.int <- object$mu+c(-1,1)*z[1]*asd[1]/sqrt(sum(object$weights))
    object$sigmaconf.int <- object$sigma+c(-1,1)*z[2]*asd[2]/sqrt(sum(object$weights))
    object$lambdaconf.int <- object$lambda+c(-1,1)*z[3]*asd[3]/sqrt(sum(object$weights))
    object$etaconf.int <- object$eta+c(-1,1)*z[4]*asdeta/sqrt(sum(object$weights))
    attr(object$muconf.int, "conf.level") <- conf.level[1]
    attr(object$sigmaconf.int, "conf.level") <- conf.level[2]
    attr(object$lambdaconf.int, "conf.level") <- conf.level[3]
    attr(object$etaconf.int, "conf.level") <- conf.level[4]
    object$muse <- asd[1]/sqrt(sum(object$weights))
    object$sigmase <- asd[2]/sqrt(sum(object$weights))
    object$lambdase <- asd[3]/sqrt(sum(object$weights))
    object$etase <- asdeta/sqrt(sum(object$weights))
    object$p <- p
    if (!is.null(p)) {
      # Point estimates, standard errors and CIs for the requested quantiles.
      object$q <- asdq <- rep(NA, length(p))
      object$qconf.int <- matrix(NA, nrow=length(p), ncol=2)
      for (i in 1:length(p)) {
        object$q[i] <- qloggamma(p=p[i], mu=object$mu, sigma=object$sigma, lambda=object$lambda)
        if (is.finite(object$eta))
          asdq[i] <- sqrt(AVAR.ML.quantile.loggamma(p=p[i], mu=object$mu, sigma=object$sigma, lambda=object$lambda, prob=prob))
        else
          asdq[i] <- NA
        object$qconf.int[i,] <- object$q[i]+c(-1,1)*z[4+i]*asdq[i]/sqrt(sum(object$weights))
      }
##### object$qconf.int <- drop(qconf.int)
      attr(object$qconf.int, "conf.level") <- conf.level[4+(1:length(p))]
      object$qse <- asdq/sqrt(sum(object$weights))
    }
  }
  object$call <- match.call()
  class(object) <- "summary.loggammarob"
  return(object)
}
#############################################################
# print.summary.loggammarob function
# Author: C. Agostinelli, A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
# Maintainer e-mail: claudio@unive.it
# Date: January, 1, 2013
# Version: 0.1
# Copyright (C) 2013 C. Agostinelli A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
#############################################################
#PRINT.SUMMARY
# Print method for "summary.loggammarob" objects.
#
# Each estimate (location, scale, shape, E(exp(X)) and optional quantiles)
# is printed with its standard error and confidence interval when the fit
# method supports inference ("WL", "oneWL", "ML"); otherwise only the point
# estimate is shown. For "WL", "oneWL" and "WQTau" fits a summary of the
# robustness weights is appended. Returns 'x' invisibly.
#
# Refactor note: the four (plus quantiles) near-identical print blocks of
# the original are collapsed into the local helper show.estimate().
print.summary.loggammarob <- function(x, digits = max(3, getOption("digits") - 3), ...) {
  cat("\nCall:\n",deparse(x$call),"\n\n",sep="")
  has.inference <- x$method %in% c("WL", "oneWL", "ML")
  # Print one labelled estimate; with inference, append s.e. and CI.
  # 'tail' carries the trailing newlines (the eta and quantile blocks end
  # with one extra blank line, matching the original layout).
  show.estimate <- function(label, est, se, cint, clevel, tail) {
    cat(label, format(est, digits = digits))
    if (has.inference) {
      cat(" s.e. ", format(se, digits = digits), "\n")
      cat("(", format(cint[1L], digits = digits), ", ",
          format(cint[2L], digits = digits), ") \n")
      cat(format(100 * clevel), paste0("percent confidence interval", tail))
    } else {
      cat("\n")
    }
  }
  show.estimate("Location: ", x$mu, x$muse, x$muconf.int,
                attr(x$muconf.int, "conf.level"), "\n\n")
  show.estimate("Scale: ", x$sigma, x$sigmase, x$sigmaconf.int,
                attr(x$sigmaconf.int, "conf.level"), "\n\n")
  show.estimate("Shape: ", x$lambda, x$lambdase, x$lambdaconf.int,
                attr(x$lambdaconf.int, "conf.level"), "\n\n")
  show.estimate("Mean(exp(X)): ", x$eta, x$etase, x$etaconf.int,
                attr(x$etaconf.int, "conf.level"), "\n\n\n")
  ######### Quantiles estimation and confidence interval
  if (!is.null(x$p)) {
    for (i in seq_along(x$p)) {
      # paste() with default sep reproduces the spacing of the original
      # multi-argument cat() call.
      show.estimate(paste("Quantile of order ", format(x$p[i], digits = digits), ": "),
                    x$q[i], x$qse[i], x$qconf.int[i, ],
                    attr(x$qconf.int, "conf.level")[i], "\n\n\n")
    }
  }
  if (has.inference)
    cat("\n\n")
  else
    cat("\n\nConfidence intervals and Standard Errors are available only for 'WL', 'oneWL' and 'ML'\n\n\n")
  if (x$method %in% c("WL", "oneWL", "WQTau"))
    summarizeRobWeights(x$weights, digits = digits, ...)
  invisible(x)
}
| /robustloggamma/R/loggammarob.R | no_license | ingted/R-Examples | R | false | false | 24,932 | r | #############################################################
# loggammarob function
# Author: C. Agostinelli, A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
# Maintainer e-mail: claudio@unive.it
# Date: February, 5, 2015
# Version: 0.2
# Copyright (C) 2015 C. Agostinelli A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
#############################################################
# Robust estimation of the three-parameter loggamma model.
#
# Args:
#   x: numeric data vector; NAs are removed and the data are sorted (with a
#      warning) because the back-end estimators expect sorted input.
#   start: optional starting values, forwarded to the WL/oneWL/ML back-ends.
#   weights: observation weights, kept aligned with 'x'.
#   method: estimator to use; see the individual loggammarob.* functions.
#   control: list of tuning options from loggammarob.control(); built from
#      '...' when missing.
#
# Returns an object of class "loggammarob" with components mu, sigma,
# lambda, eta = E(exp(X)), weights, data, method and call.
loggammarob <- function(x, start=NULL, weights=rep(1, length(x)), method=c("oneWL", "WQTau", "WL", "QTau", "ML"), control, ...) {
  method <- match.arg(method)
  # BUG FIX: force the lazily-evaluated default of 'weights' (which depends
  # on length(x)) before 'x' is shortened by na.omit() below; otherwise the
  # default weights would be built from the cleaned data and then wrongly
  # subset again by the na.action indices.
  force(weights)
  x <- na.omit(x)
  if (!is.null(na <- attr(x, "na.action")))
    weights <- weights[-na]
  # The estimators expect sorted data; keep the weights aligned with x.
  # (seq_along() replaces 1:length(x), which breaks for zero-length input.)
  or <- order(x)
  if (any(or != seq_along(x))) {
    x <- sort(x)
    warning("data 'x' are sorted")
  }
  weights <- weights[or]
  if (missing(control))
    control <- if (missing(method))
      loggammarob.control(...)
    else
      loggammarob.control(method = method, ...)
  cl <- match.call()
  # When both 'method' and 'control' are supplied and disagree, the explicit
  # 'method' argument wins.
  if (!missing(control) && !missing(method) && method != control$method) {
    warning("Methods argument set by method is different from method in control\n", "Using method = ", method)
    control$method <- method
  }
  # Dispatch to the requested estimator.
  # NOTE(review): the first branch tests control$method while the others
  # test 'method'; kept as-is to preserve behavior when only 'control'
  # selects "QTau" -- confirm against upstream before unifying.
  if (control$method=="QTau")
    result <- loggammarob.QTau(x, weights, control)
  else if (method=="WQTau")
    result <- loggammarob.WQTau(x, weights, control)
  else if (method=="WL")
    result <- loggammarob.WL(x, start, weights, control)
  else if (method=="oneWL")
    result <- loggammarob.oneWL(x, start, weights, control)
  else if (method=="ML")
    result <- loggammarob.ML(x, start, weights, control)
  result$eta <- Exp.response(result$mu, result$sigma, result$lambda)
  result$data <- x
  result$method <- method
  result$call <- cl
  class(result) <- "loggammarob"
  return(result)
}
#############################################################
# loggammarob.control function
# Author: C. Agostinelli, A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
# Maintainer e-mail: claudio@unive.it
# Date: January, 1, 2013
# Version: 0.1
# Copyright (C) 2013 C. Agostinelli A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
#############################################################
# Build and validate the list of tuning constants and algorithmic options
# used by loggammarob() and its fitting back-ends.
#
# Arguments (all optional, with sensible defaults):
#   tuning.rho, tuning.psi : c1, c2 tuning constants of the tau-scale
#   lower, upper           : optimization interval for the shape lambda
#   n                      : size of the lambda grid
#   max.it                 : maximum number of refinement iterations
#   refine.tol             : relative tolerance in the refinement
#   nResample              : number of subsamples
#   bw, smooth             : bandwidth; if 'smooth' is not NULL, the fitting
#                            functions use bw = smooth * sqrt(sigma-hat)
#   raf                    : residual adjustment function
#   tau                    : parameter of the GKL/PWD families
#   subdivisions           : integration subdivisions
#   lambda.step/sigma.step : update lambda/sigma during the iterations?
#   step                   : step length in the one-step (oneWL) estimator
#   minw                   : weights below this value are set to zero (oneWL)
#   nexp                   : quantile points approximating the expected Jacobian
#   reparam                : optional reparametrization of sigma, a list of
#                            three functions 'gam', 'gaminv' and 'delta', e.g.
#                            list(gam=function(s) sqrt(s),
#                                 gaminv=function(g) g^2,
#                                 delta=function(s) 2*sqrt(s))
#   bootstrap, bootstrap.lambda : bootstrap options for the oneWL estimator
#
# Returns a named list with the validated settings.
loggammarob.control <- function(method="oneWL", tuning.rho=1.547647, tuning.psi=6.08, lower=-7, upper=7, n=201, max.it=750, refine.tol=1e-6, nResample=100, bw=0.3, smooth=NULL, raf=c("NED","GKL","PWD","HD","SCHI2"), tau=1, subdivisions=1000, lambda.step=TRUE, sigma.step=TRUE, step=1, minw=0.04, nexp=1000, reparam=NULL, bootstrap=FALSE, bootstrap.lambda=NULL) {
  raf <- match.arg(raf)
  if (lower >= upper)
    stop("'lower' must be smaller than 'upper'")
  if (length(step) != 1 && length(step) != 3)
    stop("'step' must be a scalar or a vector of length 3")
  if (length(n) > 1 || n < 0)
    stop("'n' must be a positive scalar")
  n <- max(1, round(n))
  if (length(max.it) > 1 || max.it < 0)
    stop("'max.it' must be a positive scalar")
  max.it <- max(1, round(max.it))
  if (length(refine.tol) > 1 || refine.tol < 0)
    stop("'refine.tol' must be a positive scalar")
  if (length(bw) > 1 || bw < 0)
    stop("'bw' must be a positive scalar")
  # BUG FIX: this check previously re-tested 'bw' instead of 'smooth', so an
  # invalid 'smooth' was silently accepted.
  if (!is.null(smooth) && (length(smooth) > 1 || smooth < 0))
    stop("'smooth' must be NULL or a positive scalar")
  if (length(subdivisions) > 1 || subdivisions < 0)
    stop("'subdivisions' must be a positive scalar")
  subdivisions <- max(1, round(subdivisions))
  if (!is.logical(lambda.step))
    stop("'lambda.step' must be a logical")
  if (!is.logical(sigma.step))
    stop("'sigma.step' must be a logical")
  # BUG FIX: use short-circuit '||' throughout; the original mixed '|' into
  # this scalar condition, which does not short-circuit after the length
  # guard and can produce a length > 1 condition for vector input.
  if (length(minw) > 1 || minw > 1 || minw < 0)
    stop("'minw' must be a scalar in [0,1]")
  if (length(nexp) > 1 || nexp < 0)
    stop("'nexp' must be a positive scalar")
  nexp <- max(1, round(nexp))
  if (!is.null(reparam)) {
    badreparam <- !is.list(reparam) || !is.function(reparam$gam) ||
      !is.function(reparam$gaminv) || !is.function(reparam$delta)
    if (badreparam)
      stop("'reparam' must be a list of 3 functions named 'gam', 'gaminv' and 'delta'")
  }
  if (!is.logical(bootstrap))
    stop("'bootstrap' must be a logical")
  list(method=method, tuning.rho=tuning.rho, tuning.psi=tuning.psi,
       lower=lower, upper=upper, n=n, max.it=max.it, refine.tol=refine.tol,
       nResample=nResample, bw=bw, smooth=smooth, raf=raf, tau=tau,
       subdivisions=subdivisions, lambda.step=lambda.step,
       sigma.step=sigma.step, step=step, minw=minw, nexp=nexp,
       reparam=reparam, bootstrap=bootstrap, bootstrap.lambda=bootstrap.lambda)
}
#############################################################
# loggammarob.QTau function
# Author: C. Agostinelli, A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
# Maintainer e-mail: claudio@unive.it
# Date: January, 1, 2013
# Version: 0.1
# Copyright (C) 2013 C. Agostinelli A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
#############################################################
# Q-Tau estimator of the loggamma parameters (mu, sigma, lambda) based on a
# tau-scale grid search over the shape parameter; when control$bootstrap is
# TRUE the bootstrap variant WQtauboot is used instead.
# Returns the second-stage estimates together with the (unchanged) weights.
loggammarob.QTau <- function(x, w=rep(1, length(x)), control=loggammarob.control()) {
  # Grid of candidate lambda values.
  lgrid <- seq(control$lower, control$upper, length.out=control$n)
  if (!control$bootstrap) {
    resQ <- WQtau(ri=x,w=w,lgrid,c1=control$tuning.rho,c2=control$tuning.psi,N=control$nResample,maxit=control$max.it,tolr=control$refine.tol)
  } else {
    resQ <- WQtauboot(ri=x,w=w,lambda=control$bootstrap.lambda,step=0.25,lgrid=lgrid,c1=control$tuning.rho,c2=control$tuning.psi,N=control$nResample,maxit=control$max.it,tolr=control$refine.tol)
  }
  result <- list(mu=resQ$mu2, sigma=resQ$sig2, lambda=resQ$lam2, weights=w, iterations=NULL, error=NULL)
  return(result)
}
#############################################################
# loggammarob.WQTau function
# Author: C. Agostinelli, A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
# Maintainer e-mail: claudio@unive.it
# Date: January, 1, 2013
# Version: 0.1
# Copyright (C) 2013 C. Agostinelli A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
#############################################################
# Weighted Q-Tau estimator of the loggamma parameters.
# A first Q-Tau pass yields a preliminary shape (lam1) from which
# quantile-based weights are built; a second Q-Tau pass is then run with
# those weights. The plain Q-Tau fit is returned in the 'QTau' component.
loggammarob.WQTau <- function(x, w=rep(1, length(x)), control=loggammarob.control()) {
  lgrid <- seq(control$lower, control$upper, length.out=control$n)
  # First pass: plain Q-Tau fit, used only to obtain lam1.
  resQ <- WQtau(ri=x,w=w,lgrid=lgrid,c1=control$tuning.rho,c2=control$tuning.psi,N=control$nResample,maxit=control$max.it,tolr=control$refine.tol)
  # Weights = 1/sd of the sample quantiles under the fitted shape.
  pp <- ppoints(length(x))
  ql <- qloggamma(p=pp, lambda=resQ$lam1)
  dl <- dloggamma(x=ql, lambda=resQ$lam1)
  vl <- pp*(1-pp)/dl^2
  wl <- 1/sqrt(vl)
  # Second pass: Q-Tau re-run with the data-driven weights.
  resWQ <- WQtau(ri=x,w=wl,lgrid=lgrid,c1=control$tuning.rho,c2=control$tuning.psi,N=control$nResample,maxit=control$max.it,tolr=control$refine.tol)
  result <- list(mu=resWQ$mu2, sigma=resWQ$sig2, lambda=resWQ$lam2, weights=wl, iterations=NULL, error=NULL, QTau=list(mu=resQ$mu2, sigma=resQ$sig2, lambda=resQ$lam2, weights=w, iterations=NULL, error=NULL))
  return(result)
}
#############################################################
# loggammarob.WL function
# Author: C. Agostinelli, A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
# Maintainer e-mail: claudio@unive.it
# Date: January, 1, 2013
# Version: 0.1
# Copyright (C) 2013 C. Agostinelli A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
#############################################################
# Fully iterated weighted likelihood (WL) estimator of the loggamma
# parameters (mu, sigma, lambda).
#
# Args:
#   x: data vector (assumed sorted by the caller).
#   start: optional numeric starting values c(mu, sigma, lambda); when NULL
#     a starting point is built from a Q-Tau fit followed by a reweighted
#     Q-Tau fit.
#   w: observation weights.
#   control: list of tuning options, see loggammarob.control().
#
# Returns a list with the WL estimates, the final weights, the iteration
# count and the intermediate QTau/WQTau fits (NULL when 'start' was given).
loggammarob.WL <- function(x, start=NULL, w=rep(1, length(x)), control=loggammarob.control()) {
  if (is.null(start)) {
    # Grid of candidate lambda values for the tau-scale grid search.
    lgrid <- seq(control$lower, control$upper, length.out=control$n)
    resQ <- WQtau(ri=x,w=w,lgrid=lgrid,c1=control$tuning.rho,c2=control$tuning.psi,N=control$nResample,maxit=control$max.it,tolr=control$refine.tol)
    # Weights proportional to 1/sd of the sample quantiles under the
    # preliminary shape estimate lam1.
    pp <- ppoints(length(x))
    ql <- qloggamma(p=pp, lambda=resQ$lam1)
    dl <- dloggamma(x=ql, lambda=resQ$lam1)
    vl <- pp*(1-pp)/dl^2
    wl <- 1/sqrt(vl)
    # Reweighted Q-Tau fit supplies the starting point of the WL iterations.
    resWQ <- WQtau(ri=x,w=wl,lgrid=lgrid,c1=control$tuning.rho,c2=control$tuning.psi,N=control$nResample,maxit=control$max.it,tolr=control$refine.tol)
    start <- c(resWQ$mu2, resWQ$sig2, resWQ$lam2)
    QTau <- list(mu=resQ$mu2, sigma=resQ$sig2, lambda=resQ$lam2, weights=w, iterations=NULL, error=NULL)
    WQTau <- list(mu=resWQ$mu2, sigma=resWQ$sig2, lambda=resWQ$lam2, weights=wl, iterations=NULL, error=NULL)
  } else {
    if (!is.numeric(start))
      stop("'start' must be a numeric vector")
    if (length(start)!=3)
      stop("'start' must be a vector of length 3: mu, sigma2, lambda")
    QTau <- NULL
    WQTau <- NULL
  }
  # Bandwidth proportional to the current scale estimate when 'smooth' is set.
  if (!is.null(control$smooth))
    control$bw <- control$smooth*sqrt(start[2])
  resWL <- Disparity.WML.loggamma(y=x,mu0=start[1],sig0=start[2],lam0=start[3],lam.low=control$lower,lam.sup=control$upper,tol=control$refine.tol,maxit=control$max.it,lstep=control$lambda.step,sigstep=control$sigma.step,bw=control$bw,raf=control$raf,tau=control$tau,nmod=control$subdivisions,minw=control$minw)
  result <- list(mu=resWL$mu, sigma=resWL$sig, lambda=resWL$lam, weights=resWL$weights, iterations=resWL$nit, error=NULL, QTau=QTau, WQTau=WQTau)
  return(result)
}
#############################################################
# loggammarob.ML function
# Author: C. Agostinelli, A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
# Maintainer e-mail: claudio@unive.it
# Date: January, 1, 2013
# Version: 0.1
# Copyright (C) 2013 C. Agostinelli A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
#############################################################
# (Weighted) maximum likelihood estimator of the loggamma parameters.
# When 'start' is NULL a Q-Tau fit provides the starting point; otherwise
# 'start' must be a numeric vector c(mu, sigma, lambda).
loggammarob.ML <- function(x, start=NULL, w=rep(1, length(x)), control=loggammarob.control()) {
  if (is.null(start)) {
    # Build starting values from a Q-Tau fit on the lambda grid.
    lgrid <- seq(control$lower, control$upper, length.out=control$n)
    resQ <- WQtau(ri=x,w=w,lgrid=lgrid,c1=control$tuning.rho,c2=control$tuning.psi,N=control$nResample,maxit=control$max.it,tolr=control$refine.tol)
    start <- c(resQ$mu2, resQ$sig2, resQ$lam2)
  } else {
    if (!is.numeric(start))
      stop("'start' must be a numeric vector")
    if (length(start)!=3)
      stop("'start' must be a vector of length 3: mu, sigma2, lambda")
  }
  # Weighted ML iterations from the chosen starting point.
  resML <- WML.loggamma(y=x,wi=w,mu0=start[1],sig0=start[2],lam0=start[3],lam.low=control$lower,lam.sup=control$upper,tol=control$refine.tol,maxit=control$max.it,lstep=control$lambda.step,sigstep=control$sigma.step)
  result <- list(mu=resML$mu, sigma=resML$sig, lambda=resML$lam, weights=w, iterations=resML$nit, error=NULL)
  return(result)
}
#############################################################
# loggammarob.oneWL function
# Author: C. Agostinelli, A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
# Maintainer e-mail: claudio@unive.it
# Date: January, 1, 2013
# Version: 0.1
# Copyright (C) 2013 C. Agostinelli A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
#############################################################
# One-step weighted likelihood (oneWL) estimator of the loggamma parameters.
#
# Starting values: either user supplied c(mu, sigma, lambda), or built from a
# Q-Tau fit (bootstrapped when control$bootstrap is TRUE) followed by a
# reweighted Q-Tau fit. A single weighted-likelihood step is then taken,
# optionally under a reparametrization of sigma (control$reparam).
loggammarob.oneWL <- function(x, start=NULL, w=rep(1, length(x)), control=loggammarob.control()) {
  if (is.null(start)) {
    lgrid <- seq(control$lower, control$upper, length.out=control$n)
    if (!control$bootstrap) {
      # Preliminary Q-Tau fit; quantile-based weights from its shape lam1.
      resQ <- WQtau(ri=x,w=w,lgrid=lgrid,c1=control$tuning.rho,c2=control$tuning.psi,N=control$nResample,maxit=control$max.it,tolr=control$refine.tol)
      pp <- ppoints(length(x))
      ql <- qloggamma(p=pp, lambda=resQ$lam1)
      dl <- dloggamma(x=ql, lambda=resQ$lam1)
      vl <- pp*(1-pp)/dl^2
      wl <- 1/sqrt(vl)
    } else {
      # Bootstrap variant: the same fit serves as both the QTau and WQTau
      # result, and the input weights are kept unchanged.
      resQ <- resWQ <- WQtauboot(ri=x,w=w,lambda=control$bootstrap.lambda,step=0.25,lgrid=lgrid,c1=control$tuning.rho,c2=control$tuning.psi,N=control$nResample,maxit=control$max.it,tolr=control$refine.tol)
      wl <- w
### cat(resQ$mu1, resQ$sig1, resQ$lam1, '\n')
### resQ <- list(); resQ$lam1 <- control$bootstrap.lambda
    }
    # Non-bootstrap path: reweighted Q-Tau with the quantile-based weights.
    if (!control$bootstrap)
      resWQ <- WQtau(ri=x,w=wl,lgrid=lgrid,c1=control$tuning.rho,c2=control$tuning.psi,N=control$nResample,maxit=control$max.it,tolr=control$refine.tol)
## else
##   resWQ <- WQtauboot(ri=x,w=wl,lambda=control$bootstrap.lambda,step=0.25,lgrid=lgrid,c1=control$tuning.rho,c2=control$tuning.psi,N=control$nResample,maxit=control$max.it,tolr=control$refine.tol)
#### cat(resWQ$mu2, resWQ$sig2, resWQ$lam2, '\n')
    start <- c(resWQ$mu2, resWQ$sig2, resWQ$lam2)
    QTau <- list(mu=resQ$mu2, sigma=resQ$sig2, lambda=resQ$lam2, weights=w, iterations=NULL, error=NULL)
    WQTau <- list(mu=resWQ$mu2, sigma=resWQ$sig2, lambda=resWQ$lam2, weights=wl, iterations=NULL, error=NULL)
  } else {
    if (!is.numeric(start))
      stop("'start' must be a numeric vector")
    if (length(start)!=3)
      stop("'start' must be a vector of length 3: mu, sigma2, lambda")
    QTau <- NULL
    WQTau <- NULL
  }
  # Bandwidth proportional to the current scale estimate when 'smooth' is set.
  if (!is.null(control$smooth))
    control$bw <- control$smooth*sqrt(start[2])
  # Take a single weighted-likelihood step, plain or reparametrized.
  if (is.null(control$reparam))
    resOWL <- WMLone.loggamma(yi.sorted=x,mu0=start[1],sig0=start[2],lam0=start[3],bw=control$bw,raf=control$raf,tau=control$tau,nmod=control$subdivisions,step=control$step,minw=control$minw,nexp=control$nexp)
  else
    resOWL <- WMLone.reparam.loggamma(yi.sorted=x,mu0=start[1],sig0=start[2],lam0=start[3],bw=control$bw,raf=control$raf,tau=control$tau,nmod=control$subdivisions,step=control$step,minw=control$minw,nexp=control$nexp,reparam=control$reparam)
  result <- list(mu=resOWL$mu, sigma=resOWL$sig, lambda=resOWL$lam, weights=resOWL$weights, iterations=1, error=resOWL$error, step=control$step, QTau=QTau, WQTau=WQTau)
  return(result)
}
#############################################################
# loggammarob.test function
# Author: C. Agostinelli, A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
# Maintainer e-mail: claudio@unive.it
# Date: January, 1, 2013
# Version: 0.1
# Copyright (C) 2013 C. Agostinelli A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
#############################################################
# Wald-type tests and confidence intervals for a fitted loggamma model.
#
# Args:
#   x: an object returned by loggammarob(); inference requires method
#      "WL", "oneWL" or "ML".
#   mu, sigma, lambda: null values of location/scale/shape; only the
#      non-NULL ones enter the (possibly joint) test.
#   eta: null value for E(exp(X)); when given, only the test on 'eta' is
#      performed (other parameters are ignored with a warning).
#   type: test type; only "Wald" is currently supported.
#   conf.level: confidence level for the univariate confidence interval.
#   prob: tail probability forwarded to the asymptotic covariance routines.
#
# Returns an object of class "htest".
loggammarob.test <- function(x, mu=NULL, sigma=NULL, lambda=NULL, eta=NULL, type="Wald", conf.level = 0.95, prob=0.00001) {
  ## x an object from loggammarob
  if (!(x$method %in% c("WL", "oneWL", "ML")))
    stop("Inference is available only for 'WL', 'oneWL' and 'ML'")
  # Default: joint test of (mu, sigma, lambda) = (0, 1, 0).
  if (is.null(mu) & is.null(sigma) & is.null(lambda) & is.null(eta)) {
    mu <- 0
    sigma <- 1
    lambda <- 0
  }
  alternative <- "two.sided"
  dname <- names(x$data)
  type <- match.arg(type)
  # Flags marking which of (mu, sigma, lambda) are under test.
  components <- c(!is.null(mu), !is.null(sigma), !is.null(lambda))
  if (is.null(eta)) {
    estimate <- c(x$mu, x$sigma, x$lambda)[components]
    null.value <- c(mu, sigma, lambda)
    if (type=="Wald") {
      param <- estimate - null.value
      df <- length(param)
      acov <- ACOV.ML.loggamma(sigma=x$sigma,lambda=x$lambda, prob=prob)
      if (df==1) {
        # Univariate test: reciprocal of the asymptotic variance.
        hatJ <- 1/diag(acov$cov)[components]
      } else {
        # Joint test: relevant sub-matrix of the Fisher information.
        hatJ <- acov$Fisher.Info
        hatJ <- matrix(hatJ[as.logical(components%*%t(components))], nrow=length(param), byrow=TRUE)
      }
      # Wald statistic; sum of weights plays the role of the sample size.
      wstat <- drop(sum(x$weights)*t(param)%*%hatJ%*%param)
      pval <- pchisq(q=wstat, df=df, ncp=0, lower.tail = FALSE, log.p = FALSE)
      estcov <- diag(acov$cov)[components]
      if (df==1)
        cint <- estimate+c(-1,1)*qnorm(1-(1-conf.level)/2)*sqrt(estcov)/sqrt(sum(x$weights))
      else
        cint <- c(NA,NA)  # no joint confidence region is reported
    }
    names(wstat) <- "ww"
    names(df) <- "df"
    names(null.value) <- c("mean", "scale", "shape")[components]
  } else {
    if (any(components))
      warning("test with 'eta' together with other parameters is not implemented, only the test on 'eta' is performed")
    eps <- 0.0001
    estimate <- x$eta
    null.value <- eta
    if (type=="Wald") {
      param <- estimate - null.value
      if (x$lambda < eps)
        warning("test and confidence intervals for 'eta' is not implemented for negative lambda")
      hatvar <- AVAR.ML.eta.loggamma(mu=x$mu, sigma=x$sigma, lambda=x$lambda, prob=prob, eps=eps)
      wstat <- sum(x$weights)*param^2/hatvar
      df <- length(param)
      pval <- pchisq(q=wstat, df=df, ncp=0, lower.tail = FALSE, log.p = FALSE)
      cint <- estimate+c(-1,1)*qnorm(1-(1-conf.level)/2)*sqrt(hatvar)/sqrt(sum(x$weights))
    }
    names(wstat) <- "ww"
    names(df) <- "df"
    names(null.value) <- "mean(exp(X))"
  }
  # Label the test after the estimation method used in the fit.
  type <- paste(ifelse(x$method=="ML","Classic","Weighted"), type, "Test based on", x$method, sep=" ")
  attr(cint,"conf.level") <- conf.level
  rval <- list(statistic=wstat, parameter=df, p.value=pval,
               conf.int=cint, estimate=estimate, null.value=null.value,
               alternative=alternative,
               method=type, data.name=dname)
  class(rval) <- "htest"
  return(rval)
}
#############################################################
# print.loggammarob function
# Author: C. Agostinelli, A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
# Maintainer e-mail: claudio@unive.it
# Date: January, 1, 2013
# Version: 0.1
# Copyright (C) 2013 C. Agostinelli A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
#############################################################
######## PRINT
print.loggammarob <- function(x, digits = max(3, getOption("digits") - 3), ...) {
  # Compact one-line display of a fitted "loggammarob" object:
  # the call, then location/scale/shape and E(exp(X)) estimates.
  cat("\nCall:\n", deparse(x$call), "\n\n", sep = "")
  field.labels <- c("Location: ", " Scale: ", " Shape: ", " E(exp(X)): ")
  field.values <- vapply(x[c("mu", "sigma", "lambda", "eta")],
                         function(v) format(v, digits = digits),
                         character(1))
  # cat() with default sep inserts one space between label and value;
  # reproduce that layout with an explicit paste.
  cat(paste(field.labels, field.values, sep = " ", collapse = ""),
      "\n", sep = "")
  invisible(x)
}
#############################################################
# summary.loggammarob function
# Author: C. Agostinelli, A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
# Maintainer e-mail: claudio@unive.it
# Date: January, 1, 2013
# Version: 0.1
# Copyright (C) 2013 C. Agostinelli A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
#############################################################
########### SUMMARY
summary.loggammarob <- function(object, p=NULL, conf.level=0.95, prob=0.00001, ...) {
  # Summary method for "loggammarob" fits: augments the object with
  # standard errors and confidence intervals for mu, sigma, lambda,
  # eta, and (optionally) the quantiles of orders 'p'.
  #
  # Arguments:
  #   object     - fitted loggammarob object.
  #   p          - optional vector of quantile orders in (0, 1);
  #                values outside (0, 1) are silently dropped.
  #   conf.level - scalar, or a vector of length 4 + length(p)
  #                (one level per interval: mu, sigma, lambda, eta,
  #                then each quantile).
  #   prob       - tail probability forwarded to the asymptotic
  #                variance helpers.
  # Inference is only computed for methods "WL", "oneWL" and "ML";
  # otherwise the object is returned with just a call and new class.
  if ((object$method %in% c("WL", "oneWL", "ML"))) {
    # Keep only valid quantile orders (ifelse here is equivalent to
    # the plain logical mask p > 0 & p < 1).
    p <- p[ifelse(p > 0 & p < 1, TRUE, FALSE)]
    nrep <- 4+length(p)
    if (length(conf.level) == 1)
      conf.level <- rep(conf.level, nrep)
    if (length(conf.level) != nrep)
      stop("'conf.level' must have length equal to 1 or 4+length(p)")
    # Two-sided normal quantiles, one per interval.
    z <- qnorm(1-(1-conf.level)/2)
    if (is.finite(object$eta)) {
      # Asymptotic standard deviations from project helpers (not
      # visible here): asd = (mu, sigma, lambda), asdeta = eta.
      asd <- sqrt(diag(ACOV.ML.loggamma(sigma=object$sigma,lambda=object$lambda,prob=prob)$cov))
      asdeta <- sqrt(AVAR.ML.eta.loggamma(mu=object$mu, sigma=object$sigma, lambda=object$lambda, prob=prob, eps=0.0001, npoints=100000))
    } else {
      asd <- rep(NA,3)
      asdeta <- NA
    }
    # sum(weights) acts as the effective sample size in the s.e. scaling.
    object$muconf.int <- object$mu+c(-1,1)*z[1]*asd[1]/sqrt(sum(object$weights))
    object$sigmaconf.int <- object$sigma+c(-1,1)*z[2]*asd[2]/sqrt(sum(object$weights))
    object$lambdaconf.int <- object$lambda+c(-1,1)*z[3]*asd[3]/sqrt(sum(object$weights))
    object$etaconf.int <- object$eta+c(-1,1)*z[4]*asdeta/sqrt(sum(object$weights))
    attr(object$muconf.int, "conf.level") <- conf.level[1]
    attr(object$sigmaconf.int, "conf.level") <- conf.level[2]
    attr(object$lambdaconf.int, "conf.level") <- conf.level[3]
    attr(object$etaconf.int, "conf.level") <- conf.level[4]
    object$muse <- asd[1]/sqrt(sum(object$weights))
    object$sigmase <- asd[2]/sqrt(sum(object$weights))
    object$lambdase <- asd[3]/sqrt(sum(object$weights))
    object$etase <- asdeta/sqrt(sum(object$weights))
    object$p <- p
    if (!is.null(p)) {
      # One quantile estimate, s.e. and CI row per requested order;
      # confidence levels for quantiles start at index 4+1 of z.
      object$q <- asdq <- rep(NA, length(p))
      object$qconf.int <- matrix(NA, nrow=length(p), ncol=2)
      for (i in 1:length(p)) {
        object$q[i] <- qloggamma(p=p[i], mu=object$mu, sigma=object$sigma, lambda=object$lambda)
        if (is.finite(object$eta))
          asdq[i] <- sqrt(AVAR.ML.quantile.loggamma(p=p[i], mu=object$mu, sigma=object$sigma, lambda=object$lambda, prob=prob))
        else
          asdq[i] <- NA
        object$qconf.int[i,] <- object$q[i]+c(-1,1)*z[4+i]*asdq[i]/sqrt(sum(object$weights))
      }
      ##### object$qconf.int <- drop(qconf.int)
      attr(object$qconf.int, "conf.level") <- conf.level[4+(1:length(p))]
      object$qse <- asdq/sqrt(sum(object$weights))
    }
  }
  object$call <- match.call()
  class(object) <- "summary.loggammarob"
  return(object)
}
#############################################################
# print.summary.loggammarob function
# Author: C. Agostinelli, A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
# Maintainer e-mail: claudio@unive.it
# Date: January, 1, 2013
# Version: 0.1
# Copyright (C) 2013 C. Agostinelli A. Marazzi,
# V.J. Yohai and A. Randriamiharisoa
#############################################################
#PRINT.SUMMARY
print.summary.loggammarob <- function(x, digits = max(3, getOption("digits") - 3), ...) {
  # Print method for "summary.loggammarob": shows the call, then each
  # parameter estimate (mu, sigma, lambda, eta and optional quantiles)
  # with its standard error and confidence interval when available.
  # Inference is only available for these estimation methods.
  has.inference <- x$method %in% c("WL", "oneWL", "ML")
  # Helper: print one "<label> <estimate>" line and, when inference is
  # available, the s.e., the confidence interval and its level.  'gap'
  # is the trailing blank-line run ("\n\n" for mu/sigma/lambda,
  # "\n\n\n" for eta and quantiles) so the layout matches the
  # historical output exactly.
  show_est <- function(label, est, se, lo, hi, conf, gap) {
    cat(label, format(est, digits = digits))
    if (has.inference) {
      cat(" s.e. ", format(se, digits = digits), "\n")
      cat("(", format(lo, digits = digits), ", ",
          format(hi, digits = digits), ") \n")
      cat(format(100 * conf), paste0("percent confidence interval", gap))
    } else {
      cat("\n")
    }
  }
  cat("\nCall:\n", deparse(x$call), "\n\n", sep = "")
  show_est("Location: ", x$mu, x$muse,
           x$muconf.int[1L], x$muconf.int[2L],
           attr(x$muconf.int, "conf.level"), "\n\n")
  show_est("Scale: ", x$sigma, x$sigmase,
           x$sigmaconf.int[1L], x$sigmaconf.int[2L],
           attr(x$sigmaconf.int, "conf.level"), "\n\n")
  show_est("Shape: ", x$lambda, x$lambdase,
           x$lambdaconf.int[1L], x$lambdaconf.int[2L],
           attr(x$lambdaconf.int, "conf.level"), "\n\n")
  show_est("Mean(exp(X)): ", x$eta, x$etase,
           x$etaconf.int[1L], x$etaconf.int[2L],
           attr(x$etaconf.int, "conf.level"), "\n\n\n")
  ######### Quantiles estimation and confidence interval
  if (!is.null(x$p)) {
    # seq_along() (rather than 1:length(x$p)) is safe for a
    # zero-length p vector.
    for (i in seq_along(x$p)) {
      show_est(paste("Quantile of order ", format(x$p[i], digits = digits), ": "),
               x$q[i], x$qse[i],
               x$qconf.int[i, 1L], x$qconf.int[i, 2L],
               attr(x$qconf.int, "conf.level")[i], "\n\n\n")
    }
  }
  if (has.inference)
    cat("\n\n")
  else
    cat("\n\nConfidence intervals and Standard Errors are available only for 'WL', 'oneWL' and 'ML'\n\n\n")
  # Weight summary only for the weighted-likelihood methods.
  if (x$method %in% c("WL", "oneWL", "WQTau"))
    summarizeRobWeights(x$weights, digits = digits, ...)
  invisible(x)
}
|
#' AutoH2oDRFRegression is an automated H2O modeling framework with grid-tuning and model evaluation
#'
#' AutoH2oDRFRegression is an automated H2O modeling framework with grid-tuning and model evaluation that runs a variety of steps. First, the function will run a random grid tune over N number of models and find which model is the best (a default model is always included in that set). Once the model is identified and built, several other outputs are generated: validation data with predictions, evaluation plot, evaluation boxplot, evaluation metrics, variable importance, partial dependence calibration plots, partial dependence calibration box plots, and column names used in model fitting.
#' @author Adrian Antico
#' @family Supervised Learning
#' @param data This is your data set for training and testing your model
#' @param ValidationData This is your holdout data set used in modeling to refine your hyperparameters.
#' @param TestData This is your holdout data set. The model uses both training and validation data in the training process, so you should evaluate out-of-sample performance with this data set.
#' @param TargetColumnName Either supply the target column name OR the column number where the target is located (but not mixed types).
#' @param FeatureColNames Either supply the feature column names OR the column numbers where the features are located (but not mixed types)
#' @param TransformNumericColumns Set to NULL to do nothing; otherwise supply the column names of numeric variables you want transformed
#' @param eval_metric This is the metric used to identify best grid tuned model. Choose from "MSE", "RMSE", "MAE", "RMSLE"
#' @param Trees The maximum number of trees you want in your models
#' @param GridTune Set to TRUE to run a grid tuning procedure. Set a number in MaxModelsInGrid to tell the procedure how many models you want to test.
#' @param MaxMem Set the maximum amount of memory you'd like to dedicate to the model run. E.g. "32G"
#' @param MaxModelsInGrid Number of models to test from grid options (1080 total possible options)
#' @param model_path A character string of your path file to where you want your output saved
#' @param ModelID A character string to name your model and output
#' @param NumOfParDepPlots Tell the function the number of partial dependence calibration plots you want to create. Calibration boxplots will only be created for numerical features (not dummy variables)
#' @param ReturnModelObjects Set to TRUE to output all modeling objects (E.g. plots and evaluation metrics)
#' @param SaveModelObjects Set to TRUE to save all modeling objects to your model_path directory
#' @param IfSaveModel Set to "mojo" to save a mojo file, otherwise "standard" to save a regular H2O model object
#' @param StopH2O For use in other functions.
#' @examples
#' \donttest{
#' Correl <- 0.85
#' N <- 1000
#' data <- data.table::data.table(Target = runif(N))
#' data[, x1 := qnorm(Target)]
#' data[, x2 := runif(N)]
#' data[, Independent_Variable1 := log(pnorm(Correl * x1 +
#' sqrt(1-Correl^2) * qnorm(x2)))]
#' data[, Independent_Variable2 := (pnorm(Correl * x1 +
#' sqrt(1-Correl^2) * qnorm(x2)))]
#' data[, Independent_Variable3 := exp(pnorm(Correl * x1 +
#' sqrt(1-Correl^2) * qnorm(x2)))]
#' data[, Independent_Variable4 := exp(exp(pnorm(Correl * x1 +
#' sqrt(1-Correl^2) * qnorm(x2))))]
#' data[, Independent_Variable5 := sqrt(pnorm(Correl * x1 +
#' sqrt(1-Correl^2) * qnorm(x2)))]
#' data[, Independent_Variable6 := (pnorm(Correl * x1 +
#' sqrt(1-Correl^2) * qnorm(x2)))^0.10]
#' data[, Independent_Variable7 := (pnorm(Correl * x1 +
#' sqrt(1-Correl^2) * qnorm(x2)))^0.25]
#' data[, Independent_Variable8 := (pnorm(Correl * x1 +
#' sqrt(1-Correl^2) * qnorm(x2)))^0.75]
#' data[, Independent_Variable9 := (pnorm(Correl * x1 +
#' sqrt(1-Correl^2) * qnorm(x2)))^2]
#' data[, Independent_Variable10 := (pnorm(Correl * x1 +
#' sqrt(1-Correl^2) * qnorm(x2)))^4]
#' data[, Independent_Variable11 := as.factor(
#' ifelse(Independent_Variable2 < 0.20, "A",
#' ifelse(Independent_Variable2 < 0.40, "B",
#' ifelse(Independent_Variable2 < 0.6, "C",
#' ifelse(Independent_Variable2 < 0.8, "D", "E")))))]
#' data[, ':=' (x1 = NULL, x2 = NULL)]
#' TestModel <- AutoH2oDRFRegression(data,
#' ValidationData = NULL,
#' TestData = NULL,
#' TargetColumnName = "Target",
#' FeatureColNames = 2:ncol(data),
#' TransformNumericColumns = NULL,
#' eval_metric = "RMSE",
#' Trees = 50,
#' GridTune = FALSE,
#' MaxMem = "32G",
#' MaxModelsInGrid = 10,
#' model_path = NULL,
#' ModelID = "FirstModel",
#' NumOfParDepPlots = 3,
#' ReturnModelObjects = TRUE,
#' SaveModelObjects = FALSE,
#' IfSaveModel = "mojo",
#' StopH2O = TRUE)
#' }
#' @return Saves to file and returned in list: VariableImportance.csv, Model, ValidationData.csv, EvaluationPlot.png, EvaluationBoxPlot.png, EvaluationMetrics.csv, ParDepPlots.R a named list of features with partial dependence calibration plots, ParDepBoxPlots.R, GridCollect, GridList, and Transformation metadata
#' @export
AutoH2oDRFRegression <- function(data,
                                 ValidationData = NULL,
                                 TestData = NULL,
                                 TargetColumnName = NULL,
                                 FeatureColNames = NULL,
                                 TransformNumericColumns = NULL,
                                 eval_metric = "RMSE",
                                 Trees = 50,
                                 GridTune = FALSE,
                                 MaxMem = "32G",
                                 MaxModelsInGrid = 2,
                                 model_path = NULL,
                                 ModelID = "FirstModel",
                                 NumOfParDepPlots = 3,
                                 ReturnModelObjects = TRUE,
                                 SaveModelObjects = FALSE,
                                 IfSaveModel = "mojo",
                                 StopH2O = TRUE) {
  # Regression Check Arguments----
  # NOTE: invalid arguments only warn() rather than stop(), so
  # execution proceeds even with bad settings.
  if (!(tolower(eval_metric) %chin% c("mse", "rmse", "mae", "rmsle"))) {
    warning("eval_metric not in MSE, RMSE, MAE, RMSLE")
  }
  if (Trees < 1)
    warning("Trees must be greater than 1")
  if (!GridTune %in% c(TRUE, FALSE))
    warning("GridTune needs to be TRUE or FALSE")
  if (MaxModelsInGrid < 1 & GridTune == TRUE) {
    warning("MaxModelsInGrid needs to be at least 1")
  }
  if (!is.null(model_path)) {
    if (!is.character(model_path))
      warning("model_path needs to be a character type")
  }
  if (!is.character(ModelID))
    warning("ModelID needs to be a character type")
  if (NumOfParDepPlots < 0)
    warning("NumOfParDepPlots needs to be a positive number")
  if (!(ReturnModelObjects %in% c(TRUE, FALSE)))
    warning("ReturnModelObjects needs to be TRUE or FALSE")
  if (!(SaveModelObjects %in% c(TRUE, FALSE)))
    warning("SaveModelObjects needs to be TRUE or FALSE")
  # Regression Ensure data is a data.table----
  if (!data.table::is.data.table(data)) {
    data <- data.table::as.data.table(data)
  }
  # Regression Ensure data is a data.table----
  if (!is.null(ValidationData)) {
    if (!data.table::is.data.table(ValidationData)) {
      ValidationData <- data.table::as.data.table(ValidationData)
    }
  }
  # Regression Ensure data is a data.table----
  if (!is.null(TestData)) {
    if (!data.table::is.data.table(TestData)) {
      TestData <- data.table::as.data.table(TestData)
    }
  }
  # Convert TransformNumericColumns to Names if not character----
  if (!is.null(TransformNumericColumns)) {
    if(!is.character(TransformNumericColumns)) {
      TransformNumericColumns <- names(data)[TransformNumericColumns]
    }
  }
  # Transform data, ValidationData, and TestData----
  # Case 1: caller supplied ValidationData and requested transforms —
  # fit the transformations on the training data, then apply the same
  # fitted transforms to the holdout sets.
  if (!is.null(ValidationData) &
      !is.null(TransformNumericColumns)) {
    # Mean of the (untransformed) training target; kept for later use.
    MeanTrainTarget <- data[, mean(get(TargetColumnName))]
    Output <- AutoTransformationCreate(
      data,
      ColumnNames = TransformNumericColumns,
      Methods = c("BoxCox",
                  "YeoJohnson",
                  "Asinh",
                  "Asin",
                  "Logit"),
      Path = model_path,
      TransID = ModelID,
      SaveOutput = SaveModelObjects
    )
    data <- Output$Data
    TransformationResults <- Output$FinalResults
    # Transform ValidationData----
    ValidationData <- AutoTransformationScore(
      ScoringData = ValidationData,
      Type = "Apply",
      FinalResults = TransformationResults,
      TransID = NULL,
      Path = NULL
    )
    # Transform TestData----
    if (!is.null(TestData)) {
      TestData <- AutoTransformationScore(
        ScoringData = TestData,
        Type = "Apply",
        FinalResults = TransformationResults,
        TransID = NULL,
        Path = NULL
      )
    }
  }
  # Regression Data Partition----
  # Case 2: no holdout sets supplied — partition 70/20/10 into
  # train/validation/test (with or without transformations).
  if (is.null(ValidationData) & is.null(TestData)) {
    if (!is.null(TransformNumericColumns)) {
      # Partition----
      dataSets <- AutoDataPartition(
        data,
        NumDataSets = 3,
        Ratios = c(0.70, 0.20, 0.10),
        PartitionType = "random",
        StratifyColumnNames = NULL,
        TimeColumnName = NULL
      )
      data <- dataSets$TrainData
      ValidationData <- dataSets$ValidationData
      TestData <- dataSets$TestData
      # Mean of data----
      MeanTrainTarget <- data[, mean(get(TargetColumnName))]
      # Transform data sets----
      Output <- AutoTransformationCreate(
        data,
        ColumnNames = TransformNumericColumns,
        Methods = c("BoxCox",
                    "YeoJohnson",
                    "Asinh",
                    "Asin",
                    "Logit"),
        Path = model_path,
        TransID = ModelID,
        SaveOutput = SaveModelObjects
      )
      data <- Output$Data
      TransformationResults <- Output$FinalResults
      # Transform ValidationData----
      ValidationData <- AutoTransformationScore(
        ScoringData = ValidationData,
        Type = "Apply",
        FinalResults = TransformationResults,
        TransID = NULL,
        Path = NULL
      )
      # Transform TestData----
      # TestData is non-NULL here (just created by the partition), so
      # this guard always passes in this branch.
      if (!is.null(TestData)) {
        TestData <- AutoTransformationScore(
          ScoringData = TestData,
          Type = "Apply",
          FinalResults = TransformationResults,
          TransID = NULL,
          Path = NULL
        )
      }
    } else {
      dataSets <- AutoDataPartition(
        data,
        NumDataSets = 3,
        Ratios = c(0.70, 0.20, 0.10),
        PartitionType = "random",
        StratifyColumnNames = NULL,
        TimeColumnName = NULL
      )
      data <- dataSets$TrainData
      ValidationData <- dataSets$ValidationData
      TestData <- dataSets$TestData
      MeanTrainTarget <- data[, mean(get(TargetColumnName))]
    }
  }
  # Regression ModelDataPrep----
  # Convert character columns to factors (H2O requires factors).
  dataTrain <- ModelDataPrep(data = data,
                             Impute = FALSE,
                             CharToFactor = TRUE)
  # Regression ModelDataPrep----
  dataTest <- ModelDataPrep(data = ValidationData,
                            Impute = FALSE,
                            CharToFactor = TRUE)
  # Regression ModelDataPrep----
  if (!is.null(TestData)) {
    TestData <- ModelDataPrep(data = TestData,
                              Impute = FALSE,
                              CharToFactor = TRUE)
  }
  # Regression Target Name Storage----
  # TargetColumnName may be a name or a column index; normalize to name.
  if (is.character(TargetColumnName)) {
    Target <- TargetColumnName
  } else {
    Target <- names(data)[TargetColumnName]
  }
  # Regression Get Min Value of Target Data----
  # Used later to decide whether log-based metrics (Poisson, MSLE, KL)
  # are applicable.
  MinVal <- min(data[[eval(Target)]], na.rm = TRUE)
  # Regression Grid Tune Check----
  if (GridTune) {
    # Regression Start Up H2O----
    h2o::h2o.init(max_mem_size = MaxMem,
                  enable_assertions = FALSE)
    # Regression Define data sets----
    datatrain <- h2o::as.h2o(dataTrain)
    datavalidate <- h2o::as.h2o(dataTest)
    # Regression Grid Tune Search Criteria----
    search_criteria <- list(
      strategy = "RandomDiscrete",
      max_runtime_secs = 3600 * 24 * 7,
      max_models = MaxModelsInGrid,
      seed = 1234,
      stopping_rounds = 10,
      stopping_metric = toupper(eval_metric),
      stopping_tolerance = 1e-3
    )
    # Regression Grid Parameters----
    hyper_params <- list(
      max_depth = c(6, 9, 12),
      sample_rate = c(0.5, 0.75, 1.0),
      col_sample_rate_per_tree = c(0.5, 0.75, 1.0),
      col_sample_rate_change_per_level = c(0.9, 1.0, 1.1),
      min_rows = c(1, 10),
      nbins = c(10, 20, 30),
      nbins_cats = c(64, 256, 512),
      histogram_type = c("UniformAdaptive",
                         "QuantilesGlobal",
                         "RoundRobin")
    )
    # Regression Grid Train Model----
    # The grid handle itself is unused afterwards; the best model is
    # retrieved by grid_id via h2o.getGrid below.
    grid <- h2o::h2o.grid(
      hyper_params = hyper_params,
      search_criteria = search_criteria,
      is_supervised = TRUE,
      algorithm = "randomForest",
      grid_id = paste0(ModelID, "_Grid"),
      x = FeatureColNames,
      y = TargetColumnName,
      ntrees = Trees,
      training_frame = datatrain,
      validation_frame = datavalidate,
      max_runtime_secs = 3600 * 24 * 7,
      stopping_rounds = 10,
      stopping_tolerance = 1e-3,
      stopping_metric = toupper(eval_metric),
      score_tree_interval = 10,
      seed = 1234
    )
    # Regression Get Best Model----
    # decreasing = FALSE: all supported metrics are errors, lower is
    # better, so the first model is the best one.
    Grid_Out <- h2o::h2o.getGrid(
      grid_id = paste0(ModelID, "_Grid"),
      sort_by = eval_metric,
      decreasing = FALSE
    )
    # Regression Collect Best Grid Model----
    grid_model <- h2o::h2o.getModel(Grid_Out@model_ids[[1]])
  }
  # Regression Start Up H2O----
  if (!GridTune) {
    h2o::h2o.init(max_mem_size = MaxMem,
                  enable_assertions = FALSE)
    # Regression Define data sets----
    datatrain <- h2o::as.h2o(dataTrain)
    datavalidate <- h2o::as.h2o(dataTest)
  }
  # Regression Baseline Model----
  # A default (untuned) model is always built; the grid winner must
  # beat it to be selected.
  base_model <- h2o::h2o.randomForest(
    x = FeatureColNames,
    y = TargetColumnName,
    training_frame = datatrain,
    validation_frame = datavalidate,
    model_id = ModelID,
    ntrees = Trees
  )
  # Regression Grab Evaluation Metric----
  # Score grid and/or base model on TestData when available, otherwise
  # on the validation frame, using the requested error metric.
  if (GridTune) {
    if (!is.null(TestData)) {
      datatest <- h2o::as.h2o(TestData)
      if (tolower(eval_metric) == "mse") {
        GridModelEval <-
          h2o::h2o.mse(h2o::h2o.performance(model = grid_model,
                                            newdata = datatest))
        BaseModelEval <-
          h2o::h2o.mse(h2o::h2o.performance(model = base_model,
                                            newdata = datatest))
      } else if (tolower(eval_metric) == "rmse") {
        GridModelEval <-
          h2o::h2o.rmse(h2o::h2o.performance(model = grid_model,
                                             newdata = datatest))
        BaseModelEval <-
          h2o::h2o.rmse(h2o::h2o.performance(model = base_model,
                                             newdata = datatest))
      } else if (tolower(eval_metric) == "mae") {
        GridModelEval <-
          h2o::h2o.mae(h2o::h2o.performance(model = grid_model,
                                            newdata = datatest))
        BaseModelEval <-
          h2o::h2o.mae(h2o::h2o.performance(model = base_model,
                                            newdata = datatest))
      } else if (tolower(eval_metric) == "rmsle") {
        GridModelEval <-
          h2o::h2o.rmsle(h2o::h2o.performance(model = grid_model,
                                              newdata = datatest))
        BaseModelEval <-
          h2o::h2o.rmsle(h2o::h2o.performance(model = base_model,
                                              newdata = datatest))
      }
    } else {
      if (tolower(eval_metric) == "mse") {
        GridModelEval <-
          h2o::h2o.mse(h2o::h2o.performance(model = grid_model,
                                            newdata = datavalidate))
        BaseModelEval <-
          h2o::h2o.mse(h2o::h2o.performance(model = base_model,
                                            newdata = datavalidate))
      } else if (tolower(eval_metric) == "rmse") {
        GridModelEval <-
          h2o::h2o.rmse(h2o::h2o.performance(model = grid_model,
                                             newdata = datavalidate))
        BaseModelEval <-
          h2o::h2o.rmse(h2o::h2o.performance(model = base_model,
                                             newdata = datavalidate))
      } else if (tolower(eval_metric) == "mae") {
        GridModelEval <-
          h2o::h2o.mae(h2o::h2o.performance(model = grid_model,
                                            newdata = datavalidate))
        BaseModelEval <-
          h2o::h2o.mae(h2o::h2o.performance(model = base_model,
                                            newdata = datavalidate))
      } else if (tolower(eval_metric) == "rmsle") {
        GridModelEval <-
          h2o::h2o.rmsle(h2o::h2o.performance(model = grid_model,
                                              newdata = datavalidate))
        BaseModelEval <-
          h2o::h2o.rmsle(h2o::h2o.performance(model = base_model,
                                              newdata = datavalidate))
      }
    }
  } else {
    if (!is.null(TestData)) {
      datatest <- h2o::as.h2o(TestData)
      if (tolower(eval_metric) == "mse") {
        BaseModelEval <-
          h2o::h2o.mse(h2o::h2o.performance(model = base_model,
                                            newdata = datatest))
      } else if (tolower(eval_metric) == "rmse") {
        BaseModelEval <-
          h2o::h2o.rmse(h2o::h2o.performance(model = base_model,
                                             newdata = datatest))
      } else if (tolower(eval_metric) == "mae") {
        BaseModelEval <-
          h2o::h2o.mae(h2o::h2o.performance(model = base_model,
                                            newdata = datatest))
      } else if (tolower(eval_metric) == "rmsle") {
        BaseModelEval <-
          h2o::h2o.rmsle(h2o::h2o.performance(model = base_model,
                                              newdata = datatest))
      }
    } else {
      if (tolower(eval_metric) == "mse") {
        BaseModelEval <-
          h2o::h2o.mse(h2o::h2o.performance(model = base_model,
                                            newdata = datavalidate))
      } else if (tolower(eval_metric) == "rmse") {
        BaseModelEval <-
          h2o::h2o.rmse(h2o::h2o.performance(model = base_model,
                                             newdata = datavalidate))
      } else if (tolower(eval_metric) == "mae") {
        BaseModelEval <-
          h2o::h2o.mae(h2o::h2o.performance(model = base_model,
                                            newdata = datavalidate))
      } else if (tolower(eval_metric) == "rmsle") {
        BaseModelEval <-
          h2o::h2o.rmsle(h2o::h2o.performance(model = base_model,
                                              newdata = datavalidate))
      }
    }
  }
  # Regression Pick Winner----
  # Lower error wins.
  if (GridTune) {
    if (GridModelEval < BaseModelEval) {
      FinalModel <- grid_model
    } else {
      FinalModel <- base_model
    }
  } else {
    FinalModel <- base_model
  }
  # Regression Save Model----
  if (SaveModelObjects) {
    if (tolower(IfSaveModel) == "mojo") {
      SaveModel <- h2o::h2o.saveMojo(object = FinalModel,
                                     path = model_path,
                                     force = TRUE)
      h2o::h2o.download_mojo(
        model = FinalModel,
        path = model_path,
        get_genmodel_jar = TRUE,
        genmodel_path = model_path,
        genmodel_name = ModelID
      )
    } else {
      SaveModel <- h2o::h2o.saveModel(object = FinalModel,
                                      path = model_path,
                                      force = TRUE)
    }
  }
  # Regression Score Final Test Data----
  if (!is.null(TestData)) {
    Predict <-
      data.table::as.data.table(h2o::h2o.predict(object = FinalModel,
                                                 newdata = datatest))
  } else {
    Predict <-
      data.table::as.data.table(h2o::h2o.predict(object = FinalModel,
                                                 newdata = datavalidate))
  }
  # Regression Variable Importance----
  VariableImportance <-
    data.table::as.data.table(h2o::h2o.varimp(object = FinalModel))
  if (SaveModelObjects) {
    data.table::fwrite(VariableImportance,
                       file = paste0(model_path,
                                     "/",
                                     ModelID, "_VariableImportance.csv"))
  }
  # Regression Format Variable Importance Table----
  data.table::setnames(
    VariableImportance,
    c(
      "variable",
      "relative_importance",
      "scaled_importance",
      "percentage"
    ),
    c(
      "Variable",
      "RelativeImportance",
      "ScaledImportance",
      "Percentage"
    )
  )
  VariableImportance[, ':=' (
    RelativeImportance = round(RelativeImportance, 4),
    ScaledImportance = round(ScaledImportance, 4),
    Percentage = round(Percentage, 4)
  )]
  # Regression H2O Shutdown----
  # All needed results have been pulled into R objects at this point,
  # so the cluster can be shut down before post-processing.
  if(StopH2O) {
    h2o::h2o.shutdown(prompt = FALSE)
  }
  # Regression Create Validation Data----
  if (!is.null(TestData)) {
    ValidationData <-
      data.table::as.data.table(cbind(TestData, Predict))
  } else {
    ValidationData <-
      data.table::as.data.table(cbind(dataTest, Predict))
  }
  # Regression Change Prediction Name----
  data.table::setnames(ValidationData, "predict", "Predict")
  # Inverse Transform----
  if (!is.null(TransformNumericColumns)) {
    # Append record for Predicted Column----
    if (GridTune) {
      TransformationResults <-
        TransformationResults[ColumnName != "Predict"]
    }
    # Reuse the target's transformation for the "Predict" column so the
    # inverse transform maps predictions back to the original scale.
    TransformationResults <- data.table::rbindlist(list(
      TransformationResults,
      data.table::data.table(
        ColumnName = "Predict",
        MethodName = rep(TransformationResults[ColumnName == eval(TargetColumnName),
                                               MethodName], 1),
        Lambda = rep(TransformationResults[ColumnName == eval(TargetColumnName),
                                           Lambda], 1),
        NormalizedStatistics = rep(0, 1)
      )
    ))
    # If Actual target columnname == "Target" remove the duplicate version----
    if (length(unique(TransformationResults[["ColumnName"]])) != nrow(TransformationResults)) {
      temp <- TransformationResults[, .N, by = "ColumnName"][N != 1][[1]]
      temp1 <- which(names(ValidationData) == temp)[1]
      ValidationData[, eval(names(data)[temp1]) := NULL]
      TransformationResults <- TransformationResults[, ID := 1:.N][
        ID != which(TransformationResults[["ID"]] == temp1)][
          , ID := NULL]
    }
    # Transform Target and Predicted Value----
    ValidationData <- AutoTransformationScore(
      ScoringData = ValidationData,
      Type = "Inverse",
      FinalResults = TransformationResults,
      TransID = NULL,
      Path = NULL
    )
  }
  # Regression Get R2----
  # NOTE(review): r_squared is computed but never returned or saved —
  # the metrics loop below recomputes R2 independently.
  r_squared <-
    (ValidationData[, stats::cor(get(TargetColumnName), Predict)][[1]]) ^ 2
  # Regression Save Validation Data to File----
  if (SaveModelObjects) {
    data.table::fwrite(ValidationData,
                       file = paste0(model_path,
                                     "/",
                                     ModelID,
                                     "_ValidationData.csv"))
  }
  # Regression Evaluation Calibration Plot----
  EvaluationPlot <- EvalPlot(
    data = ValidationData,
    PredictionColName = "Predict",
    TargetColName = Target,
    GraphType = "calibration",
    PercentileBucket = 0.05,
    aggrfun = function(x)
      mean(x, na.rm = TRUE)
  )
  # Regression Evaluation Plot Update Title----
  # NOTE(review): max() picks the WORSE of the two model scores for the
  # title even though the better (lower-error) model was kept — confirm
  # whether min() was intended.
  if (GridTune) {
    val <- max(GridModelEval, BaseModelEval)
    EvaluationPlot <- EvaluationPlot +
      ggplot2::ggtitle(paste0(
        "Random Forest Calibration Evaluation Plot: ",
        toupper(eval_metric),
        " = ",
        round(val, 3)
      ))
  } else {
    EvaluationPlot <- EvaluationPlot +
      ggplot2::ggtitle(paste0(
        "Calibration Evaluation Plot: ",
        toupper(eval_metric),
        " = ",
        round(BaseModelEval, 3)
      ))
  }
  # Save plot to file
  if (SaveModelObjects) {
    ggplot2::ggsave(paste0(model_path,
                           "/",
                           ModelID,
                           "_EvaluationPlot.png"))
  }
  # Regression Evaluation BoxPlot----
  EvaluationBoxPlot <- EvalPlot(
    data = ValidationData,
    PredictionColName = "Predict",
    TargetColName = Target,
    GraphType = "boxplot",
    PercentileBucket = 0.05,
    aggrfun = function(x)
      mean(x, na.rm = TRUE)
  )
  # Regression Evaluation Plot Update Title----
  if (GridTune) {
    val <- max(GridModelEval, BaseModelEval)
    EvaluationBoxPlot <- EvaluationBoxPlot +
      ggplot2::ggtitle(paste0(
        "Random Forest Calibration Evaluation Plot: ",
        toupper(eval_metric),
        " = ",
        round(val, 3)
      ))
  } else {
    EvaluationBoxPlot <- EvaluationBoxPlot +
      ggplot2::ggtitle(paste0(
        "Random Forest Calibration Evaluation Plot: ",
        toupper(eval_metric),
        " = ",
        round(BaseModelEval, 3)
      ))
  }
  # Save plot to file
  if (SaveModelObjects) {
    ggplot2::ggsave(paste0(model_path,
                           "/",
                           ModelID,
                           "_EvaluationBoxPlot.png"))
  }
  # Regression Evaluation Metrics----
  # 999999 is a sentinel for "metric not computed"; such rows are
  # filtered out after the loop.
  EvaluationMetrics <-
    data.table::data.table(
      Metric = c("Poisson", "MAE",
                 "MAPE", "MSE", "MSLE",
                 "KL", "CS", "R2"),
      MetricValue = rep(999999, 8)
    )
  i <- 0
  # NOTE(review): the metric expressions below reference a bare column
  # named `Target`, not the actual target column name held in the
  # `Target` variable. If the data's target column is not literally
  # named "Target", each expression errors and is silently swallowed by
  # tryCatch, leaving the sentinel in place — confirm intended.
  for (metric in c("poisson", "mae", "mape", "mse", "msle", "kl", "cs", "r2")) {
    i <- as.integer(i + 1)
    tryCatch({
      # Regression Grid Evaluation Metrics----
      if (tolower(metric) == "poisson") {
        # Log-based metrics require strictly positive targets and
        # predictions.
        if (MinVal > 0 &
            min(ValidationData[["Predict"]], na.rm = TRUE) > 0) {
          ValidationData[, Metric := Predict - Target * log(Predict + 1)]
          Metric <- ValidationData[, mean(Metric, na.rm = TRUE)]
        }
      } else if (tolower(metric) == "mae") {
        ValidationData[, Metric := abs(Target - Predict)]
        Metric <- ValidationData[, mean(Metric, na.rm = TRUE)]
      } else if (tolower(metric) == "mape") {
        ValidationData[, Metric := abs((Target - Predict) / (Target + 1))]
        Metric <- ValidationData[, mean(Metric, na.rm = TRUE)]
      } else if (tolower(metric) == "mse") {
        ValidationData[, Metric := (Target - Predict) ^ 2]
        Metric <- ValidationData[, mean(Metric, na.rm = TRUE)]
      } else if (tolower(metric) == "msle") {
        if (MinVal > 0 &
            min(ValidationData[["Predict"]], na.rm = TRUE) > 0) {
          ValidationData[, Metric := (log(Target + 1) - log(Predict + 1)) ^ 2]
          Metric <- ValidationData[, mean(Metric, na.rm = TRUE)]
        }
      } else if (tolower(metric) == "kl") {
        if (MinVal > 0 &
            min(ValidationData[["Predict"]], na.rm = TRUE) > 0) {
          ValidationData[, Metric := Target * log((Target + 1) /
                                                    (Predict + 1))]
          Metric <- ValidationData[, mean(Metric, na.rm = TRUE)]
        }
      } else if (tolower(metric) == "cs") {
        # Cosine similarity between target and prediction vectors.
        ValidationData[, ':=' (
          Metric1 = Target * Predict,
          Metric2 = Target ^ 2,
          Metric3 = Predict ^ 2
        )]
        Metric <-
          ValidationData[, sum(Metric1, na.rm = TRUE)] / (sqrt(ValidationData[, sum(Metric2, na.rm = TRUE)]) *
                                                            sqrt(ValidationData[, sum(Metric3, na.rm = TRUE)]))
      } else if (tolower(metric) == "r2") {
        Metric <-
          (ValidationData[, stats::cor(Target, Predict)][[1]]) ^ 2
      }
      data.table::set(
        EvaluationMetrics,
        i = i,
        j = 2L,
        value = round(Metric, 4)
      )
      # NOTE(review): EvaluationMetrics has only 2 columns, so this
      # set() on j = 3L errors; the error is swallowed by tryCatch and
      # the j = 2L assignment above survives — confirm this is intended
      # rather than leftover code.
      data.table::set(EvaluationMetrics,
                      i = i,
                      j = 3L,
                      value = NA)
    }, error = function(x)
      "skip")
  }
  # Regression Save EvaluationMetrics to File----
  EvaluationMetrics <- EvaluationMetrics[MetricValue != 999999]
  if (SaveModelObjects) {
    data.table::fwrite(EvaluationMetrics,
                       file = paste0(model_path,
                                     "/",
                                     ModelID,
                                     "_EvaluationMetrics.csv"))
  }
  # Regression Partial Dependence----
  # Build calibration and boxplot partial-dependence plots for the top
  # NumOfParDepPlots features (by variable importance order).
  ParDepPlots <- list()
  j <- 0
  ParDepBoxPlots <- list()
  k <- 0
  # NOTE(review): the plots are built from row i of VariableImportance
  # but stored under the name at row j/k; if a tryCatch skips an i, the
  # j/k counters lag behind i and list names misalign with their plots
  # — confirm intended.
  for (i in seq_len(min(length(FeatureColNames), NumOfParDepPlots))) {
    tryCatch({
      Out <- ParDepCalPlots(
        data = ValidationData,
        PredictionColName = "Predict",
        TargetColName = Target,
        IndepVar = VariableImportance[i, Variable],
        GraphType = "calibration",
        PercentileBucket = 0.05,
        FactLevels = 10,
        Function = function(x)
          mean(x, na.rm = TRUE)
      )
      j <- j + 1
      ParDepPlots[[paste0(VariableImportance[j, Variable])]] <-
        Out
    }, error = function(x)
      "skip")
    tryCatch({
      Out1 <- ParDepCalPlots(
        data = ValidationData,
        PredictionColName = "Predict",
        TargetColName = Target,
        IndepVar = VariableImportance[i, Variable],
        GraphType = "boxplot",
        PercentileBucket = 0.05,
        FactLevels = 10,
        Function = function(x)
          mean(x, na.rm = TRUE)
      )
      k <- k + 1
      ParDepBoxPlots[[paste0(VariableImportance[k, Variable])]] <-
        Out1
    }, error = function(x)
      "skip")
  }
  # Regression Save ParDepPlots to file----
  if (SaveModelObjects) {
    save(ParDepPlots,
         file = paste0(model_path, "/", ModelID, "_ParDepPlots.R"))
  }
  # Regression Save ParDepBoxPlots to file----
  if (SaveModelObjects) {
    save(ParDepBoxPlots,
         file = paste0(model_path, "/", ModelID, "_ParDepBoxPlots.R"))
  }
  # Subset Transformation Object----
  # Drop the helper rows added for the inverse transform before
  # returning the transformation metadata to the caller.
  if(!is.null(TransformNumericColumns)) {
    if(TargetColumnName == "Target") {
      TransformationResults <- TransformationResults[!(ColumnName %chin% c("Predict"))]
    } else {
      TransformationResults <- TransformationResults[!(ColumnName %chin% c("Predict", "Target"))]
    }
  }
  # Regression Return Objects----
  if (ReturnModelObjects) {
    if(!is.null(TransformNumericColumns)) {
      return(
        list(
          Model = FinalModel,
          ValidationData = ValidationData,
          EvaluationPlot = EvaluationPlot,
          EvaluationBoxPlot = EvaluationBoxPlot,
          EvaluationMetrics = EvaluationMetrics,
          VariableImportance = VariableImportance,
          PartialDependencePlots = ParDepPlots,
          PartialDependenceBoxPlots = ParDepBoxPlots,
          TransformationInformation = TransformationResults
        )
      )
    } else {
      return(
        list(
          Model = FinalModel,
          ValidationData = ValidationData,
          EvaluationPlot = EvaluationPlot,
          EvaluationBoxPlot = EvaluationBoxPlot,
          EvaluationMetrics = EvaluationMetrics,
          VariableImportance = VariableImportance,
          PartialDependencePlots = ParDepPlots,
          PartialDependenceBoxPlots = ParDepBoxPlots
        )
      )
    }
  }
}
| /R/AutoH2oDRFRegression.R | no_license | solomondaner/RemixAutoML | R | false | false | 34,012 | r | #' AutoH2oDRFRegression is an automated H2O modeling framework with grid-tuning and model evaluation
#'
#' AutoH2oDRFRegression is an automated H2O modeling framework with grid-tuning and model evaluation that runs a variety of steps. First, the function will run a random grid tune over N number of models and find which model is the best (a default model is always included in that set). Once the model is identified and built, several other outputs are generated: validation data with predictions, evaluation plot, evaluation boxplot, evaluation metrics, variable importance, partial dependence calibration plots, partial dependence calibration box plots, and column names used in model fitting.
#' @author Adrian Antico
#' @family Supervised Learning
#' @param data This is your data set for training and testing your model
#' @param ValidationData This is your holdout data set used in modeling either refine your hyperparameters.
#' @param TestData This is your holdout data set. Catboost using both training and validation data in the training process so you should evaluate out of sample performance with this data set.
#' @param TargetColumnName Either supply the target column name OR the column number where the target is located (but not mixed types).
#' @param FeatureColNames Either supply the feature column names OR the column number where the target is located (but not mixed types)
#' @param TransformNumericColumns Set to NULL to do nothing; otherwise supply the column names of numeric variables you want transformed
#' @param eval_metric This is the metric used to identify best grid tuned model. Choose from "MSE", "RMSE", "MAE", "RMSLE"
#' @param Trees The maximum number of trees you want in your models
#' @param GridTune Set to TRUE to run a grid tuning procedure. Set a number in MaxModelsInGrid to tell the procedure how many models you want to test.
#' @param MaxMem Set the maximum amount of memory you'd like to dedicate to the model run. E.g. "32G"
#' @param MaxModelsInGrid Number of models to test from grid options (1080 total possible options)
#' @param model_path A character string of your path file to where you want your output saved
#' @param ModelID A character string to name your model and output
#' @param NumOfParDepPlots Tell the function the number of partial dependence calibration plots you want to create. Calibration boxplots will only be created for numerical features (not dummy variables)
#' @param ReturnModelObjects Set to TRUE to output all modeling objects (E.g. plots and evaluation metrics)
#' @param SaveModelObjects Set to TRUE to return all modeling objects to your environment
#' @param IfSaveModel Set to "mojo" to save a mojo file, otherwise "standard" to save a regular H2O model object
#' @param StopH2O For use in other functions.
#' @examples
#' \donttest{
#' Correl <- 0.85
#' N <- 1000
#' data <- data.table::data.table(Target = runif(N))
#' data[, x1 := qnorm(Target)]
#' data[, x2 := runif(N)]
#' data[, Independent_Variable1 := log(pnorm(Correl * x1 +
#' sqrt(1-Correl^2) * qnorm(x2)))]
#' data[, Independent_Variable2 := (pnorm(Correl * x1 +
#' sqrt(1-Correl^2) * qnorm(x2)))]
#' data[, Independent_Variable3 := exp(pnorm(Correl * x1 +
#' sqrt(1-Correl^2) * qnorm(x2)))]
#' data[, Independent_Variable4 := exp(exp(pnorm(Correl * x1 +
#' sqrt(1-Correl^2) * qnorm(x2))))]
#' data[, Independent_Variable5 := sqrt(pnorm(Correl * x1 +
#' sqrt(1-Correl^2) * qnorm(x2)))]
#' data[, Independent_Variable6 := (pnorm(Correl * x1 +
#' sqrt(1-Correl^2) * qnorm(x2)))^0.10]
#' data[, Independent_Variable7 := (pnorm(Correl * x1 +
#' sqrt(1-Correl^2) * qnorm(x2)))^0.25]
#' data[, Independent_Variable8 := (pnorm(Correl * x1 +
#' sqrt(1-Correl^2) * qnorm(x2)))^0.75]
#' data[, Independent_Variable9 := (pnorm(Correl * x1 +
#' sqrt(1-Correl^2) * qnorm(x2)))^2]
#' data[, Independent_Variable10 := (pnorm(Correl * x1 +
#' sqrt(1-Correl^2) * qnorm(x2)))^4]
#' data[, Independent_Variable11 := as.factor(
#' ifelse(Independent_Variable2 < 0.20, "A",
#' ifelse(Independent_Variable2 < 0.40, "B",
#' ifelse(Independent_Variable2 < 0.6, "C",
#' ifelse(Independent_Variable2 < 0.8, "D", "E")))))]
#' data[, ':=' (x1 = NULL, x2 = NULL)]
#' TestModel <- AutoH2oDRFRegression(data,
#' ValidationData = NULL,
#' TestData = NULL,
#' TargetColumnName = "Target",
#' FeatureColNames = 2:ncol(data),
#' TransformNumericColumns = NULL,
#' eval_metric = "RMSE",
#' Trees = 50,
#' GridTune = FALSE,
#' MaxMem = "32G",
#' MaxModelsInGrid = 10,
#' model_path = NULL,
#' ModelID = "FirstModel",
#' NumOfParDepPlots = 3,
#' ReturnModelObjects = TRUE,
#' SaveModelObjects = FALSE,
#' IfSaveModel = "mojo",
#' StopH2O = TRUE)
#' }
#' @return Saves to file and returned in list: VariableImportance.csv, Model, ValidationData.csv, EvalutionPlot.png, EvalutionBoxPlot.png, EvaluationMetrics.csv, ParDepPlots.R a named list of features with partial dependence calibration plots, ParDepBoxPlots.R, GridCollect, GridList, and Transformation metadata
#' @export
AutoH2oDRFRegression <- function(data,
                                 ValidationData = NULL,
                                 TestData = NULL,
                                 TargetColumnName = NULL,
                                 FeatureColNames = NULL,
                                 TransformNumericColumns = NULL,
                                 eval_metric = "RMSE",
                                 Trees = 50,
                                 GridTune = FALSE,
                                 MaxMem = "32G",
                                 MaxModelsInGrid = 2,
                                 model_path = NULL,
                                 ModelID = "FirstModel",
                                 NumOfParDepPlots = 3,
                                 ReturnModelObjects = TRUE,
                                 SaveModelObjects = FALSE,
                                 IfSaveModel = "mojo",
                                 StopH2O = TRUE) {
  # Argument checks (warnings only, preserving the package's historical
  # lenient behavior -- invalid inputs fail later with a natural error) ----
  if (!(tolower(eval_metric) %chin% c("mse", "rmse", "mae", "rmsle"))) {
    warning("eval_metric not in MSE, RMSE, MAE, RMSLE")
  }
  if (Trees < 1) warning("Trees must be greater than 1")
  if (!GridTune %in% c(TRUE, FALSE)) warning("GridTune needs to be TRUE or FALSE")
  if (MaxModelsInGrid < 1 && GridTune) warning("MaxModelsInGrid needs to be at least 1")
  if (!is.null(model_path) && !is.character(model_path)) {
    warning("model_path needs to be a character type")
  }
  if (!is.character(ModelID)) warning("ModelID needs to be a character type")
  if (NumOfParDepPlots < 0) warning("NumOfParDepPlots needs to be a positive number")
  if (!(ReturnModelObjects %in% c(TRUE, FALSE))) warning("ReturnModelObjects needs to be TRUE or FALSE")
  if (!(SaveModelObjects %in% c(TRUE, FALSE))) warning("SaveModelObjects needs to be TRUE or FALSE")

  # Coerce all supplied data sets to data.table ----
  if (!data.table::is.data.table(data)) data <- data.table::as.data.table(data)
  if (!is.null(ValidationData) && !data.table::is.data.table(ValidationData)) {
    ValidationData <- data.table::as.data.table(ValidationData)
  }
  if (!is.null(TestData) && !data.table::is.data.table(TestData)) {
    TestData <- data.table::as.data.table(TestData)
  }

  # Convert TransformNumericColumns positions to column names ----
  if (!is.null(TransformNumericColumns) && !is.character(TransformNumericColumns)) {
    TransformNumericColumns <- names(data)[TransformNumericColumns]
  }

  # Helper: fit transformations on the training data and apply the fitted
  # transformations to the holdout sets. Returns the updated sets plus the
  # transformation metadata needed for the inverse transform later.
  TransformSets <- function(data, ValidationData, TestData) {
    Output <- AutoTransformationCreate(
      data,
      ColumnNames = TransformNumericColumns,
      Methods = c("BoxCox", "YeoJohnson", "Asinh", "Asin", "Logit"),
      Path = model_path,
      TransID = ModelID,
      SaveOutput = SaveModelObjects)
    data <- Output$Data
    TransformationResults <- Output$FinalResults
    ValidationData <- AutoTransformationScore(
      ScoringData = ValidationData, Type = "Apply",
      FinalResults = TransformationResults, TransID = NULL, Path = NULL)
    if (!is.null(TestData)) {
      TestData <- AutoTransformationScore(
        ScoringData = TestData, Type = "Apply",
        FinalResults = TransformationResults, TransID = NULL, Path = NULL)
    }
    list(data = data, ValidationData = ValidationData, TestData = TestData,
         TransformationResults = TransformationResults)
  }

  # Transform user-supplied data sets ----
  if (!is.null(ValidationData) && !is.null(TransformNumericColumns)) {
    MeanTrainTarget <- data[, mean(get(TargetColumnName))]
    Trans <- TransformSets(data, ValidationData, TestData)
    data <- Trans$data
    ValidationData <- Trans$ValidationData
    TestData <- Trans$TestData
    TransformationResults <- Trans$TransformationResults
  }

  # Partition when the user supplied neither validation nor test sets ----
  if (is.null(ValidationData) && is.null(TestData)) {
    dataSets <- AutoDataPartition(
      data, NumDataSets = 3, Ratios = c(0.70, 0.20, 0.10),
      PartitionType = "random", StratifyColumnNames = NULL, TimeColumnName = NULL)
    data <- dataSets$TrainData
    ValidationData <- dataSets$ValidationData
    TestData <- dataSets$TestData
    MeanTrainTarget <- data[, mean(get(TargetColumnName))]
    if (!is.null(TransformNumericColumns)) {
      Trans <- TransformSets(data, ValidationData, TestData)
      data <- Trans$data
      ValidationData <- Trans$ValidationData
      TestData <- Trans$TestData
      TransformationResults <- Trans$TransformationResults
    }
  }

  # Final column typing (no imputation; characters to factors for H2O) ----
  dataTrain <- ModelDataPrep(data = data, Impute = FALSE, CharToFactor = TRUE)
  dataTest <- ModelDataPrep(data = ValidationData, Impute = FALSE, CharToFactor = TRUE)
  if (!is.null(TestData)) {
    TestData <- ModelDataPrep(data = TestData, Impute = FALSE, CharToFactor = TRUE)
  }

  # Resolve the target column name whether supplied as name or position ----
  if (is.character(TargetColumnName)) {
    Target <- TargetColumnName
  } else {
    Target <- names(data)[TargetColumnName]
  }

  # Minimum target value; the log-based metrics below are only computed
  # when both target and predictions are strictly positive ----
  MinVal <- min(data[[Target]], na.rm = TRUE)

  # Start H2O and upload the training and validation frames ----
  h2o::h2o.init(max_mem_size = MaxMem, enable_assertions = FALSE)
  datatrain <- h2o::as.h2o(dataTrain)
  datavalidate <- h2o::as.h2o(dataTest)

  # Random grid search over DRF hyperparameters ----
  if (GridTune) {
    search_criteria <- list(
      strategy = "RandomDiscrete",
      max_runtime_secs = 3600 * 24 * 7,
      max_models = MaxModelsInGrid,
      seed = 1234,
      stopping_rounds = 10,
      stopping_metric = toupper(eval_metric),
      stopping_tolerance = 1e-3)
    hyper_params <- list(
      max_depth = c(6, 9, 12),
      sample_rate = c(0.5, 0.75, 1.0),
      col_sample_rate_per_tree = c(0.5, 0.75, 1.0),
      col_sample_rate_change_per_level = c(0.9, 1.0, 1.1),
      min_rows = c(1, 10),
      nbins = c(10, 20, 30),
      nbins_cats = c(64, 256, 512),
      histogram_type = c("UniformAdaptive", "QuantilesGlobal", "RoundRobin"))
    grid <- h2o::h2o.grid(
      hyper_params = hyper_params,
      search_criteria = search_criteria,
      is_supervised = TRUE,
      algorithm = "randomForest",
      grid_id = paste0(ModelID, "_Grid"),
      x = FeatureColNames,
      y = TargetColumnName,
      ntrees = Trees,
      training_frame = datatrain,
      validation_frame = datavalidate,
      max_runtime_secs = 3600 * 24 * 7,
      stopping_rounds = 10,
      stopping_tolerance = 1e-3,
      stopping_metric = toupper(eval_metric),
      score_tree_interval = 10,
      seed = 1234)
    # Best grid model: metrics are sorted ascending (lower is better)
    Grid_Out <- h2o::h2o.getGrid(grid_id = paste0(ModelID, "_Grid"),
                                 sort_by = eval_metric, decreasing = FALSE)
    grid_model <- h2o::h2o.getModel(Grid_Out@model_ids[[1]])
  }

  # Baseline (default-parameter) model, always trained ----
  base_model <- h2o::h2o.randomForest(
    x = FeatureColNames,
    y = TargetColumnName,
    training_frame = datatrain,
    validation_frame = datavalidate,
    model_id = ModelID,
    ntrees = Trees)

  # Holdout evaluation: score on TestData when supplied, else on validation.
  # A switch on the requested metric replaces the former 4x duplicated
  # if/else ladder; behavior is identical for all supported metrics.
  EvalFun <- switch(
    tolower(eval_metric),
    mse = h2o::h2o.mse,
    rmse = h2o::h2o.rmse,
    mae = h2o::h2o.mae,
    rmsle = h2o::h2o.rmsle)
  if (!is.null(TestData)) {
    datatest <- h2o::as.h2o(TestData)
    EvalFrame <- datatest
  } else {
    EvalFrame <- datavalidate
  }
  BaseModelEval <- EvalFun(h2o::h2o.performance(model = base_model, newdata = EvalFrame))
  if (GridTune) {
    GridModelEval <- EvalFun(h2o::h2o.performance(model = grid_model, newdata = EvalFrame))
  }

  # Pick the winner (lower is better for MSE/RMSE/MAE/RMSLE) ----
  if (GridTune && GridModelEval < BaseModelEval) {
    FinalModel <- grid_model
  } else {
    FinalModel <- base_model
  }

  # Persist the winning model ----
  if (SaveModelObjects) {
    if (tolower(IfSaveModel) == "mojo") {
      SaveModel <- h2o::h2o.saveMojo(object = FinalModel, path = model_path, force = TRUE)
      h2o::h2o.download_mojo(
        model = FinalModel,
        path = model_path,
        get_genmodel_jar = TRUE,
        genmodel_path = model_path,
        genmodel_name = ModelID)
    } else {
      SaveModel <- h2o::h2o.saveModel(object = FinalModel, path = model_path, force = TRUE)
    }
  }

  # Score the holdout with the winner ----
  if (!is.null(TestData)) {
    Predict <- data.table::as.data.table(
      h2o::h2o.predict(object = FinalModel, newdata = datatest))
  } else {
    Predict <- data.table::as.data.table(
      h2o::h2o.predict(object = FinalModel, newdata = datavalidate))
  }

  # Variable importance (saved before renaming to keep the raw H2O names) ----
  VariableImportance <- data.table::as.data.table(h2o::h2o.varimp(object = FinalModel))
  if (SaveModelObjects) {
    data.table::fwrite(VariableImportance,
                       file = paste0(model_path, "/", ModelID, "_VariableImportance.csv"))
  }
  data.table::setnames(
    VariableImportance,
    c("variable", "relative_importance", "scaled_importance", "percentage"),
    c("Variable", "RelativeImportance", "ScaledImportance", "Percentage"))
  VariableImportance[, ':=' (
    RelativeImportance = round(RelativeImportance, 4),
    ScaledImportance = round(ScaledImportance, 4),
    Percentage = round(Percentage, 4))]

  # H2O is no longer needed; everything below runs locally ----
  if (StopH2O) {
    h2o::h2o.shutdown(prompt = FALSE)
  }

  # Assemble scored validation data ----
  if (!is.null(TestData)) {
    ValidationData <- data.table::as.data.table(cbind(TestData, Predict))
  } else {
    ValidationData <- data.table::as.data.table(cbind(dataTest, Predict))
  }
  data.table::setnames(ValidationData, "predict", "Predict")

  # Back-transform target and predictions to the original scale ----
  if (!is.null(TransformNumericColumns)) {
    if (GridTune) {
      TransformationResults <- TransformationResults[ColumnName != "Predict"]
    }
    # Register the Predict column with the same transformation as the target
    # so the inverse transform treats both identically.
    TransformationResults <- data.table::rbindlist(list(
      TransformationResults,
      data.table::data.table(
        ColumnName = "Predict",
        MethodName = TransformationResults[ColumnName == eval(TargetColumnName), MethodName],
        Lambda = TransformationResults[ColumnName == eval(TargetColumnName), Lambda],
        NormalizedStatistics = 0)))
    # If the actual target column is named "Target", drop the duplicate record
    if (length(unique(TransformationResults[["ColumnName"]])) != nrow(TransformationResults)) {
      temp <- TransformationResults[, .N, by = "ColumnName"][N != 1][[1]]
      temp1 <- which(names(ValidationData) == temp)[1]
      ValidationData[, eval(names(data)[temp1]) := NULL]
      TransformationResults <- TransformationResults[, ID := 1:.N][
        ID != which(TransformationResults[["ID"]] == temp1)][, ID := NULL]
    }
    ValidationData <- AutoTransformationScore(
      ScoringData = ValidationData,
      Type = "Inverse",
      FinalResults = TransformationResults,
      TransID = NULL,
      Path = NULL)
  }

  # Save scored data ----
  if (SaveModelObjects) {
    data.table::fwrite(ValidationData,
                       file = paste0(model_path, "/", ModelID, "_ValidationData.csv"))
  }

  # Calibration plot ----
  EvaluationPlot <- EvalPlot(
    data = ValidationData,
    PredictionColName = "Predict",
    TargetColName = Target,
    GraphType = "calibration",
    PercentileBucket = 0.05,
    aggrfun = function(x) mean(x, na.rm = TRUE))
  if (GridTune) {
    val <- max(GridModelEval, BaseModelEval)
    EvaluationPlot <- EvaluationPlot +
      ggplot2::ggtitle(paste0("Random Forest Calibration Evaluation Plot: ",
                              toupper(eval_metric), " = ", round(val, 3)))
  } else {
    EvaluationPlot <- EvaluationPlot +
      ggplot2::ggtitle(paste0("Calibration Evaluation Plot: ",
                              toupper(eval_metric), " = ", round(BaseModelEval, 3)))
  }
  if (SaveModelObjects) {
    ggplot2::ggsave(paste0(model_path, "/", ModelID, "_EvaluationPlot.png"))
  }

  # Calibration boxplot ----
  EvaluationBoxPlot <- EvalPlot(
    data = ValidationData,
    PredictionColName = "Predict",
    TargetColName = Target,
    GraphType = "boxplot",
    PercentileBucket = 0.05,
    aggrfun = function(x) mean(x, na.rm = TRUE))
  if (GridTune) {
    val <- max(GridModelEval, BaseModelEval)
    EvaluationBoxPlot <- EvaluationBoxPlot +
      ggplot2::ggtitle(paste0("Random Forest Calibration Evaluation Plot: ",
                              toupper(eval_metric), " = ", round(val, 3)))
  } else {
    EvaluationBoxPlot <- EvaluationBoxPlot +
      ggplot2::ggtitle(paste0("Random Forest Calibration Evaluation Plot: ",
                              toupper(eval_metric), " = ", round(BaseModelEval, 3)))
  }
  if (SaveModelObjects) {
    ggplot2::ggsave(paste0(model_path, "/", ModelID, "_EvaluationBoxPlot.png"))
  }

  # Evaluation metrics table ----
  # BUGFIX: the metric formulas previously referenced a bare `Target` column,
  # which only existed when the target column was literally named "Target";
  # otherwise every metric errored inside tryCatch and was silently dropped.
  # `get(Target)` resolves the actual target column. A per-iteration NA
  # sentinel also prevents a skipped (gated) metric from recording the stale
  # value of the previous metric.
  EvaluationMetrics <- data.table::data.table(
    Metric = c("Poisson", "MAE", "MAPE", "MSE", "MSLE", "KL", "CS", "R2"),
    MetricValue = rep(999999, 8))
  i <- 0L
  for (metric in c("poisson", "mae", "mape", "mse", "msle", "kl", "cs", "r2")) {
    i <- i + 1L
    MetricVal <- NA_real_
    tryCatch({
      if (metric == "poisson") {
        if (MinVal > 0 && min(ValidationData[["Predict"]], na.rm = TRUE) > 0) {
          ValidationData[, Metric := Predict - get(Target) * log(Predict + 1)]
          MetricVal <- ValidationData[, mean(Metric, na.rm = TRUE)]
        }
      } else if (metric == "mae") {
        ValidationData[, Metric := abs(get(Target) - Predict)]
        MetricVal <- ValidationData[, mean(Metric, na.rm = TRUE)]
      } else if (metric == "mape") {
        ValidationData[, Metric := abs((get(Target) - Predict) / (get(Target) + 1))]
        MetricVal <- ValidationData[, mean(Metric, na.rm = TRUE)]
      } else if (metric == "mse") {
        ValidationData[, Metric := (get(Target) - Predict) ^ 2]
        MetricVal <- ValidationData[, mean(Metric, na.rm = TRUE)]
      } else if (metric == "msle") {
        if (MinVal > 0 && min(ValidationData[["Predict"]], na.rm = TRUE) > 0) {
          ValidationData[, Metric := (log(get(Target) + 1) - log(Predict + 1)) ^ 2]
          MetricVal <- ValidationData[, mean(Metric, na.rm = TRUE)]
        }
      } else if (metric == "kl") {
        if (MinVal > 0 && min(ValidationData[["Predict"]], na.rm = TRUE) > 0) {
          ValidationData[, Metric := get(Target) * log((get(Target) + 1) / (Predict + 1))]
          MetricVal <- ValidationData[, mean(Metric, na.rm = TRUE)]
        }
      } else if (metric == "cs") {
        # Cosine similarity between target and prediction vectors
        ValidationData[, ':=' (
          Metric1 = get(Target) * Predict,
          Metric2 = get(Target) ^ 2,
          Metric3 = Predict ^ 2)]
        MetricVal <- ValidationData[, sum(Metric1, na.rm = TRUE)] /
          (sqrt(ValidationData[, sum(Metric2, na.rm = TRUE)]) *
             sqrt(ValidationData[, sum(Metric3, na.rm = TRUE)]))
      } else if (metric == "r2") {
        MetricVal <- (ValidationData[, stats::cor(get(Target), Predict)][[1]]) ^ 2
      }
      if (!is.na(MetricVal)) {
        data.table::set(EvaluationMetrics, i = i, j = 2L, value = round(MetricVal, 4))
      }
    }, error = function(x) "skip")
  }
  # Metrics that could not be computed keep the 999999 sentinel; drop them
  EvaluationMetrics <- EvaluationMetrics[MetricValue != 999999]
  if (SaveModelObjects) {
    data.table::fwrite(EvaluationMetrics,
                       file = paste0(model_path, "/", ModelID, "_EvaluationMetrics.csv"))
  }

  # Partial dependence calibration plots and boxplots ----
  # BUGFIX: the list names previously used separate lagged counters (j/k),
  # so after any plotting failure the remaining plots were stored under the
  # wrong variable names; key each plot by the variable it was built from.
  ParDepPlots <- list()
  ParDepBoxPlots <- list()
  for (i in seq_len(min(length(FeatureColNames), NumOfParDepPlots))) {
    VarName <- VariableImportance[i, Variable]
    tryCatch({
      Out <- ParDepCalPlots(
        data = ValidationData,
        PredictionColName = "Predict",
        TargetColName = Target,
        IndepVar = VarName,
        GraphType = "calibration",
        PercentileBucket = 0.05,
        FactLevels = 10,
        Function = function(x) mean(x, na.rm = TRUE))
      ParDepPlots[[paste0(VarName)]] <- Out
    }, error = function(x) "skip")
    tryCatch({
      Out1 <- ParDepCalPlots(
        data = ValidationData,
        PredictionColName = "Predict",
        TargetColName = Target,
        IndepVar = VarName,
        GraphType = "boxplot",
        PercentileBucket = 0.05,
        FactLevels = 10,
        Function = function(x) mean(x, na.rm = TRUE))
      ParDepBoxPlots[[paste0(VarName)]] <- Out1
    }, error = function(x) "skip")
  }
  if (SaveModelObjects) {
    save(ParDepPlots, file = paste0(model_path, "/", ModelID, "_ParDepPlots.R"))
    save(ParDepBoxPlots, file = paste0(model_path, "/", ModelID, "_ParDepBoxPlots.R"))
  }

  # Drop helper rows from the transformation metadata before returning ----
  if (!is.null(TransformNumericColumns)) {
    if (TargetColumnName == "Target") {
      TransformationResults <- TransformationResults[!(ColumnName %chin% c("Predict"))]
    } else {
      TransformationResults <- TransformationResults[!(ColumnName %chin% c("Predict", "Target"))]
    }
  }

  # Return modeling objects ----
  if (ReturnModelObjects) {
    Output <- list(
      Model = FinalModel,
      ValidationData = ValidationData,
      EvaluationPlot = EvaluationPlot,
      EvaluationBoxPlot = EvaluationBoxPlot,
      EvaluationMetrics = EvaluationMetrics,
      VariableImportance = VariableImportance,
      PartialDependencePlots = ParDepPlots,
      PartialDependenceBoxPlots = ParDepBoxPlots)
    if (!is.null(TransformNumericColumns)) {
      Output[["TransformationInformation"]] <- TransformationResults
    }
    return(Output)
  }
}
|
# SPACE incorporating information of potential hub nodes
#
# espace(): R-level wrapper for the compiled 'espace' routine (SPACE-style
# sparse partial-correlation estimation with an additional penalty term for
# a declared set of candidate hub variables -- exact semantics are defined
# by the C code in the espace package).
#
# Arguments:
#   X         n x p numeric data matrix (rows = observations).
#   hub_indx  column indices of the variables treated as potential hubs.
#   alpha     hub-penalty weight, forwarded to C unchanged.
#   lambda    sparsity penalty level (passed to C as 'lam').
#   maxit_in  inner-loop iteration cap for the C solver.
#   maxit_out outer-loop iteration cap.
#   tol       convergence tolerance.
#
# Value: list with
#   rho      p x p estimated partial-correlation matrix,
#   alpha    the alpha used,
#   lambda   the lambda used,
#   residual n x p residual matrix written back by the C routine,
#   w_d      length-p estimates returned by C under the name 'sigma'.
espace <- function(X,hub_indx, alpha, lambda,maxit_in=1000,maxit_out=5,tol=1e-6)
{
# Problem dimensions and zero-filled output buffers; .C copies these in and
# the C routine writes its results back into the returned list components.
n = nrow(X)
p = ncol(X)
nh = length(hub_indx)
rho = matrix(0,p,p)
rsd = matrix(0,n,p)
sigma = rep(0,p)
# NOTE: argument order must match the registered C signature exactly;
# matrices are flattened column-major by as.double(). Do not reorder.
out <- .C('espace',n = as.integer(n), p = as.integer(p), nh = as.integer(nh),
X = as.double(X), hub_indx = as.integer(hub_indx), alpha = as.double(alpha),
lam = as.double(lambda), niter_in = as.integer(maxit_in), niter_out=as.integer(maxit_out),
tol = as.double(tol), rho = as.double(rho), residual = as.double(rsd), sigma=as.double(sigma),PACKAGE='espace')
# Reshape the flat rho vector written by C back into a p x p matrix.
out$rho <- matrix(out$rho,p,p)
output <- list(rho=out$rho, alpha=alpha, lambda=lambda, residual=matrix(out$residual,n,p), w_d=out$sigma)
return(output)
} | /R/espace/R/espace.r | permissive | Camiling/tailoredGlassoAnalysis | R | false | false | 874 | r |
# SPACE incorporating information of potential hub nodes
#
# espace(): R-level wrapper for the compiled 'espace' routine (SPACE-style
# sparse partial-correlation estimation with an additional penalty term for
# a declared set of candidate hub variables -- exact semantics are defined
# by the C code in the espace package).
#
# Arguments:
#   X         n x p numeric data matrix (rows = observations).
#   hub_indx  column indices of the variables treated as potential hubs.
#   alpha     hub-penalty weight, forwarded to C unchanged.
#   lambda    sparsity penalty level (passed to C as 'lam').
#   maxit_in  inner-loop iteration cap for the C solver.
#   maxit_out outer-loop iteration cap.
#   tol       convergence tolerance.
#
# Value: list with
#   rho      p x p estimated partial-correlation matrix,
#   alpha    the alpha used,
#   lambda   the lambda used,
#   residual n x p residual matrix written back by the C routine,
#   w_d      length-p estimates returned by C under the name 'sigma'.
espace <- function(X,hub_indx, alpha, lambda,maxit_in=1000,maxit_out=5,tol=1e-6)
{
# Problem dimensions and zero-filled output buffers; .C copies these in and
# the C routine writes its results back into the returned list components.
n = nrow(X)
p = ncol(X)
nh = length(hub_indx)
rho = matrix(0,p,p)
rsd = matrix(0,n,p)
sigma = rep(0,p)
# NOTE: argument order must match the registered C signature exactly;
# matrices are flattened column-major by as.double(). Do not reorder.
out <- .C('espace',n = as.integer(n), p = as.integer(p), nh = as.integer(nh),
X = as.double(X), hub_indx = as.integer(hub_indx), alpha = as.double(alpha),
lam = as.double(lambda), niter_in = as.integer(maxit_in), niter_out=as.integer(maxit_out),
tol = as.double(tol), rho = as.double(rho), residual = as.double(rsd), sigma=as.double(sigma),PACKAGE='espace')
# Reshape the flat rho vector written by C back into a p x p matrix.
out$rho <- matrix(out$rho,p,p)
output <- list(rho=out$rho, alpha=alpha, lambda=lambda, residual=matrix(out$residual,n,p), w_d=out$sigma)
return(output)
} |
# Exploratory look at the distribution of V2 - V1 (presumably insert/clone
# sizes from an alignment table -- TODO confirm what columns V1/V2 hold).
# Plots are layered interactively: hist first, then abline/rug on top, so
# statement order matters.
my.dat <-
read.table("puke")
head(my.dat)
nrow(my.dat)
# Interval size: end minus start.
my.dat$V3 <- my.dat$V2-my.dat$V1
# Histogram of log10 sizes with a rug of the individual values.
hist(log10(my.dat$V3))
rug (log10(my.dat$V3))
# Lower cutoff (chosen by eye from the histogram); marked in red (col=2).
my.lower <- 2500
abline(v=log10(my.lower), col=2)
# Re-plot keeping only sizes above the lower cutoff.
hist(log10(my.dat[my.dat$V3>my.lower,"V3"]))
rug (log10(my.dat[my.dat$V3>my.lower,"V3"]))
# Upper cutoff, marked on the current plot.
my.upper <- 400000
abline(v=log10(my.upper), col=2)
# Distribution within the (lower, upper) window: log10 scale, then linear.
hist(log10(my.dat[my.dat$V3>my.lower & my.dat$V3<my.upper,"V3"]))
rug (log10(my.dat[my.dat$V3>my.lower & my.dat$V3<my.upper,"V3"]))
hist(my.dat[my.dat$V3>my.lower & my.dat$V3<my.upper,"V3"])
| /Clone_Ends/RH_BES/ins_size_dist.R | no_license | dbolser/PGSC-Sequence-Alignment-Mapping | R | false | false | 535 | r |
# Exploratory look at the distribution of V2 - V1 (presumably insert/clone
# sizes from an alignment table -- TODO confirm what columns V1/V2 hold).
# Plots are layered interactively: hist first, then abline/rug on top, so
# statement order matters.
my.dat <-
read.table("puke")
head(my.dat)
nrow(my.dat)
# Interval size: end minus start.
my.dat$V3 <- my.dat$V2-my.dat$V1
# Histogram of log10 sizes with a rug of the individual values.
hist(log10(my.dat$V3))
rug (log10(my.dat$V3))
# Lower cutoff (chosen by eye from the histogram); marked in red (col=2).
my.lower <- 2500
abline(v=log10(my.lower), col=2)
# Re-plot keeping only sizes above the lower cutoff.
hist(log10(my.dat[my.dat$V3>my.lower,"V3"]))
rug (log10(my.dat[my.dat$V3>my.lower,"V3"]))
# Upper cutoff, marked on the current plot.
my.upper <- 400000
abline(v=log10(my.upper), col=2)
# Distribution within the (lower, upper) window: log10 scale, then linear.
hist(log10(my.dat[my.dat$V3>my.lower & my.dat$V3<my.upper,"V3"]))
rug (log10(my.dat[my.dat$V3>my.lower & my.dat$V3<my.upper,"V3"]))
hist(my.dat[my.dat$V3>my.lower & my.dat$V3<my.upper,"V3"])
|
# Experiment script: pull one SEC filing, parse its header, and extract/clean
# the HTML tables. Relies on project helpers (get_filing, get_sec_header,
# get_filing_html, parse_filing_html, combine_all_tables_from_filing) whose
# contracts are not visible here.
# gets the raw filing text
filing <- get_filing(test_cases[5])  # NOTE(review): hard-coded to the 5th test case
# get header data from filing
header <- get_sec_header(filing)
# If the header carries fund data, split it by series and collect the
# distinct series names. The `by =` argument suggests data.table::split --
# confirm funds is a data.table.
if(!is.null(header$fund_data)) {
funds <- header$fund_data
fund_list <- split(funds, by = "series_id")
fund_names <- unique(funds$series_name)
}
filing_html <- get_filing_html(filing)
# pulls out the relevant html
# NOTE(review): duplicate of the call above; one of the two is redundant.
filing_html <- get_filing_html(filing)
# pulls out the tables in the html, converts to text
# for cleanup
# Parse each HTML chunk, then drop the empty results.
filing_html_tables <- lapply(filing_html, parse_filing_html) %>%
Filter(x = ., function(x) length(x) > 0)
# dispatch relevant parser
parsed_tables <- lapply(filing_html_tables, combine_all_tables_from_filing)
| /experiments/header-attempt.R | no_license | be-green/sec-filings | R | false | false | 672 | r | # gets the raw filing text
# Experiment script: pull one SEC filing, parse its header, and extract/clean
# the HTML tables. Relies on project helpers (get_filing, get_sec_header,
# get_filing_html, parse_filing_html, combine_all_tables_from_filing) whose
# contracts are not visible here.
filing <- get_filing(test_cases[5])  # NOTE(review): hard-coded to the 5th test case
# get header data from filing
header <- get_sec_header(filing)
# If the header carries fund data, split it by series and collect the
# distinct series names. The `by =` argument suggests data.table::split --
# confirm funds is a data.table.
if(!is.null(header$fund_data)) {
funds <- header$fund_data
fund_list <- split(funds, by = "series_id")
fund_names <- unique(funds$series_name)
}
filing_html <- get_filing_html(filing)
# pulls out the relevant html
# NOTE(review): duplicate of the call above; one of the two is redundant.
filing_html <- get_filing_html(filing)
# pulls out the tables in the html, converts to text
# for cleanup
# Parse each HTML chunk, then drop the empty results.
filing_html_tables <- lapply(filing_html, parse_filing_html) %>%
Filter(x = ., function(x) length(x) > 0)
# dispatch relevant parser
parsed_tables <- lapply(filing_html_tables, combine_all_tables_from_filing)
|
# Chicago, IL
# Exploratory coverage check: for each brand in the 2010 Chicago extract,
# record which weeks and which retail chains the brand appears in, then keep
# the brands observed in every week and every chain; results go to CSV.
# Housekeeping ----------------------------------------------------------------
rm(list = ls())  # NOTE: wipes the global environment; acceptable only in a standalone script
cat("\014")
library(dplyr)
library(feather)
#library(stargazer)
# Load Data -------------------------------------------------------------------
CHICAGO_data_2010 <- read_feather("/Users/malooney/Google Drive/digitalLibrary/*MS_Thesis/MS_Thesis/data/CHICAGO_data_2010_feather")
# Add volume measures ---------------------------------------------------------
# Assumes VOL_EQ.x is in 288-oz volume equivalents -- TODO confirm with the
# IRI data dictionary. 0.0078125 = 1/128 gal per fluid oz.
oz <- round(data.frame(oz = CHICAGO_data_2010$VOL_EQ.x * 288))
total_oz <- (oz * CHICAGO_data_2010$UNITS); colnames(total_oz) <- "total_oz"
total_gal <- (0.0078125 * total_oz); colnames(total_gal) <- "total_gal"
dollarPerGal <- CHICAGO_data_2010$DOLLARS / total_gal
colnames(dollarPerGal) <- "dollarPerGal"
CHICAGO_data_2010_manip <- cbind(CHICAGO_data_2010, oz, total_oz, total_gal,
                                 dollarPerGal)
rm(oz, total_gal, total_oz, dollarPerGal)
# Remove aggregate rows and zero-volume (infinite $/gal) records ---------------
CHICAGO_data_2010_manip <- filter(CHICAGO_data_2010_manip, L5 != "ALL BRAND")
CHICAGO_data_2010_manip <- filter(CHICAGO_data_2010_manip, dollarPerGal != "Inf")
# Dimensions of the week/chain x brand coverage table --------------------------
Brands <- unique(CHICAGO_data_2010_manip$L5)
Weeks <- unique(CHICAGO_data_2010_manip$WEEK)
Chains <- unique(CHICAGO_data_2010_manip$MskdName)
num_Brands <- length(Brands)
num_Weeks <- length(Weeks)
num_Chains <- length(Chains)
augWeeks <- num_Weeks + 1            # first row of the chain section
augChains <- num_Weeks + num_Chains  # last row of the chain section
# One row per week plus one per chain; one column per brand plus 2 id columns
explore_Data <- data.frame(matrix(NA, nrow = num_Weeks + num_Chains,
                                  ncol = num_Brands + 2))
colnames(explore_Data) <- c("Week_Chain", "Week_Chain_Num", Brands)
explore_Data[1:num_Weeks, 1] <- paste("Week", 1:num_Weeks, sep = "")
explore_Data[augWeeks:augChains, 1] <- paste("Chain", 1:num_Chains, sep = "")
explore_Data[1:num_Weeks, 2] <- unique(CHICAGO_data_2010_manip$WEEK)
explore_Data[augWeeks:augChains, 2] <- unique(CHICAGO_data_2010_manip$MskdName)
# Fill brand columns: the week/chain id when the brand was observed, else NA.
for (i in seq_along(Brands)) {
  tmp <- filter(CHICAGO_data_2010_manip, L5 == Brands[i])
  tmp_week <- unique(tmp$WEEK)
  tmp_chain <- unique(tmp$MskdName)
  j <- i + 2  # brand columns start at column 3
  # BUGFIX: the week rows were previously filled with a hard-coded 1:52;
  # use num_Weeks so extracts that do not span exactly 52 weeks work too.
  explore_Data[1:num_Weeks, j] <- ifelse(explore_Data[1:num_Weeks, 2] %in% tmp_week,
                                         explore_Data[1:num_Weeks, 2], NA)
  explore_Data[augWeeks:augChains, j] <- ifelse(explore_Data[augWeeks:augChains, 2] %in% tmp_chain,
                                                explore_Data[augWeeks:augChains, 2], NA)
}
rm(i, j, tmp, num_Brands, tmp_chain, tmp_week, augChains, augWeeks, num_Chains,
   num_Weeks)
# Brands with complete coverage (no NA anywhere in their column) ---------------
explore_Data_Complete <- explore_Data[, apply(explore_Data, 2, function(x) !any(is.na(x)))]
Brands_CompleteData <- data.frame(Brand_Name = colnames(explore_Data_Complete[-c(1, 2)]))
Brands_CompleteData <- arrange(Brands_CompleteData, Brand_Name)
write.csv(explore_Data, file = "Chicago_explore_Data.csv")
write.csv(Brands_CompleteData, file = "Chicago_Brands_CompleteData.csv")
| /scripts_and_functions/Chicago_Data_Explore.R | permissive | malooney/MS_Thesis | R | false | false | 2,949 | r |
# Chicago, IL
# Housekeeping ----------------------------------------------------------------
rm(list=ls()) # Clears all variables (stand-alone script only; avoid sourcing
              # this from an interactive session you care about)
cat("\014")   # clear the console (RStudio)
library(dplyr)
library(feather)
#library(stargazer)

# Load Data -------------------------------------------------------------------
# NOTE(review): hard-coded absolute path; parameterize before sharing.
CHICAGO_data_2010 <- read_feather("/Users/malooney/Google Drive/digitalLibrary/*MS_Thesis/MS_Thesis/data/CHICAGO_data_2010_feather")

# Add volume measures ---------------------------------------------------------
# VOL_EQ.x * 288 converts the case-equivalent volume to ounces;
# 0.0078125 = 1/128 converts ounces to gallons.
oz <- round(data.frame(oz = CHICAGO_data_2010$VOL_EQ.x * 288))
total_oz <- (oz * CHICAGO_data_2010$UNITS); colnames(total_oz) <- "total_oz"
total_gal <- (0.0078125 * total_oz); colnames(total_gal) <- "total_gal"
dollarPerGal <- CHICAGO_data_2010$DOLLARS / total_gal
colnames(dollarPerGal) <- "dollarPerGal"
CHICAGO_data_2010_manip <- cbind(CHICAGO_data_2010, oz, total_oz, total_gal,
                                 dollarPerGal)
rm(oz, total_gal, total_oz, dollarPerGal)

# Remove zero data ------------------------------------------------------------
# Drop the aggregate "ALL BRAND" rows and rows whose price per gallon is not
# finite (Inf arises when total_gal is zero; the original compared the
# numeric column to the string "Inf", relying on implicit coercion).
CHICAGO_data_2010_manip <- filter(CHICAGO_data_2010_manip, L5 != "ALL BRAND")
CHICAGO_data_2010_manip <- filter(CHICAGO_data_2010_manip, is.finite(dollarPerGal))

# Unique brands, weeks and retail chains present in the cleaned data
Brands <- unique(CHICAGO_data_2010_manip$L5)
Weeks <- unique(CHICAGO_data_2010_manip$WEEK)
Chains <- unique(CHICAGO_data_2010_manip$MskdName)
num_Brands <- length(Brands)
num_Weeks <- length(Weeks)
num_Chains <- length(Chains)

# Row indices of the chain portion of the stacked week/chain table
augWeeks <- num_Weeks + 1
augChains <- num_Weeks + num_Chains

# One row per week followed by one row per chain; one column per brand.
# A brand's cell holds the week number / chain name where the brand was
# observed, otherwise NA.
explore_Data <- data.frame(matrix(NA, nrow = num_Weeks + num_Chains,
                                  ncol = num_Brands + 2))
colnames(explore_Data) <- c("Week_Chain", "Week_Chain_Num", Brands)
explore_Data[1:num_Weeks, 1] <- paste0("Week", 1:num_Weeks)
explore_Data[augWeeks:augChains, 1] <- paste0("Chain", 1:num_Chains)
explore_Data[1:num_Weeks, 2] <- Weeks
explore_Data[augWeeks:augChains, 2] <- Chains

for (i in seq_along(Brands)) {
  col <- i + 2                      # brand columns start at column 3
  tmp <- filter(CHICAGO_data_2010_manip, L5 == Brands[i])
  tmp_week <- unique(tmp$WEEK)
  tmp_chain <- unique(tmp$MskdName)
  # BUG FIX: the week rows were previously indexed with a hard-coded 1:52,
  # which breaks whenever the data holds a different number of weeks.
  explore_Data[1:num_Weeks, col] <-
    ifelse(explore_Data[1:num_Weeks, 2] %in% tmp_week,
           explore_Data[1:num_Weeks, 2], NA)
  explore_Data[augWeeks:augChains, col] <-
    ifelse(explore_Data[augWeeks:augChains, 2] %in% tmp_chain,
           explore_Data[augWeeks:augChains, 2], NA)
}
rm(i, col, tmp, num_Brands, tmp_chain, tmp_week, augChains, augWeeks,
   num_Chains, num_Weeks)

# Keep only brands observed in every week AND every chain
explore_Data_Complete <- explore_Data[, apply(explore_Data, 2,
                                              function(x) !any(is.na(x)))]
Brands_CompleteData <- data.frame(
  Brand_Name = colnames(explore_Data_Complete[-c(1, 2)]))
Brands_CompleteData <- arrange(Brands_CompleteData, Brand_Name)

write.csv(explore_Data, file = "Chicago_explore_Data.csv")
write.csv(Brands_CompleteData, file = "Chicago_Brands_CompleteData.csv")
|
server <- function(input, output, session) {

  # Titles of the detail tabs currently open; mutated from the event
  # handlers below via <<- (standard Shiny closure-state pattern).
  tab_list <- NULL

  ### Joining all the data: flights for the selected airline enriched with
  ### airline and airport names; optionally restricted to one month
  ### (input$month == 99 means "all months").
  base_flights <- reactive({
    res <- flights %>%
      filter(carrier == input$airline) %>%
      left_join(airlines, by = "carrier") %>%
      rename(airline = name) %>%
      left_join(airports, by = c("origin" = "faa")) %>%
      rename(origin_name = name) %>%
      select(-lat, -lon, -alt, -tz, -dst) %>%
      left_join(airports, by = c("dest" = "faa")) %>%
      rename(dest_name = name)
    if (input$month != 99) res <- filter(res, month == input$month)
    res
  })

  # Value box: total number of flights for the current selection
  output$total_flights <- renderValueBox(
    base_flights() %>%
      tally() %>%
      pull() %>%
      as.integer() %>%
      prettyNum(big.mark = ",") %>%
      valueBox(subtitle = "Number of Flights")
  )

  # Value box: average number of flights per day
  output$per_day <- renderValueBox(
    base_flights() %>%
      group_by(day, month) %>%
      tally() %>%
      ungroup() %>%
      summarise(avg = mean(n)) %>%
      pull(avg) %>%
      round() %>%
      prettyNum(big.mark = ",") %>%
      valueBox(
        subtitle = "Average Flights per day",
        color = "blue"
      )
  )

  # Value box: share of flights delayed 15+ minutes at departure
  output$percent_delayed <- renderValueBox(
    base_flights() %>%
      filter(!is.na(dep_delay)) %>%
      mutate(delayed = ifelse(dep_delay >= 15, 1, 0)) %>%
      summarise(
        delays = sum(delayed),
        total = n()
      ) %>%
      mutate(percent = (delays / total) * 100) %>%
      pull() %>%
      round() %>%
      paste0("%") %>%
      valueBox(
        subtitle = "Flights delayed",
        color = "teal"
      )
  )

  # Column chart of flight counts, grouped by month (all-months view) or by
  # day (single-month view).  Output id keeps the historical "fights_count"
  # spelling because the UI references it by that name.
  output$fights_count <- renderD3({
    # BUG FIX: the original used
    #   grouped <- ifelse(input$month != 99, expr(day), expr(month))
    # ifelse() rep()licates its branch values, which fails on bare symbols
    # ("attempt to replicate an object of type 'symbol'"); a plain if/else
    # returns the unevaluated symbol as intended.
    grouped <- if (input$month != 99) expr(day) else expr(month)
    res <- base_flights() %>%
      group_by(!!grouped) %>%
      tally() %>%
      collect() %>%
      mutate(
        y = n,
        x = !!grouped
      ) %>%
      select(x, y)
    if (input$month == 99) {
      # Label months with their three-letter abbreviations
      res <- res %>%
        inner_join(
          tibble(x = 1:12, label = substr(month.name, 1, 3)),
          by = "x"
        )
    } else {
      res <- res %>%
        mutate(label = x)
    }
    r2d3(res, "www/col_plot.js")
  })

  # Top airports (server) -------------------------------------------
  output$top_airports <- renderD3({
    # The following code runs inside the database
    base_flights() %>%
      group_by(dest, dest_name) %>%
      tally() %>%
      collect() %>%
      arrange(desc(n)) %>%
      head(10) %>%
      arrange(dest_name) %>%
      mutate(dest_name = str_sub(dest_name, 1, 30)) %>%
      rename(
        x = dest,
        y = n,
        label = dest_name
      ) %>%
      r2d3("www/bar_plot.js")
  })

  # Get details (server) --------------------------------------------
  # Generic drill-down used by both click handlers: first 100 flights,
  # optionally restricted to a destination airport and/or a day of month.
  get_details <- function(airport = NULL, day = NULL) {
    res <- base_flights()
    if (!is.null(airport)) res <- filter(res, dest == airport)
    if (!is.null(day)) res <- filter(res, day == !!as.integer(day))
    res %>%
      head(100) %>%
      select(
        month, day, flight, tailnum,
        dep_time, arr_time, dest_name,
        distance
      ) %>%
      collect() %>%
      mutate(month = month.name[as.integer(month)])
  }

  # Month/Day column click (server) ---------------------------------
  # In the all-months view a click drills into that month; in the
  # single-month view a click opens a detail tab for that day.
  observeEvent(input$column_clicked != "", {
    if (input$month == "99") {
      updateSelectInput(session, "month", selected = input$column_clicked)
    } else {
      day <- input$column_clicked
      month <- input$month
      tab_title <- paste(
        input$airline, "-", month.name[as.integer(month)], "-", day
      )
      if (!(tab_title %in% tab_list)) {
        appendTab(
          inputId = "tabs",
          tabPanel(
            tab_title,
            DT::renderDataTable(
              get_details(day = day)
            )
          )
        )
        tab_list <<- c(tab_list, tab_title)
      }
      updateTabsetPanel(session, "tabs", selected = tab_title)
    }
  },
  ignoreInit = TRUE
  )

  # Bar clicked (server) --------------------------------------------
  # Clicking an airport bar opens a detail tab for that destination.
  observeEvent(input$bar_clicked, {
    airport <- input$bar_clicked
    month <- input$month
    tab_title <- paste(
      input$airline, "-", airport,
      if (month != 99) {
        paste("-", month.name[as.integer(month)])
      }
    )
    if (!(tab_title %in% tab_list)) {
      appendTab(
        inputId = "tabs",
        tabPanel(
          tab_title,
          DT::renderDataTable(
            get_details(airport = airport)
          )
        )
      )
      tab_list <<- c(tab_list, tab_title)
    }
    updateTabsetPanel(session, "tabs", selected = tab_title)
  })

  # Remove tabs (server) --------------------------------------------
  observeEvent(input$remove, {
    # Use purrr's walk command to cycle through each
    # panel tab and remove it
    tab_list %>%
      walk(~ removeTab("tabs", .x))
    tab_list <<- NULL
  })
}
server <- function(input, output, session) {

  # Titles of the detail tabs currently open; mutated from the event
  # handlers below via <<- (standard Shiny closure-state pattern).
  tab_list <- NULL

  ### Joining all the data: flights for the selected airline enriched with
  ### airline and airport names; optionally restricted to one month
  ### (input$month == 99 means "all months").
  base_flights <- reactive({
    res <- flights %>%
      filter(carrier == input$airline) %>%
      left_join(airlines, by = "carrier") %>%
      rename(airline = name) %>%
      left_join(airports, by = c("origin" = "faa")) %>%
      rename(origin_name = name) %>%
      select(-lat, -lon, -alt, -tz, -dst) %>%
      left_join(airports, by = c("dest" = "faa")) %>%
      rename(dest_name = name)
    if (input$month != 99) res <- filter(res, month == input$month)
    res
  })

  # Value box: total number of flights for the current selection
  output$total_flights <- renderValueBox(
    base_flights() %>%
      tally() %>%
      pull() %>%
      as.integer() %>%
      prettyNum(big.mark = ",") %>%
      valueBox(subtitle = "Number of Flights")
  )

  # Value box: average number of flights per day
  output$per_day <- renderValueBox(
    base_flights() %>%
      group_by(day, month) %>%
      tally() %>%
      ungroup() %>%
      summarise(avg = mean(n)) %>%
      pull(avg) %>%
      round() %>%
      prettyNum(big.mark = ",") %>%
      valueBox(
        subtitle = "Average Flights per day",
        color = "blue"
      )
  )

  # Value box: share of flights delayed 15+ minutes at departure
  output$percent_delayed <- renderValueBox(
    base_flights() %>%
      filter(!is.na(dep_delay)) %>%
      mutate(delayed = ifelse(dep_delay >= 15, 1, 0)) %>%
      summarise(
        delays = sum(delayed),
        total = n()
      ) %>%
      mutate(percent = (delays / total) * 100) %>%
      pull() %>%
      round() %>%
      paste0("%") %>%
      valueBox(
        subtitle = "Flights delayed",
        color = "teal"
      )
  )

  # Column chart of flight counts, grouped by month (all-months view) or by
  # day (single-month view).  Output id keeps the historical "fights_count"
  # spelling because the UI references it by that name.
  output$fights_count <- renderD3({
    # BUG FIX: the original used
    #   grouped <- ifelse(input$month != 99, expr(day), expr(month))
    # ifelse() rep()licates its branch values, which fails on bare symbols
    # ("attempt to replicate an object of type 'symbol'"); a plain if/else
    # returns the unevaluated symbol as intended.
    grouped <- if (input$month != 99) expr(day) else expr(month)
    res <- base_flights() %>%
      group_by(!!grouped) %>%
      tally() %>%
      collect() %>%
      mutate(
        y = n,
        x = !!grouped
      ) %>%
      select(x, y)
    if (input$month == 99) {
      # Label months with their three-letter abbreviations
      res <- res %>%
        inner_join(
          tibble(x = 1:12, label = substr(month.name, 1, 3)),
          by = "x"
        )
    } else {
      res <- res %>%
        mutate(label = x)
    }
    r2d3(res, "www/col_plot.js")
  })

  # Top airports (server) -------------------------------------------
  output$top_airports <- renderD3({
    # The following code runs inside the database
    base_flights() %>%
      group_by(dest, dest_name) %>%
      tally() %>%
      collect() %>%
      arrange(desc(n)) %>%
      head(10) %>%
      arrange(dest_name) %>%
      mutate(dest_name = str_sub(dest_name, 1, 30)) %>%
      rename(
        x = dest,
        y = n,
        label = dest_name
      ) %>%
      r2d3("www/bar_plot.js")
  })

  # Get details (server) --------------------------------------------
  # Generic drill-down used by both click handlers: first 100 flights,
  # optionally restricted to a destination airport and/or a day of month.
  get_details <- function(airport = NULL, day = NULL) {
    res <- base_flights()
    if (!is.null(airport)) res <- filter(res, dest == airport)
    if (!is.null(day)) res <- filter(res, day == !!as.integer(day))
    res %>%
      head(100) %>%
      select(
        month, day, flight, tailnum,
        dep_time, arr_time, dest_name,
        distance
      ) %>%
      collect() %>%
      mutate(month = month.name[as.integer(month)])
  }

  # Month/Day column click (server) ---------------------------------
  # In the all-months view a click drills into that month; in the
  # single-month view a click opens a detail tab for that day.
  observeEvent(input$column_clicked != "", {
    if (input$month == "99") {
      updateSelectInput(session, "month", selected = input$column_clicked)
    } else {
      day <- input$column_clicked
      month <- input$month
      tab_title <- paste(
        input$airline, "-", month.name[as.integer(month)], "-", day
      )
      if (!(tab_title %in% tab_list)) {
        appendTab(
          inputId = "tabs",
          tabPanel(
            tab_title,
            DT::renderDataTable(
              get_details(day = day)
            )
          )
        )
        tab_list <<- c(tab_list, tab_title)
      }
      updateTabsetPanel(session, "tabs", selected = tab_title)
    }
  },
  ignoreInit = TRUE
  )

  # Bar clicked (server) --------------------------------------------
  # Clicking an airport bar opens a detail tab for that destination.
  observeEvent(input$bar_clicked, {
    airport <- input$bar_clicked
    month <- input$month
    tab_title <- paste(
      input$airline, "-", airport,
      if (month != 99) {
        paste("-", month.name[as.integer(month)])
      }
    )
    if (!(tab_title %in% tab_list)) {
      appendTab(
        inputId = "tabs",
        tabPanel(
          tab_title,
          DT::renderDataTable(
            get_details(airport = airport)
          )
        )
      )
      tab_list <<- c(tab_list, tab_title)
    }
    updateTabsetPanel(session, "tabs", selected = tab_title)
  })

  # Remove tabs (server) --------------------------------------------
  observeEvent(input$remove, {
    # Use purrr's walk command to cycle through each
    # panel tab and remove it
    tab_list %>%
      walk(~ removeTab("tabs", .x))
    tab_list <<- NULL
  })
}
## Sets LE table to 'Standard Life Expectancy'
## Update 'LE' and '.LE'
setStdLE <-
function(table = NULL) {
  ## Fall back to the table currently selected in the Tcl/Tk GUI
  if (is.null(table))
    table <- DALYtclvalue("stdLEtab")
  table <- match.arg(table, c("GBD2010", "GBD1990", "WHO/GHE"))

  ## Resolve the male/female standard life-expectancy vectors;
  ## only the GBD1990 table distinguishes the sexes
  if (table == "GBD1990") {
    stdM <- DALYget("stdM")
    stdF <- DALYget("stdF")
  } else {
    stdM <- stdF <- switch(table,
                           "GBD2010" = DALYget("stdGBD"),
                           "WHO/GHE" = DALYget("stdWHO"))
  }

  ## Copy the 21 age-group values into 'LE' (column 1 = male, 2 = female)
  for (age in seq_len(21)) {
    DALYassign("LE", stdM[age], age, 1)
    DALYassign("LE", stdF[age], age, 2)
  }
  DALYupdate(".LE")
  DALYupdate("stdLEtab", table)
}
| /R/setStdLE.R | no_license | cran/DALY | R | false | false | 661 | r | ## Sets LE table to 'Standard Life Expectancy'
## Update 'LE' and '.LE'
## Sets the life-expectancy table used by the model to one of the standard
## tables (GBD2010, GBD1990 or WHO/GHE).  When 'table' is NULL the choice is
## read from the Tcl/Tk GUI widget "stdLEtab".
setStdLE <-
function(table = NULL) {
  if (is.null(table)) {
    table <- DALYtclvalue("stdLEtab")
  }
  table <- match.arg(table, c("GBD2010", "GBD1990", "WHO/GHE"))
  ## Only the GBD1990 table has separate male/female vectors; the other two
  ## use the same vector for both sexes.
  if (table == "GBD1990") {
    stdM <- DALYget("stdM")
    stdF <- DALYget("stdF")
  } else if (table == "GBD2010") {
    stdM <- stdF <- DALYget("stdGBD")
  } else if (table == "WHO/GHE") {
    stdM <- stdF <- DALYget("stdWHO")
  }
  ## Copy the 21 age-group values into 'LE' (column 1 = male, 2 = female)
  for (i in seq(21)) {
    DALYassign("LE", stdM[i], i, 1)
    DALYassign("LE", stdF[i], i, 2)
  }
  DALYupdate(".LE")
  DALYupdate("stdLEtab", table)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classifier_fns.R
\name{fitModel}
\alias{fitModel}
\title{Fit GLM on Survey (Natsal) data}
\usage{
fitModel(data.train, data.test, riskfac, depvar)
}
\arguments{
\item{data.train}{Training/fitting data}
\item{data.test}{Test data}
\item{riskfac}{Risk factors}
\item{depvar}{Dependent variable name}
}
\value{
list of fits
}
\description{
Creates regression variables and
binds them together into a list
}
| /man/fitModel.Rd | no_license | n8thangreen/STIecoPredict | R | false | true | 484 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classifier_fns.R
\name{fitModel}
\alias{fitModel}
\title{Fit GLM on Survey (Natsal) data}
\usage{
fitModel(data.train, data.test, riskfac, depvar)
}
\arguments{
\item{data.train}{Training/fitting data}
\item{data.test}{Test data}
\item{riskfac}{Risk factors}
\item{depvar}{Dependent variable name}
}
\value{
list of fits
}
\description{
Creates regression variables and
binds them together into a list
}
|
#####################################################
# Load Packages (if Required) #
#####################################################
# Install any missing packages, then attach them.  The original repeated one
# if(!require(...)) line per package and, for 'latexpdf', tested the wrong
# package (tinytex) before installing it; driving everything from one vector
# fixes that and keeps the install and attach lists in sync.
required_pkgs <- c(
  "githubinstall", "RCurl", "httr", "tidyverse", "lubridate", "caret",
  "foreach", "gam", "splines2", "stringi", "data.table", "gridExtra",
  "ggplot2", "rmarkdown", "knitr", "devtools", "latexpdf", "tinytex",
  "packrat", "rsconnect", "readxl"
)
for (pkg in required_pkgs) {
  if (!require(pkg, character.only = TRUE, quietly = TRUE)) {
    install.packages(pkg, repos = "http://cran.us.r-project.org", quiet = TRUE)
  }
}
#####################################################
# Load Libraries #
#####################################################
library(githubinstall)
library(RCurl)
library(httr)
library(tidyverse)
library(lubridate)
library(caret)
library(gam)
library(foreach)
library(splines2)
library(stringi)
library(data.table)
library(gridExtra)
library(ggplot2)
library(rmarkdown)
library(knitr)
library(devtools)
library(latexpdf)
library(tinytex)
library(packrat)
library(rsconnect)
library(readxl)
#####################################################
# Reset Variables / Set Options #
#####################################################
# NOTE(review): rm(list=ls()) wipes the user's whole workspace when this
# script is sourced interactively; acceptable for a stand-alone analysis
# script only.
rm(list=ls()) # Clears all variables
options(digits=5)
#####################################################
# Kaggle Heart Attack Prediction Data #
#####################################################
##### GitHub .csv Data File - Unzipped from original Kaggle URL
KaggleURL <- "https://www.kaggle.com/nareshbhat/health-care-data-set-on-heart-attack-possibility"
GitHubURL <- "https://github.com/johndsowers/HeartAttack/raw/master/heart.csv"
dl <- tempfile()
download.file(GitHubURL, dl)
heart <- as.data.frame(read_csv(dl))
# target: 1 = heart disease present, 0 = absent; kept numeric so regression
# models can be trained on it and their predictions rounded to a class.
heart <- heart %>% mutate(target=as.numeric(target))
heart  # prints the full data frame to the console (quick visual check)
#####################################################
# Creating 90% Train / 10% Test Data Sets #
#####################################################
set.seed(1) # Set Seed to 1
test_index <- createDataPartition(y = heart$target, times = 1, p = 0.1, list = FALSE)
training <- heart[-test_index,] # Create Training Data Set
testing <- heart[test_index,] # Create Test Data Set
#####################################################
# INTRODUCTION #
# (Purpose / Initial Data Analysis) #
#####################################################
##### Reference https://artificialintelligence-news.com/2019/05/14/ml-algorithm-predicts-heart-attacks/
##### Possibility of recreating this 90% prediction rate utilizing the 13 predictors in the Kaggle website
#####################################################
# Initial Data Analysis #
#####################################################
################# Data Set Overview #################
str(heart)
#####################################################
# METHODOLOGY / ANALYSIS #
# (Purpose / Initial Data Analysis) #
#####################################################
################## Analysis by Age ##################
##### Patient Age Proportions Figure
# Left panel: share of all patients at each age.
# NOTE(review): the hline/annotate values (.0244, 0.545 below) are
# hard-coded rather than computed from the data.
age_p_distribution.fig <- heart %>%
group_by(age) %>%
summarize(p = length(target)/nrow(heart), .groups="drop") %>%
ggplot(aes(age, p)) +
geom_bar(stat="identity") +
geom_hline(yintercept=.0244, size=1.0, color="red") +
annotate("text", x = 33.5, y = .0264, label = "Mean = .0244") +
ggtitle("Patient Distribution by Age") +
xlab("Age (Years)") +
ylab("Proportion of Patients")
##### Confirmed Heart Disease Age Proportions Figure
# Right panel: within each age, the fraction of patients with target == 1.
# prop_health is computed here but never used downstream.
age_p_disease.fig <- heart %>%
group_by(age) %>%
mutate(n_total = length(age)) %>%
summarize(prop_disease = sum(target==1)/n_total, prop_health= sum(target==0)/n_total, .groups="drop") %>%
ggplot(aes(age, prop_disease)) +
geom_point() +
geom_smooth(method = "loess", formula='y ~ x') +
geom_hline(yintercept=0.545, size=1.0, color="red") +
annotate("text", x = 34.5, y = .575, label = "Mean = 0.545") +
ylim(0, 1) +
ggtitle("Heart Disease Proportions by Age") +
xlab("Age (Years)") +
ylab("Proportion of Cases")
grid.arrange(age_p_distribution.fig, age_p_disease.fig, ncol=2) # Plot proportion distributions
################## Analysis by Sex ##################
##### Patient Gender Proportions Figure
sex_p_distribution.fig <- heart %>%
group_by(sex) %>%
summarise(p = length(target)/nrow(heart), .groups="drop") %>%
mutate(sex = ifelse(sex==0,"Female", "Male")) %>%
ggplot(aes(sex, p)) +
geom_bar(stat="identity") +
ggtitle("Patient Distribution by Gender") +
xlab("Gender") +
ylab("Proportion of Patients")
##### Confirmed Heart Disease Gender Proportions Figure
# Note: sum(x/n) is algebraically identical to sum(x)/n, so this matches
# the p computed in the other disease panels.
sex_p_disease.fig <- heart %>%
group_by(sex) %>%
summarise(p = sum(as.numeric(target)/length(target)), .groups="drop") %>%
mutate(sex = ifelse(sex==0,"Female", "Male")) %>%
ggplot(aes(sex,p)) +
geom_bar(stat="identity") +
geom_hline(yintercept=.6, size=1.0, color="red") +
annotate("text", x = 2.3, y = .62, label = "Mean = 0.6") +
ggtitle("Heart Disease Proportions by Gender") +
xlab("Gender") +
ylab("Proportion of Cases")
grid.arrange(sex_p_distribution.fig, sex_p_disease.fig, ncol=2) # Plot proportion distributions
########### Analysis by Chest Pain (cp) #############
##### Patient chest pain Proportions Figure
cp_p_distribution.fig <- heart %>%
group_by(cp) %>%
summarise(p=length(cp)/nrow(heart), .groups="drop") %>%
ggplot(aes(cp, p)) +
geom_bar(stat="identity") +
ggtitle("Patient Distribution by CP") +
xlab("Chest Pain (CP) Score") +
ylab("Proportion of Patients")
##### Confirmed Heart Disease Chest Pain Proportions Figure
cp_p_disease.fig <- heart %>%
group_by(cp) %>%
summarise(p = sum(as.numeric(target))/length(target), .groups="drop") %>%
ggplot(aes(cp,p)) +
geom_bar(stat="identity") +
geom_hline(yintercept=.645, size=1.0, color="red") +
annotate("text", x = 0, y = .625, label = "Mean = 0.645") +
ggtitle("Heart Disease Proportions by CP") +
xlab("Chest Pain (CP) Score") +
ylab("Proportion of Cases")
grid.arrange(cp_p_distribution.fig, cp_p_disease.fig, ncol=2) # Plot proportion distributions
### Analysis by Resting Blood Pressure (trestbps) ###
##### Patient BP Proportions Figure
# FIX: axis label previously read "Blood Perssure".
trestbps_p_distribution.fig <- heart %>%
group_by(trestbps) %>%
summarise(p=length(cp)/nrow(heart), .groups="drop") %>%
ggplot(aes(trestbps, p)) +
geom_bar(stat="identity") +
ggtitle("Patient Distribution by BP") +
xlab("Blood Pressure (BP) Level") +
ylab("Proportion of Patients")
##### Confirmed Heart Disease BP Proportions Figure
trestbps_p_disease.fig <- heart %>%
group_by(trestbps) %>%
summarise(p = sum(as.numeric(target))/length(target), .groups="drop") %>%
ggplot(aes(trestbps,p)) +
geom_smooth(method='loess', formula='y ~ x') +
geom_point() +
#geom_bar(stat="identity") +
geom_hline(yintercept=.51, size=1.0, color="red") +
annotate("text", x = 190, y = .56, label = "Mean = 0.510") +
ggtitle("Heart Disease Proportions by BP") +
xlab("Blood Pressure (BP) Level") +
ylab("Proportion of Cases")
grid.arrange(trestbps_p_distribution.fig, trestbps_p_disease.fig, ncol=2) # Plot proportion distributions
########## Analysis by Cholesterol (chol) ###########
##### Patient Cholesterol Proportions Figure
# FIX: axis label previously read "Cholesteral".
chol_p_distribution.fig <- heart %>%
group_by(chol) %>%
summarise(p=length(cp)/nrow(heart), .groups="drop") %>%
ggplot(aes(chol, p)) +
geom_bar(stat="identity") +
ggtitle("Patient Distribution by Chol") +
xlab("Cholesterol (Chol) Level") +
ylab("Proportion of Patients")
##### Confirmed Heart Disease Cholesterol Proportions Figure
chol_p_disease.fig <- heart %>%
group_by(chol) %>%
summarise(p = sum(as.numeric(target))/length(target), .groups="drop") %>%
ggplot(aes(chol,p)) +
geom_smooth(method='loess', formula='y ~ x') +
geom_point() +
geom_hline(yintercept=.521, size=1.0, color="red") +
annotate("text", x = 510, y = .557, label = "Mean = 0.521") +
ggtitle("Heart Disease Proportions by Chol") +
xlab("Cholesterol (Chol) Level") +
ylab("Proportion of Cases")
grid.arrange(chol_p_distribution.fig, chol_p_disease.fig, ncol=2) # Plot proportion distributions
########## Analysis by Blood Sugar (fbs) ############
##### Patient Blood Sugar Proportions Figure
# BUG FIX: the True/False label was previously derived from 'exang' instead
# of 'fbs' (copy/paste error), mislabelling both panels.  The label is now
# computed from 'fbs' itself, before grouping.
fbs_p_distribution.fig <- heart %>%
mutate(fbs = ifelse(fbs==0,"False","True")) %>%
group_by(fbs) %>%
summarise(p=length(cp)/nrow(heart), .groups="drop") %>%
ggplot(aes(fbs, p)) +
geom_bar(stat="identity") +
ggtitle("Patient Distribution by FBS") +
xlab("Fasting Blood Sugar (fbs) Level (>120 mg/dl)") +
ylab("Proportion of Patients")
##### Confirmed Heart Disease Blood Sugar Proportions Figure
fbs_p_disease.fig <- heart %>%
mutate(fbs = ifelse(fbs==0,"False","True")) %>%
group_by(fbs) %>%
summarise(p = sum(as.numeric(target))/length(target), .groups="drop") %>%
ggplot(aes(fbs,p)) +
geom_bar(stat="identity") +
geom_hline(yintercept=.531, size=1.0, color="red") +
annotate("text", x = 2.3, y = .545, label = "Mean = 0.531") +
ggtitle("Heart Disease Proportions by FBS") +
xlab("Fasting Blood Sugar (fbs) Level (>120 mg/dl)") +
ylab("Proportion of Cases")
grid.arrange(fbs_p_distribution.fig, fbs_p_disease.fig, ncol=2) # Plot proportion distributions
#### Analysis by Electrocardiographic (restecg) #####
##### Patient ECG Results Proportions Figure
ekg_p_distribution.fig <- heart %>%
group_by(restecg) %>%
summarise(p=length(cp)/nrow(heart), .groups="drop") %>%
ggplot(aes(restecg, p)) +
geom_bar(stat="identity") +
ggtitle("Patient Distribution by EKG") +
xlab("Electrocardiogram (EKG) Score") +
ylab("Proportion of Patients")
##### Confirmed Heart Disease ECG Proportions Figure
# FIX: plot title previously said "by FBS" (copy/paste from the fbs
# section); it now correctly reads "by EKG".
ekg_p_disease.fig <- heart %>%
group_by(restecg) %>%
summarise(p = sum(as.numeric(target)/length(target)), .groups="drop") %>%
ggplot(aes(restecg,p)) +
geom_smooth(method='loess', formula='y ~ x') +
geom_point() +
geom_hline(yintercept=.333, size=1.0, color="red") +
annotate("text", x = 0.25, y = .343, label = "Mean = 0.333") +
ggtitle("Heart Disease Proportions by EKG") +
xlab("Electrocardiogram (EKG) Score") +
ylab("Proportion of Cases")
grid.arrange(ekg_p_distribution.fig, ekg_p_disease.fig, ncol=2) # Plot proportion distributions
####### Analysis by Max Heart Rate (thalach) ########
##### Patient Max Heart Rate Proportions Figure
# NOTE(review): as in the other sections, the red hline/annotate "Mean"
# values below are hard-coded rather than computed from the data.
hr_p_distribution.fig <- heart %>%
group_by(thalach) %>%
summarise(p=length(cp)/nrow(heart), .groups="drop") %>%
ggplot(aes(thalach, p)) +
geom_bar(stat="identity") +
ggtitle("Patient Distribution by HR") +
xlab("Maximum Heart Rate (HR)") +
ylab("Proportion of Patients")
##### Confirmed Heart Disease Max Heart Rate Proportions Figure
hr_p_disease.fig <- heart %>%
group_by(thalach) %>%
summarise(p = sum(as.numeric(target))/length(target), .groups="drop") %>%
ggplot(aes(thalach,p)) +
geom_smooth(method='loess', formula='y ~ x') +
geom_point() +
geom_hline(yintercept=.491, size=1.0, color="red") +
annotate("text", x = 85, y = .527, label = "Mean = 0.491") +
ggtitle("Heart Disease Proportions by HR") +
xlab("Maximum Heart Rate (HR)") +
ylab("Proportion of Cases")
grid.arrange(hr_p_distribution.fig, hr_p_disease.fig, ncol=2) # Plot proportion distributions
########### Analysis by Angina (exang) ##############
##### Patient Angina Proportions Figure
ang_p_distribution.fig <- heart %>%
mutate(exang=ifelse(exang==0,"Not Present","Present")) %>%
group_by(exang) %>%
summarise(p=length(cp)/nrow(heart), .groups="drop") %>%
ggplot(aes(exang, p)) +
geom_bar(stat="identity") +
ggtitle("Patient Distribution by Angina") +
xlab("Exercise Induced Angina") +
ylab("Proportion of Patients")
##### Confirmed Heart Disease Angina Proportions Figure
ang_p_disease.fig <- heart %>%
mutate(exang=ifelse(exang==0,"Not Present","Present")) %>%
group_by(exang) %>%
summarise(p = sum(as.numeric(target))/length(target), .groups="drop") %>%
ggplot(aes(exang,p)) +
geom_bar(stat="identity") +
geom_hline(yintercept=.464, size=1.0, color="red") +
annotate("text", x = 2.2, y = .484, label = "Mean = 0.464") +
ggtitle("Heart Disease Proportions by Angina") +
xlab("Exercise Induced Angina") +
ylab("Proportion of Cases")
grid.arrange(ang_p_distribution.fig, ang_p_disease.fig, ncol=2) # Plot proportion distributions
################ Analysis by oldpeak #################
##### Patient Oldpeak Proportions Figure
# oldpeak = exercise-induced ST depression relative to rest.
oldpeak_p_distribution.fig <- heart %>%
group_by(oldpeak) %>%
summarise(p=length(oldpeak)/nrow(heart), .groups="drop") %>%
ggplot(aes(oldpeak, p)) +
geom_bar(stat="identity") +
ggtitle("Patient Distribution by ST") +
xlab("ST Depression Score") +
ylab("Proportion of Patients")
##### Confirmed Heart Disease Oldpeak Proportions Figure
oldpeak_p_disease.fig <- heart %>%
group_by(oldpeak) %>%
summarise(p = sum(as.numeric(target))/length(target), .groups="drop") %>%
ggplot(aes(oldpeak,p)) +
geom_smooth(method='loess', formula='y ~ x') +
geom_point() +
geom_hline(yintercept=.393, size=1.0, color="red") +
annotate("text", x = 5.5, y = .427, label = "Mean = 0.393") +
ggtitle("Heart Disease Proportions by ST") +
xlab("ST Depression Score") +
ylab("Proportion of Cases")
grid.arrange(oldpeak_p_distribution.fig, oldpeak_p_disease.fig, ncol=2) # Plot proportion distributions
################# Analysis by slope #################
##### Patient slope Proportions Figure
slope_p_distribution.fig <- heart %>%
group_by(slope) %>%
summarise(p=length(slope)/nrow(heart), .groups="drop") %>%
ggplot(aes(slope, p)) +
geom_bar(stat="identity") +
ggtitle("Patient Distribution by Slope") +
xlab("Slope") +
ylab("Proportion of Patients")
##### Confirmed Heart Disease Slope Proportions Figure
slope_p_disease.fig <- heart %>%
group_by(slope) %>%
summarise(p = sum(as.numeric(target))/length(target), .groups="drop") %>%
ggplot(aes(slope,p)) +
geom_smooth(method='loess', formula='y ~ x') +
geom_point() +
geom_hline(yintercept=.693, size=1.0, color="red") +
annotate("text", x = 0.23, y = .703, label = "Mean = 0.693") +
ggtitle("Heart Disease Proportions by Slope") +
xlab("Slope") +
ylab("Proportion of Cases")
grid.arrange(slope_p_distribution.fig, slope_p_disease.fig, ncol=2) # Plot proportion distributions
################### Analysis by ca ##################
##### Patient Blood Vessel Numbers Proportions Figure
# NOTE(review): "Flourosopy" in the plot labels is a misspelling of
# "Fluoroscopy"; left as-is here since labels are runtime strings.
ca_p_distribution.fig <- heart %>%
group_by(ca) %>%
summarise(p=length(oldpeak)/nrow(heart), .groups="drop") %>%
ggplot(aes(ca, p)) +
geom_bar(stat="identity") +
ggtitle("Patient Distribution by Flourosopy") +
xlab("Number Blood Vessels Colored by Flourosopy") +
ylab("Proportion of Patients")
##### Confirmed Heart Disease Blood Vessels Proportions Figure
ca_p_disease.fig <- heart %>%
group_by(ca) %>%
summarise(p = sum(as.numeric(target))/length(target), .groups="drop") %>%
ggplot(aes(ca,p)) +
geom_smooth(method='loess', formula='y ~ x') +
geom_point() +
geom_hline(yintercept=.44, size=1.0, color="red") +
annotate("text", x = 0.35, y = .455, label = "Mean = 0.44") +
ggtitle("Heart Disease Proportions by Flourosopy") +
xlab("Number Blood Vessels Colored by Flourosopy") +
ylab("Proportion of Cases")
grid.arrange(ca_p_distribution.fig, ca_p_disease.fig, ncol=2) # Plot proportion distributions
################## Analysis by thal #################
##### Patient Thalassemia Proportions Figure
thal_p_distribution.fig <- heart %>%
group_by(thal) %>%
summarise(p=length(oldpeak)/nrow(heart), .groups="drop") %>%
ggplot(aes(thal, p)) +
geom_bar(stat="identity") +
ggtitle("Patient Distribution by Thalassemia") +
xlab("Thalassemia") +
ylab("Proportion of Patients")
##### Confirmed Heart Disease Thalassemia Proportions Figure
# FIX: this was the only disease-proportion panel in the file without a
# title; one is added for consistency with every other section.
thal_p_disease.fig <- heart %>%
group_by(thal) %>%
summarise(p = sum(as.numeric(target))/length(target), .groups="drop") %>%
ggplot(aes(thal,p)) +
geom_smooth(method='loess', formula='y ~ x') +
geom_point() +
geom_hline(yintercept=.464, size=1.0, color="red") +
annotate("text", x = 0.33, y = .474, label = "Mean = 0.464") +
ggtitle("Heart Disease Proportions by Thalassemia") +
xlab("Thalassemia") +
ylab("Proportion of Cases")
grid.arrange(thal_p_distribution.fig, thal_p_disease.fig, ncol=2) # Plot proportion distributions
#####################################################
# RESULTS #
# (Summary) #
#####################################################
##### Initialize Variables
method <- c("lm", "glm", "loess", "knn", "rf", "ensemble")
accuracy <- c(NA, NA, NA, NA, NA, NA)
heart_models <- data.frame(method, accuracy)
predictor <- c("age", "sex", "cp", "trestbps", "chol", "fbs", "restecg", "thalach", "exang", "oldpeak", "slope", "ca", "thal")
importance <- data.frame(predictor=predictor)
##### Linear Regression (LM) Model
# Train
train_lm <- train(target~., method="lm", data=training)
# Predict
predict_lm <- round(predict(train_lm, newdata=testing))
# Confusion Matrix
cm_lm <- confusionMatrix(as.factor(predict_lm), as.factor(testing$target))
cm_lm$byClass
heart_models[1,2] <- cm_lm$overall[["Accuracy"]]
heart_models[1,]
# Variable Importance
imp_lm <- data.frame(predictor=row.names(varImp(train_lm)$importance),
lm=varImp(train_lm)$importance$Overall)
importance <- right_join(importance,imp_lm, by="predictor")
importance[,c(1,2)] %>% arrange(desc(lm))
##### Logistic Regression (GLM) Model
# Train
train_glm <- train(target~., method="glm", data=training)
# Predict
predict_glm <- round(predict(train_glm, newdata=testing))
# Confusion Matrix
cm_glm <- confusionMatrix(as.factor(predict_glm), as.factor(testing$target))
cm_glm$byClass
heart_models[2,2] <- cm_glm$overall[["Accuracy"]]
heart_models[2,]
# Variable Importance
imp_glm <- data.frame(predictor=row.names(varImp(train_glm)$importance),
glm=varImp(train_glm)$importance$Overall)
importance <- right_join(importance,imp_glm, by="predictor")
importance[,c(1,3)] %>% arrange(desc(glm))
#####(Loess)
# Train
train_loess <- train(target~., method="gamLoess", data=training)
# Predict
predict_loess <- round(predict(train_loess, newdata=testing))
# Confusion Matrix
cm_loess <- confusionMatrix(as.factor(predict_loess), as.factor(testing$target))
cm_loess$byClass
heart_models[3,2] <- cm_loess$overall[["Accuracy"]]
heart_models[3,]
# Variable Importance
imp_loess <- data.frame(predictor=row.names(varImp(train_loess)$importance),
loess=varImp(train_loess)$importance$Overall)
importance <- right_join(importance,imp_loess, by="predictor")
importance[,c(1,4)] %>% arrange(desc(loess))
##### K-Nearest Neighbors (KNN)
# Train
train_knn <- train(target~., method="knn", data=training, tuneGrid = data.frame(k=seq(0:5)))
train_knn$bestTune
# Predict
predict_knn <- round(predict(train_knn, newdata=testing))
# Confusion Matrix
cm_knn <- confusionMatrix(as.factor(predict_knn), as.factor(testing$target))
cm_knn$byClass
heart_models[4,2] <- cm_knn$overall[["Accuracy"]]
heart_models[4,]
# Variable Importance
imp_knn <- data.frame(predictor=row.names(varImp(train_knn)$importance),
knn=varImp(train_knn)$importance$Overall)
importance <- right_join(importance,imp_knn, by="predictor")
importance[,c(1,5)] %>% arrange(desc(knn))
##### RF (Random Forest)
# Train, tuning mtry (predictors sampled per split) over 1, 2, 4 and 8.
train_rf <- train(target~., method="rf", data=training, tuneGrid = data.frame(mtry=c(1,2,4,8)))
train_rf$bestTune
# Predict (round the numeric prediction to a 0/1 class label)
predict_rf <- round(predict(train_rf, newdata=testing))
# Confusion Matrix
cm_rf <- confusionMatrix(as.factor(predict_rf), as.factor(testing$target))
cm_rf$byClass
# Store the random-forest accuracy in row 5 of the model-comparison table.
heart_models[5,2] <- cm_rf$overall[["Accuracy"]]
heart_models[5,]
#Variable Importance: append rf importance scores as a new column.
imp_rf <- data.frame(predictor=row.names(varImp(train_rf)$importance),
rf=varImp(train_rf)$importance$Overall)
importance <- right_join(importance,imp_rf, by="predictor")
importance[,c(1,6)] %>% arrange(desc(rf))
################# Best Model - Ensemble ###############
# Train & Predict: average the lm, glm and rf predictions (loess and knn are
# not included in the ensemble) and round the mean to a 0/1 label.
# predict_rf is already numeric, hence no as.numeric() wrapper around it.
predict_ensemble <- as.factor(round((as.numeric(predict_lm)+as.numeric(predict_glm)+ (predict_rf))/3))
# Confusion Matrix
cm_ensemble <- confusionMatrix(as.factor(predict_ensemble), as.factor(testing$target))
cm_ensemble$byClass
# Store the ensemble accuracy in row 6 of the model-comparison table.
heart_models[6,2] <- cm_ensemble$overall[["Accuracy"]]
heart_models[6,]
#Variable Importance: ensemble importance = mean of the lm, glm and rf columns.
importance <- importance %>% mutate(ensemble=(lm+glm+rf)/3)
importance[,c(1,7)] %>% arrange(desc(ensemble))
#####################################################
# CONCLUSION #
# (Limitations / Further Studies) #
##################################################### | /heartattack_code.R | no_license | johndsowers/HeartAttack | R | false | false | 23,686 | r | #####################################################
#          Load Packages (if Required)              #
#####################################################
# Install any missing packages from CRAN before they are attached below.
if(!require(githubinstall)) install.packages("githubinstall", repos = "http://cran.us.r-project.org", quiet=TRUE)
if(!require(RCurl)) install.packages("RCurl", repos = "http://cran.us.r-project.org", quiet=TRUE)
if(!require(httr)) install.packages("httr", repos = "http://cran.us.r-project.org", quiet=TRUE)
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org", quiet=TRUE)
if(!require(lubridate)) install.packages("lubridate", repos = "http://cran.us.r-project.org", quiet=TRUE)
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org", quiet=TRUE)
if(!require(foreach)) install.packages("foreach", repos = "http://cran.us.r-project.org", quiet=TRUE)
if(!require(gam)) install.packages("gam", repos = "http://cran.us.r-project.org", quiet=TRUE)
if(!require(splines2)) install.packages("splines2", repos = "http://cran.us.r-project.org", quiet=TRUE)
if(!require(stringi)) install.packages("stringi", repos = "http://cran.us.r-project.org", quiet=TRUE)
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org", quiet=TRUE)
if(!require(gridExtra)) install.packages("gridExtra", repos = "http://cran.us.r-project.org", quiet=TRUE)
if(!require(ggplot2)) install.packages("ggplot2", repos = "http://cran.us.r-project.org", quiet=TRUE)
if(!require(rmarkdown)) install.packages("rmarkdown", repos = "http://cran.us.r-project.org", quiet=TRUE)
if(!require(knitr)) install.packages("knitr", repos = "http://cran.us.r-project.org", quiet=TRUE)
if(!require(devtools)) install.packages("devtools", repos = "http://cran.us.r-project.org", quiet=TRUE)
# BUG FIX: this line previously tested for 'tinytex' while installing
# 'latexpdf', so latexpdf was never installed when only it was missing
# (the library(latexpdf) call further down would then fail).
if(!require(latexpdf)) install.packages("latexpdf", repos = "http://cran.us.r-project.org", quiet=TRUE)
if(!require(tinytex)) install.packages("tinytex", repos = "http://cran.us.r-project.org", quiet=TRUE)
if(!require(packrat)) install.packages("packrat", repos = "http://cran.us.r-project.org", quiet=TRUE)
if(!require(rsconnect)) install.packages("rsconnect", repos = "http://cran.us.r-project.org", quiet=TRUE)
if(!require(readxl)) install.packages("readxl", repos = "http://cran.us.r-project.org", quiet=TRUE)
#####################################################
# Load Libraries #
#####################################################
library(githubinstall)
library(RCurl)
library(httr)
library(tidyverse)
library(lubridate)
library(caret)
library(gam)
library(foreach)
library(splines2)
library(stringi)
library(data.table)
library(gridExtra)
library(ggplot2)
library(rmarkdown)
library(knitr)
library(devtools)
library(latexpdf)
library(tinytex)
library(packrat)
library(rsconnect)
library(readxl)
#####################################################
#          Reset Variables / Set Options            #
#####################################################
# NOTE(review): rm(list=ls()) wipes the caller's entire global environment.
# Acceptable for a standalone script, but dangerous if this file is ever
# sourced from an interactive session.
rm(list=ls()) # Clears all variables
options(digits=5) # Print numbers with 5 significant digits
#####################################################
#        Kaggle Heart Attack Prediction Data        #
#####################################################
##### GitHub .csv Data File - Unzipped from original Kaggle URL
# KaggleURL records the data provenance only; the file itself is fetched
# from the GitHub mirror below into a temporary file.
KaggleURL <- "https://www.kaggle.com/nareshbhat/health-care-data-set-on-heart-attack-possibility"
GitHubURL <- "https://github.com/johndsowers/HeartAttack/raw/master/heart.csv"
dl <- tempfile()
download.file(GitHubURL, dl)
heart <- as.data.frame(read_csv(dl))
# Keep target numeric so the caret models below treat it as a numeric outcome.
heart <- heart %>% mutate(target=as.numeric(target))
heart
#####################################################
#     Creating 90% Train / 10% Test Data Sets       #
#####################################################
set.seed(1) # Set Seed to 1 so the partition is reproducible
# Hold out 10% of the rows (balanced on the outcome) for final testing.
test_index <- createDataPartition(y = heart$target, times = 1, p = 0.1, list = FALSE)
training <- heart[-test_index,] # Create Training Data Set
testing <- heart[test_index,] # Create Test Data Set
#####################################################
# INTRODUCTION #
# (Purpose / Initial Data Analysis) #
#####################################################
##### Reference https://artificialintelligence-news.com/2019/05/14/ml-algorithm-predicts-heart-attacks/
##### Possibility of recreating this 90% prediction rate utiling the 13 predictors in the Kaggle website
#####################################################
# Initial Data Analysis #
#####################################################
################# Data Set Overview #################
# Quick structural overview: row count, column names/types, sample values.
str(heart)
#####################################################
# METHODOLOGY / ANALYSIS #
# (Purpose / Initial Data Analysis) #
#####################################################
################## Analysis by Age ##################
# NOTE(review): the red reference line values and their "Mean = ..." labels
# in this section are hard-coded constants; recompute if the data changes.
##### Patient Age Proportions Figure
age_p_distribution.fig <- heart %>%
group_by(age) %>%
summarize(p = length(target)/nrow(heart), .groups="drop") %>%
ggplot(aes(age, p)) +
geom_bar(stat="identity") +
geom_hline(yintercept=.0244, size=1.0, color="red") +
annotate("text", x = 33.5, y = .0264, label = "Mean = .0244") +
ggtitle("Patient Distribution by Age") +
xlab("Age (Years)") +
ylab("Proportion of Patients")
##### Confirmed Heart Disease Age Proportions Figure
age_p_disease.fig <- heart %>%
group_by(age) %>%
mutate(n_total = length(age)) %>%
# prop_health is computed here but only prop_disease is plotted below.
summarize(prop_disease = sum(target==1)/n_total, prop_health= sum(target==0)/n_total, .groups="drop") %>%
ggplot(aes(age, prop_disease)) +
geom_point() +
geom_smooth(method = "loess", formula='y ~ x') +
geom_hline(yintercept=0.545, size=1.0, color="red") +
annotate("text", x = 34.5, y = .575, label = "Mean = 0.545") +
ylim(0, 1) +
ggtitle("Heart Disease Proportions by Age") +
xlab("Age (Years)") +
ylab("Proportion of Cases")
grid.arrange(age_p_distribution.fig, age_p_disease.fig, ncol=2) # Plot proportion distributions
################## Analysis by Sex ##################
# NOTE(review): the red reference line value and its "Mean = 0.6" label are
# hard-coded constants; recompute if the data changes.
##### Patient Gender Proportions Figure
sex_p_distribution.fig <- heart %>%
group_by(sex) %>%
summarise(p = length(target)/nrow(heart), .groups="drop") %>%
mutate(sex = ifelse(sex==0,"Female", "Male")) %>%
ggplot(aes(sex, p)) +
geom_bar(stat="identity") +
ggtitle("Patient Distribution by Gender") +
xlab("Gender") +
ylab("Proportion of Patients")
##### Confirmed Heart Disease Gender Proportions Figure
sex_p_disease.fig <- heart %>%
group_by(sex) %>%
summarise(p = sum(as.numeric(target)/length(target)), .groups="drop") %>%
mutate(sex = ifelse(sex==0,"Female", "Male")) %>%
ggplot(aes(sex,p)) +
geom_bar(stat="identity") +
geom_hline(yintercept=.6, size=1.0, color="red") +
annotate("text", x = 2.3, y = .62, label = "Mean = 0.6") +
ggtitle("Heart Disease Proportions by Gender") +
xlab("Gender") +
ylab("Proportion of Cases")
grid.arrange(sex_p_distribution.fig, sex_p_disease.fig, ncol=2) # Plot proportion distributions
########### Analysis by Chest Pain (cp) #############
# NOTE(review): the red reference line value and its "Mean = 0.645" label are
# hard-coded constants; recompute if the data changes.
##### Patient chest pain Proportions Figure
cp_p_distribution.fig <- heart %>%
group_by(cp) %>%
summarise(p=length(cp)/nrow(heart), .groups="drop") %>%
ggplot(aes(cp, p)) +
geom_bar(stat="identity") +
ggtitle("Patient Distribution by CP") +
xlab("Chest Pain (CP) Score") +
ylab("Proportion of Patients")
##### Confirmed Heart Disease Chest Pain Proportions Figure
cp_p_disease.fig <- heart %>%
group_by(cp) %>%
summarise(p = sum(as.numeric(target))/length(target), .groups="drop") %>%
ggplot(aes(cp,p)) +
geom_bar(stat="identity") +
geom_hline(yintercept=.645, size=1.0, color="red") +
annotate("text", x = 0, y = .625, label = "Mean = 0.645") +
ggtitle("Heart Disease Proportions by CP") +
xlab("Chest Pain (CP) Score") +
ylab("Proportion of Cases")
grid.arrange(cp_p_distribution.fig, cp_p_disease.fig, ncol=2) # Plot proportion distributions
### Analysis by Resting Blood Pressure (trestbps) ###
# NOTE(review): the red reference line value and its "Mean = 0.510" label are
# hard-coded constants; recompute if the data changes.
##### Patient BP Proportions Figure
trestbps_p_distribution.fig <- heart %>%
group_by(trestbps) %>%
summarise(p=length(cp)/nrow(heart), .groups="drop") %>%
ggplot(aes(trestbps, p)) +
geom_bar(stat="identity") +
ggtitle("Patient Distribution by BP") +
# TYPO FIX: the axis label previously read "Blood Perssure".
xlab("Blood Pressure (BP) Level") +
ylab("Proportion of Patients")
##### Confirmed Heart Disease BP Proportions Figure
trestbps_p_disease.fig <- heart %>%
group_by(trestbps) %>%
summarise(p = sum(as.numeric(target))/length(target), .groups="drop") %>%
ggplot(aes(trestbps,p)) +
geom_smooth(method='loess', formula='y ~ x') +
geom_point() +
#geom_bar(stat="identity") +
geom_hline(yintercept=.51, size=1.0, color="red") +
annotate("text", x = 190, y = .56, label = "Mean = 0.510") +
ggtitle("Heart Disease Proportions by BP") +
xlab("Blood Pressure (BP) Level") +
ylab("Proportion of Cases")
grid.arrange(trestbps_p_distribution.fig, trestbps_p_disease.fig, ncol=2) # Plot proportion distributions
########## Analysis by Cholesterol (chol) ###########
# NOTE(review): the red reference line value and its "Mean = 0.521" label are
# hard-coded constants; recompute if the data changes.
##### Patient Cholesterol Proportions Figure
chol_p_distribution.fig <- heart %>%
group_by(chol) %>%
summarise(p=length(cp)/nrow(heart), .groups="drop") %>%
ggplot(aes(chol, p)) +
geom_bar(stat="identity") +
ggtitle("Patient Distribution by Chol") +
# TYPO FIX: the axis label previously read "Cholesteral".
xlab("Cholesterol (Chol) Level") +
ylab("Proportion of Patients")
##### Confirmed Heart Disease Cholesterol Proportions Figure
chol_p_disease.fig <- heart %>%
group_by(chol) %>%
summarise(p = sum(as.numeric(target))/length(target), .groups="drop") %>%
ggplot(aes(chol,p)) +
geom_smooth(method='loess', formula='y ~ x') +
geom_point() +
geom_hline(yintercept=.521, size=1.0, color="red") +
annotate("text", x = 510, y = .557, label = "Mean = 0.521") +
ggtitle("Heart Disease Proportions by Chol") +
xlab("Cholesterol (Chol) Level") +
ylab("Proportion of Cases")
grid.arrange(chol_p_distribution.fig, chol_p_disease.fig, ncol=2) # Plot proportion distributions
########## Analysis by Blood Sugar (fbs) ############
# BUG FIX: both figures below previously recoded fbs from the *exang* column
# (ifelse(exang==0, ...)), a copy-paste left-over from the angina section, so
# the "False"/"True" groups reflected exercise-induced angina rather than
# fasting blood sugar. They now recode from fbs itself, and (as in the other
# sections) the recode happens BEFORE group_by().
# NOTE(review): the hard-coded 0.531 mean annotation below was derived under
# the old (buggy) grouping — TODO recompute from the corrected figures.
##### Patient Blood Sugar Proportions Figure
fbs_p_distribution.fig <- heart %>%
mutate(fbs=ifelse(fbs==0,"False","True")) %>%
group_by(fbs) %>%
summarise(p=length(cp)/nrow(heart), .groups="drop") %>%
ggplot(aes(fbs, p)) +
geom_bar(stat="identity") +
ggtitle("Patient Distribution by FBS") +
xlab("Fasting Blood Sugar (fbs) Level (>120 mg/dl)") +
ylab("Proportion of Patients")
##### Confirmed Heart Disease Blood Sugar Proportions Figure
fbs_p_disease.fig <- heart %>%
mutate(fbs=ifelse(fbs==0,"False","True")) %>%
group_by(fbs) %>%
summarise(p = sum(as.numeric(target))/length(target), .groups="drop") %>%
ggplot(aes(fbs,p)) +
geom_bar(stat="identity") +
geom_hline(yintercept=.531, size=1.0, color="red") +
annotate("text", x = 2.3, y = .545, label = "Mean = 0.531") +
ggtitle("Heart Disease Proportions by FBS") +
xlab("Fasting Blood Sugar (fbs) Level (>120 mg/dl)") +
ylab("Proportion of Cases")
grid.arrange(fbs_p_distribution.fig, fbs_p_disease.fig, ncol=2) # Plot proportion distributions
#### Analysis by Electrocardiographic (restecg) #####
# NOTE(review): the red reference line value and its "Mean = 0.333" label are
# hard-coded constants; recompute if the data changes.
##### Patient ECG Results Proportions Figure
ekg_p_distribution.fig <- heart %>%
group_by(restecg) %>%
summarise(p=length(cp)/nrow(heart), .groups="drop") %>%
ggplot(aes(restecg, p)) +
geom_bar(stat="identity") +
ggtitle("Patient Distribution by EKG") +
xlab("Electrocardiogram (EKG) Score") +
ylab("Proportion of Patients")
##### Confirmed Heart Disease ECG Proportions Figure
ekg_p_disease.fig <- heart %>%
group_by(restecg) %>%
summarise(p = sum(as.numeric(target)/length(target)), .groups="drop") %>%
ggplot(aes(restecg,p)) +
geom_smooth(method='loess', formula='y ~ x') +
geom_point() +
geom_hline(yintercept=.333, size=1.0, color="red") +
annotate("text", x = 0.25, y = .343, label = "Mean = 0.333") +
# COPY-PASTE FIX: this title previously said "by FBS" in the EKG section.
ggtitle("Heart Disease Proportions by EKG") +
xlab("Electrocardiogram (EKG) Score") +
ylab("Proportion of Cases")
grid.arrange(ekg_p_distribution.fig, ekg_p_disease.fig, ncol=2) # Plot proportion distributions
####### Analysis by Max Heart Rate (thalach) ########
# NOTE(review): the red reference line value and its "Mean = 0.491" label are
# hard-coded constants; recompute if the data changes.
##### Patient Max Heart Rate Proportions Figure
hr_p_distribution.fig <- heart %>%
group_by(thalach) %>%
summarise(p=length(cp)/nrow(heart), .groups="drop") %>%
ggplot(aes(thalach, p)) +
geom_bar(stat="identity") +
ggtitle("Patient Distribution by HR") +
xlab("Maximum Heart Rate (HR)") +
ylab("Proportion of Patients")
##### Confirmed Heart Disease Max Heart Rate Proportions Figure
hr_p_disease.fig <- heart %>%
group_by(thalach) %>%
summarise(p = sum(as.numeric(target))/length(target), .groups="drop") %>%
ggplot(aes(thalach,p)) +
geom_smooth(method='loess', formula='y ~ x') +
geom_point() +
geom_hline(yintercept=.491, size=1.0, color="red") +
annotate("text", x = 85, y = .527, label = "Mean = 0.491") +
ggtitle("Heart Disease Proportions by HR") +
xlab("Maximum Heart Rate (HR)") +
ylab("Proportion of Cases")
grid.arrange(hr_p_distribution.fig, hr_p_disease.fig, ncol=2) # Plot proportion distributions
########### Analysis by Angina (exang) ##############
# NOTE(review): the red reference line value and its "Mean = 0.464" label are
# hard-coded constants; recompute if the data changes.
##### Patient Angina Proportions Figure
ang_p_distribution.fig <- heart %>%
mutate(exang=ifelse(exang==0,"Not Present","Present")) %>%
group_by(exang) %>%
summarise(p=length(cp)/nrow(heart), .groups="drop") %>%
ggplot(aes(exang, p)) +
geom_bar(stat="identity") +
ggtitle("Patient Distribution by Angina") +
xlab("Exercise Induced Angina") +
ylab("Proportion of Patients")
##### Confirmed Heart Disease Angina Proportions Figure
ang_p_disease.fig <- heart %>%
mutate(exang=ifelse(exang==0,"Not Present","Present")) %>%
group_by(exang) %>%
summarise(p = sum(as.numeric(target))/length(target), .groups="drop") %>%
ggplot(aes(exang,p)) +
geom_bar(stat="identity") +
geom_hline(yintercept=.464, size=1.0, color="red") +
annotate("text", x = 2.2, y = .484, label = "Mean = 0.464") +
ggtitle("Heart Disease Proportions by Angina") +
xlab("Exercise Induced Angina") +
ylab("Proportion of Cases")
grid.arrange(ang_p_distribution.fig, ang_p_disease.fig, ncol=2) # Plot proportion distributions
################ Analysis by oldpeak #################
# NOTE(review): the red reference line value and its "Mean = 0.393" label are
# hard-coded constants; recompute if the data changes.
##### Patient Oldpeak Proportions Figure
oldpeak_p_distribution.fig <- heart %>%
group_by(oldpeak) %>%
summarise(p=length(oldpeak)/nrow(heart), .groups="drop") %>%
ggplot(aes(oldpeak, p)) +
geom_bar(stat="identity") +
ggtitle("Patient Distribution by ST") +
xlab("ST Depression Score") +
ylab("Proportion of Patients")
##### Confirmed Heart Disease Oldpeak Proportions Figure
oldpeak_p_disease.fig <- heart %>%
group_by(oldpeak) %>%
summarise(p = sum(as.numeric(target))/length(target), .groups="drop") %>%
ggplot(aes(oldpeak,p)) +
geom_smooth(method='loess', formula='y ~ x') +
geom_point() +
geom_hline(yintercept=.393, size=1.0, color="red") +
annotate("text", x = 5.5, y = .427, label = "Mean = 0.393") +
ggtitle("Heart Disease Proportions by ST") +
xlab("ST Depression Score") +
ylab("Proportion of Cases")
grid.arrange(oldpeak_p_distribution.fig, oldpeak_p_disease.fig, ncol=2) # Plot proportion distributions
################# Analysis by slope #################
# NOTE(review): the red reference line value and its "Mean = 0.693" label are
# hard-coded constants; recompute if the data changes.
##### Patient slope Proportions Figure
slope_p_distribution.fig <- heart %>%
group_by(slope) %>%
summarise(p=length(slope)/nrow(heart), .groups="drop") %>%
ggplot(aes(slope, p)) +
geom_bar(stat="identity") +
ggtitle("Patient Distribution by Slope") +
xlab("Slope") +
ylab("Proportion of Patients")
##### Confirmed Heart Disease Slope Proportions Figure
slope_p_disease.fig <- heart %>%
group_by(slope) %>%
summarise(p = sum(as.numeric(target))/length(target), .groups="drop") %>%
ggplot(aes(slope,p)) +
geom_smooth(method='loess', formula='y ~ x') +
geom_point() +
geom_hline(yintercept=.693, size=1.0, color="red") +
annotate("text", x = 0.23, y = .703, label = "Mean = 0.693") +
ggtitle("Heart Disease Proportions by Slope") +
xlab("Slope") +
ylab("Proportion of Cases")
grid.arrange(slope_p_distribution.fig, slope_p_disease.fig, ncol=2) # Plot proportion distributions
################### Analysis by ca ##################
# TYPO FIX: "Flourosopy" -> "Fluoroscopy" in all four plot labels below.
# NOTE(review): the red reference line value and its "Mean = 0.44" label are
# hard-coded constants; recompute if the data changes.
##### Patient Blood Vessel Numbers Proportions Figure
ca_p_distribution.fig <- heart %>%
group_by(ca) %>%
summarise(p=length(oldpeak)/nrow(heart), .groups="drop") %>%
ggplot(aes(ca, p)) +
geom_bar(stat="identity") +
ggtitle("Patient Distribution by Fluoroscopy") +
xlab("Number Blood Vessels Colored by Fluoroscopy") +
ylab("Proportion of Patients")
##### Confirmed Heart Disease Blood Vessels Proportions Figure
ca_p_disease.fig <- heart %>%
group_by(ca) %>%
summarise(p = sum(as.numeric(target))/length(target), .groups="drop") %>%
ggplot(aes(ca,p)) +
geom_smooth(method='loess', formula='y ~ x') +
geom_point() +
geom_hline(yintercept=.44, size=1.0, color="red") +
annotate("text", x = 0.35, y = .455, label = "Mean = 0.44") +
ggtitle("Heart Disease Proportions by Fluoroscopy") +
xlab("Number Blood Vessels Colored by Fluoroscopy") +
ylab("Proportion of Cases")
grid.arrange(ca_p_distribution.fig, ca_p_disease.fig, ncol=2) # Plot proportion distributions
################## Analysis by thal #################
# NOTE(review): the red reference line value and its "Mean = 0.464" label are
# hard-coded constants; recompute if the data changes.
##### Patient Thalassemia Proportions Figure
thal_p_distribution.fig <- heart %>%
group_by(thal) %>%
summarise(p=length(oldpeak)/nrow(heart), .groups="drop") %>%
ggplot(aes(thal, p)) +
geom_bar(stat="identity") +
ggtitle("Patient Distribution by Thalassemia") +
xlab("Thalassemia") +
ylab("Proportion of Patients")
##### Confirmed Heart Disease Thalassemia Proportions Figure
thal_p_disease.fig <- heart %>%
group_by(thal) %>%
summarise(p = sum(as.numeric(target))/length(target), .groups="drop") %>%
ggplot(aes(thal,p)) +
geom_smooth(method='loess', formula='y ~ x') +
geom_point() +
geom_hline(yintercept=.464, size=1.0, color="red") +
annotate("text", x = 0.33, y = .474, label = "Mean = 0.464") +
# CONSISTENCY FIX: a title was missing here, unlike every sibling section.
ggtitle("Heart Disease Proportions by Thalassemia") +
xlab("Thalassemia") +
ylab("Proportion of Cases")
grid.arrange(thal_p_distribution.fig, thal_p_disease.fig, ncol=2) # Plot proportion distributions
#####################################################
# RESULTS #
# (Summary) #
#####################################################
##### Initialize the bookkeeping tables used to collect results.
# One row per model; accuracy starts as NA and is filled in after each fit.
method <- c("lm", "glm", "loess", "knn", "rf", "ensemble")
accuracy <- rep(NA, length(method))
heart_models <- data.frame(method, accuracy)
# One row per predictor; each model appends its importance scores as a column.
predictor <- c(
"age", "sex", "cp", "trestbps", "chol", "fbs", "restecg",
"thalach", "exang", "oldpeak", "slope", "ca", "thal"
)
importance <- data.frame(predictor = predictor)
##### Linear Regression (LM) Model
# NOTE(review): `target` is numeric, so the models in this section are fit as
# regressions and their predictions rounded to 0/1 labels for scoring —
# TODO confirm this is intended rather than factor-based classification.
# Train
train_lm <- train(target~., method="lm", data=training)
# Predict (round the numeric prediction to a 0/1 class label)
predict_lm <- round(predict(train_lm, newdata=testing))
# Confusion Matrix
cm_lm <- confusionMatrix(as.factor(predict_lm), as.factor(testing$target))
cm_lm$byClass
# Store the linear-model accuracy in row 1 of the model-comparison table.
heart_models[1,2] <- cm_lm$overall[["Accuracy"]]
heart_models[1,]
# Variable Importance: append lm importance scores as a new column.
imp_lm <- data.frame(predictor=row.names(varImp(train_lm)$importance),
lm=varImp(train_lm)$importance$Overall)
importance <- right_join(importance,imp_lm, by="predictor")
importance[,c(1,2)] %>% arrange(desc(lm))
##### Logistic Regression (GLM) Model
# NOTE(review): with a numeric `target`, caret's method="glm" fits a gaussian
# GLM here, not a logistic model — convert target to a factor if logistic
# regression is intended (TODO confirm).
# Train
train_glm <- train(target~., method="glm", data=training)
# Predict (round the numeric prediction to a 0/1 class label)
predict_glm <- round(predict(train_glm, newdata=testing))
# Confusion Matrix
cm_glm <- confusionMatrix(as.factor(predict_glm), as.factor(testing$target))
cm_glm$byClass
# Store the GLM accuracy in row 2 of the model-comparison table.
heart_models[2,2] <- cm_glm$overall[["Accuracy"]]
heart_models[2,]
# Variable Importance: append GLM importance scores as a new column.
imp_glm <- data.frame(predictor=row.names(varImp(train_glm)$importance),
glm=varImp(train_glm)$importance$Overall)
importance <- right_join(importance,imp_glm, by="predictor")
importance[,c(1,3)] %>% arrange(desc(glm))
##### Local Regression (gamLoess) Model
# Train
train_loess <- train(target~., method="gamLoess", data=training)
# Predict (round the numeric prediction to a 0/1 class label)
predict_loess <- round(predict(train_loess, newdata=testing))
# Confusion Matrix
cm_loess <- confusionMatrix(as.factor(predict_loess), as.factor(testing$target))
cm_loess$byClass
# Store the loess accuracy in row 3 of the model-comparison table.
heart_models[3,2] <- cm_loess$overall[["Accuracy"]]
heart_models[3,]
# Variable Importance: append loess importance scores as a new column.
imp_loess <- data.frame(predictor=row.names(varImp(train_loess)$importance),
loess=varImp(train_loess)$importance$Overall)
importance <- right_join(importance,imp_loess, by="predictor")
importance[,c(1,4)] %>% arrange(desc(loess))
##### K-Nearest Neighbors (KNN)
# Train.
# FIX: the tuning grid previously used seq(0:5), which does NOT yield 0..5 —
# seq() applied to a length-6 vector returns seq_along(0:5), i.e. 1:6.
# Written explicitly as 1:6 here, preserving the original behavior
# (k = 0 would be invalid for knn anyway).
train_knn <- train(target~., method="knn", data=training, tuneGrid = data.frame(k=1:6))
train_knn$bestTune
# Predict (round the numeric prediction to a 0/1 class label)
predict_knn <- round(predict(train_knn, newdata=testing))
# Confusion Matrix
cm_knn <- confusionMatrix(as.factor(predict_knn), as.factor(testing$target))
cm_knn$byClass
# Store the knn accuracy in row 4 of the model-comparison table.
heart_models[4,2] <- cm_knn$overall[["Accuracy"]]
heart_models[4,]
# Variable Importance: append knn importance scores as a new column.
imp_knn <- data.frame(predictor=row.names(varImp(train_knn)$importance),
knn=varImp(train_knn)$importance$Overall)
importance <- right_join(importance,imp_knn, by="predictor")
importance[,c(1,5)] %>% arrange(desc(knn))
##### RF (Random Forest)
# Train, tuning mtry (predictors sampled per split) over 1, 2, 4 and 8.
train_rf <- train(target~., method="rf", data=training, tuneGrid = data.frame(mtry=c(1,2,4,8)))
train_rf$bestTune
# Predict (round the numeric prediction to a 0/1 class label)
predict_rf <- round(predict(train_rf, newdata=testing))
# Confusion Matrix
cm_rf <- confusionMatrix(as.factor(predict_rf), as.factor(testing$target))
cm_rf$byClass
# Store the random-forest accuracy in row 5 of the model-comparison table.
heart_models[5,2] <- cm_rf$overall[["Accuracy"]]
heart_models[5,]
#Variable Importance: append rf importance scores as a new column.
imp_rf <- data.frame(predictor=row.names(varImp(train_rf)$importance),
rf=varImp(train_rf)$importance$Overall)
importance <- right_join(importance,imp_rf, by="predictor")
importance[,c(1,6)] %>% arrange(desc(rf))
################# Best Model - Ensemble ###############
# Train & Predict: average the lm, glm and rf predictions (loess and knn are
# not included in the ensemble) and round the mean to a 0/1 label.
# predict_rf is already numeric, hence no as.numeric() wrapper around it.
predict_ensemble <- as.factor(round((as.numeric(predict_lm)+as.numeric(predict_glm)+ (predict_rf))/3))
# Confusion Matrix
cm_ensemble <- confusionMatrix(as.factor(predict_ensemble), as.factor(testing$target))
cm_ensemble$byClass
# Store the ensemble accuracy in row 6 of the model-comparison table.
heart_models[6,2] <- cm_ensemble$overall[["Accuracy"]]
heart_models[6,]
#Variable Importance: ensemble importance = mean of the lm, glm and rf columns.
importance <- importance %>% mutate(ensemble=(lm+glm+rf)/3)
importance[,c(1,7)] %>% arrange(desc(ensemble))
#####################################################
# CONCLUSION #
# (Limitations / Further Studies) #
##################################################### |
library(JGEE)
### Name: JGee1
### Title: Function to fit a joint generalized estimating equation model
### with shared regression coefficients
### Aliases: JGee1 mycor_jgee1 print.JGee1 print.summary.JGee1 S_H1
### summary.JGee1
### Keywords: joint modelling marginal models
### ** Examples
## Not run:
##D data(MSCMsub)
##D mydata=MSCMsub
##D
##D #MSCM study data layout requires some arrangement for model fitting.
##D
##D N=167
##D nt=4
##D nr=2
##D
##D yvec=matrix(0,N*nt*nr,1)
##D xmat=matrix(0,N*nt*nr,8)
##D
##D for(i in 1:N) {
##D for(j in 1:nt){
##D yvec[j+(i-1)*nr*nt]=mydata[j+(i-1)*nt,2]
##D yvec[j+(i-1)*nr*nt+nt]=mydata[j+(i-1)*nt,3]
##D }
##D }
##D
##D for(i in 1:N) {
##D for(j in 1:nt){
##D for(k in 4:11){
##D xmat[j+(i-1)*nr*nt,(k-3)]=mydata[j+(i-1)*nt,k]
##D xmat[j+(i-1)*nr*nt+nt,(k-3)]=mydata[j+(i-1)*nt,k]
##D }
##D }
##D }
##D
##D id=rep(1:N, each=(nt*nr))
##D mydatanew=data.frame(id,yvec,xmat)
##D head(mydatanew)
##D colnames(mydatanew)=c("id","resp","chlth","csex","education","employed",
##D "housize","married","mhlth","race")
##D head(mydatanew)
##D
##D formulaj1=resp~chlth+csex+education+employed+housize+married+
##D mhlth+race
##D
##D fitjgee1=JGee1(formula=formulaj1,id=mydatanew[,1],data=mydatanew, nr=2,
##D na.action=NULL, family=binomial(link="logit"), corstr1="exchangeable",
##D Mv=NULL, corstr2="independence", beta_int=NULL, R1=NULL, R2=NULL,
##D scale.fix= FALSE, scale.value=1, maxiter=25, tol=10^-3,
##D silent=FALSE)
##D
##D summary(fitjgee1)
##D
##D names(summary(fitjgee1))
##D
##D summary(fitjgee1)$working.correlation1
## End(Not run)
| /data/genthat_extracted_code/JGEE/examples/JGee1.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,614 | r | library(JGEE)
### Name: JGee1
### Title: Function to fit a joint generalized estimating equation model
### with shared regression coefficients
### Aliases: JGee1 mycor_jgee1 print.JGee1 print.summary.JGee1 S_H1
### summary.JGee1
### Keywords: joint modelling marginal models
### ** Examples
## Not run:
##D data(MSCMsub)
##D mydata=MSCMsub
##D
##D #MSCM study data layout requires some arrangement for model fitting.
##D
##D N=167
##D nt=4
##D nr=2
##D
##D yvec=matrix(0,N*nt*nr,1)
##D xmat=matrix(0,N*nt*nr,8)
##D
##D for(i in 1:N) {
##D for(j in 1:nt){
##D yvec[j+(i-1)*nr*nt]=mydata[j+(i-1)*nt,2]
##D yvec[j+(i-1)*nr*nt+nt]=mydata[j+(i-1)*nt,3]
##D }
##D }
##D
##D for(i in 1:N) {
##D for(j in 1:nt){
##D for(k in 4:11){
##D xmat[j+(i-1)*nr*nt,(k-3)]=mydata[j+(i-1)*nt,k]
##D xmat[j+(i-1)*nr*nt+nt,(k-3)]=mydata[j+(i-1)*nt,k]
##D }
##D }
##D }
##D
##D id=rep(1:N, each=(nt*nr))
##D mydatanew=data.frame(id,yvec,xmat)
##D head(mydatanew)
##D colnames(mydatanew)=c("id","resp","chlth","csex","education","employed",
##D "housize","married","mhlth","race")
##D head(mydatanew)
##D
##D formulaj1=resp~chlth+csex+education+employed+housize+married+
##D mhlth+race
##D
##D fitjgee1=JGee1(formula=formulaj1,id=mydatanew[,1],data=mydatanew, nr=2,
##D na.action=NULL, family=binomial(link="logit"), corstr1="exchangeable",
##D Mv=NULL, corstr2="independence", beta_int=NULL, R1=NULL, R2=NULL,
##D scale.fix= FALSE, scale.value=1, maxiter=25, tol=10^-3,
##D silent=FALSE)
##D
##D summary(fitjgee1)
##D
##D names(summary(fitjgee1))
##D
##D summary(fitjgee1)$working.correlation1
## End(Not run)
|
library(tidyverse)
library(ggthemes)
library(ggparliament)
library(readxl)
# Read the raw seat data and build fully-labelled party totals for the chamber.
diputados_tucuman <- read_xlsx("data/Tucuman.xlsx") %>%
mutate(party_long = case_when(
party_short == "UCRHY" ~ "UCR-Hipólito Yrigoyen (1)", # Edited to add missing accents
party_short == "PSJ" ~ "Psj Recuperemos Tucumán (3)",
TRUE ~ party_long
) ) %>%
# pct = seat share in percent; name = party_long stripped to letters/spaces.
mutate(pct = round(seats/sum(seats)*100,1),
name = str_remove_all(party_long, "[^[:alpha:] & [:space:]]"),
name = str_squish(name)) %>%
select(name, everything()) %>% # Rebuild party_long so the label carries seats and percentage
mutate(party_long = case_when(seats == 1 ~ paste0(name, " (", seats, " banca - ", pct, "%)"),
TRUE ~ paste0(name, " (", seats, " bancas - ", pct, "%)"))) %>%
select(year, country, house, party_long, party_short, seats, government , colour, orden) %>%
print()
# Build the ggparliament layout object (per-seat coordinates) from the tidy data.
data_diputados_tucuman<- ggparliament::parliament_data(diputados_tucuman, # original data
type = "semicircle", # shape of the chamber
parl_rows =3, # number of seat rows
party_seats = diputados_tucuman$seats, # seats per party
plot_order = diputados_tucuman$orden) %>% # party plotting order
mutate(colour = as.character(colour)) %>% # character vector of the pre-assigned HEX colour codes
as_tibble() %>%
print()
# Draw the semicircular parliament diagram with seat shares and majority line.
data_diputados_tucuman %>% as_tibble() %>%
ggplot(aes(x, y, colour = party_long)) +
geom_parliament_seats(size = 7) + # seat (point) size
geom_highlight_government(government == 1, colour = "black", size = 8) + # black ring around governing-bloc seats
geom_parliament_bar(party = party_short, label = F) + # bar with seat proportions
draw_majoritythreshold(n = 31, label = F, type = "semicircle") + # draw the majority threshold
scale_colour_manual(values = data_diputados_tucuman$colour,
limits = data_diputados_tucuman$party_long) +
guides(colour = guide_legend(nrow=7)) + # customise the legend layout
labs(title = "Diputados",
subtitle = "2019 - 2021",
colour = "Bloques") +
theme_fivethirtyeight() +
theme(panel.grid = element_blank(),
axis.text = element_blank(),
legend.position = "bottom",
legend.text = element_text(size = 14))
| /script/legis_tucuman.R | no_license | TuQmano/paRlamentos | R | false | false | 2,560 | r | library(tidyverse)
library(ggthemes)
library(ggparliament)
library(readxl)
# Read the raw seat data and build fully-labelled party totals for the chamber.
diputados_tucuman <- read_xlsx("data/Tucuman.xlsx") %>%
mutate(party_long = case_when(
party_short == "UCRHY" ~ "UCR-Hipólito Yrigoyen (1)", # Edited to add missing accents
party_short == "PSJ" ~ "Psj Recuperemos Tucumán (3)",
TRUE ~ party_long
) ) %>%
# pct = seat share in percent; name = party_long stripped to letters/spaces.
mutate(pct = round(seats/sum(seats)*100,1),
name = str_remove_all(party_long, "[^[:alpha:] & [:space:]]"),
name = str_squish(name)) %>%
select(name, everything()) %>% # Rebuild party_long so the label carries seats and percentage
mutate(party_long = case_when(seats == 1 ~ paste0(name, " (", seats, " banca - ", pct, "%)"),
TRUE ~ paste0(name, " (", seats, " bancas - ", pct, "%)"))) %>%
select(year, country, house, party_long, party_short, seats, government , colour, orden) %>%
print()
# Build the ggparliament layout object (per-seat coordinates) from the tidy data.
data_diputados_tucuman<- ggparliament::parliament_data(diputados_tucuman, # original data
type = "semicircle", # shape of the chamber
parl_rows =3, # number of seat rows
party_seats = diputados_tucuman$seats, # seats per party
plot_order = diputados_tucuman$orden) %>% # party plotting order
mutate(colour = as.character(colour)) %>% # character vector of the pre-assigned HEX colour codes
as_tibble() %>%
print()
data_diputados_tucuman %>% as_tibble() %>%
ggplot(aes(x, y, colour = party_long)) +
geom_parliament_seats(size = 7) + # tamaño de bancas (puntos)
geom_highlight_government(government == 1, colour = "black", size = 8) + # circulo negro al oficialismo
geom_parliament_bar(party = party_short, label = F) + # barra con proporción de bancas
draw_majoritythreshold(n = 31, label = F, type = "semicircle") + # dinuja el limite de mayoría
scale_colour_manual(values = data_diputados_tucuman$colour,
limits = data_diputados_tucuman$party_long) +
guides(colour = guide_legend(nrow=7)) + # customiza etiquetas
labs(title = "Diputados",
subtitle = "2019 - 2021",
colour = "Bloques") +
theme_fivethirtyeight() +
theme(panel.grid = element_blank(),
axis.text = element_blank(),
legend.position = "bottom",
legend.text = element_text(size = 14))
|
## Here are internal functions of the package IsoriX
## Hook run automatically when the package is attached; not for direct use.
## Assembles the whole startup banner as a single string, then emits it via
## packageStartupMessage() so users can silence it with
## suppressPackageStartupMessages().
.onAttach <- function(libname, pkgname) {
  version <- utils::packageDescription("IsoriX")$Version
  banner <- paste0(
    "\n IsoriX version ", version, " is loaded!",
    "\n",
    "\n The names of the functions and objects are not yet stable.",
    "\n We keep revising them to make IsoriX more intuitive for you to use.",
    "\n We will do our best to limit changes in names from version 1.0 onward!!",
    "\n",
    "\n Type:",
    "\n * ?IsoriX for a short description.",
    "\n * browseVignettes(package = 'IsoriX') for tutorials.",
    "\n * news(package = 'IsoriX') for news.",
    "\n"
  )
  packageStartupMessage(banner)
}
## Package-private environment used to stash state (the original sp setting
## and the user's R options) so the changes made in .onLoad() can be
## reverted in .onUnload().
.IsoriX.data <- new.env(parent = emptyenv())
.onLoad <- function(libname, pkgname) {
  ## This function should not be called by the user.
  ## It changes the default behaviour of sp concerning lat/long boundaries.
  .IsoriX.data$sp_ll_warn <- sp::get_ll_warn()  ## remember sp's current setting so .onUnload() can restore it
  sp::set_ll_warn(TRUE) ## makes sp create warnings instead of errors when lat/long are out of boundaries
  .IsoriX.data$R_options <- .Options ## back up the user's R options (restored in .onUnload())
}
.onUnload <- function(libpath) {
  ## This function should not be called by the user.
  ## It undoes the changes made in .onLoad(): restores sp's original
  ## lat/long-warning behaviour and the user's R options.
  sp::set_ll_warn(.IsoriX.data$sp_ll_warn)
  options(.IsoriX.data$R_options) ## reset R options to their backed up values
}
## Internal helper: render a numeric vector in fixed notation with exactly
## `digits` decimal places (e.g. .NiceRound(2, 3) -> "2.000").
.NiceRound <- function(x, digits) {
  fmt <- paste0("%.", digits, "f")
  sprintf(fmt, round(x, digits))
}
.CreateRaster <- function(long, lat, values, proj) {
  ## This function should not be called by the user but is itself called by other functions.
  ## It creates a raster from cell coordinates and cell values.
  ##
  ## Args:
  ##   long: a vector of the longitudes of the raster cells
  ##   lat: a vector of the latitudes of the raster cells
  ##   values: a vector of the values of the raster cells
  ##   proj: the projection system for the raster (a proj4 string)
  ##
  ## Returns:
  ##   The raster.
  ##
  ## NOTE(review): earlier revisions also took save.spatial.files / filename /
  ## overwrite.spatial.files arguments (see the commented-out writeRaster call
  ## below); they are no longer part of the signature.
  data <- data.frame(long = long, lat = lat, values = values)
  sp::coordinates(data) <- ~long+lat ## coordinates are being set for the raster
  sp::proj4string(data) <- sp::CRS(proj) ## projection is being set for the raster
  sp::gridded(data) <- TRUE ## a gridded structure is being set for the raster
  data.raster <- raster::raster(data) ## the raster is being created
  # if(save.spatial.files) writeRaster(
  #   data.raster,
  #   filename = paste(filename, ".asc", sep = ""),
  #   overwrite = overwrite.spatial.files
  # ) ## if save = TRUE the raster is exported as an ascii file
  return(data.raster) ## the raster is being returned
}
## Internal helper (called by .CreateRasterFromAssignment()); not for users.
## Wraps long/lat (plus optional cell values; -9999 is a placeholder default)
## into an sp spatial object carrying the projection `proj`.
.CreateSpatialPoints <- function(long, lat, values = -9999, proj) {
  data.sp <- data.frame(long = long, lat = lat, values = values)
  ## promote the data.frame to a spatial object: coordinates first, then CRS
  sp::coordinates(data.sp) <- ~long+lat
  sp::proj4string(data.sp) <- sp::CRS(proj)
  data.sp
}
.HitReturn <- function() {
  ## This function should not be called by the user but is itself called by other functions.
  ## It asks the user to press return in RStudio (for plotting).
  ##
  ## Fix: the scalar `if` condition previously used the elementwise `&`, which
  ## evaluates every operand; `&&` short-circuits, so IsoriX.getOption() is
  ## only consulted when the session is actually interactive and in RStudio.
  if (interactive() && .Platform$GUI == "RStudio" &&
      !IsoriX.getOption("dont_ask")) {
    cat("Hit <Return> for next plot")
    readline()
  }
  return(NULL)
}
.CompleteArgs <- function(fn) {
  ## This function should not be called by the user but is itself called by other functions.
  ## For every argument of `fn` whose *default* is a call to list(), it merges
  ## the (possibly partial) list supplied by the caller into the full default
  ## list, so unspecified list elements keep their default values.
  ## It works by side effect: the completed lists are assigned back into the
  ## caller's evaluation frame (the body of `fn`), and NULL is returned.
  env <- parent.frame()  # the evaluation frame of fn, where its arguments live
  args <- formals(fn)
  for (arg.name in names(args)) {
    if (is.call(arg <- args[[arg.name]])) {
      if (arg[1] == "list()") {  # only process arguments defaulting to list(...)
        arg.input <- mget(names(args), envir = env)[[arg.name]]  # value actually supplied
        arg.full <- eval(formals(fn)[[arg.name]])  # the full default list
        arg.full.updated <- utils::modifyList(arg.full, arg.input)  # caller-supplied elements win
        assign(arg.name, arg.full.updated, envir = env)
      }
    }
  }
  return(NULL)
}
.BuildAdditionalLayers <- function(x, sources, calib, borders, mask, mask2 = NULL) {
  ## This function should not be called by the user but is itself called by other functions.
  ## It builds the additional (optional) layers overlaid on plots: source
  ## locations, calibration points, country borders and up to two masks.
  ## Any element that should not be drawn becomes an empty latticeExtra::layer().
  ##
  ## NOTE: latticeExtra::layer() quotes its first argument; the expression is
  ## only evaluated at plotting time, with the names used inside it (pt, b, m,
  ## sp.points, sp.polygons) supplied through the `data` list of each call.
  ## layer for sources
  if (!sources$draw) {
    sources.layer <- latticeExtra::layer()
  } else {
    sources.layer <- latticeExtra::layer(sp::sp.points(sources,
                                                       col = pt$col,
                                                       cex = pt$cex,
                                                       pch = pt$pch,
                                                       lwd = pt$lwd
                                                       ),
                                         data = list(sources = x$sp.points$sources,
                                                     pt = sources,
                                                     sp.points = sp::sp.points
                                                     )
                                         )
  }
  ## layer for calibration points (calib may be NULL, or present but not drawn)
  if (is.null(calib)) {
    calib.layer <- latticeExtra::layer()
  } else {
    if (!calib$draw) {
      calib.layer <- latticeExtra::layer()
    } else {
      calib.layer <- latticeExtra::layer(sp::sp.points(calib, col = pt$col,
                                                       cex = pt$cex,
                                                       pch = pt$pch,
                                                       lwd = pt$lwd
                                                       ),
                                         data = list(calib = x$sp.points$calibs,
                                                     pt = calib,
                                                     sp.points = sp::sp.points
                                                     )
                                         )
    }
  }
  ## layer for country borders
  if (is.null(borders$borders)) {
    borders.layer <- latticeExtra::layer()
  } else {
    borders.layer <- latticeExtra::layer(sp::sp.polygons(b$borders,
                                                         lwd = b$lwd,
                                                         col = b$col,
                                                         fill = "transparent"
                                                         ),
                                         data = list(b = borders,
                                                     sp.polygons = sp::sp.polygons
                                                     )
                                         )
  }
  ## layer for the first mask
  if (is.null(mask$mask)) {
    mask.layer <- latticeExtra::layer()
  } else {
    mask.layer <- latticeExtra::layer(sp::sp.polygons(m$mask,
                                                      fill = m$fill,
                                                      col = m$col,
                                                      lwd = m$lwd
                                                      ),
                                      data = list(m = mask,
                                                  sp.polygons = sp::sp.polygons
                                                  )
                                      )
  }
  ## layer for the second, optional mask; the quoted expression still reads
  ## m$..., but here `data` binds m to mask2
  if (is.null(mask2$mask)) {
    mask2.layer <- latticeExtra::layer()
  } else {
    mask2.layer <- latticeExtra::layer(sp::sp.polygons(m$mask,
                                                       fill = m$fill,
                                                       col = m$col,
                                                       lwd = m$lwd
                                                       ),
                                       data = list(m = mask2,
                                                   sp.polygons = sp::sp.polygons
                                                   )
                                       )
  }
  out <- list(sources.layer = sources.layer,
              calib.layer = calib.layer,
              borders.layer = borders.layer,
              mask.layer = mask.layer,
              mask2.layer = mask2.layer
              )
  ## tweak to please the R CMD check code-checking procedure: pt, b and m only
  ## appear inside quoted layer() expressions, so these dummy assignments
  ## silence "no visible binding" notes
  b <- m <- pt <- NULL
  return(out)
}
.converts_months_to_numbers <- function(x) {
  ## This function should not be called by the user but is itself called by other functions.
  ## It converts English month names (abbreviated or not) into month numbers;
  ## month numbers (quoted or not) are passed through unchanged, and anything
  ## else becomes NA with a warning.
  ## Example: .converts_months_to_numbers(c("January", "Feb", 3, "April", "Toto"))
  ##
  ## Fix: vapply() replaces sapply() so the return type is always a numeric
  ## vector (sapply() returns an empty *list* for zero-length input), and the
  ## failure branch returns NA_real_ to keep that type stable.
  vapply(x, function(elt) {
    res <- match(tolower(elt), tolower(month.abb)) ## deals with abbreviations
    if (is.na(res)) {
      res <- match(tolower(elt), tolower(month.name)) ## deals with full names
    }
    if (is.na(res)) { ## not a month name: keep the raw value for the checks below
      res <- elt
    }
    if (res %in% paste(1:12)) { ## accept month numbers, quoted or not
      res <- as.numeric(res)
    }
    if (is.numeric(res)) {
      return(as.numeric(res)) ## as.numeric(): match() yields integer, vapply needs double
    }
    warning("some months are NA after the conversion in integers check your data!")
    NA_real_ ## final output is not a number
  }, numeric(1))
}
| /IsoriX/R/internalfunctions.R | no_license | PhDMeiwp/IsoriX_project | R | false | false | 9,254 | r | ## Here are internal functions of the package IsoriX
## Hook run automatically when the package is attached; not for direct use.
## Assembles the whole startup banner as a single string, then emits it via
## packageStartupMessage() so users can silence it with
## suppressPackageStartupMessages().
.onAttach <- function(libname, pkgname) {
  version <- utils::packageDescription("IsoriX")$Version
  banner <- paste0(
    "\n IsoriX version ", version, " is loaded!",
    "\n",
    "\n The names of the functions and objects are not yet stable.",
    "\n We keep revising them to make IsoriX more intuitive for you to use.",
    "\n We will do our best to limit changes in names from version 1.0 onward!!",
    "\n",
    "\n Type:",
    "\n * ?IsoriX for a short description.",
    "\n * browseVignettes(package = 'IsoriX') for tutorials.",
    "\n * news(package = 'IsoriX') for news.",
    "\n"
  )
  packageStartupMessage(banner)
}
## Package-private environment used to stash state (the original sp setting
## and the user's R options) so the changes made in .onLoad() can be
## reverted in .onUnload().
.IsoriX.data <- new.env(parent = emptyenv())
.onLoad <- function(libname, pkgname) {
  ## This function should not be called by the user.
  ## It changes the default behaviour of sp concerning lat/long boundaries.
  .IsoriX.data$sp_ll_warn <- sp::get_ll_warn()  ## remember sp's current setting so .onUnload() can restore it
  sp::set_ll_warn(TRUE) ## makes sp create warnings instead of errors when lat/long are out of boundaries
  .IsoriX.data$R_options <- .Options ## back up the user's R options (restored in .onUnload())
}
.onUnload <- function(libpath) {
  ## This function should not be called by the user.
  ## It undoes the changes made in .onLoad(): restores sp's original
  ## lat/long-warning behaviour and the user's R options.
  sp::set_ll_warn(.IsoriX.data$sp_ll_warn)
  options(.IsoriX.data$R_options) ## reset R options to their backed up values
}
## Internal helper: render a numeric vector in fixed notation with exactly
## `digits` decimal places (e.g. .NiceRound(2, 3) -> "2.000").
.NiceRound <- function(x, digits) {
  fmt <- paste0("%.", digits, "f")
  sprintf(fmt, round(x, digits))
}
.CreateRaster <- function(long, lat, values, proj) {
  ## This function should not be called by the user but is itself called by other functions.
  ## It creates a raster from cell coordinates and cell values.
  ##
  ## Args:
  ##   long: a vector of the longitudes of the raster cells
  ##   lat: a vector of the latitudes of the raster cells
  ##   values: a vector of the values of the raster cells
  ##   proj: the projection system for the raster (a proj4 string)
  ##
  ## Returns:
  ##   The raster.
  ##
  ## NOTE(review): earlier revisions also took save.spatial.files / filename /
  ## overwrite.spatial.files arguments (see the commented-out writeRaster call
  ## below); they are no longer part of the signature.
  data <- data.frame(long = long, lat = lat, values = values)
  sp::coordinates(data) <- ~long+lat ## coordinates are being set for the raster
  sp::proj4string(data) <- sp::CRS(proj) ## projection is being set for the raster
  sp::gridded(data) <- TRUE ## a gridded structure is being set for the raster
  data.raster <- raster::raster(data) ## the raster is being created
  # if(save.spatial.files) writeRaster(
  #   data.raster,
  #   filename = paste(filename, ".asc", sep = ""),
  #   overwrite = overwrite.spatial.files
  # ) ## if save = TRUE the raster is exported as an ascii file
  return(data.raster) ## the raster is being returned
}
## Internal helper (called by .CreateRasterFromAssignment()); not for users.
## Wraps long/lat (plus optional cell values; -9999 is a placeholder default)
## into an sp spatial object carrying the projection `proj`.
.CreateSpatialPoints <- function(long, lat, values = -9999, proj) {
  data.sp <- data.frame(long = long, lat = lat, values = values)
  ## promote the data.frame to a spatial object: coordinates first, then CRS
  sp::coordinates(data.sp) <- ~long+lat
  sp::proj4string(data.sp) <- sp::CRS(proj)
  data.sp
}
.HitReturn <- function() {
  ## This function should not be called by the user but is itself called by other functions.
  ## It asks the user to press return in RStudio (for plotting).
  ##
  ## Fix: the scalar `if` condition previously used the elementwise `&`, which
  ## evaluates every operand; `&&` short-circuits, so IsoriX.getOption() is
  ## only consulted when the session is actually interactive and in RStudio.
  if (interactive() && .Platform$GUI == "RStudio" &&
      !IsoriX.getOption("dont_ask")) {
    cat("Hit <Return> for next plot")
    readline()
  }
  return(NULL)
}
.CompleteArgs <- function(fn) {
  ## This function should not be called by the user but is itself called by other functions.
  ## For every argument of `fn` whose *default* is a call to list(), it merges
  ## the (possibly partial) list supplied by the caller into the full default
  ## list, so unspecified list elements keep their default values.
  ## It works by side effect: the completed lists are assigned back into the
  ## caller's evaluation frame (the body of `fn`), and NULL is returned.
  env <- parent.frame()  # the evaluation frame of fn, where its arguments live
  args <- formals(fn)
  for (arg.name in names(args)) {
    if (is.call(arg <- args[[arg.name]])) {
      if (arg[1] == "list()") {  # only process arguments defaulting to list(...)
        arg.input <- mget(names(args), envir = env)[[arg.name]]  # value actually supplied
        arg.full <- eval(formals(fn)[[arg.name]])  # the full default list
        arg.full.updated <- utils::modifyList(arg.full, arg.input)  # caller-supplied elements win
        assign(arg.name, arg.full.updated, envir = env)
      }
    }
  }
  return(NULL)
}
.BuildAdditionalLayers <- function(x, sources, calib, borders, mask, mask2 = NULL) {
  ## This function should not be called by the user but is itself called by other functions.
  ## It builds the additional (optional) layers overlaid on plots: source
  ## locations, calibration points, country borders and up to two masks.
  ## Any element that should not be drawn becomes an empty latticeExtra::layer().
  ##
  ## NOTE: latticeExtra::layer() quotes its first argument; the expression is
  ## only evaluated at plotting time, with the names used inside it (pt, b, m,
  ## sp.points, sp.polygons) supplied through the `data` list of each call.
  ## layer for sources
  if (!sources$draw) {
    sources.layer <- latticeExtra::layer()
  } else {
    sources.layer <- latticeExtra::layer(sp::sp.points(sources,
                                                       col = pt$col,
                                                       cex = pt$cex,
                                                       pch = pt$pch,
                                                       lwd = pt$lwd
                                                       ),
                                         data = list(sources = x$sp.points$sources,
                                                     pt = sources,
                                                     sp.points = sp::sp.points
                                                     )
                                         )
  }
  ## layer for calibration points (calib may be NULL, or present but not drawn)
  if (is.null(calib)) {
    calib.layer <- latticeExtra::layer()
  } else {
    if (!calib$draw) {
      calib.layer <- latticeExtra::layer()
    } else {
      calib.layer <- latticeExtra::layer(sp::sp.points(calib, col = pt$col,
                                                       cex = pt$cex,
                                                       pch = pt$pch,
                                                       lwd = pt$lwd
                                                       ),
                                         data = list(calib = x$sp.points$calibs,
                                                     pt = calib,
                                                     sp.points = sp::sp.points
                                                     )
                                         )
    }
  }
  ## layer for country borders
  if (is.null(borders$borders)) {
    borders.layer <- latticeExtra::layer()
  } else {
    borders.layer <- latticeExtra::layer(sp::sp.polygons(b$borders,
                                                         lwd = b$lwd,
                                                         col = b$col,
                                                         fill = "transparent"
                                                         ),
                                         data = list(b = borders,
                                                     sp.polygons = sp::sp.polygons
                                                     )
                                         )
  }
  ## layer for the first mask
  if (is.null(mask$mask)) {
    mask.layer <- latticeExtra::layer()
  } else {
    mask.layer <- latticeExtra::layer(sp::sp.polygons(m$mask,
                                                      fill = m$fill,
                                                      col = m$col,
                                                      lwd = m$lwd
                                                      ),
                                      data = list(m = mask,
                                                  sp.polygons = sp::sp.polygons
                                                  )
                                      )
  }
  ## layer for the second, optional mask; the quoted expression still reads
  ## m$..., but here `data` binds m to mask2
  if (is.null(mask2$mask)) {
    mask2.layer <- latticeExtra::layer()
  } else {
    mask2.layer <- latticeExtra::layer(sp::sp.polygons(m$mask,
                                                       fill = m$fill,
                                                       col = m$col,
                                                       lwd = m$lwd
                                                       ),
                                       data = list(m = mask2,
                                                   sp.polygons = sp::sp.polygons
                                                   )
                                       )
  }
  out <- list(sources.layer = sources.layer,
              calib.layer = calib.layer,
              borders.layer = borders.layer,
              mask.layer = mask.layer,
              mask2.layer = mask2.layer
              )
  ## tweak to please the R CMD check code-checking procedure: pt, b and m only
  ## appear inside quoted layer() expressions, so these dummy assignments
  ## silence "no visible binding" notes
  b <- m <- pt <- NULL
  return(out)
}
.converts_months_to_numbers <- function(x) {
  ## This function should not be called by the user but is itself called by other functions.
  ## It converts English month names (abbreviated or not) into month numbers;
  ## month numbers (quoted or not) are passed through unchanged, and anything
  ## else becomes NA with a warning.
  ## Example: .converts_months_to_numbers(c("January", "Feb", 3, "April", "Toto"))
  ##
  ## Fix: vapply() replaces sapply() so the return type is always a numeric
  ## vector (sapply() returns an empty *list* for zero-length input), and the
  ## failure branch returns NA_real_ to keep that type stable.
  vapply(x, function(elt) {
    res <- match(tolower(elt), tolower(month.abb)) ## deals with abbreviations
    if (is.na(res)) {
      res <- match(tolower(elt), tolower(month.name)) ## deals with full names
    }
    if (is.na(res)) { ## not a month name: keep the raw value for the checks below
      res <- elt
    }
    if (res %in% paste(1:12)) { ## accept month numbers, quoted or not
      res <- as.numeric(res)
    }
    if (is.numeric(res)) {
      return(as.numeric(res)) ## as.numeric(): match() yields integer, vapply needs double
    }
    warning("some months are NA after the conversion in integers check your data!")
    NA_real_ ## final output is not a number
  }, numeric(1))
}
|
#' Get column metadata
#'
#' \code{adm_column_metadata} returns details of all columns in a SQL Server table.
#'
#' @param database \code{string}. The database to get metadata data for.
#' @param server \code{string}. The server holding the database.
#' @param table \code{string}. The table to get metadata data for.
#'
#' @return \code{dataframe}
#'
#' @examples
#'
#' \dontrun{
#' adm_column_metadata(database = "DatabaseName", server = "ServerName", table = "TableName")
#' }
#'
#' @export
adm_column_metadata <- function(database, server, table) {
  ## Open the connection and guarantee it is released even if the query fails:
  ## the original only called dbDisconnect() after a successful dbGetQuery(),
  ## leaking the connection on any error.
  connection <- admStructuredData:::adm_create_connection(database = database, server = server)
  on.exit(DBI::dbDisconnect(connection), add = TRUE)
  ## NOTE(review): `database` and `table` are interpolated into the T-SQL
  ## below without quoting/escaping, so callers must pass trusted identifiers.
  ## The batch enumerates the table's columns, then cursors over them filling
  ## temp table #T1 with NULL count, distinct count and (for non-bit columns)
  ## min/max, and finally returns one row per column. The PRINT inside the
  ## loop is server-side debug output and is kept unchanged for parity.
  column_query <- paste0("SET NOCOUNT ON;
                         DECLARE @table_catalog nvarchar(128) = '", database, "',
                                 @table_schema nvarchar(128) = 'dbo',
                                 @table_name nvarchar(128) = '", table, "';
                         DECLARE @sql_statement nvarchar(2000),
                                 @param_definition nvarchar(500),
                                 @column_name nvarchar(128),
                                 @data_type nvarchar(128),
                                 @null_count int,
                                 @distinct_values int,
                                 @minimum_value nvarchar(225),
                                 @maximum_value nvarchar(225);
                         DROP TABLE IF EXISTS #T1;
                         CREATE TABLE #T1 (ColumnName nvarchar(128),
                                           DataType nvarchar(128),
                                           NullCount int,
                                           DistinctValues int,
                                           MinimumValue nvarchar(255),
                                           MaximumValue nvarchar(255));
                         INSERT INTO #T1 (ColumnName, DataType)
                         SELECT COLUMN_NAME,
                                REPLACE(CONCAT(DATA_TYPE, '(', CHARACTER_MAXIMUM_LENGTH, ')', '(', DATETIME_PRECISION, ')'), '()', '')
                         FROM INFORMATION_SCHEMA.COLUMNS
                         WHERE TABLE_CATALOG = @table_catalog
                         AND TABLE_SCHEMA = @table_schema
                         AND TABLE_NAME = @table_name;
                         DECLARE column_cursor CURSOR
                         FOR SELECT ColumnName, DataType FROM #T1;
                         OPEN column_cursor;
                         FETCH NEXT FROM column_cursor
                         INTO @column_name, @data_type;
                         WHILE @@FETCH_STATUS = 0
                         BEGIN
                           SET @sql_statement =
                             CONCAT(N'SET @null_countOUT =
                                        (SELECT COUNT(*)
                                        FROM [', @table_catalog, '].[', @table_schema, '].[', @table_name, ']
                                        WHERE ', @column_name, ' IS NULL)
                                      SET @distinct_valuesOUT =
                                        (SELECT COUNT(DISTINCT(', @column_name, '))
                                        FROM [', @table_catalog, '].[', @table_schema, '].[', @table_name, ']
                                        WHERE ', @column_name, ' IS NOT NULL) ')
                           IF (@data_type != 'bit')
                           BEGIN
                             SET @sql_statement =
                               CONCAT(@sql_statement,
                                      'SET @minimum_valueOUT =
                                         CAST((SELECT MIN(', @column_name, ')
                                         FROM [', @table_catalog, '].[', @table_schema, '].[', @table_name, ']
                                         WHERE ', @column_name, ' IS NOT NULL)
                                         AS nvarchar(225))
                                       SET @maximum_valueOUT =
                                         CAST((SELECT MAX(', @column_name, ')
                                         FROM [', @table_catalog, '].[', @table_schema, '].[', @table_name, ']
                                         WHERE ', @column_name, ' IS NOT NULL)
                                         AS nvarchar(225))')
                           END
                           ELSE
                           BEGIN
                             SET @sql_statement =
                               CONCAT(@sql_statement,
                                      'SET @minimum_valueOUT = NULL
                                       SET @maximum_valueOUT = NULL');
                           END
                           print(@sql_statement)
                           SET @param_definition = N'@null_countOUT int OUTPUT,
                                                     @distinct_valuesOUT int OUTPUT,
                                                     @minimum_valueOUT nvarchar(255) OUTPUT,
                                                     @maximum_valueOUT nvarchar(255) OUTPUT';
                           EXECUTE sp_executesql @sql_statement,
                                                 @param_definition,
                                                 @null_countOUT = @null_count OUTPUT,
                                                 @distinct_valuesOUT = @distinct_values OUTPUT,
                                                 @minimum_valueOUT = @minimum_value OUTPUT,
                                                 @maximum_valueOUT = @maximum_value OUTPUT;
                           UPDATE #T1
                           SET NullCount = @null_count,
                               DistinctValues = @distinct_values,
                               MinimumValue = @minimum_value,
                               MaximumValue = @maximum_value
                           WHERE ColumnName = @column_name;
                           FETCH NEXT FROM column_cursor
                           INTO @column_name, @data_type;
                         END
                         CLOSE column_cursor;
                         DEALLOCATE column_cursor;
                         SELECT * FROM #T1;
                         DROP TABLE #T1;")
  column_metadata <- DBI::dbGetQuery(connection, column_query)
  return(column_metadata)
}
| /R/adm_column_metadata.R | permissive | thomascrines/admStructuredData | R | false | false | 5,705 | r | #' Get column metadata
#'
#' \code{adm_column_metadata} returns details of all columns in a SQL Server table.
#'
#' @param database \code{string}. The database to get metadata data for.
#' @param server \code{string}. The server holding the database.
#' @param table \code{string}. The table to get metadata data for.
#'
#' @return \code{dataframe}
#'
#' @examples
#'
#' \dontrun{
#' adm_column_metadata(database = "DatabaseName", server = "ServerName", table = "TableName")
#' }
#'
#' @export
adm_column_metadata <- function(database, server, table) {
  ## Open the connection and guarantee it is released even if the query fails:
  ## the original only called dbDisconnect() after a successful dbGetQuery(),
  ## leaking the connection on any error.
  connection <- admStructuredData:::adm_create_connection(database = database, server = server)
  on.exit(DBI::dbDisconnect(connection), add = TRUE)
  ## NOTE(review): `database` and `table` are interpolated into the T-SQL
  ## below without quoting/escaping, so callers must pass trusted identifiers.
  ## The batch enumerates the table's columns, then cursors over them filling
  ## temp table #T1 with NULL count, distinct count and (for non-bit columns)
  ## min/max, and finally returns one row per column. The PRINT inside the
  ## loop is server-side debug output and is kept unchanged for parity.
  column_query <- paste0("SET NOCOUNT ON;
                         DECLARE @table_catalog nvarchar(128) = '", database, "',
                                 @table_schema nvarchar(128) = 'dbo',
                                 @table_name nvarchar(128) = '", table, "';
                         DECLARE @sql_statement nvarchar(2000),
                                 @param_definition nvarchar(500),
                                 @column_name nvarchar(128),
                                 @data_type nvarchar(128),
                                 @null_count int,
                                 @distinct_values int,
                                 @minimum_value nvarchar(225),
                                 @maximum_value nvarchar(225);
                         DROP TABLE IF EXISTS #T1;
                         CREATE TABLE #T1 (ColumnName nvarchar(128),
                                           DataType nvarchar(128),
                                           NullCount int,
                                           DistinctValues int,
                                           MinimumValue nvarchar(255),
                                           MaximumValue nvarchar(255));
                         INSERT INTO #T1 (ColumnName, DataType)
                         SELECT COLUMN_NAME,
                                REPLACE(CONCAT(DATA_TYPE, '(', CHARACTER_MAXIMUM_LENGTH, ')', '(', DATETIME_PRECISION, ')'), '()', '')
                         FROM INFORMATION_SCHEMA.COLUMNS
                         WHERE TABLE_CATALOG = @table_catalog
                         AND TABLE_SCHEMA = @table_schema
                         AND TABLE_NAME = @table_name;
                         DECLARE column_cursor CURSOR
                         FOR SELECT ColumnName, DataType FROM #T1;
                         OPEN column_cursor;
                         FETCH NEXT FROM column_cursor
                         INTO @column_name, @data_type;
                         WHILE @@FETCH_STATUS = 0
                         BEGIN
                           SET @sql_statement =
                             CONCAT(N'SET @null_countOUT =
                                        (SELECT COUNT(*)
                                        FROM [', @table_catalog, '].[', @table_schema, '].[', @table_name, ']
                                        WHERE ', @column_name, ' IS NULL)
                                      SET @distinct_valuesOUT =
                                        (SELECT COUNT(DISTINCT(', @column_name, '))
                                        FROM [', @table_catalog, '].[', @table_schema, '].[', @table_name, ']
                                        WHERE ', @column_name, ' IS NOT NULL) ')
                           IF (@data_type != 'bit')
                           BEGIN
                             SET @sql_statement =
                               CONCAT(@sql_statement,
                                      'SET @minimum_valueOUT =
                                         CAST((SELECT MIN(', @column_name, ')
                                         FROM [', @table_catalog, '].[', @table_schema, '].[', @table_name, ']
                                         WHERE ', @column_name, ' IS NOT NULL)
                                         AS nvarchar(225))
                                       SET @maximum_valueOUT =
                                         CAST((SELECT MAX(', @column_name, ')
                                         FROM [', @table_catalog, '].[', @table_schema, '].[', @table_name, ']
                                         WHERE ', @column_name, ' IS NOT NULL)
                                         AS nvarchar(225))')
                           END
                           ELSE
                           BEGIN
                             SET @sql_statement =
                               CONCAT(@sql_statement,
                                      'SET @minimum_valueOUT = NULL
                                       SET @maximum_valueOUT = NULL');
                           END
                           print(@sql_statement)
                           SET @param_definition = N'@null_countOUT int OUTPUT,
                                                     @distinct_valuesOUT int OUTPUT,
                                                     @minimum_valueOUT nvarchar(255) OUTPUT,
                                                     @maximum_valueOUT nvarchar(255) OUTPUT';
                           EXECUTE sp_executesql @sql_statement,
                                                 @param_definition,
                                                 @null_countOUT = @null_count OUTPUT,
                                                 @distinct_valuesOUT = @distinct_values OUTPUT,
                                                 @minimum_valueOUT = @minimum_value OUTPUT,
                                                 @maximum_valueOUT = @maximum_value OUTPUT;
                           UPDATE #T1
                           SET NullCount = @null_count,
                               DistinctValues = @distinct_values,
                               MinimumValue = @minimum_value,
                               MaximumValue = @maximum_value
                           WHERE ColumnName = @column_name;
                           FETCH NEXT FROM column_cursor
                           INTO @column_name, @data_type;
                         END
                         CLOSE column_cursor;
                         DEALLOCATE column_cursor;
                         SELECT * FROM #T1;
                         DROP TABLE #T1;")
  column_metadata <- DBI::dbGetQuery(connection, column_query)
  return(column_metadata)
}
|
library(shiny)
library(shinydashboard)
library(data.table)
library(DT)
library(ggplot2)
library(shinycssloaders)
library(shinydashboardPlus)
library(shinyWidgets)
library(leaflet)
library(rjson)
library(htmltools)
library(leaflet.minicharts)
library(echarts4r)
library(sparkline)
library(shinyBS)
library(shiny.i18n)
library(countup)
source(file = "01_Settings/Path.R", local = T, encoding = "UTF-8")
source(file = "02_Utils/Functions.R", local = T, encoding = "UTF-8")
source(file = "02_Utils/mapNameMap.R", local = T, encoding = "UTF-8")
source(file = "02_Utils/ConfirmedPyramidData.R", local = T, encoding = "UTF-8")
source(file = paste0(COMPONENT_PATH, "Notification.R"), local = T, encoding = "UTF-8")
source(file = paste0(PAGE_PATH, "Main/Utils/ValueBox.R"), local = T, encoding = "UTF-8")
source(file = paste0(COMPONENT_PATH, "/Main/NewsList.ui.R"), local = T, encoding = "UTF-8")
source(file = paste0(COMPONENT_PATH, "/Main/SymptomsProgression.ui.R"), local = T, encoding = "UTF-8")
source(file = paste0(COMPONENT_PATH, "/Main/ComfirmedPyramid.ui.R"), local = T, encoding = "UTF-8")
source(file = paste0(COMPONENT_PATH, "/Main/Tendency.ui.R"), local = T, encoding = "UTF-8")
# ====
# Data loading
# ====
i18n <- suppressWarnings(Translator$new(translation_json_path = "www/lang/translation.json"))
i18n$set_translation_language("ja")
languageSetting <- ifelse(length(i18n$translation_language) == 0, "ja", i18n$translation_language)
# Load the map source (GeoJSON of Japan used by Echarts)
japanMap <- jsonlite::read_json(paste0(DATA_PATH, "Echarts/japan.json"))
# TODO: this conversion should be done locally (in preprocessing), not here
japanMap$features <- japanMap$features %>%
  purrr::map(function(x){
    x$properties$name <- convertRegionName(x$properties$nam_ja, languageSetting)
    return(x)
  })
byDate <- fread(paste0(DATA_PATH, "byDate.csv"), header = T)
byDate[is.na(byDate)] <- 0
byDate$date <- lapply(byDate[, 1], function(x) {
  as.Date(as.character(x), format = "%Y%m%d")
})
# Data for the map view
mapData <- fread(paste0(DATA_PATH, "result.map.csv"), header = T)
# Deaths data
death <- fread(paste0(DATA_PATH, "death.csv"))
death[is.na(death)] <- 0
# Activity-history (movement) data
activity <- rjson::fromJSON(file = paste0(DATA_PATH, "caseMap.json"), unexpected.escape = "error")
# Longitude/latitude data
position <- fread(paste0(DATA_PATH, "position.csv"))
# MHLW per-prefecture summary data
detailByRegion <- fread(paste0(DATA_PATH, "detailByRegion.csv"))
detailByRegion[, 都道府県名 := gsub("県|府", "", 都道府県名)]
detailByRegion[, 都道府県名 := gsub("東京都", "東京", 都道府県名)]
detailByRegion[, 日付 := as.Date(as.character(日付), "%Y%m%d")]
# App statistics
# statics <- fromJSON(file = 'https://stg.covid-2019.live/ncov-static/stats.json',
#                     unexpected.escape = 'error')
# Domestic daily report
domesticDailyReport <- fread(paste0(DATA_PATH, "domesticDailyReport.csv"))
domesticDailyReport$date <- as.Date(as.character(domesticDailyReport$date), "%Y%m%d")
setnafill(domesticDailyReport, type = "locf")
# Charter-flight daily report
flightDailyReport <- fread(paste0(DATA_PATH, "flightDailyReport.csv"))
flightDailyReport$date <- as.Date(as.character(flightDailyReport$date), "%Y%m%d")
setnafill(flightDailyReport, type = "locf")
# Airport-quarantine daily report
airportDailyReport <- fread(paste0(DATA_PATH, "airportDailyReport.csv"))
airportDailyReport$date <- as.Date(as.character(airportDailyReport$date), "%Y%m%d")
setnafill(airportDailyReport, type = "locf")
# Cruise-ship daily report
shipDailyReport <- fread(paste0(DATA_PATH, "shipDailyReport.csv"))
shipDailyReport$date <- as.Date(as.character(shipDailyReport$date), "%Y%m%d")
setnafill(shipDailyReport, type = "locf")
# From 2020-04-22 onward, discharge and death counts are split into
# preliminary and confirmed figures; this table handles that split
confirmingData <- fread(paste0(DATA_PATH, "confirmingData.csv"))
confirmingData$date <- as.Date(as.character(confirmingData$date), "%Y%m%d")
# Daily report summary
dailyReport <- fread(paste0(DATA_PATH, "resultDailyReport.csv"))
dailyReport$date <- as.Date(dailyReport$date, "%Y-%m-%d")
setnafill(dailyReport, type = "locf")
# Call-center daily report
callCenterDailyReport <- fread(paste0(DATA_PATH, "MHLW/callCenter.csv"))
callCenterDailyReport$date <- as.Date(as.character(callCenterDailyReport$date), "%Y%m%d")
pcrByRegion <- fread(file = paste0(DATA_PATH, "MHLW/pcrByRegion.csv"))
pcrByRegion[, 日付 := as.Date(as.character(日付), "%Y%m%d")]
# UI wording (text) data
lang <- fread(paste0(DATA_PATH, "lang.csv"))
langCode <- "ja"
# TODO: language-switching feature
# languageSet <- c('ja', 'cn')
# names(languageSet) <- c(lang[[langCode]][25], lang[[langCode]][26])
mhlwSummaryPath <- paste0(DATA_PATH, "/MHLW/summary.csv")
mhlwSummary <- fread(file = mhlwSummaryPath)
mhlwSummary$日付 <- as.Date(as.character(mhlwSummary$日付), "%Y%m%d")
mhlwSummary <- mhlwSummary[order(都道府県名, 日付)]
setnafill(mhlwSummary, type = "locf", cols = c("陽性者", "退院者", "検査人数"))
# ==== Basic aggregation of cumulative totals ====
# Confirmed cases
TOTAL_DOMESITC <- sum(byDate[, c(2:48)]) # PCR-positive cases in Japan (excluding cruise-ship-related cases)
TOTAL_OFFICER <- sum(byDate$検疫職員) # PCR-positive quarantine officers related to the cruise ship
TOTAL_FLIGHT <- sum(byDate$チャーター便) # PCR-positive cases from the charter flights
TOTAL_WITHIN <- TOTAL_DOMESITC + TOTAL_OFFICER + TOTAL_FLIGHT # PCR-positive domestic cases
TOTAL_SHIP <- sum(byDate$クルーズ船) # PCR-positive cases on the cruise ship
TOTAL_JAPAN <- TOTAL_WITHIN + TOTAL_SHIP + sum(byDate$伊客船) # PCR-positive cases on Japanese territory
CONFIRMED_PIE_DATA <- data.table(
  category = c(
    lang[[langCode]][4], # domestic cases
    lang[[langCode]][35], # cruise ship
    lang[[langCode]][36] # charter flights
  ),
  value = c(TOTAL_DOMESITC + TOTAL_OFFICER, TOTAL_SHIP, TOTAL_FLIGHT)
)
# Discharged patients
DISCHARGE_WITHIN <- getFinalAndDiff(domesticDailyReport$discharge)
DISCHARGE_FLIGHT <- getFinalAndDiff(flightDailyReport$discharge)
DISCHARGE_SHIP <- getFinalAndDiff(shipDailyReport$discharge)
DISCHARGE_AIRPORT <- getFinalAndDiff(airportDailyReport$discharge)
CURED_PIE_DATA <- data.table(
  category = c(
    lang[[langCode]][4], # domestic cases
    lang[[langCode]][36], # charter flights (asymptomatic)
    lang[[langCode]][35], # cruise ship
    "空港検疫"
  ),
  value = c(
    DISCHARGE_WITHIN$final,
    DISCHARGE_FLIGHT$final,
    DISCHARGE_SHIP$final,
    DISCHARGE_AIRPORT$final
  ),
  diff = c(
    DISCHARGE_WITHIN$diff,
    DISCHARGE_FLIGHT$diff,
    DISCHARGE_SHIP$diff,
    DISCHARGE_AIRPORT$diff
  )
)
DISCHARGE_TOTAL <- sum(CURED_PIE_DATA$value)
DISCHARGE_TOTAL_NO_SHIP <- DISCHARGE_TOTAL - DISCHARGE_SHIP$final
DISCHARGE_DIFF <- sum(CURED_PIE_DATA$diff)
DISCHARGE_DIFF_NO_SHIP <- DISCHARGE_DIFF - DISCHARGE_SHIP$diff
# Deaths
DEATH_DOMESITC <- sum(death[, c(2:48)]) # deaths in Japan (excluding cruise-ship-related cases)
DEATH_OFFICER <- sum(death[]$検疫職員) # deaths among quarantine officers related to the cruise ship
DEATH_FLIGHT <- sum(death$チャーター便) # deaths from the charter flights
DEATH_WITHIN <- DEATH_DOMESITC + DEATH_OFFICER + DEATH_FLIGHT # domestic deaths
DEATH_SHIP <- sum(death$クルーズ船) # deaths on the cruise ship
DEATH_JAPAN <- DEATH_WITHIN + DEATH_SHIP # deaths on Japanese territory
DEATH_PIE_DATA <- data.table(
  category = c(
    lang[[langCode]][4], # domestic cases
    lang[[langCode]][35], # cruise ship
    lang[[langCode]][36] # charter flights
  ),
  value = c(DEATH_DOMESITC + DEATH_OFFICER, DEATH_SHIP, DEATH_FLIGHT)
)
# ==== Today's data ====
# Confirmed
byDateToday <- byDate[nrow(byDate),] # today's delta dataset
todayConfirmed <- unlist(as.list(byDateToday[, 2:ncol(byDateToday)]))
HAS_TODAY_CONFIRMED <- todayConfirmed[todayConfirmed > 0] # prefecture categories that changed today
deathToday <- death[nrow(byDate),] # today's delta dataset
todayDeath <- unlist(as.list(deathToday[, 2:ncol(deathToday)]))
HAS_TODAY_DEATH <- todayDeath[todayDeath > 0] # prefecture categories that changed today
# ==== Basic aggregation of day-over-day changes (deltas) ====
# Confirmed
TOTAL_DOMESITC_DIFF <- sum(byDateToday[, c(2:48)]) # new PCR-positive cases in Japan (excluding cruise-ship-related cases)
TOTAL_OFFICER_DIFF <- sum(byDateToday[]$検疫職員) # new PCR-positive quarantine officers related to the cruise ship
TOTAL_FLIGHT_DIFF <- sum(byDateToday$チャーター便) # new PCR-positive cases from the charter flights
TOTAL_WITHIN_DIFF <- TOTAL_DOMESITC_DIFF + TOTAL_OFFICER_DIFF + TOTAL_FLIGHT_DIFF # new PCR-positive domestic cases
TOTAL_SHIP_DIFF <- sum(byDateToday$クルーズ船) # new PCR-positive cases on the cruise ship
TOTAL_JAPAN_DIFF <- TOTAL_WITHIN_DIFF + TOTAL_SHIP_DIFF + sum(byDateToday[, 52]) # new PCR-positive cases on Japanese territory
# Deaths
DEATH_DOMESITC_DIFF <- sum(deathToday[, c(2:48)]) # new deaths in Japan (excluding cruise-ship-related cases)
DEATH_OFFICER_DIFF <- sum(deathToday[]$検疫職員) # new deaths among quarantine officers related to the cruise ship
DEATH_FLIGHT_DIFF <- sum(deathToday$チャーター便) # new deaths from the charter flights
DEATH_WITHIN_DIFF <- DEATH_DOMESITC_DIFF + DEATH_OFFICER_DIFF + DEATH_FLIGHT_DIFF # new domestic deaths
DEATH_SHIP_DIFF <- sum(deathToday$クルーズ船) # new deaths on the cruise ship
DEATH_JAPAN_DIFF <- DEATH_WITHIN_DIFF + DEATH_SHIP_DIFF # new deaths on Japanese territory
# Item names shown in the region selector
regionName <- colSums(byDate[, 2:ncol(byDate)])
regionNamePref <- regionName[1:47]
regionNamePref <- sort(regionNamePref[regionNamePref > 0], decreasing = T)
regionNamePrefName <- paste0(sapply(names(regionNamePref), i18n$t), " (", regionNamePref, ")")
regionNameOther <- regionName[48:length(regionName)]
regionNameOtherName <- paste0(convertRegionName(names(regionNameOther), languageSetting), " (", regionNameOther, ")")
regionName <- c("都道府県", names(regionNameOther), names(regionNamePref))
defaultSelectedRegionName <- regionName[1:3]
names(regionName) <- c(
  paste0(i18n$t("都道府県合計"), " (", TOTAL_DOMESITC, ")"),
  regionNameOtherName,
  regionNamePrefName
)
regionName <- as.list(regionName)
news <- fread(paste0(DATA_PATH, "mhlw_houdou.csv"))
provinceCode <- fread(paste0(DATA_PATH, "prefectures.csv"))
provinceSelector <- provinceCode$id
provinceSelector <- as.list(provinceSelector)
names(provinceSelector) <- sapply(provinceCode$`name-ja`, i18n$t)
# Detailed data (kenmo-nezumi source)
positiveDetail <- fread(paste0(DATA_PATH, "positiveDetail.csv"))
# Number of confirmed cases at city level
confirmedCityTreemapData <- fread(paste0(DATA_PATH, "Kenmo/confirmedNumberByCity.", languageSetting, ".csv"))
# Detailed case data
detail <- fread(paste0(DATA_PATH, "detail.csv"),
  colClasses = list(
    numeric = c(1, 2),
    factor = c(5, 6, 9:11)
  )
)
detailColName <- colnames(detail)
detail[, comfirmedDay := as.Date(as.character(detail$comfirmedDay), format = "%Y%m%d")]
detail[, link := as.integer(detail$link)]
detailMerged <- merge(detail, news, by.x = "link", by.y = "id")
detailMerged[, link := paste0("<a href='", detailMerged$link.y, "'>", detailMerged$title, "</a>")]
detail <- detailMerged[, detailColName, with = F][order(id)]
# Summary of the detailed case data
detailSummary <- detail[, .(count = .N), by = .(gender, age)]
# Load the symptom-progression table
processData <- fread(input = paste0(DATA_PATH, "resultProcessData.csv"))
# ====
# Constants
# ====
# Update time of the real-time infection counts
UPDATE_DATETIME <- file.info(paste0(DATA_PATH, "byDate.csv"))$mtime
latestUpdateDuration <- difftime(Sys.time(), UPDATE_DATETIME)
LATEST_UPDATE <- paste0(
  round(latestUpdateDuration[[1]], 0),
  convertUnit2Ja(latestUpdateDuration)
)
RECOVERED_FILE_UPDATE_DATETIME <- file.info(paste0(DATA_PATH, "recovered.csv"))$mtime
DEATH_FILE_UPDATE_DATETIME <- file.info(paste0(DATA_PATH, "death.csv"))$mtime
UPDATE_DATE <- as.Date(UPDATE_DATETIME)
DEATH_UPDATE_DATE <- as.Date(DEATH_FILE_UPDATE_DATETIME)
# TODO: naming this vector in place does not work for some reason, so it is put here for now
showOption <- c("showShip", "showFlight")
names(showOption) <- c(lang[[langCode]][35], lang[[langCode]][36])
twitterUrl <- paste0(
  "https://twitter.com/intent/tweet?text=新型コロナウイルス感染速報:国内の感染確認",
  TOTAL_JAPAN,
  "人(クルーズ船含む)、",
  byDate$date[nrow(byDate)],
  "の現時点で新たに",
  TOTAL_JAPAN_DIFF,
  "人が確認されました。&hashtags=",
  "新型コロナウイルス,新型コロナウイルス速報",
  "&url=https://covid-2019.live/"
)
# Colour palette (light/middle/dark shades per hue)
lightRed <- "#F56954"
middleRed <- "#DD4B39"
darkRed <- "#B03C2D"
lightYellow <- "#F8BF76"
middleYellow <- "#F39C11"
darkYellow <- "#DB8B0A"
lightGreen <- "#00A65A"
middleGreen <- "#01A65A"
darkGreen <- "#088448"
superDarkGreen <- "#046938"
lightNavy <- "#5A6E82"
middelNavy <- "#001F3F"
darkNavy <- "#001934"
lightGrey <- "#F5F5F5"
lightBlue <- "#7BD6F5"
middleBlue <- "#00C0EF"
darkBlue <- "#00A7D0"
options(spinner.color = middleRed)
# Shared reactive store for the app; NULL slots are filled lazily at runtime
GLOBAL_VALUE <- reactiveValues(
  signateDetail = NULL,
  signateDetail.ageGenderData = fread(file = paste0(DATA_PATH, "Generated/genderAgeData.csv")),
  signateLink = NULL,
  signatePlace = fread(file = paste0(DATA_PATH, "resultSignatePlace.csv")),
  ECMO = list(
    ecmoUising = NULL,
    ecmo = NULL,
    artificialRespirators = NULL
  ),
  Academic = list(
    onset_to_confirmed_map = NULL
  ),
  hokkaidoData = NULL,
  hokkaidoDataUpdateTime = NULL,
  hokkaidoPatients = NULL,
  Aomori = list(
    summary = NULL,
    patient = NULL,
    callCenter = NULL,
    contact = NULL,
    updateTime = NULL
  ),
  Kanagawa = list(
    summary = NULL,
    updateTime = NULL
  ),
  Fukuoka = list(
    summary = NULL,
    updateTime = NULL,
    patients = NULL,
    nodes = NULL,
    edges = NULL,
    call = NULL
  ),
  World = NULL,
  Google = list(
    mobility = NULL,
    table = NULL
  )
)
| /global.R | permissive | yuew08/2019-ncov-japan | R | false | false | 14,002 | r | library(shiny)
library(shinydashboard)
library(data.table)
library(DT)
library(ggplot2)
library(shinycssloaders)
library(shinydashboardPlus)
library(shinyWidgets)
library(leaflet)
library(rjson)
library(htmltools)
library(leaflet.minicharts)
library(echarts4r)
library(sparkline)
library(shinyBS)
library(shiny.i18n)
library(countup)
source(file = "01_Settings/Path.R", local = T, encoding = "UTF-8")
source(file = "02_Utils/Functions.R", local = T, encoding = "UTF-8")
source(file = "02_Utils/mapNameMap.R", local = T, encoding = "UTF-8")
source(file = "02_Utils/ConfirmedPyramidData.R", local = T, encoding = "UTF-8")
source(file = paste0(COMPONENT_PATH, "Notification.R"), local = T, encoding = "UTF-8")
source(file = paste0(PAGE_PATH, "Main/Utils/ValueBox.R"), local = T, encoding = "UTF-8")
source(file = paste0(COMPONENT_PATH, "/Main/NewsList.ui.R"), local = T, encoding = "UTF-8")
source(file = paste0(COMPONENT_PATH, "/Main/SymptomsProgression.ui.R"), local = T, encoding = "UTF-8")
source(file = paste0(COMPONENT_PATH, "/Main/ComfirmedPyramid.ui.R"), local = T, encoding = "UTF-8")
source(file = paste0(COMPONENT_PATH, "/Main/Tendency.ui.R"), local = T, encoding = "UTF-8")
# ====
# データの読み込み
# ====
i18n <- suppressWarnings(Translator$new(translation_json_path = "www/lang/translation.json"))
i18n$set_translation_language("ja")
languageSetting <- ifelse(length(i18n$translation_language) == 0, "ja", i18n$translation_language)
# マップのソースの読み込み
japanMap <- jsonlite::read_json(paste0(DATA_PATH, "Echarts/japan.json"))
# TODO ここで変換せず、ローカルで変換すべき
japanMap$features <- japanMap$features %>%
purrr::map(function(x){
x$properties$name <- convertRegionName(x$properties$nam_ja, languageSetting)
return(x)
})
byDate <- fread(paste0(DATA_PATH, "byDate.csv"), header = T)
byDate[is.na(byDate)] <- 0
byDate$date <- lapply(byDate[, 1], function(x) {
as.Date(as.character(x), format = "%Y%m%d")
})
# マップ用データ読み込み
mapData <- fread(paste0(DATA_PATH, "result.map.csv"), header = T)
# 死亡データ
death <- fread(paste0(DATA_PATH, "death.csv"))
death[is.na(death)] <- 0
# 行動歴データ
activity <- rjson::fromJSON(file = paste0(DATA_PATH, "caseMap.json"), unexpected.escape = "error")
# 経度緯度データ
position <- fread(paste0(DATA_PATH, "position.csv"))
# 厚労省の都道府県まとめデータ
detailByRegion <- fread(paste0(DATA_PATH, "detailByRegion.csv"))
detailByRegion[, 都道府県名 := gsub("県|府", "", 都道府県名)]
detailByRegion[, 都道府県名 := gsub("東京都", "東京", 都道府県名)]
detailByRegion[, 日付 := as.Date(as.character(日付), "%Y%m%d")]
# アプリ情報
# statics <- fromJSON(file = 'https://stg.covid-2019.live/ncov-static/stats.json',
# unexpected.escape = 'error')
# 国内の日報
domesticDailyReport <- fread(paste0(DATA_PATH, "domesticDailyReport.csv"))
domesticDailyReport$date <- as.Date(as.character(domesticDailyReport$date), "%Y%m%d")
setnafill(domesticDailyReport, type = "locf")
# チャーター便の日報
flightDailyReport <- fread(paste0(DATA_PATH, "flightDailyReport.csv"))
flightDailyReport$date <- as.Date(as.character(flightDailyReport$date), "%Y%m%d")
setnafill(flightDailyReport, type = "locf")
# 空港検疫の日報
airportDailyReport <- fread(paste0(DATA_PATH, "airportDailyReport.csv"))
airportDailyReport$date <- as.Date(as.character(airportDailyReport$date), "%Y%m%d")
setnafill(airportDailyReport, type = "locf")
# クルーズ船の日報
shipDailyReport <- fread(paste0(DATA_PATH, "shipDailyReport.csv"))
shipDailyReport$date <- as.Date(as.character(shipDailyReport$date), "%Y%m%d")
setnafill(shipDailyReport, type = "locf")
# 2020-04-22時点から、退院者数と死亡者数が速報値と確定値に分かれているので、それの対応
confirmingData <- fread(paste0(DATA_PATH, "confirmingData.csv"))
confirmingData$date <- as.Date(as.character(confirmingData$date), "%Y%m%d")
# 日報まとめ
dailyReport <- fread(paste0(DATA_PATH, "resultDailyReport.csv"))
dailyReport$date <- as.Date(dailyReport$date, "%Y-%m-%d")
setnafill(dailyReport, type = "locf")
# コールセンター
callCenterDailyReport <- fread(paste0(DATA_PATH, "MHLW/callCenter.csv"))
callCenterDailyReport$date <- as.Date(as.character(callCenterDailyReport$date), "%Y%m%d")
pcrByRegion <- fread(file = paste0(DATA_PATH, "MHLW/pcrByRegion.csv"))
pcrByRegion[, 日付 := as.Date(as.character(日付), "%Y%m%d")]
# 文言データ
lang <- fread(paste0(DATA_PATH, "lang.csv"))
langCode <- "ja"
# TODO 言語切り替え機能
# languageSet <- c('ja', 'cn')
# names(languageSet) <- c(lang[[langCode]][25], lang[[langCode]][26])
mhlwSummaryPath <- paste0(DATA_PATH, "/MHLW/summary.csv")
mhlwSummary <- fread(file = mhlwSummaryPath)
mhlwSummary$日付 <- as.Date(as.character(mhlwSummary$日付), "%Y%m%d")
mhlwSummary <- mhlwSummary[order(都道府県名, 日付)]
setnafill(mhlwSummary, type = "locf", cols = c("陽性者", "退院者", "検査人数"))
# ====総数基礎集計====
# 確認
TOTAL_DOMESITC <- sum(byDate[, c(2:48)]) # 日本国内事例のPCR陽性数(クルーズ船関連者除く)
TOTAL_OFFICER <- sum(byDate$検疫職員) # クルーズ船関連の職員のPCR陽性数
TOTAL_FLIGHT <- sum(byDate$チャーター便) # チャーター便のPCR陽性数
TOTAL_WITHIN <- TOTAL_DOMESITC + TOTAL_OFFICER + TOTAL_FLIGHT # 日本国内事例のPCR陽性数
TOTAL_SHIP <- sum(byDate$クルーズ船) # クルーズ船のPCR陽性数
TOTAL_JAPAN <- TOTAL_WITHIN + TOTAL_SHIP + sum(byDate$伊客船) # 日本領土内のPCR陽性数
CONFIRMED_PIE_DATA <- data.table(
category = c(
lang[[langCode]][4], # 国内事例
lang[[langCode]][35], # クルーズ船
lang[[langCode]][36] # チャーター便
),
value = c(TOTAL_DOMESITC + TOTAL_OFFICER, TOTAL_SHIP, TOTAL_FLIGHT)
)
# 退院
DISCHARGE_WITHIN <- getFinalAndDiff(domesticDailyReport$discharge)
DISCHARGE_FLIGHT <- getFinalAndDiff(flightDailyReport$discharge)
DISCHARGE_SHIP <- getFinalAndDiff(shipDailyReport$discharge)
DISCHARGE_AIRPORT <- getFinalAndDiff(airportDailyReport$discharge)
CURED_PIE_DATA <- data.table(
category = c(
lang[[langCode]][4], # 国内事例
lang[[langCode]][36], # チャーター便 (無症状)
lang[[langCode]][35], # クルーズ船
"空港検疫"
),
value = c(
DISCHARGE_WITHIN$final,
DISCHARGE_FLIGHT$final,
DISCHARGE_SHIP$final,
DISCHARGE_AIRPORT$final
),
diff = c(
DISCHARGE_WITHIN$diff,
DISCHARGE_FLIGHT$diff,
DISCHARGE_SHIP$diff,
DISCHARGE_AIRPORT$diff
)
)
DISCHARGE_TOTAL <- sum(CURED_PIE_DATA$value)
DISCHARGE_TOTAL_NO_SHIP <- DISCHARGE_TOTAL - DISCHARGE_SHIP$final
DISCHARGE_DIFF <- sum(CURED_PIE_DATA$diff)
DISCHARGE_DIFF_NO_SHIP <- DISCHARGE_DIFF - DISCHARGE_SHIP$diff
# 死亡
DEATH_DOMESITC <- sum(death[, c(2:48)]) # 日本国内事例の死亡数(クルーズ船関連者除く)
DEATH_OFFICER <- sum(death[]$検疫職員) # クルーズ船関連の職員の死亡数
DEATH_FLIGHT <- sum(death$チャーター便) # チャーター便の死亡数
DEATH_WITHIN <- DEATH_DOMESITC + DEATH_OFFICER + DEATH_FLIGHT # 日本国内事例の死亡数
DEATH_SHIP <- sum(death$クルーズ船) # クルーズ船の死亡数
DEATH_JAPAN <- DEATH_WITHIN + DEATH_SHIP # 日本領土内の死亡数
DEATH_PIE_DATA <- data.table(
category = c(
lang[[langCode]][4], # 国内事例
lang[[langCode]][35], # クルーズ船
lang[[langCode]][36] # チャーター便
),
value = c(DEATH_DOMESITC + DEATH_OFFICER, DEATH_SHIP, DEATH_FLIGHT)
)
# ====本日のデータ====
# 確認
byDateToday <- byDate[nrow(byDate),] # 本日の差分データセット
todayConfirmed <- unlist(as.list(byDateToday[, 2:ncol(byDateToday)]))
HAS_TODAY_CONFIRMED <- todayConfirmed[todayConfirmed > 0] # 本日変化がある都道府県分類
deathToday <- death[nrow(byDate),] # 本日の差分データセット
todayDeath <- unlist(as.list(deathToday[, 2:ncol(deathToday)]))
HAS_TODAY_DEATH <- todayDeath[todayDeath > 0] # 本日変化がある都道府県分類
# ====前日比べの基礎集計(差分)====
# 確認
TOTAL_DOMESITC_DIFF <- sum(byDateToday[, c(2:48)]) # 日本国内事例のPCR陽性数(クルーズ船関連者除く)
TOTAL_OFFICER_DIFF <- sum(byDateToday[]$検疫職員) # クルーズ船関連の職員のPCR陽性数
TOTAL_FLIGHT_DIFF <- sum(byDateToday$チャーター便) # チャーター便のPCR陽性数
TOTAL_WITHIN_DIFF <- TOTAL_DOMESITC_DIFF + TOTAL_OFFICER_DIFF + TOTAL_FLIGHT_DIFF # 日本国内事例のPCR陽性数
TOTAL_SHIP_DIFF <- sum(byDateToday$クルーズ船) # クルーズ船のPCR陽性数
TOTAL_JAPAN_DIFF <- TOTAL_WITHIN_DIFF + TOTAL_SHIP_DIFF + sum(byDateToday[, 52]) # 日本領土内のPCR陽性数
# 死亡
DEATH_DOMESITC_DIFF <- sum(deathToday[, c(2:48)]) # 日本国内事例のPCR陽性数(クルーズ船関連者除く)
DEATH_OFFICER_DIFF <- sum(deathToday[]$検疫職員) # クルーズ船関連の職員のPCR陽性数
DEATH_FLIGHT_DIFF <- sum(deathToday$チャーター便) # チャーター便のPCR陽性数
DEATH_WITHIN_DIFF <- DEATH_DOMESITC_DIFF + DEATH_OFFICER_DIFF + DEATH_FLIGHT_DIFF # 日本国内事例のPCR陽性数
DEATH_SHIP_DIFF <- sum(deathToday$クルーズ船) # クルーズ船のPCR陽性数
DEATH_JAPAN_DIFF <- DEATH_WITHIN_DIFF + DEATH_SHIP_DIFF # 日本領土内のPCR陽性数
# 地域選択に表示する項目名
regionName <- colSums(byDate[, 2:ncol(byDate)])
regionNamePref <- regionName[1:47]
regionNamePref <- sort(regionNamePref[regionNamePref > 0], decreasing = T)
regionNamePrefName <- paste0(sapply(names(regionNamePref), i18n$t), " (", regionNamePref, ")")
regionNameOther <- regionName[48:length(regionName)]
regionNameOtherName <- paste0(convertRegionName(names(regionNameOther), languageSetting), " (", regionNameOther, ")")
regionName <- c("都道府県", names(regionNameOther), names(regionNamePref))
defaultSelectedRegionName <- regionName[1:3]
names(regionName) <- c(
paste0(i18n$t("都道府県合計"), " (", TOTAL_DOMESITC, ")"),
regionNameOtherName,
regionNamePrefName
)
regionName <- as.list(regionName)
news <- fread(paste0(DATA_PATH, "mhlw_houdou.csv"))
provinceCode <- fread(paste0(DATA_PATH, "prefectures.csv"))
provinceSelector <- provinceCode$id
provinceSelector <- as.list(provinceSelector)
names(provinceSelector) <- sapply(provinceCode$`name-ja`, i18n$t)
# 詳細データけんもねずみ
positiveDetail <- fread(paste0(DATA_PATH, "positiveDetail.csv"))
# 市レベルの感染者数
confirmedCityTreemapData <- fread(paste0(DATA_PATH, "Kenmo/confirmedNumberByCity.", languageSetting, ".csv"))
# 詳細データ
detail <- fread(paste0(DATA_PATH, "detail.csv"),
colClasses = list(
numeric = c(1, 2),
factor = c(5, 6, 9:11)
)
)
detailColName <- colnames(detail)
detail[, comfirmedDay := as.Date(as.character(detail$comfirmedDay), format = "%Y%m%d")]
detail[, link := as.integer(detail$link)]
detailMerged <- merge(detail, news, by.x = "link", by.y = "id")
detailMerged[, link := paste0("<a href='", detailMerged$link.y, "'>", detailMerged$title, "</a>")]
detail <- detailMerged[, detailColName, with = F][order(id)]
# 詳細データのサマリー
detailSummary <- detail[, .(count = .N), by = .(gender, age)]
# 症状の進行テーブルを読み込む
processData <- fread(input = paste0(DATA_PATH, "resultProcessData.csv"))
# ====
# 定数設定
# ====
# Real-time感染数の更新時間
UPDATE_DATETIME <- file.info(paste0(DATA_PATH, "byDate.csv"))$mtime
latestUpdateDuration <- difftime(Sys.time(), UPDATE_DATETIME)
LATEST_UPDATE <- paste0(
round(latestUpdateDuration[[1]], 0),
convertUnit2Ja(latestUpdateDuration)
)
RECOVERED_FILE_UPDATE_DATETIME <- file.info(paste0(DATA_PATH, "recovered.csv"))$mtime
DEATH_FILE_UPDATE_DATETIME <- file.info(paste0(DATA_PATH, "death.csv"))$mtime
UPDATE_DATE <- as.Date(UPDATE_DATETIME)
DEATH_UPDATE_DATE <- as.Date(DEATH_FILE_UPDATE_DATETIME)
# TODO Vectorのネーミングなぜかうまくいかないのでとりあえずここに置く
showOption <- c("showShip", "showFlight")
names(showOption) <- c(lang[[langCode]][35], lang[[langCode]][36])
twitterUrl <- paste0(
"https://twitter.com/intent/tweet?text=新型コロナウイルス感染速報:国内の感染確認",
TOTAL_JAPAN,
"人(クルーズ船含む)、",
byDate$date[nrow(byDate)],
"の現時点で新たに",
TOTAL_JAPAN_DIFF,
"人が確認されました。&hashtags=",
"新型コロナウイルス,新型コロナウイルス速報",
"&url=https://covid-2019.live/"
)
lightRed <- "#F56954"
middleRed <- "#DD4B39"
darkRed <- "#B03C2D"
lightYellow <- "#F8BF76"
middleYellow <- "#F39C11"
darkYellow <- "#DB8B0A"
lightGreen <- "#00A65A"
middleGreen <- "#01A65A"
darkGreen <- "#088448"
superDarkGreen <- "#046938"
lightNavy <- "#5A6E82"
middelNavy <- "#001F3F"
darkNavy <- "#001934"
lightGrey <- "#F5F5F5"
lightBlue <- "#7BD6F5"
middleBlue <- "#00C0EF"
darkBlue <- "#00A7D0"
options(spinner.color = middleRed)
GLOBAL_VALUE <- reactiveValues(
signateDetail = NULL,
signateDetail.ageGenderData = fread(file = paste0(DATA_PATH, "Generated/genderAgeData.csv")),
signateLink = NULL,
signatePlace = fread(file = paste0(DATA_PATH, "resultSignatePlace.csv")),
ECMO = list(
ecmoUising = NULL,
ecmo = NULL,
artificialRespirators = NULL
),
Academic = list(
onset_to_confirmed_map = NULL
),
hokkaidoData = NULL,
hokkaidoDataUpdateTime = NULL,
hokkaidoPatients = NULL,
Aomori = list(
summary = NULL,
patient = NULL,
callCenter = NULL,
contact = NULL,
updateTime = NULL
),
Kanagawa = list(
summary = NULL,
updateTime = NULL
),
Fukuoka = list(
summary = NULL,
updateTime = NULL,
patients = NULL,
nodes = NULL,
edges = NULL,
call = NULL
),
World = NULL,
Google = list(
mobility = NULL,
table = NULL
)
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/recurrent.marginal.R
\name{simRecurrentII}
\alias{simRecurrentII}
\title{Simulation of recurrent events data based on cumulative hazards II}
\usage{
simRecurrentII(
n,
cumhaz,
cumhaz2,
death.cumhaz = NULL,
r1 = NULL,
r2 = NULL,
rd = NULL,
rc = NULL,
gap.time = FALSE,
max.recurrent = 100,
dhaz = NULL,
haz2 = NULL,
dependence = 0,
var.z = 0.22,
cor.mat = NULL,
cens = NULL,
...
)
}
\arguments{
\item{n}{number of id's}
\item{cumhaz}{cumulative hazard of recurrent events}
\item{cumhaz2}{cumulative hazard of recurrent events of type 2}
\item{death.cumhaz}{cumulative hazard of death}
\item{r1}{potential relative risk adjustment of rate}
\item{r2}{potential relative risk adjustment of rate}
\item{rd}{potential relative risk adjustment of rate}
\item{rc}{potential relative risk adjustment of rate}
\item{gap.time}{if true simulates gap-times with specified cumulative hazard}
\item{max.recurrent}{limits number recurrent events to 100}
\item{dhaz}{rate for death hazard if it is extended to time-range of first event}
\item{haz2}{rate of second cause if it is extended to time-range of first event}
\item{dependence}{0:independence; 1:all share same random effect with variance var.z; 2:random effect exp(normal) with correlation structure from cor.mat; 3:additive gamma distributed random effects, z1= (z11+ z12)/2 such that mean is 1 , z2= (z11^cor.mat(1,2)+ z13)/2, z3= (z12^(cor.mat(2,3)+z13^cor.mat(1,3))/2, with z11 z12 z13 are gamma with mean and variance 1 , first random effect is z1 and for N1 second random effect is z2 and for N2 third random effect is for death}
\item{var.z}{variance of random effects}
\item{cor.mat}{correlation matrix for var.z variance of random effects}
\item{cens}{rate of censoring exponential distribution}
\item{...}{Additional arguments to lower level funtions}
}
\description{
Simulation of recurrent events data based on cumulative hazards
}
\details{
Must give hazard of death and two recurrent events. Possible with two
event types and their dependence can be specified but the two recurrent events need
to share random effect. Based on drawing the from cumhaz and cumhaz2 and
taking the first event rather
the cumulative and then distributing it out. Key advantage of this is that
there is more flexibility wrt random effects
}
\examples{
########################################
## getting some rates to mimick
########################################
data(base1cumhaz)
data(base4cumhaz)
data(drcumhaz)
dr <- drcumhaz
base1 <- base1cumhaz
base4 <- base4cumhaz
cor.mat <- corM <- rbind(c(1.0, 0.6, 0.9), c(0.6, 1.0, 0.5), c(0.9, 0.5, 1.0))
######################################################################
### simulating simple model that mimicks data
### now with two event types and second type has same rate as death rate
######################################################################
set.seed(100)
rr <- simRecurrentII(1000,base1,base4,death.cumhaz=dr)
dtable(rr,~death+status)
par(mfrow=c(2,2))
showfitsim(causes=2,rr,dr,base1,base4)
}
\author{
Thomas Scheike
}
| /man/simRecurrentII.Rd | no_license | cran/mets | R | false | true | 3,168 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/recurrent.marginal.R
\name{simRecurrentII}
\alias{simRecurrentII}
\title{Simulation of recurrent events data based on cumulative hazards II}
\usage{
simRecurrentII(
n,
cumhaz,
cumhaz2,
death.cumhaz = NULL,
r1 = NULL,
r2 = NULL,
rd = NULL,
rc = NULL,
gap.time = FALSE,
max.recurrent = 100,
dhaz = NULL,
haz2 = NULL,
dependence = 0,
var.z = 0.22,
cor.mat = NULL,
cens = NULL,
...
)
}
\arguments{
\item{n}{number of id's}
\item{cumhaz}{cumulative hazard of recurrent events}
\item{cumhaz2}{cumulative hazard of recurrent events of type 2}
\item{death.cumhaz}{cumulative hazard of death}
\item{r1}{potential relative risk adjustment of rate}
\item{r2}{potential relative risk adjustment of rate}
\item{rd}{potential relative risk adjustment of rate}
\item{rc}{potential relative risk adjustment of rate}
\item{gap.time}{if true simulates gap-times with specified cumulative hazard}
\item{max.recurrent}{limits number recurrent events to 100}
\item{dhaz}{rate for death hazard if it is extended to time-range of first event}
\item{haz2}{rate of second cause if it is extended to time-range of first event}
\item{dependence}{0:independence; 1:all share same random effect with variance var.z; 2:random effect exp(normal) with correlation structure from cor.mat; 3:additive gamma distributed random effects, z1= (z11+ z12)/2 such that mean is 1 , z2= (z11^cor.mat(1,2)+ z13)/2, z3= (z12^(cor.mat(2,3)+z13^cor.mat(1,3))/2, with z11 z12 z13 are gamma with mean and variance 1 , first random effect is z1 and for N1 second random effect is z2 and for N2 third random effect is for death}
\item{var.z}{variance of random effects}
\item{cor.mat}{correlation matrix for var.z variance of random effects}
\item{cens}{rate of censoring exponential distribution}
\item{...}{Additional arguments to lower level funtions}
}
\description{
Simulation of recurrent events data based on cumulative hazards
}
\details{
Must give hazard of death and two recurrent events. Possible with two
event types and their dependence can be specified but the two recurrent events need
to share random effect. Based on drawing the from cumhaz and cumhaz2 and
taking the first event rather
the cumulative and then distributing it out. Key advantage of this is that
there is more flexibility wrt random effects
}
\examples{
########################################
## getting some rates to mimick
########################################
data(base1cumhaz)
data(base4cumhaz)
data(drcumhaz)
dr <- drcumhaz
base1 <- base1cumhaz
base4 <- base4cumhaz
cor.mat <- corM <- rbind(c(1.0, 0.6, 0.9), c(0.6, 1.0, 0.5), c(0.9, 0.5, 1.0))
######################################################################
### simulating simple model that mimicks data
### now with two event types and second type has same rate as death rate
######################################################################
set.seed(100)
rr <- simRecurrentII(1000,base1,base4,death.cumhaz=dr)
dtable(rr,~death+status)
par(mfrow=c(2,2))
showfitsim(causes=2,rr,dr,base1,base4)
}
\author{
Thomas Scheike
}
|
../../../../System/Library/Frameworks/ApplicationServices.framework/Frameworks/AE.framework/Headers/AEObjects.r | /MacOSX10.1.5.sdk/Developer/Headers/CFMCarbon/AE/AEObjects.r | no_license | alexey-lysiuk/macos-sdk | R | false | false | 111 | r | ../../../../System/Library/Frameworks/ApplicationServices.framework/Frameworks/AE.framework/Headers/AEObjects.r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/messaging-package.R
\docType{package}
\name{messaging-package}
\alias{messaging}
\alias{messaging-package}
\title{messaging: Conveniently Issue Messages, Warnings, and Errors}
\description{
Provides tools for creating and issuing nicely-formatted
text within R diagnostic messages and those messages given during
warnings and errors. The formatting of the messages can be
customized using templating features. Issues with singular and
plural forms can be handled through specialized syntax.
}
\seealso{
Useful links:
\itemize{
\item \url{https://github.com/rich-iannone/messaging}
\item Report bugs at \url{https://github.com/rich-iannone/messaging/issues}
}
}
\author{
\strong{Maintainer}: Richard Iannone \email{riannone@me.com} (0000-0003-3925-190X)
}
\keyword{internal}
| /man/messaging-package.Rd | permissive | aespar21/messaging | R | false | true | 858 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/messaging-package.R
\docType{package}
\name{messaging-package}
\alias{messaging}
\alias{messaging-package}
\title{messaging: Conveniently Issue Messages, Warnings, and Errors}
\description{
Provides tools for creating and issuing nicely-formatted
text within R diagnostic messages and those messages given during
warnings and errors. The formatting of the messages can be
customized using templating features. Issues with singular and
plural forms can be handled through specialized syntax.
}
\seealso{
Useful links:
\itemize{
\item \url{https://github.com/rich-iannone/messaging}
\item Report bugs at \url{https://github.com/rich-iannone/messaging/issues}
}
}
\author{
\strong{Maintainer}: Richard Iannone \email{riannone@me.com} (0000-0003-3925-190X)
}
\keyword{internal}
|
rd <- function(i, data) {
d <- read.table(paste("/home/andrew/projects/personal/particl/data/diff-a-",
i, ".txt", sep=""), header=FALSE)
diff <- d[[1]]
l <- length(diff)
m <- mean(diff)
s <- sd(diff)
x <- data$x
x <- append(x, (sort(diff)-m)/s)
y <- data$y
y <- append(y, (1:l)/l)
n <- data$n
n <- append(n, rep(i, l))
list(n=n, x=x, y=y)
}
data <- list(n=c(), x=c(), y=c())
ns = c(5,6,7,8,9,10,15,20,25,30)
for (n in ns) data <- rd(n, data)
data <- data.frame(n=data$n, x=data$x, y=data$y)
require(ggplot2)
pdf("/home/andrew/projects/personal/particl/doc/ecdf-1.pdf",
width=3, height=3)
qplot(x, y, data=data, geom="step", colour=factor(n),
xlab='Normalized difference', ylab='ECDF')+
scale_colour_grey(end=0.7,start=0,name='size')
dev.off()
pdf("/home/andrew/projects/personal/particl/doc/ecdf-2.pdf",
width=3, height=3)
qplot(x, y, data=data, geom="step", colour=factor(n),
xlim=c(-3, -2), ylim=c(0, 0.03),
xlab='Normalized difference', ylab='ECDF')+
scale_colour_grey(end=0.7,start=0,name='size')
dev.off()
pdf("/home/andrew/projects/personal/particl/doc/ecdf-3.pdf",
width=3, height=3)
qplot(x, y, data=data, geom="step", colour=factor(n),
xlim=c(-3, -2.8), ylim=c(0.0025, 0.0045),
xlab='Normalized difference', ylab='ECDF')+
scale_colour_grey(end=0.7,start=0,name='size')
dev.off()
| /analysis/histogram.r | no_license | andrewcooke/particl | R | false | false | 1,391 | r |
rd <- function(i, data) {
d <- read.table(paste("/home/andrew/projects/personal/particl/data/diff-a-",
i, ".txt", sep=""), header=FALSE)
diff <- d[[1]]
l <- length(diff)
m <- mean(diff)
s <- sd(diff)
x <- data$x
x <- append(x, (sort(diff)-m)/s)
y <- data$y
y <- append(y, (1:l)/l)
n <- data$n
n <- append(n, rep(i, l))
list(n=n, x=x, y=y)
}
data <- list(n=c(), x=c(), y=c())
ns = c(5,6,7,8,9,10,15,20,25,30)
for (n in ns) data <- rd(n, data)
data <- data.frame(n=data$n, x=data$x, y=data$y)
require(ggplot2)
pdf("/home/andrew/projects/personal/particl/doc/ecdf-1.pdf",
width=3, height=3)
qplot(x, y, data=data, geom="step", colour=factor(n),
xlab='Normalized difference', ylab='ECDF')+
scale_colour_grey(end=0.7,start=0,name='size')
dev.off()
pdf("/home/andrew/projects/personal/particl/doc/ecdf-2.pdf",
width=3, height=3)
qplot(x, y, data=data, geom="step", colour=factor(n),
xlim=c(-3, -2), ylim=c(0, 0.03),
xlab='Normalized difference', ylab='ECDF')+
scale_colour_grey(end=0.7,start=0,name='size')
dev.off()
pdf("/home/andrew/projects/personal/particl/doc/ecdf-3.pdf",
width=3, height=3)
qplot(x, y, data=data, geom="step", colour=factor(n),
xlim=c(-3, -2.8), ylim=c(0.0025, 0.0045),
xlab='Normalized difference', ylab='ECDF')+
scale_colour_grey(end=0.7,start=0,name='size')
dev.off()
|
library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/AvgRank/lymphoid.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.5,family="gaussian",standardize=FALSE)
sink('./lymphoid_060.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/AvgRank/lymphoid/lymphoid_060.R | no_license | esbgkannan/QSMART | R | false | false | 350 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/AvgRank/lymphoid.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.5,family="gaussian",standardize=FALSE)
sink('./lymphoid_060.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
## Computing the inverse of a matrix
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y){
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(solve) m <<- solve
getinverse <- function() m
list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed), then cacheSolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
if(!is.null(m)){
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
| /cachematrix.R | no_license | eunbeecho/ProgrammingAssignment2 | R | false | false | 1,054 | r | ## Computing the inverse of a matrix
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y){
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(solve) m <<- solve
getinverse <- function() m
list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed), then cacheSolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
if(!is.null(m)){
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
|
#' Convert an annotations list to a pairwise data frame
#'
#' Convert a list of gene sets (for instance, where each entry corresponds to a
#' complex and contains all the protein subunits of that complex) into a data
#' frame enumerating all of the possible pairs within each list item.
#'
#' @param ann a list of gene sets, e.g. as returned by \link{as_annotation_list}
#' @return a data frame with two columns, \code{'protein_A'} and
#' \code{'protein_B'}, containing all unique pairs of protiens found within
#' the same gene set
#'
#' @importFrom tidyr crossing
#' @importFrom dplyr filter distinct
#' @importFrom purrr map_dfr
#' @importFrom magrittr %>%
#'
#' @export
to_pairwise_df = function(ann) {
ann %>%
map_dfr(~ tidyr::crossing(protein_A = ., protein_B = .),
.id = 'complex') %>%
filter(protein_A < protein_B) %>%
distinct(protein_A, protein_B)
}
| /R/to_pairwise_df.R | permissive | fosterlab/CFTK | R | false | false | 897 | r | #' Convert an annotations list to a pairwise data frame
#'
#' Convert a list of gene sets (for instance, where each entry corresponds to a
#' complex and contains all the protein subunits of that complex) into a data
#' frame enumerating all of the possible pairs within each list item.
#'
#' @param ann a list of gene sets, e.g. as returned by \link{as_annotation_list}
#' @return a data frame with two columns, \code{'protein_A'} and
#' \code{'protein_B'}, containing all unique pairs of protiens found within
#' the same gene set
#'
#' @importFrom tidyr crossing
#' @importFrom dplyr filter distinct
#' @importFrom purrr map_dfr
#' @importFrom magrittr %>%
#'
#' @export
to_pairwise_df = function(ann) {
ann %>%
map_dfr(~ tidyr::crossing(protein_A = ., protein_B = .),
.id = 'complex') %>%
filter(protein_A < protein_B) %>%
distinct(protein_A, protein_B)
}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{spend.EstimateParameters}
\alias{spend.EstimateParameters}
\title{Spend Parameter Estimation}
\usage{
spend.EstimateParameters(m.x.vector, x.vector, par.start = c(1, 1, 1),
max.param.value = 10000)
}
\arguments{
\item{m.x.vector}{a vector with each customer's average observed
transaction value in the calibration period.}
\item{x.vector}{a vector with the number of transactions each
customer made in the calibration period. Must correspond to
m.x.vector in terms of ordering of customers and length of the
vector.}
\item{par.start}{initial vector of gamma-gamma parameters: p, q,
and gamma, in that order. p is the shape parameter for each
transaction. The scale parameter for each transaction is
distributed across customers according to a gamma distribution
with parameters q (shape) and gamma (scale).}
\item{max.param.value}{the upper bound on parameters.}
}
\value{
Vector of estimated parameters.
}
\description{
Estimates parameters for the gamma-gamma spend model.
}
\details{
The best-fitting parameters are determined using the \code{\link{spend.LL}}
function. The sum of the log-likelihood for each customer (for a
set of parameters) is maximized in order to estimate parameters.
A set of starting parameters must be provided for this method. If
no parameters are provided, (1,1,1,1) is used as a default. It may
be necessary to run the estimation from multiple starting points
to ensure that it converges. To compare the log-likelihoods of
different parameters, use \code{\link{spend.LL}}.
The lower bound on the parameters to be estimated is always zero,
since gamma-gamma parameters cannot be negative. The upper bound
can be set with the max.param.value parameter.
}
\examples{
data(cdnowSummary)
ave.spend <- cdnowSummary$m.x
tot.trans <- cdnowSummary$cbs[,"x"]
# There will be many warnings due to the zeroes that are
# included in the data above. To avoid them, use the following:
# (see example for spend.LL)
ave.spend <- ave.spend[which(tot.trans > 0)]
tot.trans <- tot.trans[which(tot.trans > 0)]
# We will let the spend function use default starting parameters
spend.EstimateParameters(ave.spend, tot.trans)
}
| /man/spend.EstimateParameters.Rd | no_license | jamespaul007/BTYD | R | false | false | 2,204 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{spend.EstimateParameters}
\alias{spend.EstimateParameters}
\title{Spend Parameter Estimation}
\usage{
spend.EstimateParameters(m.x.vector, x.vector, par.start = c(1, 1, 1),
max.param.value = 10000)
}
\arguments{
\item{m.x.vector}{a vector with each customer's average observed
transaction value in the calibration period.}
\item{x.vector}{a vector with the number of transactions each
customer made in the calibration period. Must correspond to
m.x.vector in terms of ordering of customers and length of the
vector.}
\item{par.start}{initial vector of gamma-gamma parameters: p, q,
and gamma, in that order. p is the shape parameter for each
transaction. The scale parameter for each transaction is
distributed across customers according to a gamma distribution
with parameters q (shape) and gamma (scale).}
\item{max.param.value}{the upper bound on parameters.}
}
\value{
Vector of estimated parameters.
}
\description{
Estimates parameters for the gamma-gamma spend model.
}
\details{
The best-fitting parameters are determined using the \code{\link{spend.LL}}
function. The sum of the log-likelihood for each customer (for a
set of parameters) is maximized in order to estimate parameters.
A set of starting parameters must be provided for this method. If
no parameters are provided, (1,1,1) is used as a default. It may
be necessary to run the estimation from multiple starting points
to ensure that it converges. To compare the log-likelihoods of
different parameters, use \code{\link{spend.LL}}.
The lower bound on the parameters to be estimated is always zero,
since gamma-gamma parameters cannot be negative. The upper bound
can be set with the max.param.value parameter.
}
\examples{
data(cdnowSummary)
ave.spend <- cdnowSummary$m.x
tot.trans <- cdnowSummary$cbs[,"x"]
# There will be many warnings due to the zeroes that are
# included in the data above. To avoid them, use the following:
# (see example for spend.LL)
ave.spend <- ave.spend[which(tot.trans > 0)]
tot.trans <- tot.trans[which(tot.trans > 0)]
# We will let the spend function use default starting parameters
spend.EstimateParameters(ave.spend, tot.trans)
}
|
## Load shared helpers (test_mcmc, system.in.dir, ...) shipped inside the
## installed nimble package, then declare the testthat context.
source(system.file(file.path('tests', 'test_utils.R'), package = 'nimble'))
context("Testing of default MCMC")
### TODO: add in the special cases for dipper
## Dead template (never executed): how to run the same 'air' example through
## JAGS via R2jags for a side-by-side comparison of posterior results.
if(FALSE) { # template for running JAGS for comparison
require(R2jags)
dir = system.file(file.path('classic-bugs', 'vol2', 'air'), package = 'nimble')
data = new.env(); inits = new.env()
source(file.path(dir, 'air-data.R'), data)
source(file.path(dir, 'air-init.R'), inits)
data = as.list(data)
inits = list(as.list(inits))
out1 <- jags(data = data, inits = inits,
parameters.to.save = c('X','theta'), n.chains = 1,
n.iter = 100000, n.burnin = 50000, n.thin = 1, model.file = file.path(dir, 'air.bug'),
DIC = FALSE, jags.seed = 0)
out <- as.mcmc(out1)
}
## Dead convenience loop (never executed): batch-run several classic BUGS
## examples through test_mcmc in one call.
if(FALSE) {
allModels <- c(# vol1
'blocker', 'bones', 'dyes', 'equiv', 'line', 'oxford', 'pump', 'rats', 'seeds',
# 'bones',
# vol2
'dugongs')
sapply(allModels, test_mcmc, numItsC = 1000)
}
### Beginning of actual tests
## Each call builds, compiles, and runs nimble's default MCMC configuration on
## one classic BUGS example shipped with the package; resampleData = TRUE
## additionally re-simulates data to check interval coverage.  The interleaved
## '#' notes record observed behavior from past runs.
test_mcmc('blocker', numItsC = 1000, resampleData = TRUE)
# 100% coverage; looks fine
test_mcmc('bones', numItsC = 10000, resampleData = TRUE)
# 100% coverage; looks fine
test_mcmc('dyes', numItsC = 1000, resampleData = TRUE)
# 100% coverage; looks fine
test_mcmc('equiv', numItsC = 1000, resampleData = TRUE)
# looks good
# testing: tau[2]=97.95, 198.8 ; tau[1]=102.2,55
# phi = -.008,.052; pi = -.1805,.052
test_mcmc('line', numItsC = 1000, resampleData = TRUE)
# 100% coverage; looks fine
test_mcmc('oxford', numItsC = 1000, resampleData = TRUE)
# probably ok; seems to overcover for 'b', but 'b' in this
# parameteriz'n is a top-level node and the multiplic'n
# by sigma seems to lead to frequentist overcoverage
# similar results in JAGS
test_mcmc('pump', numItsC = 1000, resampleData = TRUE)
# 100% coverage; looks fine
test_mcmc('rats', numItsC = 1000, resampleData = TRUE)
# 93.8% coverage; looks fine and compares well to JAGS
# however in resampleData, one of the taus wildly misses
test_mcmc('seeds', numItsC = 1000, resampleData = TRUE)
# fine
test_mcmc('dugongs', numItsC = 1000, resampleData = TRUE)
# 100% coverage; looks fine
## The epil/seeds/birats variants below point test_mcmc at alternative .bug
## model files with their matching data/init scripts.
test_mcmc('epil', model = 'epil2.bug', inits = 'epil-inits.R',
data = 'epil-data.R', numItsC = 1000, resampleData = TRUE)
# looks ok
test_mcmc('epil', model = 'epil3.bug', inits = 'epil-inits.R',
data = 'epil-data.R', numItsC = 1000, resampleData = TRUE)
# looks ok
test_mcmc('seeds', model = 'seedsuni.bug', inits = 'seeds-init.R',
data = 'seeds-data.R', numItsC = 1000, resampleData = TRUE)
# looks fine - intervals for b's seem a bit large but probably ok
# particularly since default seeds.bug seems fine
# results compared to JAGS look fine
test_mcmc('seeds', model = 'seedssig.bug', inits = 'seeds-init.R',
data = 'seeds-data.R', numItsC = 1000, resampleData = TRUE)
# looks fine - intervals for b's seem a bit large but probably ok
test_mcmc('birats', model = 'birats1.bug', inits = 'birats-inits.R',
data = 'birats-data.R', numItsC = 1000, resampleData = TRUE)
# seems fine
test_mcmc('birats', model = 'birats3.bug', inits = 'birats-inits.R',
data = 'birats-data.R', numItsC = 1000, resampleData = TRUE)
# seems fine
test_mcmc('birats', model = 'birats2.bug', inits = 'birats-inits.R',
data = 'birats-data.R', numItsC = 1000, resampleData = TRUE)
# looks fine now that values() returns in order
# result changes as of v0.4 because in v0.3-1 'omega.beta' was found
# as both topNode and nontopNode and was being simulated into
# incorrectly in resampleData - this affected values further downstream
test_mcmc('ice', model = 'icear.bug', inits = 'ice-inits.R',
data = 'ice-data.R', numItsC = 1000, resampleData = TRUE)
# resampleData gives very large magnitude betas because beta[1],beta[2] are not
# actually topNodes because of (weak) dependence on tau, and
# are simulated from their priors to have large magnitude values
# rework ice example so that beta[1] and beta[2] will be top nodes
##system(paste("sed 's/tau\\*1.0E-6/1.0E-6/g'", system.file('classic-bugs','vol2','ice','icear.bug', package = 'nimble'), ">", file.path(tempdir(), "icear.bug")))
system.in.dir(paste("sed 's/tau\\*1.0E-6/1.0E-6/g' icear.bug > ", file.path(tempdir(), "icear.bug")), dir = system.file('classic-bugs','vol2','ice', package = 'nimble'))
test_mcmc(model = file.path(tempdir(), "icear.bug"), inits = system.file('classic-bugs', 'vol2', 'ice','ice-inits.R', package = 'nimble'), data = system.file('classic-bugs', 'vol2', 'ice','ice-data.R', package = 'nimble'), numItsC = 1000, resampleData = TRUE)
# looks fine, but alpha and beta values shifted a bit (systematically) relative to JAGS results - on further inspection this is because mixing for this model is poor in both NIMBLE and JAGS - with longer runs they seem to agree (as best as one can tell given the mixing without doing a super long run)
test_mcmc('beetles', model = 'beetles-logit.bug', inits = 'beetles-inits.R',
data = 'beetles-data.R', numItsC = 1000, resampleData = TRUE)
# getting warning; deterministic model node is NA or NaN in model initialization
# weirdness with llike.sat[8] being NaN on init (actually that makes sense), and with weird lifting of RHS of llike.sat
## The next examples need small edits to the shipped .bug files before nimble
## can run them; edits are made in tempdir() copies using writeLines/file.copy
## plus shell tools run through system.in.dir.
##system.in.dir(paste0("echo 'var\nY[N,T],\ndN[N,T];' >> ", file.path(tempdir(), "leuk.bug")))
writeLines(c("var","Y[N,T],","dN[N,T];"), con = file.path(tempdir(), "leuk.bug")) ## echo doesn't seem to work on Windows
##system(paste("cat", system.file('classic-bugs','vol1','leuk','leuk.bug', package = 'nimble'), ">>", file.path(tempdir(), "leuk.bug")))
# need nimStep in data block as we no longer have step
##system(paste("sed -i -e 's/step/nimStep/g'", file.path(tempdir(), "leuk.bug")))
system.in.dir(paste("cat leuk.bug >> ", file.path(tempdir(), "leuk.bug")), dir = system.file('classic-bugs','vol1','leuk',package = 'nimble'))
# need nimStep in data block as we no longer have step
system.in.dir(paste("sed -i -e 's/step/nimStep/g'", file.path(tempdir(), "leuk.bug")))
test_mcmc(model = file.path(tempdir(), "leuk.bug"), name = 'leuk', inits = system.file('classic-bugs', 'vol1', 'leuk','leuk-init.R', package = 'nimble'), data = system.file('classic-bugs', 'vol1', 'leuk','leuk-data.R', package = 'nimble'), numItsC = 1000,
results = list(mean = list(beta = 1.58), sd = list(beta = 0.43)),
resultsTolerance = list(mean = list(beta = 0.02), sd = list(beta = 0.02)))
## salm: prepend a 'var' declaration, then append the original model file.
writeLines(paste("var","logx[doses];"), con = file.path(tempdir(), "salm.bug"))
##system.in.dir(paste0("echo 'var\nlogx[doses];' >> ", file.path(tempdir(), "salm.bug")))
##system(paste("cat", system.file('classic-bugs','vol1','salm','salm.bug', package = 'nimble'), ">>", file.path(tempdir(), "salm.bug")))
system.in.dir(paste("cat salm.bug >>", file.path(tempdir(), "salm.bug")), dir = system.file('classic-bugs','vol1','salm', package = 'nimble'))
test_mcmc(model = file.path(tempdir(), "salm.bug"), name = 'salm', inits = system.file('classic-bugs', 'vol1', 'salm','salm-init.R', package = 'nimble'), data = system.file('classic-bugs', 'vol1', 'salm','salm-data.R', package = 'nimble'), numItsC = 1000)
# looks good compared to JAGS
##system(paste("cat", system.file('classic-bugs','vol2','air','air.bug', package = 'nimble'), ">>", file.path(tempdir(), "air.bug")))
## air: rewrite mean(X) as mean(X[]) so nimble can determine sizes.
file.copy(system.file('classic-bugs','vol2','air','air.bug', package = 'nimble'), file.path(tempdir(), "air.bug"), overwrite=TRUE)
system.in.dir(paste("sed -i -e 's/mean(X)/mean(X\\[\\])/g'", file.path(tempdir(), "air.bug")))
test_mcmc(model = file.path(tempdir(), "air.bug"), name = 'air', inits = system.file('classic-bugs', 'vol2', 'air','air-inits.R', package = 'nimble'), data = system.file('classic-bugs', 'vol2', 'air','air-data.R', package = 'nimble'), numItsC = 1000)
# theta[2] posterior is a bit off from JAGS - would be worth more investigation
##system(paste("sed 's/mean(age)/mean(age\\[1:M\\])/g'", system.file('classic-bugs','vol2','jaw','jaw-linear.bug', package = 'nimble'), ">", file.path(tempdir(), "jaw-linear.bug"))) # alternative way to get size info in there
system.in.dir(paste("sed 's/mean(age)/mean(age\\[1:M\\])/g' jaw-linear.bug > ", file.path(tempdir(), "jaw-linear.bug")), dir = system.file('classic-bugs','vol2','jaw', package = 'nimble')) # alternative way to get size info in there
test_mcmc(model = file.path(tempdir(), "jaw-linear.bug"), name = 'jaw-linear', inits = system.file('classic-bugs', 'vol2', 'jaw','jaw-inits.R', package = 'nimble'), data = system.file('classic-bugs', 'vol2', 'jaw','jaw-data.R', package = 'nimble'), numItsC = 1000)
# C MCMC runs and seems fine; R MCMC fails as can't do Cholesky of 0 matrix in 2-point method
# vectorized version of jaw to try to deal with scalar/vec bug - not needed now that above works
## Dead code (never executed): BUGS-style model written as an R function, kept
## for reference alongside its hard-coded jaw data.
if(FALSE) {
model <- function() {
for (i in 1:N) {
Y[i,1:M] ~ dmnorm(mu[1:M], Omega[1:M,1:M]); # The 4 measurements for each
} # boy are multivariate normal
mu[1:M] <- beta0 * ones[1:M] + beta1 * (age[1:4] - mean(age[1:4]));
beta0.uncentred <- beta0 - beta1 * mean(age[1:4]);
beta0 ~ dnorm(0.0, 0.001);
beta1 ~ dnorm(0.0, 0.001);
Omega[1:M,1:M] ~ dwish(R[1:M,1:M], 4); # between-child variance in length at each age
#Sigma2[1:M,1:M] <- inverse(Omega[1:M,1:M]);
for (i in 1:N) {
for (j in 1:M) {
resid[i,j] <- Y[i,j] - mu[j]; # residuals
resid2[i,j] <- resid[i,j]^2; # squared residuals
}
}
RSS <- sum(resid2[1:N,1:M]); # Residual Sum of Squares
}
inits = list(beta0 = 40, beta1 = 0)
data =list(M=4,N=20, Y = matrix(c(47.8, 46.4, 46.3, 45.1, 47.6, 52.5, 51.2, 49.8, 48.1,
45, 51.2, 48.5, 52.1, 48.2, 49.6, 50.7, 47.2, 53.3, 46.2, 46.3,
48.8, 47.3, 46.8, 45.3, 48.5, 53.2, 53, 50, 50.8, 47, 51.4, 49.2,
52.8, 48.9, 50.4, 51.7, 47.7, 54.6, 47.5, 47.6, 49, 47.7, 47.8,
46.1, 48.9, 53.3, 54.3, 50.3, 52.3, 47.3, 51.6, 53, 53.7, 49.3,
51.2, 52.7, 48.4, 55.1, 48.1, 51.3, 49.7, 48.4, 48.5, 47.2, 49.3,
53.7, 54.5, 52.7, 54.4, 48.3, 51.9, 55.5, 55, 49.8, 51.8, 53.3,
49.5, 55.3, 48.4, 51.8) , 20, 4), age = c(8, 8.5, 9, 9.5),
R = matrix(c(1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1), 4 ,4),
ones = rep(1, 4))
test_mcmc(model = model, name = 'dmnorm-dwish example', data = data, inits = inits, numItsC = 1000)
}
## 'pump' again, now pinning posterior means of selected parameters to
## previously recorded values.
test_mcmc('pump', resampleData = TRUE, results = list(mean = list(
"theta[1]" = 0.06,
"theta[2]" = 0.10,
"theta[9]" = 1.58,
"theta[10]" = 1.97,
alpha = 0.73,
beta = 0.98)),
resultsTolerance = list(mean = list(
"theta[1]" = 0.01,
"theta[2]" = 0.01,
"theta[9]" = 0.05,
"theta[10]" = 0.05,
alpha = 0.1,
beta = 0.1)))
## LogProb gap: bug fixed in after v0.3
## Problem that occurred in v0.3: because of gap in logProb_a (i.e. logProb_a[2]
## is defined but logProb_a[1] is not)
## Because logProbs get scrambled, the random walk sampler would always accept,
## meaning the sd of proposal steps approaches Inf
gapCode <- nimbleCode({
a[1] <- 1
a[2] ~ dnorm(0,1)
})
test_mcmc(model = gapCode, seed = 0, numItsC = 100000,
results = list(mean = list(`a[2]` = 0) ),
resultsTolerance = list(mean = list(`a[2]` = 0.1)),
samplers = list(list(type = 'RW', target = 'a[2]'))
)
## On Windows the session exits here: compiled projects cannot yet be
## unloaded, so continuing would risk crashing R.
if(.Platform$OS.type == 'windows') {
message("Stopping tests now in Windows to avoid crashing until we can unload compiled projects")
message("To continue testing use 'mcmc2' tests")
q("no")
}
### Daniel's world's simplest MCMC demo
## Conjugate-normal chain x -> y -> z with y observed at 3; the posterior
## moments asserted below follow in closed form.
code <- nimbleCode({
x ~ dnorm(0, 2)
y ~ dnorm(x+1, 3)
z ~ dnorm(y+2, 4)
})
data = list(y = 3)
test_mcmc(model = code, name = 'very simple example', data = data, resampleData = FALSE, results = list(
mean = list(x = 6/5, z = 5),
sd = list(x = 1/sqrt(5), z = 1/2)),
resultsTolerance = list(mean = list(x = .1, z = .1),
sd = list(x = .05, z = .05)))
### basic block sampler example
## Three independent normal pairs; run with (1) default samplers, (2) an
## RW_block sampler per scalar, (3) one RW_block sampler on the vector x.
code <- nimbleCode({
for(i in 1:3) {
x[i] ~ dnorm(0, 1)
y[i] ~ dnorm(x[i], 2)
}
})
data = list(y = -1:1)
test_mcmc(model = code, name = 'basic no-block sampler', data = data, resampleData = FALSE, results = list(
mean = list(x = c(-2/3,0,2/3)),
var = list(x = rep(1/3,3))),
resultsTolerance = list(mean = list(x = rep(.1,3)),
var = list(x = rep(.05,3))))
test_mcmc(model = code, name = 'basic block sampler on scalars', data = data, resampleData = FALSE, results = list(
mean = list(x = c(-2/3,0,2/3)),
var = list(x = rep(1/3,3))),
resultsTolerance = list(mean = list(x = rep(.1,3)),
var = list(x = rep(.05,3))),
samplers = list(
list(type = 'RW_block', target = 'x[1]'),
list(type = 'RW_block', target = 'x[2]'),
list(type = 'RW_block', target = 'x[3]')
), removeAllDefaultSamplers = TRUE, numItsC = 10000)
test_mcmc(model = code, name = 'basic block sampler on vector', data = data, resampleData = FALSE, results = list(
mean = list(x = c(-2/3,0,2/3)),
var = list(x = rep(1/3,3))),
resultsTolerance = list(mean = list(x = rep(.1,3)),
var = list(x = rep(.05,3))),
samplers = list(
list(type = 'RW_block', target = 'x', control = list(adaptInterval = 500))
), numItsC = 10000)
### slice sampler example
## Slice samplers on unobserved continuous and discrete scalars; since nothing
## is observed, the posterior moments equal the known prior moments.
code <- nimbleCode({
z ~ dnorm(0, 1)
normal5_10 ~ dnorm(5, sd = 10)
beta1_1 ~ dbeta(1, 1)
beta3_5 ~ dbeta(3, 5)
binom10_p5 ~ dbin(size=10, prob=0.5)
binom20_p3 ~ dbin(size=20, prob=0.3)
})
test_mcmc(model = code, name = "slice sampler example", resampleData = FALSE, results = list(
mean = list(z = 0, "beta1_1" = 0.5, "beta3_5" = 3/(3+5),
"binom10_p5" = 10*.5, "binom20_p3" = 20*.3),
sd = list(z = 1, "beta1_1" = sqrt(1/12),
"beta3_5" = sqrt(3*5/((3+5)^2*(3+5+1))),
"binom10_p5" = sqrt(10*.5*.5),
"binom20_p3" = sqrt(20*.3*.7))),
resultsTolerance = list(
mean = list(z = 0.1, "beta1_1" = 0.5, "beta3_5" = .2,
"binom10_p5" = .25, "binom20_p3" = .25),
sd = list(z = .1, "beta1_1" = .05, "beta3_5" = .03,
"binom10_p5" = .2, "binom20_p3" = .25)),
samplers = list(list(type = 'slice', target = 'z', control = list(adaptInterval = 10)),
list(type = 'slice', target = 'normal5_10', control = list(adaptInterval = 10)),
list(type = 'slice', target = 'beta1_1', control = list(adaptInterval = 10)),
list(type = 'slice', target = 'beta3_5', control = list(adaptInterval = 10)),
list(type = 'slice', target = 'binom10_p5', control = list(adaptInterval = 10)),
list(type = 'slice', target = 'binom20_p3', control = list(adaptInterval = 10))))
### elliptical slice sampler 'ess'
set.seed(0)
## Model as a quoted expression: latent x and data y are both d-dimensional
## MVN, parameterized by precision matrices.
ESScode <- quote({
    x[1:d] ~ dmnorm(mu_x[1:d], prec = prec_x[1:d, 1:d])
    y[1:d] ~ dmnorm(x[1:d], prec = prec_y[1:d, 1:d])
})
d <- 3
## One random k x k positive-definite precision matrix.  Consumes exactly one
## rnorm(k^2) call and uses the same solve(w %*% t(w)) arithmetic as the
## original inline code, so downstream exact-sample tests are unaffected.
randPrec <- function(k) {
    w <- array(rnorm(k^2), c(k, k))
    solve(w %*% t(w))
}
mu_x <- rnorm(d)
prec_x <- randPrec(d)
prec_y <- randPrec(d)
y <- rnorm(d)
ESSconstants <- list(d = d, mu_x = mu_x, prec_x = prec_x, prec_y = prec_y)
ESSdata <- list(y = y)
ESSinits <- list(x = rep(0, d))
## Exact-sample regression: with seed 0 the ESS sampler must reproduce these
## historical draws bit-for-bit (any change in RNG use breaks this test).
test_mcmc(model = ESScode, data = c(ESSconstants, ESSdata), inits = ESSinits,
name = 'exact values of elliptical slice sampler',
seed = 0,
exactSample = list(`x[1]` = c(-0.492880566939352, -0.214539223107114, 1.79345037297218, 1.17324496091208, 2.14095077672555, 1.60417482445964, 1.94196916651627, 2.66737323347255, 2.66744178776022, 0.253966883192744), `x[2]` = c(-0.161210109217102, -0.0726534676226932, 0.338308532423757, -0.823652445515156, -0.344130712698579, -0.132642244861469, -0.0253168895009594, 0.0701624130921676, 0.0796842215444978, -0.66369112443311), `x[3]` = c(0.278627475932455, 0.0661336950029345, 0.407055002920732, 1.98761228946318, 1.0839897275519, 1.00262648370199, 0.459841485268785, 2.59229443025387, 1.83769567435409, 1.92954706515119)),
samplers = list(list(type = 'ess', target = 'x')))
test_mcmc(model = ESScode, data = c(ESSconstants, ESSdata), inits = ESSinits,
name = 'results to tolerance of elliptical slice sampler',
results = list(mean = list(x = c(1.0216463, -0.4007247, 1.1416904))),
resultsTolerance = list(mean = list(x = c(0.01, 0.01, 0.01))),
numItsC = 100000,
samplers = list(list(type = 'ess', target = 'x')))
### demo2 of check conjugacy
## Beta-binomial conjugacy: exact samples recorded for seed 0.
code <- nimbleCode({
x ~ dbeta(3, 13)
y[1] ~ dbin(x, 10)
y[2] ~ dbin(x, 20)
})
data = list(y = c(3,4))
test_mcmc(model = code, name = 'check of beta-binom conjugacy', data = data, exactSample = list(x = c(0.195510839527966, 0.332847482503424,0.247768152764931, 0.121748195439553, 0.157842271774841, 0.197566496350904, 0.216991517500577, 0.276609942874852, 0.165733872345582, 0.144695512780252)), seed = 0)
### checkConjugacy_demo3_run.R - various conjugacies
## Model exercising several conjugacy detections, including linear
## transformations of the conjugate node in dependent distributions.
code <- nimbleCode({
x ~ dgamma(1, 1) # should satisfy 'gamma' conjugacy class
a ~ dnorm(0, x) # should satisfy 'norm' conjugacy class
a2 ~ dnorm(0, tau = 3*x+0)
b ~ dpois(0+5*x)
b2 ~ dpois(1*x*1)
c ~ dgamma(1, 7*x*5)
for(i in 2:3) {
jTau[i] <- 1
jNorm[i] ~ dnorm(c * (a+3) - i, var = jTau[i])
kTauSd[i] <- 2
kLogNorm[i] ~ dlnorm(0 - a - 6*i, kTauSd[i])
}
})
sampleVals = list(x = c(3.950556165467749, 1.556947815895538, 1.598959152023738, 2.223758981790340, 2.386291653164086, 3.266282048060261, 3.064019155073057, 3.229661999356182, 1.985990552839427, 2.057249437940977),
c = c( 0.010341199485849559, 0.010341199485849559, 0.003846483017887228, 0.003846483017887228, 0.007257679932131476, 0.009680314740728335, 0.012594777095902964, 0.012594777095902964, 0.018179641351556003, 0.018179641351556003))
test_mcmc(model = code, name = 'check various conjugacies', exactSample = sampleVals, seed = 0, mcmcControl = list(scale=0.01))
### Dirichlet-multinomial conjugacy
# as of v0.4, exact numerical results here have changed because
# ddirch now sometimes returns NaN rather than -Inf (when an
# alpha is proposed to be negative) -- this changes the RNG
# sequence because NaN values result in no runif() call in decide()
# single multinomial
## rmulti/rdirch are nimble's distribution functions for simulating the data.
set.seed(0)
n <- 100
alpha <- c(10, 30, 15, 60, 1)
K <- length(alpha)
p <- c(.12, .24, .09, .54, .01)
y <- rmulti(1, n, p)
code <- function() {
y[1:K] ~ dmulti(p[1:K], n);
p[1:K] ~ ddirch(alpha[1:K]);
for(i in 1:K) {
alpha[i] ~ dgamma(.001, .001);
}
}
inits <- list(p = rep(1/K, K), alpha = rep(K, K))
data <- list(n = n, K = K, y = y)
test_mcmc(model = code, name = 'Dirichlet-multinomial example', data= data, seed = 0, numItsC = 10000,
inits = inits,
results = list(mean = list(p = p)),
resultsTolerance = list(mean = list(p = rep(.06, K))))
# bad mixing for alphas; probably explains why posterior estimates for alphas changed so much as of v 0.4
# with replication
## Same model with m = 20 replicated multinomial observations sharing alpha.
set.seed(0)
n <- 100
m <- 20
alpha <- c(10, 30, 15, 60, 1)
K <- length(alpha)
y <- p <- matrix(0, m, K)
for(i in 1:m) {
p[i, ] <- rdirch(1, alpha)
y[i, ] <- rmulti(1, n, p[i, ])
}
code <- function() {
for(i in 1:m) {
y[i, 1:K] ~ dmulti(p[i, 1:K], n);
p[i, 1:K] ~ ddirch(alpha[1:K]);
}
for(i in 1:K) {
alpha[i] ~ dgamma(.001, .001);
}
}
inits <- list(p = matrix(1/K, m, K), alpha = rep(1/K, K))
data <- list(n = n, K = K, m = m, y = y)
test_mcmc(model = code, name = 'Dirichlet-multinomial with replication', data= data, seed = 0, numItsC = 1000,
inits = inits, numItsC_results = 100000,
results = list(mean = list(p = p, alpha = alpha)),
resultsTolerance = list(mean = list(p = matrix(.05, m, K),
alpha = c(5,10,10,20,.5))))
# note alphas mix poorly (and are highly correlated),
# presumably because of cross-level dependence between
# p's and alphas. cross-level sampler would probably work well here,
# or, of course, integrating over the p's
### block sampler on MVN node
## RW_block sampler on a 3-d MVN with fixed mean and precision Q; the marginal
## variances checked below are diag(solve(Q)).
code <- nimbleCode({
mu[1] <- 10
mu[2] <- 20
mu[3] <- 30
x[1:3] ~ dmnorm(mu[1:3], prec = Q[1:3,1:3])
})
Q = matrix(c(1.0,0.2,-1.0,0.2,4.04,1.6,-1.0,1.6,10.81), nrow=3)
data = list(Q = Q)
inits = list(x = c(10, 20, 30))
test_mcmc(model = code, name = 'block sampler on multivariate node', data = data, seed = 0, numItsC = 10000,
results = list(mean = list(x = c(10,20,30)),
var = list(x = diag(solve(Q)))),
resultsTolerance = list(mean = list(x = rep(1,3)),
var = list(x = c(.1, .03, .01))),
samplers = list(
list(type = 'RW_block', target = 'x[1:3]')))
# caution: setting targetNodes='x' works but the initial end sampler is not removed because x[1:3] in targetNode in default sampler != 'x' in targetNodes passed in
## Dead exploratory code (never executed); note it uses the pre-0.4 MCMCspec
## API and calls the compiled MCMC as Cmcmc(n) rather than Cmcmc$run(n).
if(FALSE) {
Rmodel <- nimbleModel(code, constants = list(Q=Q))
mcmcspec <- MCMCspec(Rmodel, nodes = NULL)
mcmcspec$addSampler(type = 'RW_block', target = 'x', control = list(adaptInterval=500))
mcmcspec$getMonitors()
Rmcmc <- buildMCMC(mcmcspec)
Cmodel <- compileNimble(Rmodel)
Cmcmc <- compileNimble(Rmcmc, project = Rmodel)
Cmcmc(200000) ## this runs nearly instantaneously on my computer -DT
samples <- as.matrix(nfVar(Cmcmc, 'mvSamples'))
samples <- samples[50001:200000,]
dim(samples)
apply(samples, 2, mean)
solve(Q)
cov(samples)
propCov <- nfVar(Cmcmc, 'samplerFunctions')[[1]]$propCov
scale <- nfVar(Cmcmc, 'samplerFunctions')[[1]]$scale
propCov * scale^2
nfVar(Cmcmc, 'samplerFunctions')[[1]]$scaleHistory
nfVar(Cmcmc, 'samplerFunctions')[[1]]$acceptanceRateHistory
nfVar(Cmcmc, 'samplerFunctions')[[1]]$scale
nfVar(Cmcmc, 'samplerFunctions')[[1]]$propCov
## why is the proposal cov w/ .99 cross-corrs?
## also MCMC in C takes a surprisingly long time - this might be threaded lin alg behaving badly on small matrices
}
### DT's model
## 3-d MVN with mean mu, marginal variances varr, and the correlation matrix
## below; Q is the implied covariance and P its inverse (precision).
mu <- c(1, 2, 3)
corr <- matrix(c(1, .8, 0.3,
                 .8, 1, 0,
                 0.3, 0, 1), nrow = 3)
varr <- c(1, 2, 3)
sds <- sqrt(varr)
Sig <- diag(sds)
Q <- Sig %*% corr %*% Sig
P <- solve(Q)
## Same model parameterized by the precision P built above; posterior mean and
## marginal variances must match mu and varr.
code <- nimbleCode({
# x[1:3] ~ dmnorm(mu[1:3], cov = Q[1:3,1:3])
x[1:3] ~ dmnorm(mu[1:3], prec = P[1:3,1:3])
})
data = list(P = P, mu = mu)
test_mcmc(model = code, name = 'second block sampler on multivariate node', data = data, seed = 0, numItsC = 100000,
results = list(mean = list(x = mu),
var = list(x = varr)),
resultsTolerance = list(mean = list(x = rep(.1,3)),
var = list(x = c(.1,.1,.1))),
samplers = list(
list(type = 'RW_block', target = 'x[1:3]')))
### MVN conjugate update
## Q0 and Q act as precision matrices below (BUGS-style dmnorm second
## argument); Q is defined as the inverse of a given covariance matrix.
set.seed(0)
mu0 <- 1:3
Q0 <- matrix(c(1, 0.2, 0.8,
               0.2, 2, 1,
               0.8, 1, 2), nrow = 3)
y_cov <- matrix(c(3, 1.7, 0.9,
                  1.7, 2, 0.6,
                  0.9, 0.6, 1), nrow = 3)
Q <- solve(y_cov)
a <- c(-2, 0.5, 1)
B <- matrix(rnorm(9), 3)
##### not currently working - see Perry's email of ~ 10/6/14
## code <- nimbleCode({
## mu[1:3] ~ dmnorm(mu0[1:3], Q0[1:3, 1:3])
## y[1:3] ~ dmnorm(asCol(a[1:3]) + B[1:3, 1:3] %*% asCol(mu[1:3]), Q[1:3, 1:3])
## })
## Working formulation: the linear predictor lives in a deterministic node.
code <- nimbleCode({
mu[1:3] ~ dmnorm(mu0[1:3], Q0[1:3, 1:3])
y_mean[1:3] <- asCol(a[1:3]) + B[1:3, 1:3] %*% asCol(mu[1:3])
y[1:3] ~ dmnorm(y_mean[1:3], Q[1:3, 1:3])
})
## Simplest version of model w/o 'a' and 'B'
## a = rep(0,3)
## B = diag(rep(1,3))
## code <- nimbleCode({
## mu[1:3] ~ dmnorm(mu0[1:3], Q0[1:3, 1:3])
## y[1:3] ~ dmnorm(mu[1:3], Q[1:3, 1:3])
## })
mu <- mu0 + chol(solve(Q0)) %*% rnorm(3)
# make sure y is a vec not a 1-col matrix or get a dimensionality error
y <- c(a + B%*%mu + chol(solve(Q)) %*% rnorm(3))
data = list(mu0 = mu0, Q0 = Q0, Q = Q, a = a, B = B, y = y)
## Closed-form normal-normal posterior precision and mean for mu given y.
muQtrue = t(B) %*% Q%*%B + Q0
muMeanTrue = c(solve(muQtrue, crossprod(B, Q%*%(y-a)) + Q0%*%mu0))
test_mcmc(model = code, name = 'two-level multivariate normal', data = data, seed = 0, numItsC = 10000,
results = list(mean = list(mu = muMeanTrue),
cov = list(mu = solve(muQtrue))),
resultsTolerance = list(mean = list(mu = rep(.02,3)),
cov = list(mu = matrix(.01, 3, 3))))
### scalar RW updates in place of conjugate mv update
test_mcmc(model = code, name = 'two-level multivariate normal with scalar updaters', data = data, seed = 0, numItsC = 100000,
results = list(mean = list(mu = muMeanTrue),
cov = list(mu = solve(muQtrue))),
resultsTolerance = list(mean = list(mu = rep(.03,3)),
cov = list(mu = matrix(.03, 3, 3))),
samplers = list(list(type = 'RW', target = 'mu[1]'),
list(type = 'RW', target = 'mu[2]'),
list(type = 'RW', target = 'mu[3]')),
removeAllDefaultSamplers = TRUE)
## another example of MVN conjugate sampler, for test-mcmc.R
## using both cov and prec parametrizations of MVN, and various linear links
set.seed(0)

## Draw a symmetric 5x5 matrix with boosted diagonal: symmetrize a raw normal
## draw (entries centered at `center`) and add `ridge` on the diagonal.
## Consumes exactly one rnorm(25) call, matching the original inline code so
## the RNG sequence (and downstream exact-sample tests) is unchanged.
symPosDef <- function(center, ridge) {
    z <- array(rnorm(25, center), c(5, 5))
    z + t(z) + ridge * diag(5)
}

prior_mean <- rep(0, 5)
prior_cov <- symPosDef(0, 5)
a <- array(rnorm(20), c(4, 5))     # offsets for the four likelihood terms
B <- array(NA, c(4, 5, 5))         # linear maps; terms 1 and 3 are identity
for (i in c(2, 4)) B[i, , ] <- array(rnorm(25), c(5, 5))
B[1, , ] <- diag(5)
B[3, , ] <- diag(5)
M_y <- array(NA, c(4, 5, 5))       # per-term cov/prec matrices
for (i in 1:4) M_y[i, , ] <- symPosDef(i, 5 * i)
x <- rep(0, 5)
y <- array(rnorm(20), c(4, 5))     # observations
## Four MVN likelihood terms with alternating prec/cov parameterizations and
## linear links; R and C MCMC runs are compared to each other, to recorded
## exact samples, and to the analytic conjugate posterior.
code <- nimbleCode({
x[1:5] ~ dmnorm(mean = prior_mean[1:5], cov = prior_cov[1:5,1:5])
for(i in 1:4)
mu_y[i,1:5] <- asCol(a[i,1:5]) + B[i,1:5,1:5] %*% asCol(x[1:5])
y[1,1:5] ~ dmnorm(mu_y[1,1:5], prec = M_y[1,1:5,1:5])
y[2,1:5] ~ dmnorm(mu_y[2,1:5], cov = M_y[2,1:5,1:5])
y[3,1:5] ~ dmnorm(mu_y[3,1:5], prec = M_y[3,1:5,1:5])
y[4,1:5] ~ dmnorm(mu_y[4,1:5], cov = M_y[4,1:5,1:5])
})
constants <- list(prior_mean=prior_mean, prior_cov=prior_cov, a=a, B=B, M_y=M_y)
data <- list(y=y)
inits <- list(x=x)
Rmodel <- nimbleModel(code, constants, data, inits)
spec <- configureMCMC(Rmodel)
##spec$getSamplers()
Rmcmc <- buildMCMC(spec)
Cmodel <- compileNimble(Rmodel)
Cmcmc <- compileNimble(Rmcmc, project = Rmodel)
set.seed(0)
Rmcmc$run(10)
Rsamples <- as.matrix(Rmcmc$mvSamples)
set.seed(0)
Cmcmc$run(10)
Csamples <- as.matrix(Cmcmc$mvSamples)
test_that(
'expected R sample',
expect_equal(round(as.numeric(Rsamples), 8),
##cat('c(', paste0(as.numeric(round(Rsamples,8)), collapse=', '), ')\n')
c(0.97473128, 0.50438666, 1.1251132, 0.83830666, 0.74077066, 0.92935482, 0.83758372, 0.98708273, 1.24199937, 0.67348127, -0.54387714, -0.60713969, -0.51392796, -0.3176801, -0.34416529, -0.08530564, -0.47160157, -0.21996584, -0.20504917, -0.77287122, 0.78462584, 0.46103509, 0.43862813, 0.49343096, 0.61020864, 0.55088287, 0.53887202, 0.49863894, 0.62691318, 0.80142839, 0.34941152, 0.06623608, 0.05624477, 0.21369178, 0.26585415, -0.1439989, -0.03133488, 0.3544062, -0.03518959, 0.27415746, 0.40977, 0.8351078, 0.25719293, 0.05663917, 0.30894028, 0.33113315, 0.47647909, 0.26143962, 0.07180759, 0.27255767)
))
dif <- as.numeric(Rsamples - Csamples)
test_that('R and C equiv', expect_lt(max(abs(dif)), 1E-15))
## Analytic posterior for x: accumulate precision and mean contributions from
## each likelihood term, converting the cov-parameterized ones (2, 4) to
## precisions first.
y_prec <- array(NA, c(4,5,5))
y_prec[1,,] <- M_y[1,,]
y_prec[2,,] <- solve(M_y[2,,])
y_prec[3,,] <- M_y[3,,]
y_prec[4,,] <- solve(M_y[4,,])
contribution_mean <- array(NA, c(4,5))
for(i in 1:4) contribution_mean[i,] <- t(B[i,,]) %*% y_prec[i,,] %*% (y[i,] - a[i,])
contribution_prec <- array(NA, c(4,5,5))
for(i in 1:4) contribution_prec[i,,] <- t(B[i,,]) %*% y_prec[i,,] %*% B[i,,]
prior_prec <- solve(prior_cov)
post_prec <- prior_prec + apply(contribution_prec, c(2,3), sum)
post_cov <- solve(post_prec)
post_mean <- (post_cov %*% (prior_prec %*% prior_mean + apply(contribution_mean, 2, sum)))[,1]
Cmcmc$run(100000)
Csamples <- as.matrix(Cmcmc$mvSamples)
dif_mean <- as.numeric(apply(Csamples, 2, mean)) - post_mean
test_that('posterior mean', expect_lt(max(abs(dif_mean)), 0.001))
dif_cov <- as.numeric(cov(Csamples) - post_cov)
test_that('posterior cov', expect_lt(max(abs(dif_cov)), 0.001))
### test of conjugate Wishart
## Simulate n = 20 draws from a 3-d MVN with known covariance `trueCov`;
## Omega = solve(trueCov) is the precision that gets a Wishart prior below.
set.seed(0)
trueCor <- matrix(c(1, .3, .7,
                    .3, 1, -0.2,
                    .7, -0.2, 1), 3)
covs <- c(3, 2, .5)
sdMat <- diag(sqrt(covs))
trueCov <- sdMat %*% trueCor %*% sdMat
Omega <- solve(trueCov)
n <- 20
R <- diag(rep(1, 3))
mu <- 1:3
## Column k of Y is one draw: mu + L z_k, with L the lower Cholesky factor.
Y <- mu + t(chol(trueCov)) %*% matrix(rnorm(3 * n), ncol = n)
M <- 3
data <- list(Y = t(Y), n = n, M = M, mu = mu, R = R)
code <- nimbleCode( {
for(i in 1:n) {
Y[i, 1:M] ~ dmnorm(mu[1:M], Omega[1:M,1:M]);
}
Omega[1:M,1:M] ~ dwish(R[1:M,1:M], 4);
})
## Conjugate Wishart posterior: df = 4 + n, scale = R + sum of outer products
## of centered observations; posterior mean of Omega is newDf * solve(newR).
newDf = 4 + n
newR = R + tcrossprod(Y- mu)
OmegaTrueMean = newDf * solve(newR)
## Monte Carlo estimate of posterior sds: each draw is Z Z' with Z's columns
## built from solve(newR)'s Cholesky factor times standard normals.
## NOTE(review): rnorm(3*newDf) hard-codes the dimension 3 (= M here); would
## break if M changed - confirm intended.
wishRV <- array(0, c(M, M, 10000))
for(i in 1:10000) {
z <- t(chol(solve(newR))) %*% matrix(rnorm(3*newDf), ncol = newDf)
wishRV[ , , i] <- tcrossprod(z)
}
OmegaSimTrueSDs = apply(wishRV, c(1,2), sd)
test_mcmc(model = code, name = 'conjugate Wishart', data = data, seed = 0, numItsC = 1000, inits = list(Omega = OmegaTrueMean),
results = list(mean = list(Omega = OmegaTrueMean ),
sd = list(Omega = OmegaSimTrueSDs)),
resultsTolerance = list(mean = list(Omega = matrix(.05, M,M)),
sd = list(Omega = matrix(0.06, M, M))))
# issue with Chol in R MCMC - probably same issue as in jaw-linear
# issue with Chol in R MCMC - probably same issue as in jaw-linear
## testing conjugate MVN updating with ragged dependencies;
## that is, dmnorm dependents of different lengths from the target node
code <- nimbleCode({
x[1:3] ~ dmnorm(mu0[1:3], prec = ident[1:3,1:3])
mu_y2[1:2] <- asCol(a[1:2]) + B[1:2,1:3] %*% asCol(x[1:3])
mu_y3[1:3] <- asCol(a[1:3]) + B[1:3,1:3] %*% asCol(x[1:3])
mu_y5[1:5] <- asCol(a[1:5]) + B[1:5,1:3] %*% asCol(x[1:3])
y2[1:2] ~ dmnorm(mu_y2[1:2], prec = prec_y[1:2,1:2])
y3[1:3] ~ dmnorm(mu_y3[1:3], prec = prec_y[1:3,1:3])
y5[1:5] ~ dmnorm(mu_y5[1:5], prec = prec_y[1:5,1:5])
})
## Constants for the ragged conjugate-MVN test: rows of B and leading
## submatrices of prec_y supply likelihood terms of lengths 2, 3, and 5.
mu0 <- rep(0, 3)
ident <- diag(3)
a <- 11:15
B <- t(matrix(1:15, nrow = 3))   # row-major fill: row i is (3i-2):(3i)
prec_y <- diag(1:5)
constants <- list(mu0 = mu0, ident = ident, a = a, B = B, prec_y = prec_y)
data <- list(y2 = 1:2, y3 = 1:3, y5 = 1:5)
inits <- list(x = rep(0, 3))
## Build and run the ragged model in R and C, compare against recorded exact
## samples, then against the analytic conjugate posterior.
Rmodel <- nimbleModel(code, constants, data, inits)
spec <- configureMCMC(Rmodel)
Rmcmc <- buildMCMC(spec)
Cmodel <- compileNimble(Rmodel)
Cmcmc <- compileNimble(Rmcmc, project = Rmodel)
set.seed(0)
Rmcmc$run(10)
set.seed(0)
Cmcmc$run(10)
Rsamples <- as.matrix(Rmcmc$mvSamples)
Csamples <- as.matrix(Cmcmc$mvSamples)
test_that('correct samples for ragged dmnorm conjugate update', expect_true(all(abs(as.numeric(Rsamples[,]) - c(4.96686874, 3.94112676, 4.55975130, 4.01930176, 4.47744412, 4.12927167, 4.91242131, 4.62837537, 4.54227859, 4.97237602, -1.12524733, 1.24545265, -0.13454814, 0.82755276, 0.08252775, 0.71187071, -0.31322184, -0.57462284, -0.64800963, -0.52885823, -3.92276916, -5.23904995, -4.53535941, -4.89919931, -4.66995650, -4.94181562, -4.63558011, -4.16385294, -4.03469945, -4.51128205)) < 1E-8)))
dif <- Rsamples - Csamples
test_that('R and C samples same for ragged dmnorm conjugate update', expect_true(all(abs(dif) < 2E-13)))
set.seed(0)
Cmcmc$run(200000)
Csamples <- as.matrix(Cmcmc$mvSamples)
obsmean <- apply(Csamples, 2, mean)
obsprec <- inverse(cov(Csamples))
## Analytic posterior precision/mean for x: prior term plus one term per
## ragged likelihood block.
pprec <- ident +
t(B[1:2,1:3]) %*% prec_y[1:2,1:2] %*% B[1:2,1:3] +
t(B[1:3,1:3]) %*% prec_y[1:3,1:3] %*% B[1:3,1:3] +
t(B[1:5,1:3]) %*% prec_y[1:5,1:5] %*% B[1:5,1:3]
pmean <- inverse(pprec) %*% (ident %*% mu0 +
t(B[1:2,1:3]) %*% prec_y[1:2,1:2] %*% (1:2 - a[1:2]) +
t(B[1:3,1:3]) %*% prec_y[1:3,1:3] %*% (1:3 - a[1:3]) +
t(B[1:5,1:3]) %*% prec_y[1:5,1:5] %*% (1:5 - a[1:5]) )
test_that('ragged dmnorm conjugate posterior mean', expect_true(all(abs(pmean - obsmean) / pmean < 0.01)))
test_that('ragged dmnorm conjugate posterior precision', expect_true(all(abs(pprec - obsprec) / pprec < 0.005)))
## testing binary sampler
## a-e have no data, so posterior means equal the prior probabilities;
## f/g/h each have 10 normal observations, giving tractable posteriors
## (e.g. for f: P(f=1 | yf) = plogis(sum(yf) - 5) -- see assertions below).
code <- nimbleCode({
a ~ dbern(0.5)
b ~ dbern(0.6)
c ~ dbern(0.05)
d ~ dbin(prob=0.2, size=1)
e ~ dbinom(prob=0.9, size=1)
f ~ dbern(0.5)
g ~ dbern(0.5)
h ~ dbern(0.5)
for(i in 1:10)
yf[i] ~ dnorm(f, sd = 1)
for(i in 1:10)
yg[i] ~ dnorm(g, sd = 1)
for(i in 1:10)
yh[i] ~ dnorm(h, sd = 1)
})
constants <- list()
data <- list(yf = c(rep(0,2), rep(1,8)), yg = c(rep(0,8), rep(1,2)), yh = c(rep(0,5), rep(1,5)))
inits <- list(a=0, b=0, c=0, d=0, e=0, f=0, g=0, h=0)
Rmodel <- nimbleModel(code, constants, data, inits)
## All eight nodes should be detected as binary-valued.
test_that('model$isBinary', expect_true(Rmodel$isBinary('a')))
test_that('model$isBinary', expect_true(Rmodel$isBinary('b')))
test_that('model$isBinary', expect_true(Rmodel$isBinary('c')))
test_that('model$isBinary', expect_true(Rmodel$isBinary('d')))
test_that('model$isBinary', expect_true(Rmodel$isBinary('e')))
test_that('model$isBinary', expect_true(Rmodel$isBinary('f')))
test_that('model$isBinary', expect_true(Rmodel$isBinary('g')))
test_that('model$isBinary', expect_true(Rmodel$isBinary('h')))
## Start from an empty sampler configuration and attach a binary (Gibbs)
## sampler to each node explicitly.
spec <- configureMCMC(Rmodel, nodes = NULL)
spec$addSampler('a', 'binary', print=FALSE)
spec$addSampler('b', 'binary', print=FALSE)
spec$addSampler('c', 'binary', print=FALSE)
spec$addSampler('d', 'binary', print=FALSE)
spec$addSampler('e', 'binary', print=FALSE)
spec$addSampler('f', 'binary', print=FALSE)
spec$addSampler('g', 'binary', print=FALSE)
spec$addSampler('h', 'binary', print=FALSE)
##spec$printSamplers()
Rmcmc <- buildMCMC(spec)
Cmodel <- compileNimble(Rmodel)
Cmcmc <- compileNimble(Rmcmc, project = Rmodel)
set.seed(0)
Cmcmc$run(100000)
samples <- as.matrix(Cmcmc$mvSamples)
means <- apply(samples, 2, mean)
##means
tol <- 0.0025
## 0.9525 / 0.0475 are plogis(3) / plogis(-3) rounded to 4 decimals.
test_that('binary sampler posterior', expect_lt(abs(means[['a']] - 0.5), tol))
test_that('binary sampler posterior', expect_lt(abs(means[['b']] - 0.6), tol))
test_that('binary sampler posterior', expect_lt(abs(means[['c']] - 0.05), tol))
test_that('binary sampler posterior', expect_lt(abs(means[['d']] - 0.2), tol))
test_that('binary sampler posterior', expect_lt(abs(means[['e']] - 0.9), tol))
test_that('binary sampler posterior', expect_lt(abs(means[['f']] - 0.9525), tol))
test_that('binary sampler posterior', expect_lt(abs(means[['g']] - 0.0475), tol))
test_that('binary sampler posterior', expect_lt(abs(means[['h']] - 0.5), tol))
## testing the binary sampler handles 'out of bounds' ok
## Flipping px to 0 makes sd = px - 0.5 negative, and flipping py to 0 makes
## tau = 0; in both cases the proposed flip gives an invalid dependent
## density, so the sampler should always keep the initial value 1.
code <- nimbleCode({
    px ~ dbern(0.5)
    py ~ dbern(0.5)
    x ~ dnorm(0, sd = px - 0.5)
    y ~ dnorm(0, tau = py)
})
constants <- list()
data <- list(x = 0, y = 0)
inits <- list(px = 1, py = 1)
Rmodel <- nimbleModel(code, constants, data, inits)
spec <- configureMCMC(Rmodel)
spec$printSamplers()
Rmcmc <- buildMCMC(spec)
Cmodel <- compileNimble(Rmodel)
Cmcmc <- compileNimble(Rmcmc, project = Rmodel)
## R MCMC: every sample should remain at 1 (flips always rejected).
set.seed(0)
Rmcmc$run(100)
Rsamples <- as.matrix(Rmcmc$mvSamples)
test_that('binary sampler out-of-bounds', expect_true(all(as.numeric(Rsamples) == 1)))
## Same check for the compiled MCMC.
set.seed(0)
Cmcmc$run(100)
Csamples <- as.matrix(Cmcmc$mvSamples)
test_that('binary sampler out-of-bounds', expect_true(all(as.numeric(Csamples) == 1)))
## testing the RW_multinomial sampler
## Two multinomial count vectors X and Y, linked through beta-distributed
## observations Z[ii] ~ dbeta(1 + X[ii], 1 + Y[ii]).
codeTest <- nimbleCode ({
    X[1:nGroups] ~ dmultinom(size=N, prob=pVecX[1:nGroups])
    Y[1:nGroups] ~ dmultinom(size=N, prob=pVecY[1:nGroups])
    for (ii in 1:nGroups) {
        Z[ii] ~ dbeta(1 + X[ii], 1 + Y[ii])
    }
})
## Simulate ground-truth probabilities, counts and data.
set.seed(0)
nGroups <- 5
N <- 1E6
pVecX <- rdirch(1, rep(1, nGroups))
pVecY <- rdirch(1, rep(1, nGroups))
X <- rmultinom(1, N, pVecX)[,1]
Y <- rmultinom(1, N, pVecY)[,1]
Z <- rbeta(nGroups, 1+X, 1+Y)
## Deliberately mis-ordered initial counts so the sampler has work to do.
Xini <- rmultinom(1, N, sample(pVecX))[,1]
Yini <- rmultinom(1, N, sample(pVecY))[,1]
Constants <- list(nGroups=nGroups)
Inits <- list(X=Xini, Y=Yini, pVecX=pVecX, pVecY=pVecY, N=N)
Data <- list(Z=Z)
modelTest <- nimbleModel(codeTest, constants=Constants, inits=Inits, data=Data, check=TRUE)
cModelTest <- compileNimble(modelTest)
mcmcTestConfig <- configureMCMC(cModelTest, print = TRUE)
samplers <- mcmcTestConfig$getSamplers()
## The default configuration should assign RW_multinomial to both X and Y.
test_that('assign RW_multinomial sampler', expect_equal(samplers[[1]]$name, 'RW_multinomial'))
test_that('assign RW_multinomial sampler', expect_equal(samplers[[2]]$name, 'RW_multinomial'))
mcmcTest <- buildMCMC(mcmcTestConfig)
cMcmcTest <- compileNimble(mcmcTest, project=modelTest)
## Optionally resample data
## Shrink N and redraw probabilities/data in the compiled model before running.
cModelTest$N <- N <- 1E3
(cModelTest$pVecX <- sort(rdirch(1, rep(1, nGroups))))
(cModelTest$pVecY <- sort(rdirch(1, rep(1, nGroups))))
simulate(cModelTest, "X", includeData=TRUE); (X <- cModelTest$X)
simulate(cModelTest, "Y", includeData=TRUE); (Y <- cModelTest$Y)
simulate(cModelTest, "Z", includeData=TRUE); (Z <- cModelTest$Z)
niter <- 1E4
cMcmcTest$run(niter)
samples <- as.matrix(cMcmcTest$mvSamples)
## Gold-standard check on the final iteration's exact counts (seed-dependent).
test_that('exact results of RW_multinomial sampler', expect_identical(as.numeric(samples[10000,]), c(8, 25, 31, 115, 821, 25,19, 84, 510, 362)))
##################################
## Trajectory Plots & Histogram ##
##################################
## Diagnostic plotting code for the RW_multinomial run above; intentionally
## left commented out (interactive use only, not part of the test suite).
##iColsX <- 1:nGroups
##iColsY <- iColsX + nGroups
##plotHistograms <- N <= 1E4 ## FALSE ## TRUE
##par(mfrow=c(2,1+plotHistograms))
####
##for (ii in 1:2) {
##    if (ii == 1) {
##        yMaxX <- 0
##        yMaxY <- 0
##    }
##    ##
##    plot (samples[,1],ylim=range(samples[,iColsX]), typ="n")
##    for (ii in iColsX)
##        lines(samples[,ii], col=rainbow(10,alpha=0.75)[ii])
##    ##
##    if (plotHistograms) {
##        hist(samples[,1], col=rainbow(2*nGroups, alpha=0.1)[1], breaks=min(samples):max(samples), prob=TRUE, ylim=c(0,yMaxX))
##        for (ii in iColsX) {
##            h <- hist(samples[,ii], prob=TRUE,
##                      col=rainbow(2*nGroups, alpha=0.1)[ii],
##                      border=rainbow(2*nGroups, alpha=0.1)[ii],
##                      breaks=min(samples[,iColsX]):max(samples[,iColsX]), add=TRUE)
##            yMaxX <- max(yMaxX, h$density)
##        }
##    }
##    ##
##    plot (samples[,1],ylim=range(samples[,iColsY]), typ="n")
##    for (ii in iColsY)
##        lines(samples[,ii], col=rainbow(10,alpha=0.75)[ii])
##    ##
##    if (plotHistograms) {
##        hist(samples[,1+nGroups], col=rainbow(2*nGroups, alpha=0.1)[1], breaks=min(samples[,iColsY]):max(samples[,iColsY]), prob=TRUE, ylim=c(0,yMaxY))
##        for (ii in iColsY) {
##            h <- hist(samples[,ii], prob=TRUE,
##                      col=rainbow(2*nGroups, alpha=0.1)[ii],
##                      border=rainbow(2*nGroups, alpha=0.1)[ii],
##                      breaks=min(samples[,iColsY]):max(samples[,iColsY]), add=TRUE)
##            yMaxY <- max(yMaxY, h$density)
##        }
##    }
##}
## testing the RW_multinomial sampler on distribution of size 2
## A two-cell multinomial is equivalent to a binomial; y provides an
## independent binomial reference draw on the same N and p.
code <- nimbleCode({
    prob[1] <- p
    prob[2] <- 1-p
    x[1:2] ~ dmultinom(size = N, prob = prob[1:2])
    y ~ dbinom( size = N, prob = p)
})
set.seed(0)
N <- 100
p <- 0.3
x1 <- rbinom(1, size=N, prob=p)
x2 <- N - x1
inits <- list(N = N, p = p, x = c(x1, x2), y = x1)
Rmodel <- nimbleModel(code, constants=list(), data=list(), inits=inits)
Cmodel <- compileNimble(Rmodel)
## Replace the default samplers: RW_multinomial on x, slice on y.
conf <- configureMCMC(Rmodel)
conf$printSamplers()
conf$removeSamplers()
conf$printSamplers()
conf$addSampler(target = 'x', type = 'RW_multinomial')
conf$addSampler(target = 'y', type = 'slice')
conf$printSamplers()
Rmcmc <- buildMCMC(conf)
Cmcmc <- compileNimble(Rmcmc, project = Rmodel)
Cmcmc$run(100000)
samples <- as.matrix(Cmcmc$mvSamples)
## Columns are x[1], x[2], y; the means of x[1] and y (columns 1 and 3),
## scaled by N, should both be close to p.
fracs <- apply(samples, 2, mean) / N
test_that('RW_multinomial sampler results within tolerance', expect_true(all(abs(as.numeric(fracs[c(1,3)]) - p) < 0.01)))
| /packages/nimble/inst/tests/test-mcmc.R | no_license | clarkfitzg/nimble | R | false | false | 40,575 | r | source(system.file(file.path('tests', 'test_utils.R'), package = 'nimble'))
context("Testing of default MCMC")
### TODO: add in the special cases for dipper
## Template (disabled) for running the same model in JAGS via R2jags to
## obtain reference posterior summaries for comparison.
if(FALSE) { # template for running JAGS for comparison
  require(R2jags)
  dir = system.file(file.path('classic-bugs', 'vol2', 'air'), package = 'nimble')
  data = new.env(); inits = new.env()
  source(file.path(dir, 'air-data.R'), data)
  source(file.path(dir, 'air-init.R'), inits)
  data = as.list(data)
  inits = list(as.list(inits))
  out1 <- jags(data = data, inits = inits,
               parameters.to.save = c('X','theta'), n.chains = 1,
               n.iter = 100000, n.burnin = 50000, n.thin = 1, model.file = file.path(dir, 'air.bug'),
               DIC = FALSE, jags.seed = 0)
  out <- as.mcmc(out1)
}
## Template (disabled) for batch-running test_mcmc over all classic models.
if(FALSE) {
  allModels <- c(# vol1
    'blocker', 'bones', 'dyes', 'equiv', 'line', 'oxford', 'pump', 'rats', 'seeds',
    #   'bones',
    # vol2
    'dugongs')
  sapply(allModels, test_mcmc, numItsC = 1000)
}
### Beginning of actual tests
## Each call runs test_mcmc on a classic BUGS example; resampleData = TRUE
## additionally checks frequentist coverage on data simulated from the model.
## Trailing comments record the observed outcome of each case.
test_mcmc('blocker', numItsC = 1000, resampleData = TRUE)
# 100% coverage; looks fine
test_mcmc('bones', numItsC = 10000, resampleData = TRUE)
# 100% coverage; looks fine
test_mcmc('dyes', numItsC = 1000, resampleData = TRUE)
# 100% coverage; looks fine
test_mcmc('equiv', numItsC = 1000, resampleData = TRUE)
# looks good
# testing: tau[2]=97.95, 198.8 ; tau[1]=102.2,55
# phi = -.008,.052; pi = -.1805,.052
test_mcmc('line', numItsC = 1000, resampleData = TRUE)
# 100% coverage; looks fine
test_mcmc('oxford', numItsC = 1000, resampleData = TRUE)
# probably ok; seems to overcover for 'b', but 'b' in this
# parameteriz'n is a top-level node and the multiplic'n
# by sigma seems to lead to frequentist overcoverage
# similar results in JAGS
test_mcmc('pump', numItsC = 1000, resampleData = TRUE)
# 100% coverage; looks fine
test_mcmc('rats', numItsC = 1000, resampleData = TRUE)
# 93.8% coverage; looks fine and compares well to JAGS
# however in resampleData, one of the taus wildly misses
test_mcmc('seeds', numItsC = 1000, resampleData = TRUE)
# fine
test_mcmc('dugongs', numItsC = 1000, resampleData = TRUE)
# 100% coverage; looks fine
## Variants of epil and seeds that use explicit model/inits/data files.
test_mcmc('epil', model = 'epil2.bug', inits = 'epil-inits.R',
          data = 'epil-data.R', numItsC = 1000, resampleData = TRUE)
# looks ok
test_mcmc('epil', model = 'epil3.bug', inits = 'epil-inits.R',
          data = 'epil-data.R', numItsC = 1000, resampleData = TRUE)
# looks ok
test_mcmc('seeds', model = 'seedsuni.bug', inits = 'seeds-init.R',
          data = 'seeds-data.R', numItsC = 1000, resampleData = TRUE)
# looks fine - intervals for b's seem a bit large but probably ok
# particularly since default seeds.bug seems fine
# results compared to JAGS look fine
test_mcmc('seeds', model = 'seedssig.bug', inits = 'seeds-init.R',
          data = 'seeds-data.R', numItsC = 1000, resampleData = TRUE)
# looks fine - intervals for b's seem a bit large but probably ok
test_mcmc('birats', model = 'birats1.bug', inits = 'birats-inits.R',
          data = 'birats-data.R', numItsC = 1000, resampleData = TRUE)
# seems fine
test_mcmc('birats', model = 'birats3.bug', inits = 'birats-inits.R',
          data = 'birats-data.R', numItsC = 1000, resampleData = TRUE)
# seems fine
test_mcmc('birats', model = 'birats2.bug', inits = 'birats-inits.R',
          data = 'birats-data.R', numItsC = 1000, resampleData = TRUE)
# looks fine now that values() returns in order
# result changes as of v0.4 because in v0.3-1 'omega.beta' was found
# as both topNode and nontopNode and was being simulated into
# incorrectly in resampleData - this affected values further downstream
test_mcmc('ice', model = 'icear.bug', inits = 'ice-inits.R',
          data = 'ice-data.R', numItsC = 1000, resampleData = TRUE)
# resampleData gives very large magnitude betas because beta[1],beta[2] are not
# actually topNodes because of (weak) dependence on tau, and
# are simulated from their priors to have large magnitude values
# rework ice example so that beta[1] and beta[2] will be top nodes
## Rewrite icear.bug via sed so beta[1]/beta[2] become true top nodes,
## then rerun the test on the patched copy in tempdir().
##system(paste("sed 's/tau\\*1.0E-6/1.0E-6/g'", system.file('classic-bugs','vol2','ice','icear.bug', package = 'nimble'), ">", file.path(tempdir(), "icear.bug")))
system.in.dir(paste("sed 's/tau\\*1.0E-6/1.0E-6/g' icear.bug > ", file.path(tempdir(), "icear.bug")), dir = system.file('classic-bugs','vol2','ice', package = 'nimble'))
test_mcmc(model = file.path(tempdir(), "icear.bug"), inits = system.file('classic-bugs', 'vol2', 'ice','ice-inits.R', package = 'nimble'),  data = system.file('classic-bugs', 'vol2', 'ice','ice-data.R', package = 'nimble'), numItsC = 1000, resampleData = TRUE)
# looks fine, but alpha and beta values shifted a bit (systematically) relative to JAGS results - on further inspection this is because mixing for this model is poor in both NIMBLE and JAGS - with longer runs they seem to agree (as best as one can tell given the mixing without doing a super long run)
test_mcmc('beetles', model = 'beetles-logit.bug', inits = 'beetles-inits.R',
          data = 'beetles-data.R', numItsC = 1000, resampleData = TRUE)
# getting warning; deterministic model node is NA or NaN in model initialization
# weirdness with llike.sat[8] being NaN on init (actually that makes sense), and with weird lifting of RHS of llike.sat
## Build a patched leuk.bug in tempdir(): prepend var declarations and
## rename step() to nimStep() (step is not available in data blocks).
##system.in.dir(paste0("echo 'var\nY[N,T],\ndN[N,T];' >> ", file.path(tempdir(), "leuk.bug")))
writeLines(c("var","Y[N,T],","dN[N,T];"), con = file.path(tempdir(), "leuk.bug")) ## echo doesn't seem to work on Windows
##system(paste("cat", system.file('classic-bugs','vol1','leuk','leuk.bug', package = 'nimble'), ">>", file.path(tempdir(), "leuk.bug")))
# need nimStep in data block as we no longer have step
##system(paste("sed -i -e 's/step/nimStep/g'", file.path(tempdir(), "leuk.bug")))
system.in.dir(paste("cat leuk.bug >> ", file.path(tempdir(), "leuk.bug")), dir = system.file('classic-bugs','vol1','leuk',package = 'nimble'))
# need nimStep in data block as we no longer have step
system.in.dir(paste("sed -i -e 's/step/nimStep/g'", file.path(tempdir(), "leuk.bug")))
test_mcmc(model = file.path(tempdir(), "leuk.bug"), name = 'leuk', inits = system.file('classic-bugs', 'vol1', 'leuk','leuk-init.R', package = 'nimble'),  data = system.file('classic-bugs', 'vol1', 'leuk','leuk-data.R', package = 'nimble'), numItsC = 1000,
          results = list(mean = list(beta = 1.58), sd = list(beta = 0.43)),
          resultsTolerance = list(mean = list(beta = 0.02), sd = list(beta = 0.02)))
## Build a patched salm.bug in tempdir() with an added var declaration.
writeLines(paste("var","logx[doses];"), con = file.path(tempdir(), "salm.bug"))
##system.in.dir(paste0("echo 'var\nlogx[doses];' >> ", file.path(tempdir(), "salm.bug")))
##system(paste("cat", system.file('classic-bugs','vol1','salm','salm.bug', package = 'nimble'), ">>", file.path(tempdir(), "salm.bug")))
system.in.dir(paste("cat salm.bug >>", file.path(tempdir(), "salm.bug")), dir = system.file('classic-bugs','vol1','salm', package = 'nimble'))
test_mcmc(model = file.path(tempdir(), "salm.bug"), name = 'salm', inits = system.file('classic-bugs', 'vol1', 'salm','salm-init.R', package = 'nimble'),  data = system.file('classic-bugs', 'vol1', 'salm','salm-data.R', package = 'nimble'), numItsC = 1000)
# looks good compared to JAGS
## air.bug: rewrite mean(X) as mean(X[]) so dimension info is available.
##system(paste("cat", system.file('classic-bugs','vol2','air','air.bug', package = 'nimble'), ">>", file.path(tempdir(), "air.bug")))
file.copy(system.file('classic-bugs','vol2','air','air.bug', package = 'nimble'), file.path(tempdir(), "air.bug"), overwrite=TRUE)
system.in.dir(paste("sed -i -e 's/mean(X)/mean(X\\[\\])/g'", file.path(tempdir(), "air.bug")))
test_mcmc(model = file.path(tempdir(), "air.bug"), name = 'air', inits = system.file('classic-bugs', 'vol2', 'air','air-inits.R', package = 'nimble'),  data = system.file('classic-bugs', 'vol2', 'air','air-data.R', package = 'nimble'), numItsC = 1000)
# theta[2] posterior is a bit off from JAGS - would be worth more investigation
## jaw-linear.bug: rewrite mean(age) as mean(age[1:M]) to supply size info.
##system(paste("sed 's/mean(age)/mean(age\\[1:M\\])/g'", system.file('classic-bugs','vol2','jaw','jaw-linear.bug', package = 'nimble'), ">", file.path(tempdir(), "jaw-linear.bug"))) # alternative way to get size info in there
system.in.dir(paste("sed 's/mean(age)/mean(age\\[1:M\\])/g' jaw-linear.bug > ", file.path(tempdir(), "jaw-linear.bug")), dir = system.file('classic-bugs','vol2','jaw', package = 'nimble'))  # alternative way to get size info in there
test_mcmc(model = file.path(tempdir(), "jaw-linear.bug"), name = 'jaw-linear', inits = system.file('classic-bugs', 'vol2', 'jaw','jaw-inits.R', package = 'nimble'),  data = system.file('classic-bugs', 'vol2', 'jaw','jaw-data.R', package = 'nimble'), numItsC = 1000)
# C MCMC runs and seems fine; R MCMC fails as can't do Cholesky of 0 matrix in 2-point method
# vectorized version of jaw to try to deal with scalar/vec bug - not needed now that above works
## Disabled alternative formulation of the jaw model kept for reference.
if(FALSE) {
  model <- function() {
    for (i in 1:N) {
      Y[i,1:M] ~ dmnorm(mu[1:M], Omega[1:M,1:M]);    # The 4 measurements for each
    }                                                # boy are multivariate normal
    mu[1:M] <- beta0 * ones[1:M] + beta1 * (age[1:4] - mean(age[1:4]));
    beta0.uncentred <- beta0 - beta1 * mean(age[1:4]);
    beta0 ~ dnorm(0.0, 0.001);
    beta1  ~ dnorm(0.0, 0.001);
    Omega[1:M,1:M] ~ dwish(R[1:M,1:M], 4);   # between-child variance in length at each age
    #Sigma2[1:M,1:M] <- inverse(Omega[1:M,1:M]);
    for (i in 1:N) {
      for (j in 1:M) {
        resid[i,j] <- Y[i,j] - mu[j];   # residuals
        resid2[i,j] <- resid[i,j]^2;    # squared residuals
      }
    }
    RSS <- sum(resid2[1:N,1:M]);        # Residual Sum of Squares
  }
  inits = list(beta0 = 40, beta1 = 0)
  data =list(M=4,N=20, Y = matrix(c(47.8, 46.4, 46.3, 45.1, 47.6, 52.5, 51.2, 49.8, 48.1,
               45, 51.2, 48.5, 52.1, 48.2, 49.6, 50.7, 47.2, 53.3, 46.2, 46.3,
               48.8, 47.3, 46.8, 45.3, 48.5, 53.2, 53, 50, 50.8, 47, 51.4, 49.2,
               52.8, 48.9, 50.4, 51.7, 47.7, 54.6, 47.5, 47.6, 49, 47.7, 47.8,
               46.1, 48.9, 53.3, 54.3, 50.3, 52.3, 47.3, 51.6, 53, 53.7, 49.3,
               51.2, 52.7, 48.4, 55.1, 48.1, 51.3, 49.7, 48.4, 48.5, 47.2, 49.3,
               53.7, 54.5, 52.7, 54.4, 48.3, 51.9, 55.5, 55, 49.8, 51.8, 53.3,
               49.5, 55.3, 48.4, 51.8) , 20, 4), age = c(8, 8.5, 9, 9.5),
    R = matrix(c(1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1), 4 ,4),
    ones = rep(1, 4))
  test_mcmc(model = model, name = 'dmnorm-dwish example', data = data, inits = inits, numItsC = 1000)
}
## pump with explicit target values and tolerances under data resampling.
test_mcmc('pump', resampleData = TRUE, results = list(mean = list(
                                         "theta[1]" = 0.06,
                                         "theta[2]" = 0.10,
                                         "theta[9]" = 1.58,
                                         "theta[10]" = 1.97,
                                         alpha = 0.73,
                                         beta = 0.98)),
          resultsTolerance = list(mean = list(
                                    "theta[1]" = 0.01,
                                    "theta[2]" = 0.01,
                                    "theta[9]" = 0.05,
                                    "theta[10]" = 0.05,
                                    alpha = 0.1,
                                    beta = 0.1)))
## LogProb gap: bug fixed in after v0.3
## Problem that occurred in v0.3: because of gap in logProb_a (i.e. logProb_a[2]
## is defined but logProb_a[1] is not)
## Because logProbs get scrambled, the random walk sampler would always accept,
## meaning the sd of proposal steps approaches Inf
gapCode <- nimbleCode({
    a[1] <- 1
    a[2] ~ dnorm(0,1)
})
## Regression test: with the fix, the RW sampler on a[2] should recover a
## standard-normal posterior mean near 0 rather than diverging.
test_mcmc(model = gapCode, seed = 0, numItsC = 100000,
          results = list(mean = list(`a[2]` = 0) ),
          resultsTolerance = list(mean = list(`a[2]` = 0.1)),
          samplers = list(list(type = 'RW', target = 'a[2]'))
          )
## On Windows we cannot unload compiled projects, so continuing would
## eventually crash R; stop the test file early there.
if(.Platform$OS.type == 'windows') {
    message("Stopping tests now in Windows to avoid crashing until we can unload compiled projects")
    message("To continue testing use 'mcmc2' tests")
    q("no")
}
### Daniel's world's simplest MCMC demo
## Three-node normal chain with y observed; x and z have closed-form
## conditional posteriors used as the expected results below.
code <- nimbleCode({
    x ~ dnorm(0, 2)
    y ~ dnorm(x+1, 3)
    z ~ dnorm(y+2, 4)
})
data = list(y = 3)
test_mcmc(model = code, name = 'very simple example', data = data, resampleData = FALSE, results = list(
                                                                      mean = list(x = 6/5, z = 5),
                                                                      sd = list(x = 1/sqrt(5), z = 1/2)),
          resultsTolerance = list(mean = list(x = .1, z = .1),
                                  sd = list(x = .05, z = .05)))
### basic block sampler example
## Three independent normal location parameters, each with one observation;
## posterior mean is 2/3 of the data value and variance 1/3.
code <- nimbleCode({
    for(i in 1:3) {
        x[i] ~ dnorm(0, 1)
        y[i] ~ dnorm(x[i], 2)
    }
})
data = list(y = -1:1)
## Default (scalar) samplers.
test_mcmc(model = code, name = 'basic no-block sampler', data = data, resampleData = FALSE, results = list(
                                                                        mean = list(x = c(-2/3,0,2/3)),
                                                                        var = list(x = rep(1/3,3))),
          resultsTolerance = list(mean = list(x = rep(.1,3)),
                                  var = list(x = rep(.05,3))))
## RW_block applied separately to each scalar element.
test_mcmc(model = code, name = 'basic block sampler on scalars', data = data, resampleData = FALSE, results = list(
                                                                                 mean = list(x = c(-2/3,0,2/3)),
                                                                                 var = list(x = rep(1/3,3))),
          resultsTolerance = list(mean = list(x = rep(.1,3)),
                                  var = list(x = rep(.05,3))),
          samplers = list(
            list(type = 'RW_block', target = 'x[1]'),
            list(type = 'RW_block', target = 'x[2]'),
            list(type = 'RW_block', target = 'x[3]')
            ), removeAllDefaultSamplers = TRUE, numItsC = 10000)
## Single RW_block over the whole vector x.
test_mcmc(model = code, name = 'basic block sampler on vector', data = data, resampleData = FALSE, results = list(
                                                                                mean = list(x = c(-2/3,0,2/3)),
                                                                                var = list(x = rep(1/3,3))),
          resultsTolerance = list(mean = list(x = rep(.1,3)),
                                  var = list(x = rep(.05,3))),
          samplers = list(
            list(type = 'RW_block', target = 'x', control = list(adaptInterval = 500))
            ), numItsC = 10000)
### slice sampler example
## Assorted continuous and discrete priors; expected means/sds below are the
## analytic moments of each prior (no data in this model).
code <- nimbleCode({
    z ~ dnorm(0, 1)
    normal5_10 ~ dnorm(5, sd = 10)
    beta1_1 ~ dbeta(1, 1)
    beta3_5 ~ dbeta(3, 5)
    binom10_p5 ~ dbin(size=10, prob=0.5)
    binom20_p3 ~ dbin(size=20, prob=0.3)
})
test_mcmc(model = code, name = "slice sampler example", resampleData = FALSE, results = list(
                                                          mean = list(z = 0, "beta1_1" = 0.5, "beta3_5" = 3/(3+5),
                                                            "binom10_p5" = 10*.5, "binom20_p3" = 20*.3),
                                                          sd = list(z = 1, "beta1_1" = sqrt(1/12),
                                                            "beta3_5" = sqrt(3*5/((3+5)^2*(3+5+1))),
                                                            "binom10_p5" = sqrt(10*.5*.5),
                                                            "binom20_p3" = sqrt(20*.3*.7))),
          resultsTolerance = list(
            mean = list(z = 0.1, "beta1_1" = 0.5, "beta3_5" = .2,
              "binom10_p5" = .25, "binom20_p3" = .25),
            sd = list(z = .1, "beta1_1" = .05, "beta3_5" = .03,
              "binom10_p5" = .2, "binom20_p3" = .25)),
          samplers = list(list(type = 'slice', target = 'z', control = list(adaptInterval = 10)),
                          list(type = 'slice', target = 'normal5_10', control = list(adaptInterval = 10)),
                          list(type = 'slice', target = 'beta1_1', control = list(adaptInterval = 10)),
                          list(type = 'slice', target = 'beta3_5', control = list(adaptInterval = 10)),
                          list(type = 'slice', target = 'binom10_p5', control = list(adaptInterval = 10)),
                          list(type = 'slice', target = 'binom20_p3', control = list(adaptInterval = 10))))
### elliptical slice sampler 'ess'
## Multivariate normal prior on x with multivariate normal likelihood for y;
## random precision matrices are built as solve(A %*% t(A)) to be SPD.
set.seed(0)
ESScode <- quote({
    x[1:d] ~ dmnorm(mu_x[1:d], prec = prec_x[1:d, 1:d])
    y[1:d] ~ dmnorm(x[1:d], prec = prec_y[1:d, 1:d])
})
d <- 3
mu_x <- rnorm(d)
temp <- array(rnorm(d^2), c(d,d))
prec_x <- solve(temp %*% t(temp))
temp <- array(rnorm(d^2), c(d,d))
prec_y <- solve(temp %*% t(temp))
y <- rnorm(d)
ESSconstants <- list(d = d, mu_x = mu_x, prec_x = prec_x, prec_y = prec_y)
ESSdata <- list(y = y)
ESSinits <- list(x = rep(0, d))
## Gold-standard check of the exact (seed-dependent) sample path.
test_mcmc(model = ESScode, data = c(ESSconstants, ESSdata), inits = ESSinits,
          name = 'exact values of elliptical slice sampler',
          seed = 0,
          exactSample = list(`x[1]` = c(-0.492880566939352, -0.214539223107114, 1.79345037297218, 1.17324496091208, 2.14095077672555, 1.60417482445964, 1.94196916651627, 2.66737323347255, 2.66744178776022, 0.253966883192744), `x[2]` = c(-0.161210109217102, -0.0726534676226932, 0.338308532423757, -0.823652445515156, -0.344130712698579, -0.132642244861469, -0.0253168895009594, 0.0701624130921676, 0.0796842215444978, -0.66369112443311), `x[3]` = c(0.278627475932455, 0.0661336950029345, 0.407055002920732, 1.98761228946318, 1.0839897275519, 1.00262648370199, 0.459841485268785, 2.59229443025387, 1.83769567435409, 1.92954706515119)),
          samplers = list(list(type = 'ess', target = 'x')))
## Longer run checked against reference posterior means.
test_mcmc(model = ESScode, data = c(ESSconstants, ESSdata), inits = ESSinits,
          name = 'results to tolerance of elliptical slice sampler',
          results = list(mean = list(x = c(1.0216463, -0.4007247, 1.1416904))),
          resultsTolerance = list(mean = list(x = c(0.01, 0.01, 0.01))),
          numItsC = 100000,
          samplers = list(list(type = 'ess', target = 'x')))
### demo2 of check conjugacy
## Beta prior with two binomial observations: should trigger the
## beta-binomial conjugate sampler; exact sample path is the gold standard.
code <- nimbleCode({
    x ~ dbeta(3, 13)
    y[1] ~ dbin(x, 10)
    y[2] ~ dbin(x, 20)
})
data = list(y = c(3,4))
test_mcmc(model = code, name = 'check of beta-binom conjugacy', data = data, exactSample = list(x = c(0.195510839527966, 0.332847482503424,0.247768152764931, 0.121748195439553, 0.157842271774841, 0.197566496350904, 0.216991517500577, 0.276609942874852, 0.165733872345582, 0.144695512780252)), seed = 0)
### checkConjugacy_demo3_run.R - various conjugacies
## Exercises detection of gamma/normal/Poisson conjugacies, including
## dependents whose parameters are linear or scaled functions of the target.
code <- nimbleCode({
    x ~ dgamma(1, 1)       # should satisfy 'gamma' conjugacy class
    a  ~ dnorm(0, x)     # should satisfy 'norm' conjugacy class
    a2 ~ dnorm(0, tau = 3*x+0)
    b  ~ dpois(0+5*x)
    b2 ~ dpois(1*x*1)
    c ~ dgamma(1, 7*x*5)
    for(i in 2:3) {
        jTau[i] <- 1
        jNorm[i] ~ dnorm(c * (a+3) - i, var = jTau[i])
        kTauSd[i] <- 2
        kLogNorm[i] ~ dlnorm(0 - a - 6*i, kTauSd[i])
    }
})
## Gold-standard values for selected nodes under seed 0 and small RW scale.
sampleVals = list(x = c(3.950556165467749, 1.556947815895538, 1.598959152023738, 2.223758981790340, 2.386291653164086, 3.266282048060261, 3.064019155073057, 3.229661999356182, 1.985990552839427, 2.057249437940977),
                  c = c( 0.010341199485849559, 0.010341199485849559, 0.003846483017887228, 0.003846483017887228, 0.007257679932131476, 0.009680314740728335, 0.012594777095902964, 0.012594777095902964, 0.018179641351556003, 0.018179641351556003))
test_mcmc(model = code, name = 'check various conjugacies', exactSample = sampleVals, seed = 0, mcmcControl = list(scale=0.01))
### Dirichlet-multinomial conjugacy
# as of v0.4, exact numerical results here have changed because
# ddirch now sometimes returns NaN rather than -Inf (when an
# alpha is proposed to be negative) -- this changes the RNG
# sequence because NaN values result in no runif() call in decide()
# single multinomial
## One multinomial observation with a Dirichlet prior on p and vague gamma
## hyperpriors on the concentration parameters alpha.
set.seed(0)
n <- 100
alpha <- c(10, 30, 15, 60, 1)
K <- length(alpha)
p <- c(.12, .24, .09, .54, .01)
y <- rmulti(1, n, p)
code <- function() {
    y[1:K] ~ dmulti(p[1:K], n);
    p[1:K] ~ ddirch(alpha[1:K]);
    for(i in 1:K) {
        alpha[i] ~ dgamma(.001, .001);
    }
}
inits <- list(p = rep(1/K, K), alpha = rep(K, K))
data <- list(n = n, K = K, y = y)
test_mcmc(model = code, name = 'Dirichlet-multinomial example', data= data, seed = 0, numItsC = 10000,
          inits = inits,
          results = list(mean = list(p = p)),
          resultsTolerance = list(mean = list(p = rep(.06, K))))
# bad mixing for alphas; probably explains why posterior estimates for alphas changed so much as of v 0.4
# with replication
## m replicated multinomial observations, each with its own Dirichlet draw
## p[i, ], sharing the alpha hyperparameters.
set.seed(0)
n <- 100
m <- 20
alpha <- c(10, 30, 15, 60, 1)
K <- length(alpha)
y <- p <- matrix(0, m, K)
for(i in 1:m) {
    p[i, ] <- rdirch(1, alpha)
    y[i, ] <- rmulti(1, n, p[i, ])
}
code <- function() {
    for(i in 1:m) {
        y[i, 1:K] ~ dmulti(p[i, 1:K], n);
        p[i, 1:K] ~ ddirch(alpha[1:K]);
    }
    for(i in 1:K) {
        alpha[i] ~ dgamma(.001, .001);
    }
}
inits <- list(p = matrix(1/K, m, K), alpha = rep(1/K, K))
data <- list(n = n, K = K, m = m, y = y)
test_mcmc(model = code, name = 'Dirichlet-multinomial with replication', data= data, seed = 0, numItsC = 1000,
          inits = inits, numItsC_results = 100000,
          results = list(mean = list(p = p, alpha = alpha)),
          resultsTolerance = list(mean = list(p = matrix(.05, m, K),
                                    alpha = c(5,10,10,20,.5))))
# note alphas mix poorly (and are highly correlated),
# presumably because of cross-level dependence between
# p's and alphas.  cross-level sampler would probably work well here,
# or, of course, integrating over the p's
### block sampler on MVN node
## Single trivariate-normal node with known mean and precision Q; the
## RW_block sampler should recover mean mu and covariance solve(Q).
code <- nimbleCode({
    mu[1] <- 10
    mu[2] <- 20
    mu[3] <- 30
    x[1:3] ~ dmnorm(mu[1:3], prec = Q[1:3,1:3])
})
Q = matrix(c(1.0,0.2,-1.0,0.2,4.04,1.6,-1.0,1.6,10.81), nrow=3)
data = list(Q = Q)
inits = list(x = c(10, 20, 30))
test_mcmc(model = code, name = 'block sampler on multivariate node', data = data, seed = 0, numItsC = 10000,
          results = list(mean = list(x = c(10,20,30)),
                         var = list(x = diag(solve(Q)))),
          resultsTolerance = list(mean = list(x = rep(1,3)),
                                  var = list(x = c(.1, .03, .01))),
          samplers = list(
            list(type = 'RW_block', target = 'x[1:3]')))
# caution: setting targetNodes='x' works but the initial end sampler is not removed because x[1:3] in targetNode in default sampler != 'x' in targetNodes passed in
## Disabled interactive exploration of RW_block adaptation internals
## (uses the old MCMCspec / Cmcmc(n) calling interface).
if(FALSE) {
    Rmodel <- nimbleModel(code, constants = list(Q=Q))
    mcmcspec <- MCMCspec(Rmodel, nodes = NULL)
    mcmcspec$addSampler(type = 'RW_block', target = 'x', control = list(adaptInterval=500))
    mcmcspec$getMonitors()
    Rmcmc <- buildMCMC(mcmcspec)
    Cmodel <- compileNimble(Rmodel)
    Cmcmc <- compileNimble(Rmcmc, project = Rmodel)
    Cmcmc(200000)    ## this runs nearly instantaneously on my computer -DT
    samples <- as.matrix(nfVar(Cmcmc, 'mvSamples'))
    samples <- samples[50001:200000,]
    dim(samples)
    apply(samples, 2, mean)
    solve(Q)
    cov(samples)
    propCov <- nfVar(Cmcmc, 'samplerFunctions')[[1]]$propCov
    scale <- nfVar(Cmcmc, 'samplerFunctions')[[1]]$scale
    propCov * scale^2
    nfVar(Cmcmc, 'samplerFunctions')[[1]]$scaleHistory
    nfVar(Cmcmc, 'samplerFunctions')[[1]]$acceptanceRateHistory
    nfVar(Cmcmc, 'samplerFunctions')[[1]]$scale
    nfVar(Cmcmc, 'samplerFunctions')[[1]]$propCov
    ## why is the proposal cov w/ .99 cross-corrs?
    ## also MCMC in C takes a surprisingly long time - this might be threaded lin alg behaving badly on small matrices
}
### DT's model
## Trivariate normal with correlated covariance built from a correlation
## matrix and marginal variances; sampled via precision parameterization.
mu <- c(1,2,3)
corr <- matrix(c(1,.8,0.3,.8,1,0,0.3,0,1), nrow=3)
varr <- c(1,2,3)
Sig <- diag(sqrt(varr))
Q <- Sig %*% corr %*% Sig
P <- solve(Q)
code <- nimbleCode({
    # x[1:3] ~ dmnorm(mu[1:3], cov = Q[1:3,1:3])
    x[1:3] ~ dmnorm(mu[1:3], prec = P[1:3,1:3])
})
data = list(P = P, mu = mu)
## RW_block should recover mean mu and the marginal variances varr.
test_mcmc(model = code, name = 'second block sampler on multivariate node', data = data, seed = 0, numItsC = 100000,
          results = list(mean = list(x = mu),
                         var = list(x = varr)),
          resultsTolerance = list(mean = list(x = rep(.1,3)),
                                  var = list(x = c(.1,.1,.1))),
          samplers = list(
            list(type = 'RW_block', target = 'x[1:3]')))
### MVN conjugate update
set.seed(0)
mu0 = 1:3
Q0 = matrix(c(1, .2, .8, .2, 2, 1, .8, 1, 2), nrow = 3)
Q = solve(matrix(c(3, 1.7, .9, 1.7, 2, .6, .9, .6, 1), nrow = 3))
a = c(-2, .5, 1)
B = matrix(rnorm(9), 3)
##### not currently working - see Perry's email of ~ 10/6/14
## code <- nimbleCode({
## mu[1:3] ~ dmnorm(mu0[1:3], Q0[1:3, 1:3])
## y[1:3] ~ dmnorm(asCol(a[1:3]) + B[1:3, 1:3] %*% asCol(mu[1:3]), Q[1:3, 1:3])
## })
code <- nimbleCode({
mu[1:3] ~ dmnorm(mu0[1:3], Q0[1:3, 1:3])
y_mean[1:3] <- asCol(a[1:3]) + B[1:3, 1:3] %*% asCol(mu[1:3])
y[1:3] ~ dmnorm(y_mean[1:3], Q[1:3, 1:3])
})
## Simplest version of model w/o 'a' and 'B'
## a = rep(0,3)
## B = diag(rep(1,3))
## code <- nimbleCode({
## mu[1:3] ~ dmnorm(mu0[1:3], Q0[1:3, 1:3])
## y[1:3] ~ dmnorm(mu[1:3], Q[1:3, 1:3])
## })
mu <- mu0 + chol(solve(Q0)) %*% rnorm(3)
# make sure y is a vec not a 1-col matrix or get a dimensionality error
y <- c(a + B%*%mu + chol(solve(Q)) %*% rnorm(3))
data = list(mu0 = mu0, Q0 = Q0, Q = Q, a = a, B = B, y = y)
muQtrue = t(B) %*% Q%*%B + Q0
muMeanTrue = c(solve(muQtrue, crossprod(B, Q%*%(y-a)) + Q0%*%mu0))
test_mcmc(model = code, name = 'two-level multivariate normal', data = data, seed = 0, numItsC = 10000,
results = list(mean = list(mu = muMeanTrue),
cov = list(mu = solve(muQtrue))),
resultsTolerance = list(mean = list(mu = rep(.02,3)),
cov = list(mu = matrix(.01, 3, 3))))
### scalar RW updates in place of conjugate mv update
test_mcmc(model = code, name = 'two-level multivariate normal with scalar updaters', data = data, seed = 0, numItsC = 100000,
results = list(mean = list(mu = muMeanTrue),
cov = list(mu = solve(muQtrue))),
resultsTolerance = list(mean = list(mu = rep(.03,3)),
cov = list(mu = matrix(.03, 3, 3))),
samplers = list(list(type = 'RW', target = 'mu[1]'),
list(type = 'RW', target = 'mu[2]'),
list(type = 'RW', target = 'mu[3]')),
removeAllDefaultSamplers = TRUE)
## another example of MVN conjugate sampler, for test-mcmc.R
## using both cov and prec parametrizaions of MVN,
## and various linear links
## Four dmnorm dependents of x, alternating prec/cov parameterizations, with
## identity and random linear maps B[i,,].
set.seed(0)
prior_mean <- rep(0,5)
tmp <- array(rnorm(25), c(5,5))
tmp <- tmp + t(tmp) + 5*diag(5)
prior_cov <- tmp
a <- array(rnorm(20), c(4,5))
B <- array(NA, c(4,5,5))
for(i in c(2,4)) B[i,,] <- array(rnorm(25), c(5,5))
B[1,,] <- diag(5)
B[3,,] <- diag(5)
M_y <- array(NA, c(4,5,5))
for(i in 1:4) {
tmp <- array(rnorm(25,i), c(5,5))
tmp <- tmp + t(tmp) + 5*i*diag(5)
M_y[i,,] <- tmp
}
x <- rep(0, 5)
y <- array(rnorm(20), c(4,5))
code <- nimbleCode({
x[1:5] ~ dmnorm(mean = prior_mean[1:5], cov = prior_cov[1:5,1:5])
for(i in 1:4)
mu_y[i,1:5] <- asCol(a[i,1:5]) + B[i,1:5,1:5] %*% asCol(x[1:5])
y[1,1:5] ~ dmnorm(mu_y[1,1:5], prec = M_y[1,1:5,1:5])
y[2,1:5] ~ dmnorm(mu_y[2,1:5], cov = M_y[2,1:5,1:5])
y[3,1:5] ~ dmnorm(mu_y[3,1:5], prec = M_y[3,1:5,1:5])
y[4,1:5] ~ dmnorm(mu_y[4,1:5], cov = M_y[4,1:5,1:5])
})
constants <- list(prior_mean=prior_mean, prior_cov=prior_cov, a=a, B=B, M_y=M_y)
data <- list(y=y)
inits <- list(x=x)
Rmodel <- nimbleModel(code, constants, data, inits)
spec <- configureMCMC(Rmodel)
##spec$getSamplers()
Rmcmc <- buildMCMC(spec)
Cmodel <- compileNimble(Rmodel)
Cmcmc <- compileNimble(Rmcmc, project = Rmodel)
## Run 10 iterations in R and in C under the same seed; the R output is
## pinned to a stored regression vector and C must match R to ~1E-15.
set.seed(0)
Rmcmc$run(10)
Rsamples <- as.matrix(Rmcmc$mvSamples)
set.seed(0)
Cmcmc$run(10)
Csamples <- as.matrix(Cmcmc$mvSamples)
test_that(
'expected R sample',
expect_equal(round(as.numeric(Rsamples), 8),
##cat('c(', paste0(as.numeric(round(Rsamples,8)), collapse=', '), ')\n')
c(0.97473128, 0.50438666, 1.1251132, 0.83830666, 0.74077066, 0.92935482, 0.83758372, 0.98708273, 1.24199937, 0.67348127, -0.54387714, -0.60713969, -0.51392796, -0.3176801, -0.34416529, -0.08530564, -0.47160157, -0.21996584, -0.20504917, -0.77287122, 0.78462584, 0.46103509, 0.43862813, 0.49343096, 0.61020864, 0.55088287, 0.53887202, 0.49863894, 0.62691318, 0.80142839, 0.34941152, 0.06623608, 0.05624477, 0.21369178, 0.26585415, -0.1439989, -0.03133488, 0.3544062, -0.03518959, 0.27415746, 0.40977, 0.8351078, 0.25719293, 0.05663917, 0.30894028, 0.33113315, 0.47647909, 0.26143962, 0.07180759, 0.27255767)
))
dif <- as.numeric(Rsamples - Csamples)
test_that('R and C equiv', expect_lt(max(abs(dif)), 1E-15))
## Analytic posterior: convert the cov-parameterized likelihoods to
## precisions, accumulate each dependent's contribution, and compare the
## long C run against the closed-form mean and covariance.
y_prec <- array(NA, c(4,5,5))
y_prec[1,,] <- M_y[1,,]
y_prec[2,,] <- solve(M_y[2,,])
y_prec[3,,] <- M_y[3,,]
y_prec[4,,] <- solve(M_y[4,,])
contribution_mean <- array(NA, c(4,5))
for(i in 1:4) contribution_mean[i,] <- t(B[i,,]) %*% y_prec[i,,] %*% (y[i,] - a[i,])
contribution_prec <- array(NA, c(4,5,5))
for(i in 1:4) contribution_prec[i,,] <- t(B[i,,]) %*% y_prec[i,,] %*% B[i,,]
prior_prec <- solve(prior_cov)
post_prec <- prior_prec + apply(contribution_prec, c(2,3), sum)
post_cov <- solve(post_prec)
post_mean <- (post_cov %*% (prior_prec %*% prior_mean + apply(contribution_mean, 2, sum)))[,1]
Cmcmc$run(100000)
Csamples <- as.matrix(Cmcmc$mvSamples)
dif_mean <- as.numeric(apply(Csamples, 2, mean)) - post_mean
test_that('posterior mean', expect_lt(max(abs(dif_mean)), 0.001))
dif_cov <- as.numeric(cov(Csamples) - post_cov)
test_that('posterior cov', expect_lt(max(abs(dif_cov)), 0.001))
### test of conjugate Wishart
## Omega ~ Wishart prior, Y_i ~ MVN(mu, Omega); posterior is Wishart with
## df = 4 + n and scale newR, giving the analytic mean and simulated SDs.
set.seed(0)
trueCor <- matrix(c(1, .3, .7, .3, 1, -0.2, .7, -0.2, 1), 3)
covs <- c(3, 2, .5)
trueCov = diag(sqrt(covs)) %*% trueCor %*% diag(sqrt(covs))
Omega = solve(trueCov)
n = 20
R = diag(rep(1,3))
mu = 1:3
Y = mu + t(chol(trueCov)) %*% matrix(rnorm(3*n), ncol = n)
M = 3
data <- list(Y = t(Y), n = n, M = M, mu = mu, R = R)
code <- nimbleCode( {
for(i in 1:n) {
Y[i, 1:M] ~ dmnorm(mu[1:M], Omega[1:M,1:M]);
}
Omega[1:M,1:M] ~ dwish(R[1:M,1:M], 4);
})
newDf = 4 + n
newR = R + tcrossprod(Y- mu)
OmegaTrueMean = newDf * solve(newR)
## Monte-Carlo reference SDs for the Wishart posterior entries.
wishRV <- array(0, c(M, M, 10000))
for(i in 1:10000) {
z <- t(chol(solve(newR))) %*% matrix(rnorm(3*newDf), ncol = newDf)
wishRV[ , , i] <- tcrossprod(z)
}
OmegaSimTrueSDs = apply(wishRV, c(1,2), sd)
test_mcmc(model = code, name = 'conjugate Wishart', data = data, seed = 0, numItsC = 1000, inits = list(Omega = OmegaTrueMean),
results = list(mean = list(Omega = OmegaTrueMean ),
sd = list(Omega = OmegaSimTrueSDs)),
resultsTolerance = list(mean = list(Omega = matrix(.05, M,M)),
sd = list(Omega = matrix(0.06, M, M))))
# issue with Chol in R MCMC - probably same issue as in jaw-linear
## testing conjugate MVN updating with ragged dependencies;
## that is, dmnorm dependents of different lengths from the target node
code <- nimbleCode({
x[1:3] ~ dmnorm(mu0[1:3], prec = ident[1:3,1:3])
mu_y2[1:2] <- asCol(a[1:2]) + B[1:2,1:3] %*% asCol(x[1:3])
mu_y3[1:3] <- asCol(a[1:3]) + B[1:3,1:3] %*% asCol(x[1:3])
mu_y5[1:5] <- asCol(a[1:5]) + B[1:5,1:3] %*% asCol(x[1:3])
y2[1:2] ~ dmnorm(mu_y2[1:2], prec = prec_y[1:2,1:2])
y3[1:3] ~ dmnorm(mu_y3[1:3], prec = prec_y[1:3,1:3])
y5[1:5] ~ dmnorm(mu_y5[1:5], prec = prec_y[1:5,1:5])
})
mu0 <- rep(0,3)
ident <- diag(3)
a <- 11:15
B <- matrix(1:15, nrow=5, ncol=3, byrow=TRUE)
prec_y <- diag(1:5)
constants <- list(mu0=mu0, ident=ident, a=a, B=B, prec_y=prec_y)
data <- list(y2=1:2, y3=1:3, y5=1:5)
inits <- list(x=rep(0,3))
Rmodel <- nimbleModel(code, constants, data, inits)
spec <- configureMCMC(Rmodel)
Rmcmc <- buildMCMC(spec)
Cmodel <- compileNimble(Rmodel)
Cmcmc <- compileNimble(Rmcmc, project = Rmodel)
## 10 iterations in R and C under the same seed: R output is pinned to a
## stored regression vector, and C must agree with R to 2E-13.
set.seed(0)
Rmcmc$run(10)
set.seed(0)
Cmcmc$run(10)
Rsamples <- as.matrix(Rmcmc$mvSamples)
Csamples <- as.matrix(Cmcmc$mvSamples)
test_that('correct samples for ragged dmnorm conjugate update', expect_true(all(abs(as.numeric(Rsamples[,]) - c(4.96686874, 3.94112676, 4.55975130, 4.01930176, 4.47744412, 4.12927167, 4.91242131, 4.62837537, 4.54227859, 4.97237602, -1.12524733, 1.24545265, -0.13454814, 0.82755276, 0.08252775, 0.71187071, -0.31322184, -0.57462284, -0.64800963, -0.52885823, -3.92276916, -5.23904995, -4.53535941, -4.89919931, -4.66995650, -4.94181562, -4.63558011, -4.16385294, -4.03469945, -4.51128205)) < 1E-8)))
dif <- Rsamples - Csamples
test_that('R and C samples same for ragged dmnorm conjugate update', expect_true(all(abs(dif) < 2E-13)))
## Long C run: compare the empirical posterior mean/precision against the
## analytic conjugate-normal posterior accumulated over the three ragged
## dependents.
set.seed(0)
Cmcmc$run(200000)
Csamples <- as.matrix(Cmcmc$mvSamples)
obsmean <- apply(Csamples, 2, mean)
obsprec <- inverse(cov(Csamples))
pprec <- ident +
t(B[1:2,1:3]) %*% prec_y[1:2,1:2] %*% B[1:2,1:3] +
t(B[1:3,1:3]) %*% prec_y[1:3,1:3] %*% B[1:3,1:3] +
t(B[1:5,1:3]) %*% prec_y[1:5,1:5] %*% B[1:5,1:3]
pmean <- inverse(pprec) %*% (ident %*% mu0 +
t(B[1:2,1:3]) %*% prec_y[1:2,1:2] %*% (1:2 - a[1:2]) +
t(B[1:3,1:3]) %*% prec_y[1:3,1:3] %*% (1:3 - a[1:3]) +
t(B[1:5,1:3]) %*% prec_y[1:5,1:5] %*% (1:5 - a[1:5]) )
test_that('ragged dmnorm conjugate posterior mean', expect_true(all(abs(pmean - obsmean) / pmean < 0.01)))
test_that('ragged dmnorm conjugate posterior precision', expect_true(all(abs(pprec - obsprec) / pprec < 0.005)))
## testing binary sampler
## Mix of dbern / dbin(size=1) / dbinom(size=1) nodes; f, g, h additionally
## have 10 normal observations each, so their posteriors differ from 0.5.
code <- nimbleCode({
a ~ dbern(0.5)
b ~ dbern(0.6)
c ~ dbern(0.05)
d ~ dbin(prob=0.2, size=1)
e ~ dbinom(prob=0.9, size=1)
f ~ dbern(0.5)
g ~ dbern(0.5)
h ~ dbern(0.5)
for(i in 1:10)
yf[i] ~ dnorm(f, sd = 1)
for(i in 1:10)
yg[i] ~ dnorm(g, sd = 1)
for(i in 1:10)
yh[i] ~ dnorm(h, sd = 1)
})
constants <- list()
data <- list(yf = c(rep(0,2), rep(1,8)), yg = c(rep(0,8), rep(1,2)), yh = c(rep(0,5), rep(1,5)))
inits <- list(a=0, b=0, c=0, d=0, e=0, f=0, g=0, h=0)
Rmodel <- nimbleModel(code, constants, data, inits)
## All eight scalar nodes must be detected as binary-valued.
test_that('model$isBinary', expect_true(Rmodel$isBinary('a')))
test_that('model$isBinary', expect_true(Rmodel$isBinary('b')))
test_that('model$isBinary', expect_true(Rmodel$isBinary('c')))
test_that('model$isBinary', expect_true(Rmodel$isBinary('d')))
test_that('model$isBinary', expect_true(Rmodel$isBinary('e')))
test_that('model$isBinary', expect_true(Rmodel$isBinary('f')))
test_that('model$isBinary', expect_true(Rmodel$isBinary('g')))
test_that('model$isBinary', expect_true(Rmodel$isBinary('h')))
## Assign the 'binary' sampler to every node explicitly.
spec <- configureMCMC(Rmodel, nodes = NULL)
spec$addSampler('a', 'binary', print=FALSE)
spec$addSampler('b', 'binary', print=FALSE)
spec$addSampler('c', 'binary', print=FALSE)
spec$addSampler('d', 'binary', print=FALSE)
spec$addSampler('e', 'binary', print=FALSE)
spec$addSampler('f', 'binary', print=FALSE)
spec$addSampler('g', 'binary', print=FALSE)
spec$addSampler('h', 'binary', print=FALSE)
##spec$printSamplers()
Rmcmc <- buildMCMC(spec)
Cmodel <- compileNimble(Rmodel)
Cmcmc <- compileNimble(Rmcmc, project = Rmodel)
set.seed(0)
Cmcmc$run(100000)
samples <- as.matrix(Cmcmc$mvSamples)
means <- apply(samples, 2, mean)
##means
## a-e should match their prior probabilities; f/g/h should match the
## expected posterior values 0.9525 / 0.0475 / 0.5 given their data.
tol <- 0.0025
test_that('binary sampler posterior', expect_lt(abs(means[['a']] - 0.5), tol))
test_that('binary sampler posterior', expect_lt(abs(means[['b']] - 0.6), tol))
test_that('binary sampler posterior', expect_lt(abs(means[['c']] - 0.05), tol))
test_that('binary sampler posterior', expect_lt(abs(means[['d']] - 0.2), tol))
test_that('binary sampler posterior', expect_lt(abs(means[['e']] - 0.9), tol))
test_that('binary sampler posterior', expect_lt(abs(means[['f']] - 0.9525), tol))
test_that('binary sampler posterior', expect_lt(abs(means[['g']] - 0.0475), tol))
test_that('binary sampler posterior', expect_lt(abs(means[['h']] - 0.5), tol))
## testing the binary sampler handles 'out of bounds' ok
## Flipping px to 0 would give x a negative sd (px - 0.5 = -0.5) and flipping
## py to 0 would give y tau = 0, so both proposals must be rejected and the
## chains should stay at 1 for every iteration.
code <- nimbleCode({
px ~ dbern(0.5)
py ~ dbern(0.5)
x ~ dnorm(0, sd = px - 0.5)
y ~ dnorm(0, tau = py)
})
constants <- list()
data <- list(x = 0, y = 0)
inits <- list(px = 1, py = 1)
Rmodel <- nimbleModel(code, constants, data, inits)
spec <- configureMCMC(Rmodel)
spec$printSamplers()
Rmcmc <- buildMCMC(spec)
Cmodel <- compileNimble(Rmodel)
Cmcmc <- compileNimble(Rmcmc, project = Rmodel)
set.seed(0)
Rmcmc$run(100)
Rsamples <- as.matrix(Rmcmc$mvSamples)
test_that('binary sampler out-of-bounds', expect_true(all(as.numeric(Rsamples) == 1)))
set.seed(0)
Cmcmc$run(100)
Csamples <- as.matrix(Cmcmc$mvSamples)
test_that('binary sampler out-of-bounds', expect_true(all(as.numeric(Csamples) == 1)))
## testing the RW_multinomial sampler
codeTest <- nimbleCode ({
X[1:nGroups] ~ dmultinom(size=N, prob=pVecX[1:nGroups])
Y[1:nGroups] ~ dmultinom(size=N, prob=pVecY[1:nGroups])
for (ii in 1:nGroups) {
Z[ii] ~ dbeta(1 + X[ii], 1 + Y[ii])
}
})
## Simulate multinomial counts and beta observations; initial values use
## shuffled probability vectors so the sampler has real work to do.
set.seed(0)
nGroups <- 5
N <- 1E6
pVecX <- rdirch(1, rep(1, nGroups))
pVecY <- rdirch(1, rep(1, nGroups))
X <- rmultinom(1, N, pVecX)[,1]
Y <- rmultinom(1, N, pVecY)[,1]
Z <- rbeta(nGroups, 1+X, 1+Y)
Xini <- rmultinom(1, N, sample(pVecX))[,1]
Yini <- rmultinom(1, N, sample(pVecY))[,1]
Constants <- list(nGroups=nGroups)
Inits <- list(X=Xini, Y=Yini, pVecX=pVecX, pVecY=pVecY, N=N)
Data <- list(Z=Z)
modelTest <- nimbleModel(codeTest, constants=Constants, inits=Inits, data=Data, check=TRUE)
cModelTest <- compileNimble(modelTest)
## The default configuration should assign RW_multinomial to both X and Y.
mcmcTestConfig <- configureMCMC(cModelTest, print = TRUE)
samplers <- mcmcTestConfig$getSamplers()
test_that('assign RW_multinomial sampler', expect_equal(samplers[[1]]$name, 'RW_multinomial'))
test_that('assign RW_multinomial sampler', expect_equal(samplers[[2]]$name, 'RW_multinomial'))
mcmcTest <- buildMCMC(mcmcTestConfig)
cMcmcTest <- compileNimble(mcmcTest, project=modelTest)
## Optionally resample data
## Shrink N and regenerate X, Y, Z inside the compiled model before running.
cModelTest$N <- N <- 1E3
(cModelTest$pVecX <- sort(rdirch(1, rep(1, nGroups))))
(cModelTest$pVecY <- sort(rdirch(1, rep(1, nGroups))))
simulate(cModelTest, "X", includeData=TRUE); (X <- cModelTest$X)
simulate(cModelTest, "Y", includeData=TRUE); (Y <- cModelTest$Y)
simulate(cModelTest, "Z", includeData=TRUE); (Z <- cModelTest$Z)
niter <- 1E4
cMcmcTest$run(niter)
samples <- as.matrix(cMcmcTest$mvSamples)
## Regression check pinned to seed 0 and the exact simulation path above.
test_that('exact results of RW_multinomial sampler', expect_identical(as.numeric(samples[10000,]), c(8, 25, 31, 115, 821, 25,19, 84, 510, 362)))
##################################
## Trajectory Plots & Histogram ##
##################################
##iColsX <- 1:nGroups
##iColsY <- iColsX + nGroups
##plotHistograms <- N <= 1E4 ## FALSE ## TRUE
##par(mfrow=c(2,1+plotHistograms))
####
##for (ii in 1:2) {
## if (ii == 1) {
## yMaxX <- 0
## yMaxY <- 0
## }
## ##
## plot (samples[,1],ylim=range(samples[,iColsX]), typ="n")
## for (ii in iColsX)
## lines(samples[,ii], col=rainbow(10,alpha=0.75)[ii])
## ##
## if (plotHistograms) {
## hist(samples[,1], col=rainbow(2*nGroups, alpha=0.1)[1], breaks=min(samples):max(samples), prob=TRUE, ylim=c(0,yMaxX))
## for (ii in iColsX) {
## h <- hist(samples[,ii], prob=TRUE,
## col=rainbow(2*nGroups, alpha=0.1)[ii],
## border=rainbow(2*nGroups, alpha=0.1)[ii],
## breaks=min(samples[,iColsX]):max(samples[,iColsX]), add=TRUE)
## yMaxX <- max(yMaxX, h$density)
## }
## }
## ##
## plot (samples[,1],ylim=range(samples[,iColsY]), typ="n")
## for (ii in iColsY)
## lines(samples[,ii], col=rainbow(10,alpha=0.75)[ii])
## ##
## if (plotHistograms) {
## hist(samples[,1+nGroups], col=rainbow(2*nGroups, alpha=0.1)[1], breaks=min(samples[,iColsY]):max(samples[,iColsY]), prob=TRUE, ylim=c(0,yMaxY))
## for (ii in iColsY) {
## h <- hist(samples[,ii], prob=TRUE,
## col=rainbow(2*nGroups, alpha=0.1)[ii],
## border=rainbow(2*nGroups, alpha=0.1)[ii],
## breaks=min(samples[,iColsY]):max(samples[,iColsY]), add=TRUE)
## yMaxY <- max(yMaxY, h$density)
## }
## }
##}
## testing the RW_multinomial sampler on distribution of size 2
## A 2-cell multinomial is equivalent to a binomial; y is sampled with a
## slice sampler for comparison against x[1].
code <- nimbleCode({
prob[1] <- p
prob[2] <- 1-p
x[1:2] ~ dmultinom(size = N, prob = prob[1:2])
y ~ dbinom( size = N, prob = p)
})
set.seed(0)
N <- 100
p <- 0.3
x1 <- rbinom(1, size=N, prob=p)
x2 <- N - x1
inits <- list(N = N, p = p, x = c(x1, x2), y = x1)
Rmodel <- nimbleModel(code, constants=list(), data=list(), inits=inits)
Cmodel <- compileNimble(Rmodel)
conf <- configureMCMC(Rmodel)
conf$printSamplers()
## Replace the default samplers: RW_multinomial on x, slice on y.
conf$removeSamplers()
conf$printSamplers()
conf$addSampler(target = 'x', type = 'RW_multinomial')
conf$addSampler(target = 'y', type = 'slice')
conf$printSamplers()
Rmcmc <- buildMCMC(conf)
Cmcmc <- compileNimble(Rmcmc, project = Rmodel)
Cmcmc$run(100000)
samples <- as.matrix(Cmcmc$mvSamples)
## Columns are x[1], x[2], y; x[1]/N and y/N should both be close to p.
fracs <- apply(samples, 2, mean) / N
test_that('RW_multinomial sampler results within tolerance', expect_true(all(abs(as.numeric(fracs[c(1,3)]) - p) < 0.01)))
|
# Function to create rasters from VGPM .xyz files
# This code was adapted from Luke Miller: http://lukemiller.org/index.php/2011/12/loading-osus-vgpm-ocean-productivity-data-in-r/
#libraries
# set tmp directory
## Point the raster package at a scratch directory with enough disk space
## for its temporary tiles.
tmpdir='~/big/R_raster_tmp'
dir.create(tmpdir, showWarnings=F)
rasterOptions(tmpdir=tmpdir)
#create empty raster
## Global template: whole-globe extent at 2160 x 1080 cells. The extent `e`
## is reused inside vgpm.raster() when georeferencing the output raster.
e <- extent(c(-180,180,-90,90))
r <- raster(e,ncol=2160,nrow=1080)
#The options supplied to vgpm.raster() are as follows:
# file = file name (or substitute file.choose() to pick file interactively)
# w.lon = western longitude limit for region of interest (-180 to +180)
# e.lon = eastern longitude limit for region of interest (-180 to +180)
# n.lat = northern latitude limit for region of interest (+90 to -90)
# s.lat = southern latitude limit for region of interest (+90 to -90)
# log = TRUE - log10 transform productivity data before plotting
# color = specify color set to plot productivity data
# Function returns a matrix of productivity values for the specified region of
# interest with lat/lon listed in the row and column names.
# I (jamie) added to this function to create an output raster rather than matrix of values
## Default region of interest: the full globe.
w.lon = -180
e.lon = 180
n.lat = 90
s.lat = -90
## Read a VGPM .xyz productivity file, plot the region of interest, and write
## a GeoTIFF raster of the log10-transformed productivity values.
##
##   file         : path to the .xyz file; the token between the first two dots
##                  of the file name must be <yyyy><ddd> (year + day of year)
##   w.lon, e.lon : western / eastern longitude limits (-180 to +180)
##   n.lat, s.lat : northern / southern latitude limits (+90 to -90)
##   log          : TRUE to plot log10-transformed productivity
##   color        : color palette for plotting (fields::tim.colors by default)
##
## Side effects: draws two plots and writes a GeoTIFF under dir_M. Relies on
## the globals `e` (global extent) and `dir_M` defined elsewhere in the file.
## Returns the value of writeRaster() (the written raster object).
vgpm.raster = function(file, w.lon, e.lon, n.lat, s.lat, log = TRUE,
                       color = tim.colors(30)) {
  ## --- Extract the date from the file name -------------------------------
  fname = basename(file)
  print(fname)
  dots = gregexpr('\\.', fname)                       # locations of '.' in name
  yrday = substr(fname, dots[[1]][1] + 1, dots[[1]][2] - 1)
  yr = substr(yrday, 1, 4)                            # year
  doy = substr(yrday, 5, 7)                           # day of year
  day1 = as.Date(paste(yr, doy, sep = '-'), format = '%Y-%j')

  ## --- Read the lon/lat/value triplets; -9999 marks missing data ---------
  x = read.table(file, sep = ' ', skip = 1, na.strings = '-9999')
  names(x) = c('lon', 'lat', 'values')

  ## Infer the grid resolution from the row count.
  ## FIX: the original only warned on an unknown size and then failed later
  ## with "object 'f.size' not found"; fail fast with a clear message instead.
  if (nrow(x) == 2332800) {
    f.size = '1080'
  } else if (nrow(x) == 9331200) {
    f.size = '2160'
  } else {
    stop('Unknown file type: ', nrow(x), ' rows', call. = FALSE)
  }
  if (f.size == '1080') {
    lons = x$lon[1:2160]                              # set of longitude values
    lats = x$lat[seq(1, 2332800, by = 2160)]          # set of latitude values
    values = matrix(x$values, nrow = 1080, ncol = 2160, byrow = TRUE)
  } else {
    lons = x$lon[1:4320]
    lats = x$lat[seq(1, 9331200, by = 4320)]
    values = matrix(x$values, nrow = 2160, ncol = 4320, byrow = TRUE)
  }
  dimnames(values) = list(Latitude = lats, Longitude = lons)

  ## --- Subset to the region of interest ----------------------------------
  ## Longitudes run west -> east (-180..+180); latitudes north -> south in the
  ## input, so the first latlim entry is the northern edge.
  lonlim = c(w.lon, e.lon)                            # c(western, eastern)
  latlim = c(n.lat, s.lat)                            # c(northern, southern)
  goodlons = which(lons >= lonlim[1] & lons <= lonlim[2])
  goodlats = which(lats >= latlim[2] & lats <= latlim[1])
  ROI = values[goodlats[1]:goodlats[length(goodlats)],
               goodlons[1]:goodlons[length(goodlons)]]
  dimnames(ROI) = list(Latitude = lats[goodlats], Longitude = lons[goodlons])
  n.lats = as.numeric(rownames(ROI))
  n.lons = as.numeric(colnames(ROI))

  ## --- Regular grid for plotting (1/6 or 1/12 degree spacing) ------------
  step = if (f.size == '1080') 0.1666667 else 0.0833333
  lats2 = seq(n.lats[1], (n.lats[length(n.lats)] - step), by = -step)
  lons2 = seq(n.lons[1], (n.lons[length(n.lons)] + step), by = step)
  if (length(lats2) > length(n.lats)) lats2 = lats2[1:length(n.lats)]
  if (length(lons2) > length(n.lons)) lons2 = lons2[1:length(n.lons)]
  ROI.plot = t(ROI)                                   # lats into columns
  ROI.plot = ROI.plot[, rev(1:length(lats2))]         # southern lats first
  if (log) {
    image.plot(lons2, rev(lats2), log10(ROI.plot), useRaster = TRUE,
               col = color, xlab = 'Longitude', ylab = 'Latitude',
               main = paste('Net Primary Production', strftime(day1,'%B %Y')),
               legend.lab = expression(paste(log[10],'(mg C /', m^2,'/ day)')),
               legend.mar = 4.1)
  } else {
    image.plot(lons2, rev(lats2), ROI.plot, useRaster = TRUE,
               col = color, xlab = 'Longitude', ylab = 'Latitude',
               main = paste('Net Primary Production', strftime(day1,'%B %Y')),
               legend.lab = expression(paste('mg C /', m^2,'/ day')),
               legend.mar = 4.3)
  }

  ## --- Rasterize and write the GeoTIFF -----------------------------------
  ## FIX: renamed the local from `log` (which shadowed both the `log`
  ## argument and base::log) to `log_ROI`; dropped the stray no-op `ROI`
  ## expression that looked like a return but was not.
  log_ROI = log10(ROI)
  r = raster(log_ROI)
  extent(r) <- e    # global extent object defined at the top of the file
  projection(r) <- "+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"
  plot(r, col = color, xlab = 'Longitude', ylab = 'Latitude',
       main = paste('Net Primary Production', strftime(day1,'%B %Y')),
       legend.lab = expression(paste('mg C /', m^2,'/ day')),
       legend.mar = 4.3)
  ## FIX: the original passed sep='_' to paste0(), which has no sep argument;
  ## the '_' only appeared in the name because unmatched named args fall into
  ## `...`. Build the file name explicitly instead.
  out_name = paste0(
    file.path(dir_M, 'git-annex/globalprep/prs_fish/v2018/VGPM_primary_productivity/int/rasterized_rawdata/'),
    'npp_', strftime(day1,'%B %Y'))
  writeRaster(r, filename = out_name, format = 'GTiff', overwrite = TRUE)
} # end of vgpm.raster() function
| /globalprep/prs_fish/v2018/R/vgpm_func.R | no_license | OHI-Science/ohiprep_v2018 | R | false | false | 6,502 | r | # Function to create rasters from VGPM .xyz files
# This code was adapted from Luke Miller: http://lukemiller.org/index.php/2011/12/loading-osus-vgpm-ocean-productivity-data-in-r/
#libraries
# set tmp directory
## Point the raster package at a scratch directory with enough disk space
## for its temporary tiles.
tmpdir='~/big/R_raster_tmp'
dir.create(tmpdir, showWarnings=F)
rasterOptions(tmpdir=tmpdir)
#create empty raster
## Global template: whole-globe extent at 2160 x 1080 cells. The extent `e`
## is reused inside vgpm.raster() when georeferencing the output raster.
e <- extent(c(-180,180,-90,90))
r <- raster(e,ncol=2160,nrow=1080)
#The options supplied to vgpm.raster() are as follows:
# file = file name (or substitute file.choose() to pick file interactively)
# w.lon = western longitude limit for region of interest (-180 to +180)
# e.lon = eastern longitude limit for region of interest (-180 to +180)
# n.lat = northern latitude limit for region of interest (+90 to -90)
# s.lat = southern latitude limit for region of interest (+90 to -90)
# log = TRUE - log10 transform productivity data before plotting
# color = specify color set to plot productivity data
# Function returns a matrix of productivity values for the specified region of
# interest with lat/lon listed in the row and column names.
# I (jamie) added to this function to create an output raster rather than matrix of values
## Default region of interest: the full globe.
w.lon = -180
e.lon = 180
n.lat = 90
s.lat = -90
## Read a VGPM .xyz productivity file, plot the region of interest, and write
## a GeoTIFF raster of the log10-transformed productivity values.
##
##   file         : path to the .xyz file; the token between the first two dots
##                  of the file name must be <yyyy><ddd> (year + day of year)
##   w.lon, e.lon : western / eastern longitude limits (-180 to +180)
##   n.lat, s.lat : northern / southern latitude limits (+90 to -90)
##   log          : TRUE to plot log10-transformed productivity
##   color        : color palette for plotting (fields::tim.colors by default)
##
## Side effects: draws two plots and writes a GeoTIFF under dir_M. Relies on
## the globals `e` (global extent) and `dir_M` defined elsewhere in the file.
## Returns the value of writeRaster() (the written raster object).
vgpm.raster = function(file, w.lon, e.lon, n.lat, s.lat, log = TRUE,
                       color = tim.colors(30)) {
  ## --- Extract the date from the file name -------------------------------
  fname = basename(file)
  print(fname)
  dots = gregexpr('\\.', fname)                       # locations of '.' in name
  yrday = substr(fname, dots[[1]][1] + 1, dots[[1]][2] - 1)
  yr = substr(yrday, 1, 4)                            # year
  doy = substr(yrday, 5, 7)                           # day of year
  day1 = as.Date(paste(yr, doy, sep = '-'), format = '%Y-%j')

  ## --- Read the lon/lat/value triplets; -9999 marks missing data ---------
  x = read.table(file, sep = ' ', skip = 1, na.strings = '-9999')
  names(x) = c('lon', 'lat', 'values')

  ## Infer the grid resolution from the row count.
  ## FIX: the original only warned on an unknown size and then failed later
  ## with "object 'f.size' not found"; fail fast with a clear message instead.
  if (nrow(x) == 2332800) {
    f.size = '1080'
  } else if (nrow(x) == 9331200) {
    f.size = '2160'
  } else {
    stop('Unknown file type: ', nrow(x), ' rows', call. = FALSE)
  }
  if (f.size == '1080') {
    lons = x$lon[1:2160]                              # set of longitude values
    lats = x$lat[seq(1, 2332800, by = 2160)]          # set of latitude values
    values = matrix(x$values, nrow = 1080, ncol = 2160, byrow = TRUE)
  } else {
    lons = x$lon[1:4320]
    lats = x$lat[seq(1, 9331200, by = 4320)]
    values = matrix(x$values, nrow = 2160, ncol = 4320, byrow = TRUE)
  }
  dimnames(values) = list(Latitude = lats, Longitude = lons)

  ## --- Subset to the region of interest ----------------------------------
  ## Longitudes run west -> east (-180..+180); latitudes north -> south in the
  ## input, so the first latlim entry is the northern edge.
  lonlim = c(w.lon, e.lon)                            # c(western, eastern)
  latlim = c(n.lat, s.lat)                            # c(northern, southern)
  goodlons = which(lons >= lonlim[1] & lons <= lonlim[2])
  goodlats = which(lats >= latlim[2] & lats <= latlim[1])
  ROI = values[goodlats[1]:goodlats[length(goodlats)],
               goodlons[1]:goodlons[length(goodlons)]]
  dimnames(ROI) = list(Latitude = lats[goodlats], Longitude = lons[goodlons])
  n.lats = as.numeric(rownames(ROI))
  n.lons = as.numeric(colnames(ROI))

  ## --- Regular grid for plotting (1/6 or 1/12 degree spacing) ------------
  step = if (f.size == '1080') 0.1666667 else 0.0833333
  lats2 = seq(n.lats[1], (n.lats[length(n.lats)] - step), by = -step)
  lons2 = seq(n.lons[1], (n.lons[length(n.lons)] + step), by = step)
  if (length(lats2) > length(n.lats)) lats2 = lats2[1:length(n.lats)]
  if (length(lons2) > length(n.lons)) lons2 = lons2[1:length(n.lons)]
  ROI.plot = t(ROI)                                   # lats into columns
  ROI.plot = ROI.plot[, rev(1:length(lats2))]         # southern lats first
  if (log) {
    image.plot(lons2, rev(lats2), log10(ROI.plot), useRaster = TRUE,
               col = color, xlab = 'Longitude', ylab = 'Latitude',
               main = paste('Net Primary Production', strftime(day1,'%B %Y')),
               legend.lab = expression(paste(log[10],'(mg C /', m^2,'/ day)')),
               legend.mar = 4.1)
  } else {
    image.plot(lons2, rev(lats2), ROI.plot, useRaster = TRUE,
               col = color, xlab = 'Longitude', ylab = 'Latitude',
               main = paste('Net Primary Production', strftime(day1,'%B %Y')),
               legend.lab = expression(paste('mg C /', m^2,'/ day')),
               legend.mar = 4.3)
  }

  ## --- Rasterize and write the GeoTIFF -----------------------------------
  ## FIX: renamed the local from `log` (which shadowed both the `log`
  ## argument and base::log) to `log_ROI`; dropped the stray no-op `ROI`
  ## expression that looked like a return but was not.
  log_ROI = log10(ROI)
  r = raster(log_ROI)
  extent(r) <- e    # global extent object defined at the top of the file
  projection(r) <- "+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"
  plot(r, col = color, xlab = 'Longitude', ylab = 'Latitude',
       main = paste('Net Primary Production', strftime(day1,'%B %Y')),
       legend.lab = expression(paste('mg C /', m^2,'/ day')),
       legend.mar = 4.3)
  ## FIX: the original passed sep='_' to paste0(), which has no sep argument;
  ## the '_' only appeared in the name because unmatched named args fall into
  ## `...`. Build the file name explicitly instead.
  out_name = paste0(
    file.path(dir_M, 'git-annex/globalprep/prs_fish/v2018/VGPM_primary_productivity/int/rasterized_rawdata/'),
    'npp_', strftime(day1,'%B %Y'))
  writeRaster(r, filename = out_name, format = 'GTiff', overwrite = TRUE)
} # end of vgpm.raster() function
|
# TODO: Add comment
#
# Author: xzhou
###############################################################################
source("rcalc.R")
#change 1 and 2 according to the 1 as major and 2 as minor
## Recode each SNP (a pair of haplotype columns) so that 1 is the major
## allele and 2 the minor allele.
##
## genoData : numeric matrix of 1/2 allele codes, individuals in rows;
##            columns come in pairs (the two haplotypes of one SNP).
## verbose  : if TRUE, report the detected major allele for each SNP pair.
## ...      : unused; kept for interface compatibility.
##
## Returns an unnamed matrix of the same dimensions with recoded alleles
## (the stray snp1/snp2 column names produced by the original cbind()
## construction are dropped). Assumes no NA entries (the original loop
## would have errored on NA anyway).
majorize <- function(genoData, verbose = FALSE, ...)
{
  n <- ncol(genoData)
  mGenoData <- matrix(NA_real_, nrow(genoData), n)
  for (i in seq(1, n, by = 2)) {
    snp1 <- genoData[, i]
    snp2 <- genoData[, i + 1]
    combined <- c(snp1, snp2)
    ## Ties (equal counts of 1s and 2s) keep 1 as the major allele,
    ## matching the original c1 >= c2 comparison.
    major.A <- if (sum(combined == 1) >= sum(combined == 2)) 1 else 2
    ## Vectorized recode (was an elementwise double loop):
    ## major allele -> 1, anything else -> 2.
    mGenoData[, i] <- ifelse(snp1 == major.A, 1, 2)
    mGenoData[, i + 1] <- ifelse(snp2 == major.A, 1, 2)
    if (verbose) {
      cat("columns", i, "-", i + 1, ": major allele =", major.A, "\n")
    }
  }
  mGenoData
}
#calculate real R value using haplotype to see if any imporovements
## Compute pairwise LD correlation (r) and joint haplotype frequency (p00)
## directly from phased haplotype data.
##
## genotype : matrix of 1/2 allele codes, individuals in rows; columns come
##            in pairs (the two haplotypes of each SNP). Recoded via
##            majorize() so that 1 is always the major allele.
## verbose  : if TRUE, print the full r matrix before the diagonal is set NA.
##
## Returns a list:
##   r   - nSnps x nSnps matrix of signed LD correlations, diagonal NA
##   p00 - matrix of P(A = 1, B = 1) joint haplotype frequencies, diagonal NA
calculateRealR <- function(genotype, verbose = FALSE)
{
  genotype <- majorize(genotype)
  m <- nrow(genotype)
  n <- ncol(genotype)
  nSnps <- round(n / 2)

  ## Interleave each SNP's two haplotype columns into rows: row 2k-1 holds
  ## individual k's first haplotype, row 2k the second (was a double loop).
  combinedGenotyp <- matrix(0, 2 * m, nSnps)
  combinedGenotyp[seq(1, 2 * m, by = 2), ] <- genotype[, seq(1, n, by = 2)]
  combinedGenotyp[seq(2, 2 * m, by = 2), ] <- genotype[, seq(2, n, by = 2)]

  ## FIX: the original warned and then called a bare stop() inside the count
  ## loop; validate once, up front, with a proper error message.
  if (any(!(combinedGenotyp %in% c(1, 2)))) {
    stop("error reading genotype")
  }

  r <- matrix(-2, nSnps, nSnps)
  diag(r) <- 0
  p00 <- r
  ## FIX: seq_len() instead of seq(1, nSnps - 1), which would wrongly
  ## iterate over c(1, 0) when there is a single SNP.
  for (i in seq_len(nSnps - 1)) {
    for (j in (i + 1):nSnps) {
      snp1 <- combinedGenotyp[, i]
      snp2 <- combinedGenotyp[, j]
      ## Vectorized 2x2 haplotype table (was an elementwise loop).
      c00 <- sum(snp1 == 1 & snp2 == 1)
      c01 <- sum(snp1 == 1 & snp2 == 2)
      c10 <- sum(snp1 == 2 & snp2 == 1)
      c11 <- sum(snp1 == 2 & snp2 == 2)
      p00[i, j] <- p00[j, i] <- c00 / (c00 + c01 + c10 + c11)
      D <- c00 * c11 - c01 * c10 + 0.0
      L <- (c00 + c01) * (c10 + c11) * (c00 + c10) * (c01 + c11)
      ## A zero marginal makes r undefined; report 0 as the original did.
      r[j, i] <- r[i, j] <- if (L == 0) 0 else D / sqrt(L)
    }
  }
  if (verbose) {
    print(r)
  }
  diag(r) <- NA
  diag(p00) <- NA
  list("r" = r, "p00" = p00)
}
## Compare an estimated R matrix against the "real" R values stored on disk.
## NOTE(review): this function looks unfinished - it ignores both arguments,
## overwrites `realR` with a table read from a hard-coded path, and returns
## that table (invisibly). Only the undefined-variable bug is fixed here;
## confirm the intended comparison logic with the author - TODO.
diffR <- function(estR, realR)
{
  fileName <- "/home/xzhou/research_linux/gnome/workspace/GenotypeLearnning/data/sim_4000seq/80SNP_CEU_sim_4000seq.rValue"
  ## FIX: was read.table(filename) - `filename` (lower-case n) was undefined.
  realR <- read.table(fileName)
}
#locate
## Scan a pairwise-correlation matrix for LD blocks: contiguous runs of SNPs
## whose adjacent r^2 stays above 0.6*avgThreshold, accepted as a block when
## the mean r^2 over all pairs inside the run reaches avgThreshold.
## Returns a matrix with one row per block: c(start, start+1, xLim, yLim).
## NOTE(review): minBlockSize is accepted but never used - TODO confirm intent.
findGenotypeBlocks <- function(rValue, avgThreshold = 0.3, minBlockSize = 1)
{
threshold = 0.6*avgThreshold
rsValue <- rValue*rValue
#print(rsValue)
m <- nrow(rsValue)
n <- ncol(rsValue)
GenoBlocks <- matrix(0, 0, 4)  # zero-row accumulator; rbind() adds one row per block
i = 1
while(i < m)
{
#cat(i, "\n")
if(rsValue[i,i+1] >= threshold)
{
x = i #row index
y = i + 1 #col index
#search horizontally
## Extend the candidate block rightwards while r^2 in row x stays high.
while(rsValue[x,y] >= threshold)
{
y = y + 1
if(y >n)
{
break
}
}
yLim = y - 1
xLim = y - 2
#calculate the
## Mean r^2 over all upper-triangle pairs inside [x, xLim] x [., yLim].
total = 0.0
count = 0
for(x1 in x:xLim)
{
for(y1 in (x1+1):yLim)
{
count = count + 1
total = total + rsValue[x1, y1]
}
}
if((total/count) >= avgThreshold)
{
GenoBlocks = rbind(GenoBlocks, c(i, i+1, xLim, yLim))
}
## Resume scanning just past the candidate block.
i = xLim + 1
cat("i = ", i, "xLim = ", xLim, "yLim = ", yLim, "\n")
}
else
{
i = i + 1
}
}
#print(GenoBlocks)
GenoBlocks
}
#it is difficult to compare the distance between to genotype since the
#individual is not aligned, we use a greedy algorithm to determine the distance
## Greedy maximum-weight matching between the rows of two equally sized
## genotype matrices whose individuals are not aligned. Each pair of rows is
## scored by the number of positions where the entries agree; rows are then
## matched greedily (best remaining pair first) and the matched scores are
## summed. Returns that total similarity.
maxWeightSimilarity <- function(g1, g2, ...)
{
  nRow <- nrow(g1)
  nCol <- ncol(g1)
  if (nrow(g2) != nRow || ncol(g2) != nCol) {
    warning("inconsistent matrix")
    stop()
  }
  ## Pairwise score matrix: entry [i, j] counts positions where row i of g1
  ## equals row j of g2.
  scores <- matrix(0.0, nRow, nRow)
  for (i in seq_len(nRow)) {
    for (j in seq_len(nRow)) {
      scores[i, j] <- length(which(g1[i, ] - g2[j, ] == 0))
    }
  }
  ## Greedy assignment: repeatedly take the best remaining pair, then retire
  ## its row and column by overwriting them with -1.
  total <- 0.0
  remaining <- scores
  for (step in seq_len(nRow)) {
    best <- which.max(remaining)                     # column-major linear index
    bestCol <- ceiling(best / nRow)
    bestRow <- best - nRow * (bestCol - 1)
    total <- total + remaining[best]
    remaining[, bestCol] <- -1
    remaining[bestRow, ] <- -1
  }
  total
}
# Cross-compare the 10 saved final populations ("finalPop1".."finalPop10")
# by greedy row-matching similarity and print the symmetric score matrix.
#
# NOTE(review): load() is expected to define `x` in this scope -- the files
# must have been written by save(x, ...) as done in sa(); verify before reuse.
checkSimlarity <- function()
{
	crossCompare <- matrix(0.0, 10, 10)
	popList <- list()	# NOTE(review): never populated; apparently vestigial
	for(i in 1:9)
	{
		fileName = paste("finalPop", i, sep = "")
		load(fileName)	# defines x
		y = x
		for(j in (i+1):10)
		{
			fileName = paste("finalPop", j, sep = "")
			load(fileName)	# redefines x
			z = x
			sim = maxWeightSimilarity(y, z)
			crossCompare[i,j] = sim
			crossCompare[j,i] = sim
			cat("<", i, j, "> = ", sim, "\n")
		}
	}
	print(crossCompare)
}
# Re-save each binary population file "finalPop1".."finalPop10" as a plain
# text table "finalPopN.txt" (no row or column names).
convert <- function()
{
	for(i in 1:10)
	{
		fileName = paste("finalPop", i, sep="")
		load(fileName)	# defines x (written by save(x, ...) in sa())
		write.table(x, file = paste(fileName, ".txt", sep=""), col.names=FALSE, row.names=FALSE)
	}
}
# Convert a two-columns-per-SNP allele matrix into a data.frame of
# genetics::genotype columns ("a/b" strings), ready for LD computation.
#
# @param genoData matrix/data.frame with 2*k allele columns.
# @return data.frame with k genotype() columns.
formateGenotype <- function(genoData)
{
	nCols <- ncol(genoData)
	# paste each allele pair (columns 2i-1, 2i) into one "a/b" string column
	pasted <- NULL
	for (left in seq(1, nCols, by = 2))
	{
		pasted <- cbind(pasted, cbind(paste(genoData[, left], genoData[, left + 1], sep = "/")))
	}
	asFrame <- data.frame(pasted)
	# turn every pasted string column into a genetics::genotype object
	for (k in seq_len(ncol(pasted)))
	{
		asFrame[, k] <- genotype(pasted[, k])
	}
	asFrame
}
#calculate the R values
#
# Compute the pairwise r (LD) matrix for a genotype sample: the allele-pair
# columns are converted to genetics::genotype columns, then all pairwise r
# values are computed via calcAllR().
#
# @param genoData matrix/data.frame with 2*k allele columns.
# @return the r-value matrix component of calcAllR()'s result.
calculateRValues <- function(genoData)
{
	# REFACTOR: the original inlined a verbatim copy of formateGenotype();
	# delegate to the shared helper so the formatting logic lives in one place.
	plotGenoData <- formateGenotype(genoData)
	retValue <- calcAllR(plotGenoData)
	retValue$r
}
#calculate single allele frequency
#
# Count major/minor allele occurrences for every SNP in a genotype matrix.
# Each SNP occupies two adjacent columns (2i-1, 2i); both columns are pooled
# and the larger count is reported as the major allele.
#
# @param genotype matrix with an even number of columns (2 per SNP).
# @return 2 x nSnps matrix; row 1 = major counts, row 2 = minor counts.
calculateSingleAlleleFrequence <- function(genotype, ...)
{
	n <- ncol(genotype)
	if (n %% 2 != 0)
	{
		stop("genotype should have even number of alleles")
	}
	nSnps <- n %/% 2
	freq <- matrix(-1, nrow = 2, ncol = nSnps)
	for (i in seq_len(nSnps))
	{
		snp <- c(genotype[, 2 * i - 1], genotype[, 2 * i])
		counts <- summary(as.factor(snp[!is.na(snp)]))
		if (length(counts) == 1)
		{
			# monomorphic SNP: everything is the major allele
			freq[1, i] <- counts
			freq[2, i] <- 0
		}
		else if (length(counts) == 2)
		{
			freq[1, i] <- max(counts)
			freq[2, i] <- min(counts)
		}
		else
		{
			# BUG FIX: the original error message was truncated mid-sentence
			stop("each SNP should have at most two distinct alleles")
		}
	}
	freq
}
# Fraction of off-diagonal r values whose sign matches between a sample and
# the target r matrix (zero counts as matching only zero).
#
# @param sampleRValues square r matrix of the candidate sample.
# @param targetRValues square r matrix of the target, same dimensions.
# @param verbose kept for interface compatibility (unused).
# @return proportion of upper-triangle entries with agreeing sign.
singRecoverate <- function(sampleRValues, targetRValues, verbose = F)
{
	m <- nrow(sampleRValues)
	n <- ncol(sampleRValues)
	# BUG FIX: the original mixed nrow(sample) with ncol(target) and never
	# verified that the two matrices are conformable square matrices.
	if (m != n || nrow(targetRValues) != m || ncol(targetRValues) != n)
	{
		stop("sample and target r matrices must be square and of equal size")
	}
	totalSigns <- 0.0
	correctSigns <- 0.0
	for (i in 1:(m - 1))
	{
		for (j in (i + 1):n)
		{
			totalSigns <- totalSigns + 1
			if (targetRValues[i, j] != 0 && sampleRValues[i, j] != 0)
			{
				# both non-zero: signs agree iff the product is positive
				if (targetRValues[i, j] * sampleRValues[i, j] > 0)
				{
					correctSigns <- correctSigns + 1
				}
			}
			else if (targetRValues[i, j] == 0 && sampleRValues[i, j] == 0)
			{
				correctSigns <- correctSigns + 1
			}
		}
	}
	correctSigns / totalSigns
}
#' Evaluate a sample against the target genotype statistics
#'
#' Combines (a) the normalized difference of the squared r matrices and
#' (b) the normalized difference of the single-allele frequencies into a
#' single weighted quality score (lower is better), and reports the sign
#' recovery rate over the upper triangle of the r matrices.
#'
#' @param sample list with $RValues (square matrix) and $singleAlleleFreq
#' @param target list with the same two components for the target data
#' @param rWeight,freqWeight weights of the two normalized differences
#' @param verbose if TRUE, print both r matrices
#' @return list with $quality, $normalizedRdiff, $normalizedFeqDiff,
#'   $recoverRate and $diff (alias of $quality; see bug-fix note below)
evaluate <- function(sample = NULL, target = NULL, rWeight = 0.7, freqWeight = 0.3, verbose = F)
{
	sampleRValues <- sample$RValues
	sampleSingleAlleleFreq <- sample$singleAlleleFreq
	targetRValues <- target$RValues
	targetSingleAlleleFreq <- target$singleAlleleFreq
	if (verbose)
	{
		cat("sample r values \n")
		print(sampleRValues)
		cat("real r values \n")
		print(targetRValues)
	}
	m <- nrow(sampleRValues)
	n <- ncol(sampleRValues)
	if (m != n)
	{
		warning("different matrix")
		stop()
	}
	# sign recovery rate over the upper triangle
	totalSigns <- 0.0
	correctSigns <- 0.0
	for (i in 1:(m - 1))
	{
		for (j in (i + 1):n)
		{
			totalSigns <- totalSigns + 1
			if (targetRValues[i, j] != 0 && sampleRValues[i, j] != 0)
			{
				if (targetRValues[i, j] * sampleRValues[i, j] > 0)
				{
					correctSigns <- correctSigns + 1
				}
			}
			else if (targetRValues[i, j] == 0 && sampleRValues[i, j] == 0)
			{
				correctSigns <- correctSigns + 1
			}
		}
	}
	signRecoverRate <- correctSigns / totalSigns
	# normalized difference of squared r values and of allele frequencies
	rdiff <- sum(abs(targetRValues * targetRValues - sampleRValues * sampleRValues))
	normalizedRDiff <- rdiff / sum(targetRValues * targetRValues)
	freqDiff <- sum(abs(sampleSingleAlleleFreq - targetSingleAlleleFreq))
	normalizedFreqDiff <- freqDiff * 1.0 / sum(targetSingleAlleleFreq)
	quality <- rWeight * normalizedRDiff + freqWeight * normalizedFreqDiff
	# BUG FIX: every caller (sa, hillClimb, checkFinalPop, ...) reads
	# result$diff, which the original list never defined (it was NULL).
	# Expose the quality score under that name as a backward-compatible alias.
	ret <- list("quality" = quality, "normalizedRdiff" = normalizedRDiff,
		"normalizedFeqDiff" = normalizedFreqDiff, "recoverRate" = signRecoverRate,
		"diff" = quality)
}
# Confirm the final results: re-evaluate every saved population against the
# stored target r values and print its diff and sign-recovery rate.
#
# NOTE(review): evaluate() expects list arguments with $RValues /
# $singleAlleleFreq, but raw matrices are passed here -- see the "TODO change
# code to accomodate the new evluate function" note above sa(); confirm.
checkFinalPop <- function()
{
	load("rValue")	#load var.targetRValue
	load("targetGenoData")	#load var.targetGenoData
	for(i in 1:10)
	{
		fileName <- paste("finalPop", i, sep = "")
		load(fileName)	#load x
		sampleR <- calculateRValues(x)
		result <- evaluate(sampleR, var.targetRValue)
		cat(i, "totalDiff = ", result$diff, "signRecoverRate", result$recoverRate, "\n")
	}
}
# read the file from ped file and convert it to standard genotype matrix
#
# Reads a whitespace-separated 1/2-encoded genotype table and trims it to the
# requested number of individuals (rows) and SNPs (two columns per SNP).
#
# @param fileName path of the 1/2-encoded genotype file.
# @param nIndividuals rows to keep; -1 means all rows.
# @param nSnps SNPs to keep; -1 means all (ncol/2).
# @return data.frame of nIndividuals x (2*nSnps) allele codes.
readGenotypeFromFastaFile <- function(fileName = "../data/sim_4000seq/80SNP_CEU_sim_4000seq.12encode", nIndividuals = -1, nSnps = -1)
{
	genoData <- read.table(fileName,
			header = FALSE)
	m <- nrow(genoData)
	n <- ncol(genoData)
	if(nIndividuals == -1)
		nIndividuals = m
	if(nSnps == -1)
		nSnps = n/2
	if(m < nIndividuals || n < 2*nSnps)
	{
		cat("not enough snps")
		stop()
	}
	#cut to the requested window
	genoData <- genoData[1:nIndividuals, 1:(2*nSnps)]
	genoData
}
# Draw a random genotype sample: a data.frame of nIndividual rows and
# 2*nSnps allele columns, each entry 1 or 2 with equal probability.
#
# @param nIndividual number of individuals (rows).
# @param nSnps number of SNPs (each contributes two allele columns).
# @return data.frame of random alleles coded 1/2.
generateRandomSample <- function(nIndividual, nSnps)
{
	nAlleles <- nIndividual * 2 * nSnps
	draws <- rbinom(nAlleles, 1, 0.5) + 1
	as.data.frame(matrix(draws, nrow = nIndividual, ncol = 2 * nSnps))
}
#mutate a single point
#
# Flip one uniformly chosen allele of the genotype matrix (1 <-> 2).
#
# @param genoData genotype matrix/data.frame of 1/2 codes.
# @return copy of genoData with exactly one cell toggled.
singlePointMutate <- function(genoData, ...)
{
	m <- nrow(genoData)
	n <- ncol(genoData)
	# BUG FIX: round(runif(1, 1, m)) picks boundary rows/columns with only
	# half the probability of interior ones; sample.int() is uniform.
	i <- sample.int(m, 1)
	j <- sample.int(n, 1)
	if (genoData[i, j] == 1)
		genoData[i, j] <- 2
	else if (genoData[i, j] == 2)
		genoData[i, j] <- 1
	genoData
}
# Bubble-sort the rows of a matrix into ascending lexicographic order.
#
# @param aGenotype matrix whose rows are compared element by element.
# @return the matrix with rows sorted lexicographically.
sortMatrixByRow <- function(aGenotype, ...)
{
	m <- nrow(aGenotype)
	# BUG FIX: the original ran its loops even for m < 2 (1:(m-1) counts
	# down to 0 and indexes row 0); nothing needs sorting in that case.
	if (m < 2)
		return(aGenotype)
	# lexicographic "greater than" on two row vectors
	more <- function(a, b)
	{
		len <- length(a)
		for (i in 1:len)
		{
			if (a[i] > b[i])
				return(TRUE)
			else if (a[i] < b[i])
				return(FALSE)
		}
		return(FALSE)	# rows are equal
	}
	for (i in 1:(m - 1))
	{
		# BUG FIX: `swapped` was initialised once outside the outer loop, so
		# the early-exit never fired after the first swap; reset it per pass.
		swapped <- FALSE
		for (j in 1:(m - i))
		{
			if (more(aGenotype[j, ], aGenotype[j + 1, ]))
			{
				temp <- aGenotype[j, ]
				aGenotype[j, ] <- aGenotype[j + 1, ]
				aGenotype[j + 1, ] <- temp
				swapped <- TRUE
			}
		}
		if (!swapped)
			break
	}
	aGenotype
}
# Element-wise similarity rate between two genotype samples.
#
# Both genotypes are recoded so 1 = major allele, their rows are sorted
# lexicographically (individuals are unordered), and the fraction of equal
# cells is returned.
#
# @param targetGenotype,sampleGenotype genotype matrices of equal size.
# @return fraction of matching cells in [0, 1].
similarity <- function(targetGenotype, sampleGenotype)
{
	# recode both samples to major/minor form so they are comparable
	targetGenotype <- majorize(targetGenotype)
	sampleGenotype <- majorize(sampleGenotype)
	mt <- nrow(targetGenotype)
	nt <- ncol(targetGenotype)	# BUG FIX: was taken from sampleGenotype
	ms <- nrow(sampleGenotype)
	ns <- ncol(sampleGenotype)
	# BUG FIX: the original compared `ns != ns` (always FALSE), so unequal
	# column counts slipped through; compare target vs sample instead.
	if (mt != ms || nt != ns)
	{
		warning("incompatible geno type")
		stop()
	}
	# sort rows so individuals line up before the cell-wise comparison
	targetGenotype <- sortMatrixByRow(targetGenotype)
	sampleGenotype <- sortMatrixByRow(sampleGenotype)
	correct <- 0.0
	for (i in 1:mt)
	{
		for (j in 1:nt)
		{
			if (targetGenotype[i, j] == sampleGenotype[i, j])
			{
				correct <- correct + 1
			}
		}
	}
	correct / (mt * nt)
}
#TODO change code to accomodate the new evluate function
#simulated annealing algorithm
#
# Repeatedly runs simulated annealing to find a genotype sample matching the
# target r-value statistics; each run's final population is saved to disk.
#
# NOTE(review): this function appears broken as written:
#   - `saConf.totalIt`, `saConf.initT`, `var.nIndividuals`, etc. are plain
#     variable names (dots are NOT field access in R); they resolve only if
#     identically named globals exist (shcMain assigns them locally only).
#   - `currentRValues` is used right below its commented-out initialisation,
#     so the first evaluate() call reads an undefined name.
#   - evaluate() results are read via $diff, which the returned list does
#     not define as written.
sa <- function(var, saConf, ...)
{
	finalPopList = list()
	for(ti in 1:saConf.totalIt)
	{
		#sink(file="sa.log")
		cat("start simulated annealing algorithm\n")
		T <- saConf.initT	#the initial temperature
		t <- 1	#iteration counter
		i <- 1	#the iteration
		x <- generateRandomSample(var.nIndividuals, var.nSnps)
		cat(ncol(x), nrow(x), "\n")
		#currentRValues <- calculateRValues(x)
		currentQuality <- evaluate(currentRValues, var.targetRValue)
		while(T >= saConf.Tmin && currentQuality$diff > 0)
		{
			for(i in 1:saConf.k)
			{
				newx <- singlePointMutate(x)
				newRValues <- calculateRValues(newx)
				newQuality <- evaluate(newRValues, var.targetRValue)
				if(newQuality$diff < currentQuality$diff)
				{
					# strict improvement: always accept
					x <- newx
					currentRValue <- newRValues
					currentQuality <- newQuality
					cat(t, "\t-\t", "diff = ", currentQuality$diff, "\tsignRecoverRate = ", currentQuality$recoverRate, "\n")
					save(x, file = "currentPop0diff")
					if(currentQuality$diff == 0)
						break
				}
				else
				{
					# worse move: accept with Boltzmann probability exp(-delta/T)
					delta <- newQuality$diff - currentQuality$diff
					p <- exp(-delta/T)
					randomV <- runif(1, 0, 1)
					if(randomV < p)
					{
						x <- newx
						currentRValue <- newRValues
						currentQuality <- newQuality
						cat(t, "\t+\t", "diff = ", currentQuality$diff, "\tsignRecoverRate = ", currentQuality$recoverRate, "\t", p, "\n")
						save(x, file = "currentPop")
					}
					else
					{
						cat(t, "\tX\t Rej\n")
					}
				}
				t = t + 1
			}
			T <- saConf.beta*T	#cool down (geometric schedule)
			cat("cool down", T, "\n")
			if(currentQuality$diff < saConf.minDiff)
				break
		}
		fileName = paste("finalPop", ti, sep="")
		#finalPopList[i] = x
		save(x, file = fileName)
	}
	save(finalPopList, file = "finalPopList")
}
#stacastic algorithm
#
# Stochastic hill climbing: accept a mutated sample with probability
# 1/(1+exp(diff/T)) so slightly worse moves are occasionally taken.
#
# NOTE(review): `var.max_it`, `var.T`, `var.nIndividuals`, `var.nSnps` and
# `var.targetRValue` are plain global names (dots are not field access in R);
# they must exist in the calling environment -- TODO confirm. Also `$diff`
# is read from evaluate()'s result, which does not define it as written.
stocasticHillClim <- function(var,...)
{
	cat("start stocastic hill climbing with max_it = ", var.max_it, "T = ", var.T, "\n")
	x <- generateRandomSample(var.nIndividuals, var.nSnps)
	currentRValues <- calculateRValues(x)	#get the R values
	currentQuality <- evaluate(currentRValues, var.targetRValue)
	t <- 0
	while(t < var.max_it && currentQuality$diff != 0)
	{
		t <- t + 1
		newx <- singlePointMutate(x)
		newRValues <- calculateRValues(newx)
		newQuality <- evaluate(newRValues, var.targetRValue)
		diff <- newQuality$diff - currentQuality$diff
		#acceptance probability: ~1 for improvements, small for regressions
		p <- 1/(1+exp(diff/var.T))
		aRandomNumber = runif(1, 0, 1)
		if(aRandomNumber < p){
			x <- newx
			currentQuality <- newQuality
			currentRValues <- newRValues
			cat(t, "RDiff=", currentQuality$diff,"\t signRecoverate", currentQuality$recoverRate, "\n")
		}
		else
		{
			cat(t, "\n")
		}
	}
}
# Plain hill climbing: accept a mutated sample only when it strictly improves
# the evaluation diff; stop at max_it, diff == 0, or full sign recovery.
#
# NOTE(review): `var.targetGenoData` is a plain global name, and `$diff` is
# read from evaluate()'s result, which does not define it as written.
#
# @param var unused placeholder (configuration was meant to live here).
# @param targetRValues target r matrix handed to evaluate().
# @param max_it iteration budget.
# @param nIndividuals,nSnps dimensions of the random starting sample.
hillClimb <- function(var, targetRValues, max_it, nIndividuals, nSnps)
{
	x <- generateRandomSample(nIndividuals, nSnps)
	currentRValues = calculateRValues(x)
	currentQuality <- evaluate(currentRValues, targetRValues)
	cat("0 RDiff =", currentQuality$diff, "\t signRecoverate =", currentQuality$recoverRate,"\n")
	t <- 0
	while (t < max_it && currentQuality$diff != 0 && currentQuality$recoverRate != 1)
	{
		t <- t + 1
		newx <- singlePointMutate(x)
		newRValues <- calculateRValues(newx)
		newQuality <- evaluate(newRValues, targetRValues)
		if (newQuality$diff < currentQuality$diff)
		{
			x <- newx
			currentQuality <- newQuality
			currentRValues <- newRValues
			sim <- similarity(var.targetGenoData, newx)
			cat(t, " RDiff=", currentQuality$diff,"\t signRecoverate", currentQuality$recoverRate, "\t similarity=", sim, "\n")
		}
		else
		{
			cat(t, "\n")
		}
	}
}
# Entry point: read the target genotype window from the encoded file, save it
# to "targetGenoData", and set up the search configuration.
#
# NOTE(review): the `var.*` / `saConf.*` assignments below are LOCAL plain
# variables (dots are not field access in R); the search functions that read
# identically named globals will not see them -- TODO confirm the intended
# configuration mechanism before re-enabling realRSA(). Also note
# `targetGenotypeFileName` is accepted but never passed to the reader.
#
# @param targetGenotypeFileName 1/2-encoded genotype file to load.
# @param max_it iteration budget for hill climbing.
# @param nIndividuals,nSnps size of the target window to extract.
shcMain <- function(targetGenotypeFileName = "../data/sim_4000seq/80SNP_CEU_sim_4000seq.12encode", max_it = 10000000, nIndividuals = 100, nSnps = 10)
{
	#-------------------------VARIABLES--------------------------------
	#var is the global configuration variable
	var.targetGenoData <- NULL
	var.max_it <- NULL
	var.nIndividuals <- NULL
	var.nSnps <- NULL
	var.currentGenoMatrxi <- NULL
	#-------------------------START FROM HERE--------------------------
	#init variables
	var.max_it <- max_it
	var.nIndividuals <- nIndividuals
	var.nSnps <- nSnps
	var.T <- 0.1	#for stochastic hill climbing
	saConf.initT <- 0.01
	saConf.Tmin <- 1e-7	#minimal temperature
	saConf.beta <- 0.8	#exponential temperature decay factor
	saConf.k <- 100	#number of iterations per temperature level
	saConf.totalIt = 10	#repeat the sa algorithm to look for
	#multiple local optimal values
	saConf.minDiff = 0.001
	cat("reading genodata from fasta file ...")
	var.targetGenoData <- readGenotypeFromFastaFile(nIndividuals = var.nIndividuals, nSnps = var.nSnps)
	save(var.targetGenoData, file = "targetGenoData")
	print(var.targetGenoData)
	cat("complete \n")
	#realRSA(var, saConf)
}
| /rshc/SHC.R | no_license | xzhou/gnome | R | false | false | 18,145 | r | # TODO: Add comment
#
# Author: xzhou
###############################################################################
source("rcalc.R")
#change 1 and 2 according to the 1 as major and 2 as minor
#
# Recode a genotype matrix so that, per SNP, 1 is the major allele and 2 the
# minor allele (ties favour allele 1). Each SNP occupies two adjacent
# columns (2i-1, 2i); both are pooled to decide which code is more frequent.
#
# @param genoData matrix/data.frame with 2*k allele columns coded 1/2.
# @param verbose kept for interface compatibility (unused).
# @return numeric matrix of the same shape, recoded to major=1 / minor=2.
majorize <- function(genoData, verbose = F, ...)
{
	mGenoData <- NULL
	n <- ncol(genoData)
	for (i in seq(1, n, by = 2))
	{
		snp1 <- genoData[, i]
		snp2 <- genoData[, i + 1]
		combined <- c(snp1, snp2)
		# allele coded "1" is major when at least as frequent as "2"
		major.A <- if (length(which(combined == 1)) >= length(which(combined == 2))) 1 else 2
		# vectorised recoding (the original looped over every individual);
		# the dead `major.a` variable and commented-out factor code are gone
		snp1 <- ifelse(snp1 == major.A, 1, 2)
		snp2 <- ifelse(snp2 == major.A, 1, 2)
		mGenoData <- cbind(mGenoData, snp1, snp2)
	}
	# BUG FIX: the original `names(mGenoData) <- NULL` has no effect on a
	# matrix; clear the "snp1"/"snp2" labels added by cbind via dimnames.
	dimnames(mGenoData) <- NULL
	mGenoData
}
#calculate real R value using haplotype to see if any improvements
#
# Computes pairwise r (LD) directly from the phased haplotypes: the two
# allele columns of each SNP are stacked into one haplotype column, then
# r = D / sqrt(p1*q1*p2*q2) is evaluated from the 2x2 haplotype counts of
# every SNP pair.
#
# @param genotype matrix with 2 allele columns per SNP, codes 1/2.
# @param verbose if TRUE, print the r matrix.
# @return list with $r (r values, NA diagonal) and $p00 ("11" haplotype
#   frequencies, NA diagonal).
calculateRealR <- function(genotype, verbose = F)
{
	#recode so that 1 = major allele
	genotype <- majorize(genotype)
	m <- nrow(genotype)
	n <- ncol(genotype)
	nIndividuals <- m
	nSnps <- round(n/2)
	combinedGenotyp <- matrix(0, 2*m, nSnps)
	#rearrange snps: interleave the two allele columns of SNP k into rows
	#2i-1 / 2i of column k, i.e. one haplotype per row
	for(j in seq(1, n, 2))
	{
		left <- genotype[,j]
		right <- genotype[,j+1]
		colIndex = (j+1)/2
		for(i in seq(1, 2*m, 2))
		{
			rowIndex <- (i+1)/2
			combinedGenotyp[i, colIndex] <- left[rowIndex]
			combinedGenotyp[i+1, colIndex] <- right[rowIndex]
		}
	}
	m <- nrow(combinedGenotyp)
	n <- ncol(combinedGenotyp)
	r <- matrix(-2, n, n)	# -2 marks "not yet computed"
	diag(r) <- 0
	p00 <- r
	for(i in seq(1, n-1))
	{
		for(j in (i+1):n)
		{
			#2x2 haplotype counts for the SNP pair (i, j)
			c00 <- c01 <- c10 <- c11 <- 0
			snp1 = combinedGenotyp[,i]
			snp2 = combinedGenotyp[,j]
			len <- length(snp1)
			for(k in seq(1, len))
			{
				if(snp1[k] == 1 && snp2[k] == 1)
					c00 = c00 + 1
				else if(snp1[k] == 1 && snp2[k] == 2)
					c01 = c01 + 1
				else if(snp1[k] == 2 && snp2[k] == 1)
					c10 = c10 + 1
				else if(snp1[k] == 2 && snp2[k] == 2)
					c11 = c11 + 1
				else
				{
					warning("error reading genotype")
					stop()
				}
			}
			pAB = c00/(c00+c01+c10+c11)
			p00[i,j] <- p00[j,i] <- pAB
			#marginal counts of each allele at the two loci
			c0x <- c00 + c01
			c1x <- c10 + c11
			cx0 <- c00 + c10
			cx1 <- c01 + c11
			D = c00*c11 - c01*c10 + 0.0	#linkage disequilibrium coefficient
			L = c0x*c1x*cx0*cx1
			if(L == 0)
			{
				#a monomorphic SNP makes r undefined; report 0
				r[j,i] <- r[i,j] <- 0
			}
			else
			{
				r[j,i] <- r[i,j] <- D/sqrt(L)
			}
		}
	}
	if(verbose == T)
	{
		print(r)
	}
	diag(r) <- NA
	diag(p00) <- NA
	retL <- list("r" = r, "p00"=p00)
	retL
}
# Compare an estimated r matrix against the reference r values stored on disk.
#
# NOTE(review): this function looks unfinished -- `estR` is never used and the
# read table is simply returned so a caller can diff it manually.
#
# @param estR estimated r-value matrix (currently unused by the body).
# @param realR ignored; overwritten by the table read from `fileName`.
# @return the reference r values read from `fileName`, as a data.frame.
diffR <- function(estR, realR)
{
	fileName <- "/home/xzhou/research_linux/gnome/workspace/GenotypeLearnning/data/sim_4000seq/80SNP_CEU_sim_4000seq.rValue"
	# BUG FIX: the original called read.table(filename) with an undefined
	# lower-case `filename`, which errors at run time; use the local name.
	realR <- read.table(fileName)
	realR
}
# Locate contiguous blocks of correlated SNPs in an LD (r-value) matrix.
#
# A block opens at SNP i when r^2(i, i+1) clears 0.6 * avgThreshold; it is
# extended rightwards while consecutive r^2 values stay above that bar, and
# is reported only when the mean r^2 over the block's upper triangle reaches
# avgThreshold.
#
# @param rValue square matrix of pairwise r values.
# @param avgThreshold minimum average r^2 for a block to be reported.
# @param minBlockSize kept for interface compatibility (currently unused).
# @return matrix with one row per block: (start, start+1, xLim, yLim).
findGenotypeBlocks <- function(rValue, avgThreshold = 0.3, minBlockSize = 1)
{
	entryBar <- 0.6 * avgThreshold
	r2 <- rValue * rValue
	nRow <- nrow(r2)
	nCol <- ncol(r2)
	blocks <- matrix(0, 0, 4)
	i <- 1
	while (i < nRow)
	{
		if (r2[i, i + 1] < entryBar)
		{
			i <- i + 1
			next
		}
		# extend the run to the right while adjacent correlation holds
		y <- i + 1
		while (y <= nCol && r2[i, y] >= entryBar)
		{
			y <- y + 1
		}
		yLim <- y - 1
		xLim <- y - 2
		# average r^2 over all SNP pairs inside the candidate block
		sub <- r2[i:yLim, i:yLim, drop = FALSE]
		avg <- mean(sub[upper.tri(sub)])
		if (avg >= avgThreshold)
		{
			blocks <- rbind(blocks, c(i, i + 1, xLim, yLim))
		}
		i <- xLim + 1
		cat("i = ", i, "xLim = ", xLim, "yLim = ", yLim, "\n")
	}
	blocks
}
# Greedy maximum-weight row matching between two genotype matrices.
#
# The individuals (rows) of the two samples are not aligned, so each row is
# greedily paired with the most similar remaining row of the other sample;
# the summed per-pair similarity (count of equal entries) is returned.
#
# @param g1,g2 genotype matrices of identical dimensions.
# @return total similarity score over the greedy matching.
maxWeightSimilarity <- function(g1, g2, ...)
{
	rows1 <- nrow(g1)
	cols1 <- ncol(g1)
	rows2 <- nrow(g2)
	cols2 <- ncol(g2)
	if (rows1 != rows2 || cols1 != cols2)
	{
		warning("inconsistent matrix")
		stop()
	}
	# pairwise similarity: number of positions where the two rows agree
	simMat <- matrix(0.0, rows1, rows2)
	for (a in seq_len(rows1))
	{
		for (b in seq_len(rows2))
		{
			simMat[a, b] <- length(which(g1[a, ] == g2[b, ]))
		}
	}
	# greedily take the best remaining pair, then retire its row and column
	work <- simMat
	total <- 0.0
	for (k in seq_len(rows1))
	{
		best <- which.max(work)
		colIdx <- ceiling(best / rows1)
		rowIdx <- best - rows1 * (colIdx - 1)
		total <- total + work[best]
		work[, colIdx] <- -1
		work[rowIdx, ] <- -1
	}
	total
}
# Cross-compare the 10 saved final populations ("finalPop1".."finalPop10")
# by greedy row-matching similarity and print the symmetric score matrix.
#
# NOTE(review): load() is expected to define `x` in this scope -- the files
# must have been written by save(x, ...) as done in sa(); verify before reuse.
checkSimlarity <- function()
{
	crossCompare <- matrix(0.0, 10, 10)
	popList <- list()	# NOTE(review): never populated; apparently vestigial
	for(i in 1:9)
	{
		fileName = paste("finalPop", i, sep = "")
		load(fileName)	# defines x
		y = x
		for(j in (i+1):10)
		{
			fileName = paste("finalPop", j, sep = "")
			load(fileName)	# redefines x
			z = x
			sim = maxWeightSimilarity(y, z)
			crossCompare[i,j] = sim
			crossCompare[j,i] = sim
			cat("<", i, j, "> = ", sim, "\n")
		}
	}
	print(crossCompare)
}
# Re-save each binary population file "finalPop1".."finalPop10" as a plain
# text table "finalPopN.txt" (no row or column names).
convert <- function()
{
	for(i in 1:10)
	{
		fileName = paste("finalPop", i, sep="")
		load(fileName)	# defines x (written by save(x, ...) in sa())
		write.table(x, file = paste(fileName, ".txt", sep=""), col.names=FALSE, row.names=FALSE)
	}
}
# Convert a two-columns-per-SNP allele matrix into a data.frame of
# genetics::genotype columns ("a/b" strings), ready for LD computation.
#
# @param genoData matrix/data.frame with 2*k allele columns.
# @return data.frame with k genotype() columns.
formateGenotype <- function(genoData)
{
	nCols <- ncol(genoData)
	# paste each allele pair (columns 2i-1, 2i) into one "a/b" string column
	pasted <- NULL
	for (left in seq(1, nCols, by = 2))
	{
		pasted <- cbind(pasted, cbind(paste(genoData[, left], genoData[, left + 1], sep = "/")))
	}
	asFrame <- data.frame(pasted)
	# turn every pasted string column into a genetics::genotype object
	for (k in seq_len(ncol(pasted)))
	{
		asFrame[, k] <- genotype(pasted[, k])
	}
	asFrame
}
#calculate the R values
#
# Compute the pairwise r (LD) matrix for a genotype sample: the allele-pair
# columns are converted to genetics::genotype columns, then all pairwise r
# values are computed via calcAllR().
#
# @param genoData matrix/data.frame with 2*k allele columns.
# @return the r-value matrix component of calcAllR()'s result.
calculateRValues <- function(genoData)
{
	# REFACTOR: the original inlined a verbatim copy of formateGenotype();
	# delegate to the shared helper so the formatting logic lives in one place.
	plotGenoData <- formateGenotype(genoData)
	retValue <- calcAllR(plotGenoData)
	retValue$r
}
#calculate single allele frequency
#
# Count major/minor allele occurrences for every SNP in a genotype matrix.
# Each SNP occupies two adjacent columns (2i-1, 2i); both columns are pooled
# and the larger count is reported as the major allele.
#
# @param genotype matrix with an even number of columns (2 per SNP).
# @return 2 x nSnps matrix; row 1 = major counts, row 2 = minor counts.
calculateSingleAlleleFrequence <- function(genotype, ...)
{
	n <- ncol(genotype)
	if (n %% 2 != 0)
	{
		stop("genotype should have even number of alleles")
	}
	nSnps <- n %/% 2
	freq <- matrix(-1, nrow = 2, ncol = nSnps)
	for (i in seq_len(nSnps))
	{
		snp <- c(genotype[, 2 * i - 1], genotype[, 2 * i])
		counts <- summary(as.factor(snp[!is.na(snp)]))
		if (length(counts) == 1)
		{
			# monomorphic SNP: everything is the major allele
			freq[1, i] <- counts
			freq[2, i] <- 0
		}
		else if (length(counts) == 2)
		{
			freq[1, i] <- max(counts)
			freq[2, i] <- min(counts)
		}
		else
		{
			# BUG FIX: the original error message was truncated mid-sentence
			stop("each SNP should have at most two distinct alleles")
		}
	}
	freq
}
# Fraction of off-diagonal r values whose sign matches between a sample and
# the target r matrix (zero counts as matching only zero).
#
# @param sampleRValues square r matrix of the candidate sample.
# @param targetRValues square r matrix of the target, same dimensions.
# @param verbose kept for interface compatibility (unused).
# @return proportion of upper-triangle entries with agreeing sign.
singRecoverate <- function(sampleRValues, targetRValues, verbose = F)
{
	m <- nrow(sampleRValues)
	n <- ncol(sampleRValues)
	# BUG FIX: the original mixed nrow(sample) with ncol(target) and never
	# verified that the two matrices are conformable square matrices.
	if (m != n || nrow(targetRValues) != m || ncol(targetRValues) != n)
	{
		stop("sample and target r matrices must be square and of equal size")
	}
	totalSigns <- 0.0
	correctSigns <- 0.0
	for (i in 1:(m - 1))
	{
		for (j in (i + 1):n)
		{
			totalSigns <- totalSigns + 1
			if (targetRValues[i, j] != 0 && sampleRValues[i, j] != 0)
			{
				# both non-zero: signs agree iff the product is positive
				if (targetRValues[i, j] * sampleRValues[i, j] > 0)
				{
					correctSigns <- correctSigns + 1
				}
			}
			else if (targetRValues[i, j] == 0 && sampleRValues[i, j] == 0)
			{
				correctSigns <- correctSigns + 1
			}
		}
	}
	correctSigns / totalSigns
}
#' Evaluate a sample against the target genotype statistics
#'
#' Combines (a) the normalized difference of the squared r matrices and
#' (b) the normalized difference of the single-allele frequencies into a
#' single weighted quality score (lower is better), and reports the sign
#' recovery rate over the upper triangle of the r matrices.
#'
#' @param sample list with $RValues (square matrix) and $singleAlleleFreq
#' @param target list with the same two components for the target data
#' @param rWeight,freqWeight weights of the two normalized differences
#' @param verbose if TRUE, print both r matrices
#' @return list with $quality, $normalizedRdiff, $normalizedFeqDiff,
#'   $recoverRate and $diff (alias of $quality; see bug-fix note below)
evaluate <- function(sample = NULL, target = NULL, rWeight = 0.7, freqWeight = 0.3, verbose = F)
{
	sampleRValues <- sample$RValues
	sampleSingleAlleleFreq <- sample$singleAlleleFreq
	targetRValues <- target$RValues
	targetSingleAlleleFreq <- target$singleAlleleFreq
	if (verbose)
	{
		cat("sample r values \n")
		print(sampleRValues)
		cat("real r values \n")
		print(targetRValues)
	}
	m <- nrow(sampleRValues)
	n <- ncol(sampleRValues)
	if (m != n)
	{
		warning("different matrix")
		stop()
	}
	# sign recovery rate over the upper triangle
	totalSigns <- 0.0
	correctSigns <- 0.0
	for (i in 1:(m - 1))
	{
		for (j in (i + 1):n)
		{
			totalSigns <- totalSigns + 1
			if (targetRValues[i, j] != 0 && sampleRValues[i, j] != 0)
			{
				if (targetRValues[i, j] * sampleRValues[i, j] > 0)
				{
					correctSigns <- correctSigns + 1
				}
			}
			else if (targetRValues[i, j] == 0 && sampleRValues[i, j] == 0)
			{
				correctSigns <- correctSigns + 1
			}
		}
	}
	signRecoverRate <- correctSigns / totalSigns
	# normalized difference of squared r values and of allele frequencies
	rdiff <- sum(abs(targetRValues * targetRValues - sampleRValues * sampleRValues))
	normalizedRDiff <- rdiff / sum(targetRValues * targetRValues)
	freqDiff <- sum(abs(sampleSingleAlleleFreq - targetSingleAlleleFreq))
	normalizedFreqDiff <- freqDiff * 1.0 / sum(targetSingleAlleleFreq)
	quality <- rWeight * normalizedRDiff + freqWeight * normalizedFreqDiff
	# BUG FIX: every caller (sa, hillClimb, checkFinalPop, ...) reads
	# result$diff, which the original list never defined (it was NULL).
	# Expose the quality score under that name as a backward-compatible alias.
	ret <- list("quality" = quality, "normalizedRdiff" = normalizedRDiff,
		"normalizedFeqDiff" = normalizedFreqDiff, "recoverRate" = signRecoverRate,
		"diff" = quality)
}
# Confirm the final results: re-evaluate every saved population against the
# stored target r values and print its diff and sign-recovery rate.
#
# NOTE(review): evaluate() expects list arguments with $RValues /
# $singleAlleleFreq, but raw matrices are passed here -- see the "TODO change
# code to accomodate the new evluate function" note above sa(); confirm.
checkFinalPop <- function()
{
	load("rValue")	#load var.targetRValue
	load("targetGenoData")	#load var.targetGenoData
	for(i in 1:10)
	{
		fileName <- paste("finalPop", i, sep = "")
		load(fileName)	#load x
		sampleR <- calculateRValues(x)
		result <- evaluate(sampleR, var.targetRValue)
		cat(i, "totalDiff = ", result$diff, "signRecoverRate", result$recoverRate, "\n")
	}
}
# read the file from ped file and convert it to standard genotype matrix
#
# Reads a whitespace-separated 1/2-encoded genotype table and trims it to the
# requested number of individuals (rows) and SNPs (two columns per SNP).
#
# @param fileName path of the 1/2-encoded genotype file.
# @param nIndividuals rows to keep; -1 means all rows.
# @param nSnps SNPs to keep; -1 means all (ncol/2).
# @return data.frame of nIndividuals x (2*nSnps) allele codes.
readGenotypeFromFastaFile <- function(fileName = "../data/sim_4000seq/80SNP_CEU_sim_4000seq.12encode", nIndividuals = -1, nSnps = -1)
{
	genoData <- read.table(fileName,
			header = FALSE)
	m <- nrow(genoData)
	n <- ncol(genoData)
	if(nIndividuals == -1)
		nIndividuals = m
	if(nSnps == -1)
		nSnps = n/2
	if(m < nIndividuals || n < 2*nSnps)
	{
		cat("not enough snps")
		stop()
	}
	#cut to the requested window
	genoData <- genoData[1:nIndividuals, 1:(2*nSnps)]
	genoData
}
# Draw a random genotype sample: a data.frame of nIndividual rows and
# 2*nSnps allele columns, each entry 1 or 2 with equal probability.
#
# @param nIndividual number of individuals (rows).
# @param nSnps number of SNPs (each contributes two allele columns).
# @return data.frame of random alleles coded 1/2.
generateRandomSample <- function(nIndividual, nSnps)
{
	nAlleles <- nIndividual * 2 * nSnps
	draws <- rbinom(nAlleles, 1, 0.5) + 1
	as.data.frame(matrix(draws, nrow = nIndividual, ncol = 2 * nSnps))
}
#mutate a single point
#
# Flip one uniformly chosen allele of the genotype matrix (1 <-> 2).
#
# @param genoData genotype matrix/data.frame of 1/2 codes.
# @return copy of genoData with exactly one cell toggled.
singlePointMutate <- function(genoData, ...)
{
	m <- nrow(genoData)
	n <- ncol(genoData)
	# BUG FIX: round(runif(1, 1, m)) picks boundary rows/columns with only
	# half the probability of interior ones; sample.int() is uniform.
	i <- sample.int(m, 1)
	j <- sample.int(n, 1)
	if (genoData[i, j] == 1)
		genoData[i, j] <- 2
	else if (genoData[i, j] == 2)
		genoData[i, j] <- 1
	genoData
}
# Bubble-sort the rows of a matrix into ascending lexicographic order.
#
# @param aGenotype matrix whose rows are compared element by element.
# @return the matrix with rows sorted lexicographically.
sortMatrixByRow <- function(aGenotype, ...)
{
	m <- nrow(aGenotype)
	# BUG FIX: the original ran its loops even for m < 2 (1:(m-1) counts
	# down to 0 and indexes row 0); nothing needs sorting in that case.
	if (m < 2)
		return(aGenotype)
	# lexicographic "greater than" on two row vectors
	more <- function(a, b)
	{
		len <- length(a)
		for (i in 1:len)
		{
			if (a[i] > b[i])
				return(TRUE)
			else if (a[i] < b[i])
				return(FALSE)
		}
		return(FALSE)	# rows are equal
	}
	for (i in 1:(m - 1))
	{
		# BUG FIX: `swapped` was initialised once outside the outer loop, so
		# the early-exit never fired after the first swap; reset it per pass.
		swapped <- FALSE
		for (j in 1:(m - i))
		{
			if (more(aGenotype[j, ], aGenotype[j + 1, ]))
			{
				temp <- aGenotype[j, ]
				aGenotype[j, ] <- aGenotype[j + 1, ]
				aGenotype[j + 1, ] <- temp
				swapped <- TRUE
			}
		}
		if (!swapped)
			break
	}
	aGenotype
}
# Element-wise similarity rate between two genotype samples.
#
# Both genotypes are recoded so 1 = major allele, their rows are sorted
# lexicographically (individuals are unordered), and the fraction of equal
# cells is returned.
#
# @param targetGenotype,sampleGenotype genotype matrices of equal size.
# @return fraction of matching cells in [0, 1].
similarity <- function(targetGenotype, sampleGenotype)
{
	# recode both samples to major/minor form so they are comparable
	targetGenotype <- majorize(targetGenotype)
	sampleGenotype <- majorize(sampleGenotype)
	mt <- nrow(targetGenotype)
	nt <- ncol(targetGenotype)	# BUG FIX: was taken from sampleGenotype
	ms <- nrow(sampleGenotype)
	ns <- ncol(sampleGenotype)
	# BUG FIX: the original compared `ns != ns` (always FALSE), so unequal
	# column counts slipped through; compare target vs sample instead.
	if (mt != ms || nt != ns)
	{
		warning("incompatible geno type")
		stop()
	}
	# sort rows so individuals line up before the cell-wise comparison
	targetGenotype <- sortMatrixByRow(targetGenotype)
	sampleGenotype <- sortMatrixByRow(sampleGenotype)
	correct <- 0.0
	for (i in 1:mt)
	{
		for (j in 1:nt)
		{
			if (targetGenotype[i, j] == sampleGenotype[i, j])
			{
				correct <- correct + 1
			}
		}
	}
	correct / (mt * nt)
}
#TODO change code to accomodate the new evluate function
#simulated annealing algorithm
#
# Repeatedly runs simulated annealing to find a genotype sample matching the
# target r-value statistics; each run's final population is saved to disk.
#
# NOTE(review): this function appears broken as written:
#   - `saConf.totalIt`, `saConf.initT`, `var.nIndividuals`, etc. are plain
#     variable names (dots are NOT field access in R); they resolve only if
#     identically named globals exist (shcMain assigns them locally only).
#   - `currentRValues` is used right below its commented-out initialisation,
#     so the first evaluate() call reads an undefined name.
#   - evaluate() results are read via $diff, which the returned list does
#     not define as written.
sa <- function(var, saConf, ...)
{
	finalPopList = list()
	for(ti in 1:saConf.totalIt)
	{
		#sink(file="sa.log")
		cat("start simulated annealing algorithm\n")
		T <- saConf.initT	#the initial temperature
		t <- 1	#iteration counter
		i <- 1	#the iteration
		x <- generateRandomSample(var.nIndividuals, var.nSnps)
		cat(ncol(x), nrow(x), "\n")
		#currentRValues <- calculateRValues(x)
		currentQuality <- evaluate(currentRValues, var.targetRValue)
		while(T >= saConf.Tmin && currentQuality$diff > 0)
		{
			for(i in 1:saConf.k)
			{
				newx <- singlePointMutate(x)
				newRValues <- calculateRValues(newx)
				newQuality <- evaluate(newRValues, var.targetRValue)
				if(newQuality$diff < currentQuality$diff)
				{
					# strict improvement: always accept
					x <- newx
					currentRValue <- newRValues
					currentQuality <- newQuality
					cat(t, "\t-\t", "diff = ", currentQuality$diff, "\tsignRecoverRate = ", currentQuality$recoverRate, "\n")
					save(x, file = "currentPop0diff")
					if(currentQuality$diff == 0)
						break
				}
				else
				{
					# worse move: accept with Boltzmann probability exp(-delta/T)
					delta <- newQuality$diff - currentQuality$diff
					p <- exp(-delta/T)
					randomV <- runif(1, 0, 1)
					if(randomV < p)
					{
						x <- newx
						currentRValue <- newRValues
						currentQuality <- newQuality
						cat(t, "\t+\t", "diff = ", currentQuality$diff, "\tsignRecoverRate = ", currentQuality$recoverRate, "\t", p, "\n")
						save(x, file = "currentPop")
					}
					else
					{
						cat(t, "\tX\t Rej\n")
					}
				}
				t = t + 1
			}
			T <- saConf.beta*T	#cool down (geometric schedule)
			cat("cool down", T, "\n")
			if(currentQuality$diff < saConf.minDiff)
				break
		}
		fileName = paste("finalPop", ti, sep="")
		#finalPopList[i] = x
		save(x, file = fileName)
	}
	save(finalPopList, file = "finalPopList")
}
#stacastic algorithm
#
# Stochastic hill climbing: accept a mutated sample with probability
# 1/(1+exp(diff/T)) so slightly worse moves are occasionally taken.
#
# NOTE(review): `var.max_it`, `var.T`, `var.nIndividuals`, `var.nSnps` and
# `var.targetRValue` are plain global names (dots are not field access in R);
# they must exist in the calling environment -- TODO confirm. Also `$diff`
# is read from evaluate()'s result, which does not define it as written.
stocasticHillClim <- function(var,...)
{
	cat("start stocastic hill climbing with max_it = ", var.max_it, "T = ", var.T, "\n")
	x <- generateRandomSample(var.nIndividuals, var.nSnps)
	currentRValues <- calculateRValues(x)	#get the R values
	currentQuality <- evaluate(currentRValues, var.targetRValue)
	t <- 0
	while(t < var.max_it && currentQuality$diff != 0)
	{
		t <- t + 1
		newx <- singlePointMutate(x)
		newRValues <- calculateRValues(newx)
		newQuality <- evaluate(newRValues, var.targetRValue)
		diff <- newQuality$diff - currentQuality$diff
		#acceptance probability: ~1 for improvements, small for regressions
		p <- 1/(1+exp(diff/var.T))
		aRandomNumber = runif(1, 0, 1)
		if(aRandomNumber < p){
			x <- newx
			currentQuality <- newQuality
			currentRValues <- newRValues
			cat(t, "RDiff=", currentQuality$diff,"\t signRecoverate", currentQuality$recoverRate, "\n")
		}
		else
		{
			cat(t, "\n")
		}
	}
}
# Plain hill climbing: accept a mutated sample only when it strictly improves
# the evaluation diff; stop at max_it, diff == 0, or full sign recovery.
#
# NOTE(review): `var.targetGenoData` is a plain global name, and `$diff` is
# read from evaluate()'s result, which does not define it as written.
#
# @param var unused placeholder (configuration was meant to live here).
# @param targetRValues target r matrix handed to evaluate().
# @param max_it iteration budget.
# @param nIndividuals,nSnps dimensions of the random starting sample.
hillClimb <- function(var, targetRValues, max_it, nIndividuals, nSnps)
{
	x <- generateRandomSample(nIndividuals, nSnps)
	currentRValues = calculateRValues(x)
	currentQuality <- evaluate(currentRValues, targetRValues)
	cat("0 RDiff =", currentQuality$diff, "\t signRecoverate =", currentQuality$recoverRate,"\n")
	t <- 0
	while (t < max_it && currentQuality$diff != 0 && currentQuality$recoverRate != 1)
	{
		t <- t + 1
		newx <- singlePointMutate(x)
		newRValues <- calculateRValues(newx)
		newQuality <- evaluate(newRValues, targetRValues)
		if (newQuality$diff < currentQuality$diff)
		{
			x <- newx
			currentQuality <- newQuality
			currentRValues <- newRValues
			sim <- similarity(var.targetGenoData, newx)
			cat(t, " RDiff=", currentQuality$diff,"\t signRecoverate", currentQuality$recoverRate, "\t similarity=", sim, "\n")
		}
		else
		{
			cat(t, "\n")
		}
	}
}
# Entry point: read the target genotype window from the encoded file, save it
# to "targetGenoData", and set up the search configuration.
#
# NOTE(review): the `var.*` / `saConf.*` assignments below are LOCAL plain
# variables (dots are not field access in R); the search functions that read
# identically named globals will not see them -- TODO confirm the intended
# configuration mechanism before re-enabling realRSA(). Also note
# `targetGenotypeFileName` is accepted but never passed to the reader.
#
# @param targetGenotypeFileName 1/2-encoded genotype file to load.
# @param max_it iteration budget for hill climbing.
# @param nIndividuals,nSnps size of the target window to extract.
shcMain <- function(targetGenotypeFileName = "../data/sim_4000seq/80SNP_CEU_sim_4000seq.12encode", max_it = 10000000, nIndividuals = 100, nSnps = 10)
{
	#-------------------------VARIABLES--------------------------------
	#var is the global configuration variable
	var.targetGenoData <- NULL
	var.max_it <- NULL
	var.nIndividuals <- NULL
	var.nSnps <- NULL
	var.currentGenoMatrxi <- NULL
	#-------------------------START FROM HERE--------------------------
	#init variables
	var.max_it <- max_it
	var.nIndividuals <- nIndividuals
	var.nSnps <- nSnps
	var.T <- 0.1	#for stochastic hill climbing
	saConf.initT <- 0.01
	saConf.Tmin <- 1e-7	#minimal temperature
	saConf.beta <- 0.8	#exponential temperature decay factor
	saConf.k <- 100	#number of iterations per temperature level
	saConf.totalIt = 10	#repeat the sa algorithm to look for
	#multiple local optimal values
	saConf.minDiff = 0.001
	cat("reading genodata from fasta file ...")
	var.targetGenoData <- readGenotypeFromFastaFile(nIndividuals = var.nIndividuals, nSnps = var.nSnps)
	save(var.targetGenoData, file = "targetGenoData")
	print(var.targetGenoData)
	cat("complete \n")
	#realRSA(var, saConf)
}
|
\name{baranja}
\docType{data}
\encoding{latin1}
\alias{barxyz}
\alias{bargrid}
\alias{barstr}
\title{Baranja hill case study}
\description{Baranja hill is a 4 by 4 km large study area in the Baranja region, eastern Croatia (corresponds to a size of an aerial photograph). This data set has been extensively used to describe various DEM modelling and analysis steps (see \href{https://geomorphometry.org/geomorphometry-concepts-software-applications/}{Hengl and Reuter, 2008}; Hengl et al., 2010; \doi{10.5194/hess-14-1153-2010}). Object \code{barxyz} contains 6370 precise observations of elevations (from field survey and digitized from the stereo images); \code{bargrid} contains \emph{observed} probabilities of streams (digitized from the 1:5000 topo map); \code{barstr} contains 100 simulated stream networks (\code{"SpatialLines"}) using \code{barxyz} point data as input (see examples below).}
\usage{data(bargrid)}
\format{
The \code{bargrid} data frame (regular grid at 30 m intervals) contains the following columns:
\describe{
\item{\code{p.obs}}{observed probability of stream (0-1)}
\item{\code{x}}{a numeric vector; x-coordinate (m) in the MGI / Balkans zone 6 }
\item{\code{y}}{a numeric vector; y-coordinate (m) in the MGI / Balkans zone 6 }
}
}
\author{ Tomislav Hengl }
\references{
\itemize{
\item Hengl, T., Reuter, H.I. (eds), (2008) \href{https://geomorphometry.org/geomorphometry-concepts-software-applications/}{Geomorphometry: Concepts, Software, Applications}. Developments in Soil Science, vol. 33, Elsevier, 772 p.
\item Hengl, T., Heuvelink, G. B. M., van Loon, E. E., (2010) On the uncertainty of stream networks derived from elevation data: the error propagation approach. Hydrology and Earth System Sciences, 14:1153-1165. \doi{10.5194/hess-14-1153-2010}
\item \url{https://geomorphometry.org/baranja-hill/}
}
}
\note{Consider using the 30 m resolution grid (see \code{bargrid}) as the target resolution (output maps).
}
\examples{
library(sp)
library(gstat)
## sampled elevations:
data(barxyz)
prj = "+proj=tmerc +lat_0=0 +lon_0=18 +k=0.9999 +x_0=6500000 +y_0=0 +ellps=bessel +units=m
+towgs84=550.499,164.116,475.142,5.80967,2.07902,-11.62386,0.99999445824"
coordinates(barxyz) <- ~x+y
proj4string(barxyz) <- CRS(prj)
## grids:
data(bargrid)
data(barstr)
coordinates(bargrid) <- ~x+y
gridded(bargrid) <- TRUE
proj4string(bargrid) <- barxyz@proj4string
bargrid@grid
\dontrun{## Example with simulated streams:
data(R_pal)
library(rgdal)
library(RSAGA)
pnt = list("sp.points", barxyz, col="black", pch="+")
spplot(bargrid[1], sp.layout=pnt,
col.regions = R_pal[["blue_grey_red"]])
## Deriving stream networks using geostatistical simulations:
Z.ovgm <- vgm(psill=1831, model="Mat", range=1051, nugget=0, kappa=1.2)
sel <- runif(length(barxyz$Z))<.2
N.sim <- 5
## geostatistical simulations:
DEM.sim <- krige(Z~1, barxyz[sel,], bargrid, model=Z.ovgm, nmax=20,
nsim=N.sim, debug.level=-1)
## Note: this operation can be time consuming
stream.list <- list(rep(NA, N.sim))
## derive stream networks in SAGA GIS:
for (i in 1:N.sim) {
writeGDAL(DEM.sim[i], paste("DEM", i, ".sdat", sep=""),
drivername = "SAGA", mvFlag = -99999)
## filter the spurious sinks:
rsaga.fill.sinks(in.dem=paste("DEM", i, ".sgrd", sep=""),
out.dem="DEMflt.sgrd", check.module.exists = FALSE)
## extract the channel network SAGA GIS:
rsaga.geoprocessor(lib="ta_channels", module=0,
param=list(ELEVATION="DEMflt.sgrd",
CHNLNTWRK=paste("channels", i, ".sgrd", sep=""),
CHNLROUTE="channel_route.sgrd",
SHAPES="channels.shp",
INIT_GRID="DEMflt.sgrd",
DIV_CELLS=3, MINLEN=40),
check.module.exists = FALSE,
show.output.on.console=FALSE)
stream.list[[i]] <- readOGR("channels.shp", "channels",
verbose=FALSE)
proj4string(stream.list[[i]]) <- barxyz@proj4string
}
# plot all derived streams on top of each other:
streams.plot <- as.list(rep(NA, N.sim))
for(i in 1:N.sim){
streams.plot[[i]] <- list("sp.lines", stream.list[[i]])
}
spplot(DEM.sim[1], col.regions=grey(seq(0.4,1,0.025)), scales=list(draw=T),
sp.layout=streams.plot)
}
}
\keyword{datasets}
| /man/baranja.Rd | no_license | cran/plotKML | R | false | false | 4,226 | rd | \name{baranja}
\docType{data}
\encoding{latin1}
\alias{barxyz}
\alias{bargrid}
\alias{barstr}
\title{Baranja hill case study}
\description{Baranja hill is a 4 by 4 km large study area in the Baranja region, eastern Croatia (corresponds to a size of an aerial photograph). This data set has been extensively used to describe various DEM modelling and analysis steps (see \href{https://geomorphometry.org/geomorphometry-concepts-software-applications/}{Hengl and Reuter, 2008}; Hengl et al., 2010; \doi{10.5194/hess-14-1153-2010}). Object \code{barxyz} contains 6370 precise observations of elevations (from field survey and digitized from the stereo images); \code{bargrid} contains \emph{observed} probabilities of streams (digitized from the 1:5000 topo map); \code{barstr} contains 100 simulated stream networks (\code{"SpatialLines"}) using \code{barxyz} point data as input (see examples below).}
\usage{data(bargrid)}
\format{
The \code{bargrid} data frame (regular grid at 30 m intervals) contains the following columns:
\describe{
\item{\code{p.obs}}{observed probability of stream (0-1)}
\item{\code{x}}{a numeric vector; x-coordinate (m) in the MGI / Balkans zone 6 }
\item{\code{y}}{a numeric vector; y-coordinate (m) in the MGI / Balkans zone 6 }
}
}
\author{ Tomislav Hengl }
\references{
\itemize{
\item Hengl, T., Reuter, H.I. (eds), (2008) \href{https://geomorphometry.org/geomorphometry-concepts-software-applications/}{Geomorphometry: Concepts, Software, Applications}. Developments in Soil Science, vol. 33, Elsevier, 772 p.
\item Hengl, T., Heuvelink, G. B. M., van Loon, E. E., (2010) On the uncertainty of stream networks derived from elevation data: the error propagation approach. Hydrology and Earth System Sciences, 14:1153-1165. \doi{10.5194/hess-14-1153-2010}
\item \url{https://geomorphometry.org/baranja-hill/}
}
}
\note{Consider using the 30 m resolution grid (see \code{bargrid}) as the target resolution (output maps).
}
\examples{
library(sp)
library(gstat)
## sampled elevations:
data(barxyz)
prj = "+proj=tmerc +lat_0=0 +lon_0=18 +k=0.9999 +x_0=6500000 +y_0=0 +ellps=bessel +units=m
+towgs84=550.499,164.116,475.142,5.80967,2.07902,-11.62386,0.99999445824"
coordinates(barxyz) <- ~x+y
proj4string(barxyz) <- CRS(prj)
## grids:
data(bargrid)
data(barstr)
coordinates(bargrid) <- ~x+y
gridded(bargrid) <- TRUE
proj4string(bargrid) <- barxyz@proj4string
bargrid@grid
\dontrun{## Example with simulated streams:
data(R_pal)
library(rgdal)
library(RSAGA)
pnt = list("sp.points", barxyz, col="black", pch="+")
spplot(bargrid[1], sp.layout=pnt,
col.regions = R_pal[["blue_grey_red"]])
## Deriving stream networks using geostatistical simulations:
Z.ovgm <- vgm(psill=1831, model="Mat", range=1051, nugget=0, kappa=1.2)
sel <- runif(length(barxyz$Z))<.2
N.sim <- 5
## geostatistical simulations:
DEM.sim <- krige(Z~1, barxyz[sel,], bargrid, model=Z.ovgm, nmax=20,
nsim=N.sim, debug.level=-1)
## Note: this operation can be time consuming
stream.list <- list(rep(NA, N.sim))
## derive stream networks in SAGA GIS:
for (i in 1:N.sim) {
writeGDAL(DEM.sim[i], paste("DEM", i, ".sdat", sep=""),
drivername = "SAGA", mvFlag = -99999)
## filter the spurious sinks:
rsaga.fill.sinks(in.dem=paste("DEM", i, ".sgrd", sep=""),
out.dem="DEMflt.sgrd", check.module.exists = FALSE)
## extract the channel network SAGA GIS:
rsaga.geoprocessor(lib="ta_channels", module=0,
param=list(ELEVATION="DEMflt.sgrd",
CHNLNTWRK=paste("channels", i, ".sgrd", sep=""),
CHNLROUTE="channel_route.sgrd",
SHAPES="channels.shp",
INIT_GRID="DEMflt.sgrd",
DIV_CELLS=3, MINLEN=40),
check.module.exists = FALSE,
show.output.on.console=FALSE)
stream.list[[i]] <- readOGR("channels.shp", "channels",
verbose=FALSE)
proj4string(stream.list[[i]]) <- barxyz@proj4string
}
# plot all derived streams on top of each other:
streams.plot <- as.list(rep(NA, N.sim))
for(i in 1:N.sim){
streams.plot[[i]] <- list("sp.lines", stream.list[[i]])
}
spplot(DEM.sim[1], col.regions=grey(seq(0.4,1,0.025)), scales=list(draw=T),
sp.layout=streams.plot)
}
}
\keyword{datasets}
|
\alias{gtkNotebookGetGroupId}
\name{gtkNotebookGetGroupId}
\title{gtkNotebookGetGroupId}
\description{
Gets the current group identifier for \code{notebook}.
\strong{WARNING: \code{gtk_notebook_get_group_id} has been deprecated since version 2.12 and should not be used in newly-written code. use \code{\link{gtkNotebookGetGroup}} instead.}
}
\usage{gtkNotebookGetGroupId(object)}
\arguments{\item{\verb{object}}{a \code{\link{GtkNotebook}}}}
\details{Since 2.10}
\value{[integer] the group identifier, or -1 if none is set.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /RGtk2/man/gtkNotebookGetGroupId.Rd | no_license | lawremi/RGtk2 | R | false | false | 603 | rd | \alias{gtkNotebookGetGroupId}
\name{gtkNotebookGetGroupId}
\title{gtkNotebookGetGroupId}
\description{
Gets the current group identifier for \code{notebook}.
\strong{WARNING: \code{gtk_notebook_get_group_id} has been deprecated since version 2.12 and should not be used in newly-written code. use \code{\link{gtkNotebookGetGroup}} instead.}
}
\usage{gtkNotebookGetGroupId(object)}
\arguments{\item{\verb{object}}{a \code{\link{GtkNotebook}}}}
\details{Since 2.10}
\value{[integer] the group identifier, or -1 if none is set.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
##Plotting generics
#' @exportMethod lines
setGeneric("lines")
#' @exportMethod points
setGeneric("points")
#' The cumulative distribution function (cdf)
#'
#' This is a generic function for calculating
#' the cumulative distribution function (cdf) of
#' distribution objects. This is similar to base R's \code{pnorm}
#' for the normal distribution.
#' The \code{dist_cdf} function calculates the
#' cumulative probability distribution for the
#' current parameters and xmin value.
#'
#' @param m a distribution object.
#' @param all_values logical; \code{FALSE} (default). If \code{TRUE},
#' then the cdf is evaluated at points xmin, xmin+1, ..., xmax.
#' @param q a vector values where the function will be evaluated.
#' If \code{q} is \code{NULL} (default), then the data values
#' will be used.
#' @param lower_tail logical; if \code{TRUE},
#' probabilities are \eqn{P[X \le x]}, otherwise, \eqn{P[X > x]}.
#' The generic's default is \code{FALSE}.
#' @docType methods
#' @exportMethod dist_cdf
#' @note This method does *not* alter the internal state of
#' the distribution objects.
#' @rdname dist_cdf-methods
#' @export
#' @examples
#' ##########################################
#' #Load data and create distribution object#
#' ##########################################
#' data(moby_sample)
#' m = displ$new(moby_sample)
#' m$setXmin(7); m$setPars(2)
#'
#' ##########################################
#' #Calculate the CDF at a particular values#
#' ##########################################
#' dist_cdf(m, 10:15)
#'
#' ##########################################
#' #Calculate the CDF at the data values #
#' ##########################################
#' dist_cdf(m)
setGeneric("dist_cdf",
function(m, q=NULL, lower_tail=FALSE, ...)
standardGeneric("dist_cdf"))
#' The data cumulative distribution function
#'
#' This is generic function for distribution objects.
#' This function calculates the data cdf.
#'
#' @param m a reference class distribution object.
#' @param lower_tail logical;
#' if \code{TRUE} (default), probabilities are \eqn{P[X \le x]},
#' otherwise, \eqn{P[X > x]}.
#' @param all_values logical, if \code{FALSE} (default), evaluate
#' at the data values. If \code{TRUE},
#' then the cdf is evaluated at points xmin, xmin+1, ..., xmax.
#' @docType methods
#' @exportMethod dist_data_cdf
#' @note This method does *not* alter the internal state of
#' the distribution objects.
#' @rdname dist_data_cdf-methods
#' @export
#' @examples
#' ##########################################
#' #Load data and create distribution object#
#' ##########################################
#' data(moby_sample)
#' m = displ$new(moby_sample)
#' m$setXmin(7);m$setPars(2)
#'
#' ##########################################
#' # The data cdf #
#' ##########################################
#' dist_data_cdf(m)
setGeneric("dist_data_cdf",
function(m, lower_tail=TRUE, all_values=FALSE)
standardGeneric("dist_data_cdf"))
#' The probability density function (pdf)
#'
#' This is generic function for distribution objects.
#' This function calculates the probability density function (pdf)
#' for the current parameters and xmin value.
#'
#' @param m The distribution reference object.
#' @param q a vector values where the function will be evaluated.
#' If \code{q} is \code{NULL} (default), then the data value will be used.
#' @param log default \code{FALSE}. If \code{TRUE}, probabilities are given as log(p).
#' @return The probability density (or mass) function
#'
#' @seealso \code{\link{dist_cdf}}, \code{\link{dist_ll}}
#' and \code{\link{dist_rand}}
#' @note This method does *not* alter the internal state of
#' the distribution objects.
#' @export
#' @docType methods
#' @rdname dist_pdf-methods
#' @examples
#' ##########################################
#' #Create distribution object #
#' ##########################################
#' m = displ$new()
#' m$setXmin(7); m$setPars(2)
#'
#' ##########################################
#' #Calculate the pdf at particular values #
#' ##########################################
#' dist_pdf(m, 7:10)
setGeneric("dist_pdf",
function(m, q=NULL, log=FALSE)
standardGeneric("dist_pdf"))
#' The log-likelihood function
#'
#' This is generic function for distribution objects.
#' This function calculates the log-likelihood for the current
#' parameters and xmin value.
#'
#' @param m The distribution reference object.
#' @return The log-likelihood
#'
#' @seealso \code{\link{dist_cdf}}, \code{\link{dist_pdf}}
#' and \code{\link{dist_rand}}
#' @note This method does *not* alter the internal state of
#' the distribution objects.
#' @export
#' @docType methods
#' @rdname dist_ll-methods
#' @examples
#' ##########################################
#' #Load data and create distribution object#
#' ##########################################
#' data(moby_sample)
#' m = displ$new(moby_sample)
#' m$setXmin(7); m$setPars(2)
#'
#' ##########################################
#' #Calculate the log-likelihood #
#' ##########################################
#' dist_ll(m)
setGeneric("dist_ll",
function(m)
standardGeneric("dist_ll"))
#' Random number generation for the distribution objects
#'
#' This is generic function for generating random numbers
#' from the underlying distribution of the distribution reference objects.
#' This function generates \code{n} random numbers using the parameters
#' and xmin values found in the associated reference object.
#'
#' @param m a distribution reference object.
#' @param n number of observations to be generated.
#' @return n random numbers
#'
#' @seealso \code{\link{dist_cdf}}, \code{\link{dist_pdf}}
#' and \code{\link{dist_ll}}
#' @note This method does *not* alter the internal state of
#' the distribution object. This method is only available for
#' discrete and continuous power law objects.
#' @export
#' @docType methods
#' @rdname dist_rand-methods
#' @examples
#' ##########################################
#' #Create distribution object #
#' ##########################################
#' m = displ$new()
#' m$setXmin(7);m$setPars(2)
#'
#' ##########################################
#' #Generate five random numbers #
#' ##########################################
#' dist_rand(m, 5)
setGeneric("dist_rand",
function(m, n)
standardGeneric("dist_rand"))
| /pkg/R/AllGenerics.R | no_license | hitalex/poweRlaw | R | false | false | 6,473 | r | ##Plotting generics
#' @exportMethod lines
setGeneric("lines")
#' @exportMethod points
setGeneric("points")
#' The cumulative distribution function (cdf)
#'
#' This is a generic function for calculating
#' the cumulative distribution function (cdf) of
#' distribution objects. This is similar to base R's \code{pnorm}
#' for the normal distribution.
#' The \code{dist_cdf} function calculates the
#' cumulative probability distribution for the
#' current parameters and xmin value.
#'
#' @param m a distribution object.
#' @param all_values logical; \code{FALSE} (default). If \code{TRUE},
#' then the cdf is evaluated at points xmin, xmin+1, ..., xmax.
#' @param q a vector values where the function will be evaluated.
#' If \code{q} is \code{NULL} (default), then the data values
#' will be used.
#' @param lower_tail logical; if \code{TRUE},
#' probabilities are \eqn{P[X \le x]}, otherwise, \eqn{P[X > x]}.
#' The generic's default is \code{FALSE}.
#' @docType methods
#' @exportMethod dist_cdf
#' @note This method does *not* alter the internal state of
#' the distribution objects.
#' @rdname dist_cdf-methods
#' @export
#' @examples
#' ##########################################
#' #Load data and create distribution object#
#' ##########################################
#' data(moby_sample)
#' m = displ$new(moby_sample)
#' m$setXmin(7); m$setPars(2)
#'
#' ##########################################
#' #Calculate the CDF at a particular values#
#' ##########################################
#' dist_cdf(m, 10:15)
#'
#' ##########################################
#' #Calculate the CDF at the data values #
#' ##########################################
#' dist_cdf(m)
setGeneric("dist_cdf",
function(m, q=NULL, lower_tail=FALSE, ...)
standardGeneric("dist_cdf"))
#' The data cumulative distribution function
#'
#' This is generic function for distribution objects.
#' This function calculates the data cdf.
#'
#' @param m a reference class distribution object.
#' @param lower_tail logical;
#' if \code{TRUE} (default), probabilities are \eqn{P[X \le x]},
#' otherwise, \eqn{P[X > x]}.
#' @param all_values logical, if \code{FALSE} (default), evaluate
#' at the data values. If \code{TRUE},
#' then the cdf is evaluated at points xmin, xmin+1, ..., xmax.
#' @docType methods
#' @exportMethod dist_data_cdf
#' @note This method does *not* alter the internal state of
#' the distribution objects.
#' @rdname dist_data_cdf-methods
#' @export
#' @examples
#' ##########################################
#' #Load data and create distribution object#
#' ##########################################
#' data(moby_sample)
#' m = displ$new(moby_sample)
#' m$setXmin(7);m$setPars(2)
#'
#' ##########################################
#' # The data cdf #
#' ##########################################
#' dist_data_cdf(m)
setGeneric("dist_data_cdf",
function(m, lower_tail=TRUE, all_values=FALSE)
standardGeneric("dist_data_cdf"))
#' The probability density function (pdf)
#'
#' This is generic function for distribution objects.
#' This function calculates the probability density function (pdf)
#' for the current parameters and xmin value.
#'
#' @param m The distribution reference object.
#' @param q a vector values where the function will be evaluated.
#' If \code{q} is \code{NULL} (default), then the data value will be used.
#' @param log default \code{FALSE}. If \code{TRUE}, probabilities are given as log(p).
#' @return The probability density (or mass) function
#'
#' @seealso \code{\link{dist_cdf}}, \code{\link{dist_ll}}
#' and \code{\link{dist_rand}}
#' @note This method does *not* alter the internal state of
#' the distribution objects.
#' @export
#' @docType methods
#' @rdname dist_pdf-methods
#' @examples
#' ##########################################
#' #Create distribution object #
#' ##########################################
#' m = displ$new()
#' m$setXmin(7); m$setPars(2)
#'
#' ##########################################
#' #Calculate the pdf at particular values #
#' ##########################################
#' dist_pdf(m, 7:10)
setGeneric("dist_pdf",
function(m, q=NULL, log=FALSE)
standardGeneric("dist_pdf"))
#' The log-likelihood function
#'
#' This is generic function for distribution objects.
#' This function calculates the log-likelihood for the current
#' parameters and xmin value.
#'
#' @param m The distribution reference object.
#' @return The log-likelihood
#'
#' @seealso \code{\link{dist_cdf}}, \code{\link{dist_pdf}}
#' and \code{\link{dist_rand}}
#' @note This method does *not* alter the internal state of
#' the distribution objects.
#' @export
#' @docType methods
#' @rdname dist_ll-methods
#' @examples
#' ##########################################
#' #Load data and create distribution object#
#' ##########################################
#' data(moby_sample)
#' m = displ$new(moby_sample)
#' m$setXmin(7); m$setPars(2)
#'
#' ##########################################
#' #Calculate the log-likelihood #
#' ##########################################
#' dist_ll(m)
setGeneric("dist_ll",
function(m)
standardGeneric("dist_ll"))
#' Random number generation for the distribution objects
#'
#' This is generic function for generating random numbers
#' from the underlying distribution of the distribution reference objects.
#' This function generates \code{n} random numbers using the parameters
#' and xmin values found in the associated reference object.
#'
#' @param m a distribution reference object.
#' @param n number of observations to be generated.
#' @return n random numbers
#'
#' @seealso \code{\link{dist_cdf}}, \code{\link{dist_pdf}}
#' and \code{\link{dist_ll}}
#' @note This method does *not* alter the internal state of
#' the distribution object. This method is only available for
#' discrete and continuous power law objects.
#' @export
#' @docType methods
#' @rdname dist_rand-methods
#' @examples
#' ##########################################
#' #Create distribution object #
#' ##########################################
#' m = displ$new()
#' m$setXmin(7);m$setPars(2)
#'
#' ##########################################
#' #Generate five random numbers #
#' ##########################################
#' dist_rand(m, 5)
setGeneric("dist_rand",
function(m, n)
standardGeneric("dist_rand"))
|
#@since 1.8.0
category Windows
require win32/registry
win32/resolv は Win32 プラットフォームで名前解決に関する情報を取得する
ためのライブラリです。
= class Win32::Resolv
名前解決に関する情報を取得するためのクラスです。
== module functions
--- get_hosts_path -> String | nil
hosts ファイルのパスを返します。ファイルが存在しない場合は nil を返します。
例:
require "win32/resolv"
p Win32::Resolv.get_hosts_path #=> "C:\Windows\System32\drivers\etc\hosts"
--- get_resolv_info -> [[String], [String]]
ドメイン名とネームサーバを配列の配列で返します。
例:
require "win32/resolv"
p Win32::Resolv.get_resolv_info #=> [["my.example.com"], ["192.168.1.1"]]
設定されていない情報は nil になります。
# ドメイン名が設定されていない場合。 require "win32/resolv"
p Win32::Resolv.get_resolv_info #=> [nil, ["192.168.1.1"]]
#@end
| /target/rubydoc/refm/api/src/win32/resolv.rd | no_license | nacyot/omegat-rurima-ruby | R | false | false | 991 | rd | #@since 1.8.0
category Windows
require win32/registry
win32/resolv は Win32 プラットフォームで名前解決に関する情報を取得する
ためのライブラリです。
= class Win32::Resolv
名前解決に関する情報を取得するためのクラスです。
== module functions
--- get_hosts_path -> String | nil
hosts ファイルのパスを返します。ファイルが存在しない場合は nil を返します。
例:
require "win32/resolv"
p Win32::Resolv.get_hosts_path #=> "C:\Windows\System32\drivers\etc\hosts"
--- get_resolv_info -> [[String], [String]]
ドメイン名とネームサーバを配列の配列で返します。
例:
require "win32/resolv"
p Win32::Resolv.get_resolv_info #=> [["my.example.com"], ["192.168.1.1"]]
設定されていない情報は nil になります。
# ドメイン名が設定されていない場合。 require "win32/resolv"
p Win32::Resolv.get_resolv_info #=> [nil, ["192.168.1.1"]]
#@end
|
# Build a design data.frame for lagged regression: each output row holds a
# sliding window of `columnSizeSentiment` consecutive sentiment values and,
# as response, the return observed `prediction` steps after the window ends.
#
# Args:
#   dataframe:               input data with a sentiment and a return column.
#   columnSizeSentiment:     window width (number of lagged sentiment columns).
#   prediction:              forecast horizon relative to the last window row
#                            (0 = response taken from the last window row).
#   sentimentColumnName:     name of the sentiment column in `dataframe`.
#   renditeColumnName:       name of the return column in `dataframe`.
#   renditeColumnNameOutput: name of the response column in the result.
#
# Returns: a data.frame with columns "1".."columnSizeSentiment" plus the
# response column, restricted to rows where the full window and the
# response are available (complete cases).
generateLinearRegressionDataFrame <- function(dataframe, columnSizeSentiment = 3, prediction = 0,
                                              sentimentColumnName = 'sentiment',
                                              renditeColumnName = 'Rendite',
                                              renditeColumnNameOutput = 'rendite') {
  n <- nrow(dataframe)
  # Preallocate one candidate row per input row. The original started from a
  # single-row frame and enlarged it on every assignment, which is quadratic
  # in n; it also used 1:nrow(dataframe), which misbehaves for n == 0.
  result <- data.frame(matrix(NA, nrow = n, ncol = columnSizeSentiment + 1))
  colnames(result) <- c(seq_len(columnSizeSentiment), renditeColumnNameOutput)
  lastLag <- columnSizeSentiment - 1
  for (i in seq_len(n)) {
    for (j in 0:lastLag) {
      idx <- i + j
      # Out-of-range rows stay NA and are dropped by complete.cases() below.
      if (idx <= n) {
        result[i, j + 1] <- dataframe[idx, sentimentColumnName]
      }
    }
    target <- i + lastLag + prediction
    # Guard both bounds: the original crashed with a length-zero replacement
    # when `prediction` was negative enough to index row 0 or below.
    if (target >= 1 && target <= n) {
      result[i, renditeColumnNameOutput] <- dataframe[target, renditeColumnName]
    }
  }
  result[complete.cases(result), ]
}
#tmp <- generateLinearRegressionDataFrame(rendite,
# columnSizeSentiment = 3,
# prediction = 1,
# sentimentColumnName = 'sentiment',
# renditeColumnName = 'Rendite',
# renditeColumnNameOutput = 'rendite') | /Testing/Sonstiges/functions.R | no_license | comnGuy/Sentimentindex-f-r-systemrelevante-Banken | R | false | false | 1,132 | r |
# Build a design data.frame for lagged regression: each output row holds a
# sliding window of `columnSizeSentiment` consecutive sentiment values and,
# as response, the return observed `prediction` steps after the window ends.
#
# Args:
#   dataframe:               input data with a sentiment and a return column.
#   columnSizeSentiment:     window width (number of lagged sentiment columns).
#   prediction:              forecast horizon relative to the last window row
#                            (0 = response taken from the last window row).
#   sentimentColumnName:     name of the sentiment column in `dataframe`.
#   renditeColumnName:       name of the return column in `dataframe`.
#   renditeColumnNameOutput: name of the response column in the result.
#
# Returns: a data.frame with columns "1".."columnSizeSentiment" plus the
# response column, restricted to rows where the full window and the
# response are available (complete cases).
generateLinearRegressionDataFrame <- function(dataframe, columnSizeSentiment = 3, prediction = 0,
                                              sentimentColumnName = 'sentiment',
                                              renditeColumnName = 'Rendite',
                                              renditeColumnNameOutput = 'rendite') {
  n <- nrow(dataframe)
  # Preallocate one candidate row per input row. The original started from a
  # single-row frame and enlarged it on every assignment, which is quadratic
  # in n; it also used 1:nrow(dataframe), which misbehaves for n == 0.
  result <- data.frame(matrix(NA, nrow = n, ncol = columnSizeSentiment + 1))
  colnames(result) <- c(seq_len(columnSizeSentiment), renditeColumnNameOutput)
  lastLag <- columnSizeSentiment - 1
  for (i in seq_len(n)) {
    for (j in 0:lastLag) {
      idx <- i + j
      # Out-of-range rows stay NA and are dropped by complete.cases() below.
      if (idx <= n) {
        result[i, j + 1] <- dataframe[idx, sentimentColumnName]
      }
    }
    target <- i + lastLag + prediction
    # Guard both bounds: the original crashed with a length-zero replacement
    # when `prediction` was negative enough to index row 0 or below.
    if (target >= 1 && target <= n) {
      result[i, renditeColumnNameOutput] <- dataframe[target, renditeColumnName]
    }
  }
  result[complete.cases(result), ]
}
#tmp <- generateLinearRegressionDataFrame(rendite,
# columnSizeSentiment = 3,
# prediction = 1,
# sentimentColumnName = 'sentiment',
# renditeColumnName = 'Rendite',
# renditeColumnNameOutput = 'rendite') |
## ---- Setup: data import and libraries ----
# NOTE(review): install.packages() runs unconditionally on every execution;
# consider guarding each with `if (!requireNamespace(...))` or installing
# once outside the script.
install.packages("readxl")
library(readxl)
# Rows 2-229, columns 1-32 of the Excel sheet (row 1 presumably a second
# header/unit row -- TODO confirm against the spreadsheet).
S1<-read_excel("Group26_RData(RESE)_FinalData.xlsx")[2:229, 1:32]
str(S1)
# Columns 3-32 are measurements -> numeric; columns 1-2 are treated as
# categorical factors (used later as YEAR and MONTH).
S1[,3:32] <- lapply(S1[,3:32], as.numeric)
S1[, 1:2] <- lapply(S1[,1:2], as.factor)
str(S1)
# Quick missing-value census over the whole data set.
table(is.na(S1))
install.packages("Hmisc")
library(Hmisc)
install.packages("MASS")
install.packages("ISLR")
install.packages("glmnet")
install.packages("leaps")
# NOTE(review): ElemStatLearn may no longer be available from CRAN; this
# install can fail on current R versions.
install.packages("ElemStatLearn")
library(ElemStatLearn)
library(glmnet)
library(ISLR)
library(MASS)
library(leaps)
install.packages("nlme")
library(mgcv)
install.packages("earth")
library(earth)
install.packages("tree")
library(tree)
install.packages("randomForest")
library(randomForest)
install.packages("ada")
library(ada)
install.packages("gbm")
library(gbm)
# ---- Exploratory analysis of the response (RCPC, residential consumption) ----
#Checking for normality of data
#Response
hist(S1$RCPC, prob=TRUE, xlab='',
main='Histogram of CONSUMPTION(RESE)', col='pink')
lines(density(S1$RCPC, na.rm=TRUE))
rug(S1$RCPC)
#Boxplots for Response Vs Year and Month
boxplot(S1$RCPC~I(S1$YEAR), main = "Annual Consumption(RESE) Distribution",col="yellow", xlab = "Years",ylab = "Consumption(RESE)", varwidth = TRUE)
rug(jitter(S1$RCPC),side=2)
abline(h=mean(S1$RCPC, na.rm=TRUE), lty=2)
plot( tapply(S1$RCPC, S1$MONTH, mean), main = "Monthwise Yearly Average Consumption(RESE)", xlab = "Month" , ylab = "Consumption(RESE)", type = "h", col = "Red")
#SCATTERPLOT+LINES THROUGH THE POINTS
plot(S1$RCPC,xlab='')
abline(h=mean(S1$RCPC, na.rm=TRUE), lty=1)
# BUG FIX: the original wrote `na_rm=T`, which is not an argument of mean()
# (the argument is `na.rm`) and raised an "unused argument" error at runtime.
abline(h=mean(S1$RCPC, na.rm=TRUE)+sd(S1$RCPC, na.rm=TRUE))
# Fixed misspelled axis label ("Rsidential" -> "Residential").
plot(S1$PRICE, S1$RCPC, main="Scatter plot",
xlab="PRICE", ylab=" Residential Consumption ", pch=19)
# Pairwise plots and Pearson correlations, grouped by variable family, to
# screen for collinear predictors before modelling.
# Response Vs Temperature Variables
plot(~S1$RCPC+S1$DT00+S1$DX90+S1$EMNT+S1$EMXT+S1$MNTM+S1$MMXT+S1$MMNT+S1$MDPT)
rcorr(cbind(S1$RCPC,S1$DT00,S1$DX90,S1$EMNT,S1$EMXT,S1$MNTM,S1$MMXT,S1$MMNT,S1$MDPT), type = "pearson")
#Remove MNTM
#Response Vs Precipitation Variables
plot(~S1$RCPC+S1$DP01+S1$DP10+S1$EMXP+S1$MNPM)
rcorr(cbind(S1$RCPC,S1$DP01,S1$DP10,S1$EMXP,S1$MNPM), type = "pearson")
# Keep all
#Response Vs Snow Variables
plot(~S1$RCPC+S1$DSND+S1$DSNF+S1$EMSD+S1$EMSF+S1$TMSF+S1$MSND)
rcorr(cbind(S1$RCPC,S1$DSND,S1$DSNF,S1$EMSD,S1$EMSF,S1$TMSF,S1$MSND), type = "pearson")
#Keep all
#Response Vs Cooling and heating days
plot(~S1$RCPC+S1$CMX65+S1$CMN65+S1$HMN65+S1$HMX65)
rcorr(cbind(S1$RCPC,S1$CMX65,S1$CMN65,S1$HMN65,S1$HMX65), type = "pearson")
#Keep all
#Response Vs Wind variables
plot(~S1$RCPC+S1$VISIB+S1$GUST+S1$WDSP+S1$MXSPD)
rcorr(cbind(S1$RCPC,S1$VISIB,S1$GUST,S1$WDSP,S1$MXSPD), type = "pearson")
#Keep all
#Response Vs Socioeconomic Variables
plot(~S1$RCPC+S1$UNEMPRATE+S1$GDP)
rcorr(cbind(S1$RCPC,S1$UNEMPRATE,S1$GDP), type = "pearson")
#Keep all
# Drop column 22 (MNTM, flagged as collinear above), then drop the YEAR and
# MONTH factor columns for the modelling data set.
Final <- S1[,c(-22)]
Data <- Final[, c(-1, -2)]
View(Data)
# Split the data 80/20 into training and test sets (reproducible seed).
set.seed(123)
train_idx <- sample(x = 1:nrow(Data), size = floor(0.80 * nrow(Data)))
train_data <- Data[train_idx, ]
test_data <- Data[-train_idx, ]
# Baseline (intercept-only) model: predict the mean consumption, and record
# its MSE/RMSE on both splits as a reference for the fitted models below.
null.msetrain <- mean((train_data$RCPC - mean(train_data$RCPC))^2)
null.rmsetrain <- sqrt(null.msetrain)
null.msetest <- mean((test_data$RCPC - mean(test_data$RCPC))^2)
null.rmsetest <- sqrt(null.msetest)
# ---- Multiple Linear Regression ----
# Full model on the training data (all predictors).
lm.fit <- lm(RCPC ~ ., data=train_data)
summary(lm.fit)
# Reduced model using the predictors retained from the full-model summary.
fit2=lm(RCPC ~ PRICE+WDSP+DSND+MXSPD+GDP, data = train_data)
summary(fit2)
# Same specification refit on the test data -- kept for diagnostic
# comparison only; it must not be used to report test error (see below).
fit3=lm(RCPC ~ PRICE+WDSD+DSND+MXSPD+GDP, data = test_data)
summary(fit3)
par(mfrow=c(2,2))
plot(fit2)
par(mfrow=c(2,2))
plot(fit3)
# BUG FIX: test performance must come from the model trained on the
# training set (fit2) predicting unseen test rows. The original evaluated
# the residuals of fit3 -- a model refit on the test set itself -- which
# leaks the test data and understates the true test error.
prediction = predict(fit2, newdata=test_data)
prediction
# Training MSE / RMSE of the reduced model.
mean(fit2$residuals^2)
sqrt(mean(fit2$residuals^2))
# Test MSE / RMSE of the reduced model.
linear.mse= mean((test_data$RCPC - prediction)^2)
linear.rmse= sqrt(linear.mse)
print(linear.rmse)
# ---- 10-fold cross-validation of the linear model (caret) ----
#Cross validation for Multiple Linear Regression
install.packages('caret')
library('caret')
set.seed(123)
train.control<-trainControl(method="cv", number=10)
# CV estimate of the full linear model's error on the training set.
cv_lm<-train(RCPC~., data=train_data, method="lm",
trControl=train.control)
print(cv_lm)
# NOTE(review): this second call cross-validates a model refit on the TEST
# set and silently overwrites cv_lm above; it is not a test-error estimate
# for the trained model. Confirm this is intentional.
train.control<-trainControl(method="cv", number=10)
cv_lm<-train(RCPC~., data=test_data, method="lm",
trControl=train.control)
print(cv_lm)
# ---- Ridge Regression ----
#Ridge Regression
set.seed(123)
# glmnet needs matrix input; column 1 of the split data frames is the
# response RCPC (used as Y below), the remaining columns are predictors.
train.X <- as.matrix(train_data[,-1])
train.Y <- as.matrix(train_data[,1])
test.X <- as.matrix(test_data[,-1])
test.Y <- as.matrix(test_data[,1])
library(glmnet)
set.seed(123)
# alpha = 0 -> pure ridge penalty; cross-validate to choose lambda.
cv <- cv.glmnet(x=train.X, y=train.Y, family = 'gaussian', alpha = 0)
cv$lambda.min
# Refit at the CV-optimal lambda.
ridge.fit <- glmnet(x=train.X, y=train.Y, family = 'gaussian', alpha = 0, lambda = cv$lambda.min)
#ridge.fit <- glmnet(x=train.X, y=train.Y, family = 'gaussian', alpha = 0)
# Training MSE / RMSE.
ridge.p<- predict(ridge.fit,newx=train.X)
ridge.msetrain<-mean((ridge.p-train.Y)^2)
ridge.rmsetrain<-sqrt(ridge.msetrain)
# Test MSE / RMSE.
ridge.pred <- predict(ridge.fit, newx=test.X)
ridge.mse <- mean((ridge.pred - test.Y)^2)
ridge.rmse<-sqrt(ridge.mse)
# NOTE(review): the axis labels say "Sales" but the response is consumption
# (RCPC); the labels look like a leftover from another project.
plot(test.Y, ridge.pred, pch="o", col='black',lty=5, main="LM: Ridge Actual vs Predicted",
xlab = "Actual Sales",
ylab="Predicted Sales")
plot(train.Y, ridge.p, pch="o", col='black',lty=5, main="LM: Ridge Actual vs Predicted",
xlab = "Actual Sales",
ylab="Predicted Sales")
coef(ridge.fit)
# ---- Lasso Regression ----
#Lasso Regression
set.seed(123)
# alpha = 1 -> lasso penalty; cross-validate lambda on the training set.
cv2 <- cv.glmnet(x=train.X, y=train.Y, family='gaussian', alpha = 1)
cv2$lambda.min
# Refit at the CV-optimal lambda.
lasso.fit <- glmnet(x=train.X, y=train.Y, family='gaussian', alpha = 1, lambda = cv2$lambda.min)
#lasso.fit <- glmnet(x=train.X, y=train.Y, family = 'gaussian', alpha = 1)
# Training MSE / RMSE.
lasso.p<-predict(lasso.fit,newx =train.X)
lasso.msetrain<-mean((lasso.p - train.Y)^2)
lasso.rmsetrain<-sqrt(lasso.msetrain)
# NOTE(review): the axis labels say "Sales" but the response is consumption.
plot(train.Y, lasso.p, pch="o", col='black',lty=5, main="LM: Lasso Actual vs Predicted",
xlab = "Actual Sales",
ylab="Predicted Sales")
# Test MSE / RMSE.
lasso.pred <- predict(lasso.fit, newx=test.X)
lasso.mse <- mean((lasso.pred - test.Y)^2)
lasso.rmse<-sqrt(lasso.mse)
plot(test.Y, lasso.pred, pch="o", col='black',lty=5, main="LM: Lasso Actual vs Predicted",
xlab = "Actual Sales",
ylab="Predicted Sales")
coef(lasso.fit)
# ---- Best-subset selection (exhaustive search, leaps) ----
#Best Subset
library(MASS)
library(ISLR)
library(glmnet)
library(ElemStatLearn)
library(leaps)
set.seed(123)
# Exhaustive search over subsets with up to 21 predictors.
regfit.full= regsubsets(RCPC~., data = train_data, method = "exhaustive", nvmax = 21)
plot(summary(regfit.full)$cp)
# Model size minimising Mallows' Cp.
num_var <- which.min(summary(regfit.full)$cp)
print(num_var)
summary(regfit.full)
# Columns flagged TRUE in the Cp-optimal row of the selection matrix.
# NOTE(review): entry 1 of this matrix is the intercept; using it as a
# column index into train_data selects column 1 (RCPC). That appears
# intended here (the response is needed for the lm formula), but the
# alignment between the which-matrix columns and train_data should be
# verified.
feat.cp <- which(summary(regfit.full)$which[num_var,] == TRUE)
subset.mod <- lm(RCPC ~ ., data=train_data[,feat.cp])
# Training MSE / RMSE.
subset.predtrain <- predict(subset.mod, newdata=train_data[,feat.cp])
subset.msetrain <- mean((subset.predtrain - train.Y)^2)
subset.rmsetrain<-sqrt(subset.msetrain)
# Test MSE / RMSE.
subset.pred <- predict(subset.mod, newdata=test_data[,feat.cp])
subset.mse <- mean((subset.pred - test.Y)^2)
subset.rmse<-sqrt(subset.mse)
#Forward subset
set.seed(123)
regfit.full=regsubsets(RCPC~., data = train_data, method = "forward", nvmax = 21)
plot(summary(regfit.full)$cp)
num_var <- which.min(summary(regfit.full)$cp)
print(num_var)
summary(regfit.full)
feat.cp <- which(summary(regfit.full)$which[num_var,] == TRUE)
forward.mod <- lm( RCPC~ ., data=train_data[,feat.cp])
forward.predtrain <- predict(forward.mod, newdata=train_data[,feat.cp])
forward.msetrain <- mean((forward.predtrain - train.Y)^2)
forward.rmsetrain<-sqrt(forward.msetrain)
forward.pred <- predict(forward.mod, newdata=test_data[,feat.cp])
forward.mse <- mean((forward.pred - test.Y)^2)
forward.rmse<-sqrt(forward.mse)
#Backward subset
# Backward stepwise selection. nvmax = 21 matches the exhaustive and
# forward searches above; without it regsubsets() stops at its default
# of 8 variables, making the three methods incomparable.
set.seed(123)
regfit.full=regsubsets(RCPC~., data = train_data, method = "backward", nvmax = 21)
plot(summary(regfit.full)$cp)
# Model size that minimises Mallows' Cp.
num_var <- which.min(summary(regfit.full)$cp)
print(num_var)
summary(regfit.full)
# Selected-term positions; position 1 ("(Intercept)") coincides with the
# RCPC response column of train_data, so the subset keeps the response too.
feat.cp <- which(summary(regfit.full)$which[num_var,] == TRUE)
backward.mod <- lm(RCPC ~ ., data=train_data[,feat.cp])
backward.predtrain <- predict(backward.mod, newdata=train_data[,feat.cp])
backward.msetrain <- mean((backward.predtrain - train.Y)^2)
backward.rmsetrain<-sqrt(backward.msetrain)
backward.pred <- predict(backward.mod, newdata=test_data[,feat.cp])
backward.mse <- mean((backward.pred - test.Y)^2)
backward.rmse<-sqrt(backward.mse)
#Generalized Additive Model
# NOTE(review): installing on every run is wasteful; nlme is installed but
# mgcv is what gets loaded -- presumably as a dependency; verify.
install.packages("nlme")
library(mgcv)
# NOTE(review): attach() is an anti-pattern (masking risk); the gam()
# calls below pass data=train_data anyway, so attach may be redundant.
attach(train_data)
set.seed(123)
# Full GAM: cubic regression splines (bs="cr") with k=3 basis functions
# per smooth term for every predictor.
gam1 <- gam(RCPC ~ s(PRICE, bs="cr", k=3) + s(CMX65, bs="cr",k=3)
+ s(CMN65, bs="cr",k=3) + s(DP01, bs="cr",k=3)
+ s(DP10, bs="cr",k=3) + s(DSND, bs="cr",k=3) + s(DT00, bs="cr",k=3)
+ s(DX90, bs="cr",k=3)
+ s(EMNT, bs="cr",k=3) + s(EMSD, bs="cr",k=3) + s(HMN65 , bs="cr",k=3)
+ s(VISIB, bs="cr",k=3) + s(HMX65, bs="cr",k=3) + s(MXSPD, bs="cr",k=3)
+ s(MMNT, bs="cr",k=3) + s(GUST, bs="cr",k=3) + s(WDSP, bs="cr",k=3)
+ s(UNEMPRATE, bs="cr",k=3) + s(GDP, bs="cr",k=3) + s(DSNF,bs="cr",k=3) +s(EMSF,bs="cr",k=3)
+ s(EMXP, bs="cr",k=3) + s(EMXT,bs="cr",k=3) +s(MNPM,bs="cr",k=3) +s(TMSF,bs="cr",k=3) + s(MMXT,bs="cr",k=3)
+ s(MDPT, bs="cr",k=3) + s(MSND,bs="cr",k=3), data=train_data)
par(mfrow=c(1,2))
gam.check(gam1)
summary(gam1)
par(mfrow=c(2,4))
plot(gam1, se=TRUE)
# Refined GAM: drop the listed smooths and re-enter those variables as
# plain linear terms (demoting smooth terms to linear effects).
gam2 <- update(gam1, .~. -s(PRICE, bs="cr", k=3)
- s(DP01, bs="cr",k=3)
- s(DP10, bs="cr",k=3) - s(DSND, bs="cr",k=3) - s(DT00, bs="cr",k=3)
- s(DX90, bs="cr",k=3)
- s(EMNT, bs="cr",k=3) - s(EMSD, bs="cr",k=3) - s(HMX65, bs="cr",k=3) - s(MXSPD, bs="cr",k=3)
- s(GUST, bs="cr",k=3) - s(WDSP, bs="cr",k=3)
- s(UNEMPRATE, bs="cr",k=3) -s(EMSF,bs="cr",k=3)
- s(EMXT,bs="cr",k=3) -s(MNPM,bs="cr",k=3) -s(TMSF,bs="cr",k=3) - s(MMXT,bs="cr",k=3)
- s(MDPT, bs="cr",k=3) + PRICE+DP01+DP10+DSND+DT00+DX90+EMNT+EMSD+HMX65+MXSPD+GUST+WDSP
+UNEMPRATE+EMSF+EMXT+MNPM+TMSF+MMXT+MDPT, data=train_data)
summary(gam2)
par(mfrow=c(1,1))
plot(gam2)
layout(matrix(c(1:1),1,1,byrow=TRUE))
residuals.gam <- c()
# Training-set residual diagnostics and fit plot for gam2.
gam.predicttrain <- predict(gam2, newdata = train_data, type="response")
residuals.gam <- (train_data$RCPC-gam.predicttrain)
library(car)
qqPlot(residuals.gam,main = "GAM:Residual Plot")
plot(train_data$RCPC, gam.predicttrain, pch="o", col='black',lty=5, main="GAM(TRAIN): Actual vs Fitted",
xlab = "Actual Consumption",
ylab="Predicted Consumption")
gam.msetrain <-mean((gam.predicttrain - train_data$RCPC)^2)
gam.rmsetrain<-sqrt(gam.msetrain)
layout(matrix(c(1:1),1,1,byrow=TRUE))
residuals.gam3 <- c()
# Test-set residual diagnostics and error for gam2.
gam.predict <- predict(gam2 , newdata = test_data, type="response")
residuals.gam3<- (test_data$RCPC-gam.predict)
library(car)
qqPlot(residuals.gam3,main = "GAM:Residual Plot")
plot(test_data$RCPC, gam.predict, pch="o", col='black',lty=5, main="GAM(TEST): Actual vs Fitted",
xlab = "Actual Consumption",
ylab="Predicted Consumption")
gam.mse <- mean((gam.predict - test_data$RCPC)^2)
gam.rmse<-sqrt(gam.mse)
#MARS
library(earth)
set.seed(123)
# MARS model pruned by 10-fold CV repeated 10 times (pmethod="cv").
mars1 <- earth(RCPC~., data=train_data, pmethod="cv", nfold=10, ncross=10)
print(mars1)
summary(mars1)
plot(mars1,which=1)
install.packages("dplyr")
library(dplyr)
# Tuning grid for caret: interaction degree 1-3 crossed with 10 values
# of nprune (max terms after pruning).
hyper_grid <- expand.grid(
degree = 1:3,
nprune = seq(2, 100, length.out = 10) %>% floor()
)
head(hyper_grid)
install.packages("caret")
library(caret)
# 10-fold CV tuning of MARS via caret.
# NOTE(review): cv_mars1 is only inspected below; all predictions/errors
# are computed from mars1, so the tuned model is never used -- confirm
# whether that is intentional.
cv_mars1 <- train(
x = subset(train_data, select = -RCPC),
y = train_data$RCPC,
method = "earth",
metric = "RMSE",
trControl = trainControl(method = "cv", number = 10),
tuneGrid = hyper_grid
)
# View results
cv_mars1$bestTune
ggplot(cv_mars1)
# Training-set fit, error and residual QQ plot for mars1.
mars.predicttrain <- predict(mars1,train_data,type="response")
plot(train_data$RCPC, mars.predicttrain, pch="o", col='black',lty=5, main="MARS(RESE): Actual vs Predicted(Train Data)",
xlab = "Actual",
ylab="Predicted")
mars.msetrain <-mean((mars.predicttrain - train_data$RCPC)^2)
mars.rmsetrain <- sqrt(mars.msetrain)
residuals.mars <- (train_data$RCPC-mars.predicttrain)
install.packages("car")
library(car)
par(mfrow=c(1,1))
qqPlot(residuals.mars, main = "MARS(RESE): Residual Plot(Train Data)")
# Test-set fit, error and residual QQ plot for mars1.
mars.predict <- predict(mars1,test_data,type="response")
plot(test_data$RCPC, mars.predict, pch="o", col='black',lty=5, main="MARS(RESE): Actual vs Fitted(Test Data)",
xlab = "Actual",
ylab="Predicted")
mars.mse <-mean((mars.predict - test_data$RCPC)^2)
mars.rmse <- sqrt(mars.mse)
residuals.mars1 <- (test_data$RCPC-mars.predict)
install.packages("car")
library(car)
par(mfrow=c(1,1))
qqPlot(residuals.mars1, main = "MARS(RESE): Residual Plot(Test Data)")
# Variable importance (trim=FALSE keeps unused predictors in the table).
mars.imp1 = evimp(mars1, trim = FALSE)
print(mars.imp1)
plot(mars.imp1)
plot(mars1)
#Tree Model
library(tree)
set.seed(123)
# NOTE(review): the variables `tree`, `cv.tree` and `prune.tree` shadow
# the package functions of the same names; function-call lookup still
# resolves correctly, but renaming would be clearer.
tree=tree(RCPC~., data=train_data)
summary(tree)
plot(tree)
text(tree,pretty=0)
#Determining optimal size of tree
# Cost-complexity cross-validation: deviance vs tree size.
cv.tree=cv.tree(tree)
plot(cv.tree$size,cv.tree$dev,type='b')
#Prune the tree on the best 6 terminal nodes)
prune.tree=prune.tree(tree,best=6)
plot(prune.tree)
text(prune.tree,pretty=0)
#Random Forest and Bagging
library(randomForest)
set.seed(123)
# Baseline single regression tree for the RMSE comparison below.
tree.random <- tree(RCPC~., train_data)
# Helper function for calculating RMSE
# Root-mean-squared error of a fitted regression model on a data set.
#
# model_obj: any fitted model that supports predict(model, newdata = ...).
# testing:   data frame containing the predictors and the observed response.
# target:    name (string) of the response column in `testing`.
# Returns a single numeric RMSE value; NA in predictions or response
# propagates to the result (no na.rm), matching the original behaviour.
rmse_reg <- function(model_obj, testing = NULL, target = NULL) {
  #Calculates rmse for a regression decision tree
  # Fail fast with a clear message instead of an obscure subscript error
  # when the caller forgets the data or the column name.
  stopifnot(!is.null(testing), !is.null(target), is.character(target))
  yhat <- predict(model_obj, newdata = testing)
  actual <- testing[[target]]
  sqrt(mean((yhat-actual)^2))
}
# Test RMSE of the single pruned-tree baseline.
rmse_reg(tree.random, test_data, "RCPC")
#Bagging
#Check how many trees to fit
set.seed(123)
# Bagging = random forest with mtry equal to all predictors.
bag <- randomForest(RCPC ~ ., data=train_data, mtry = ncol(train_data) - 1, importance = TRUE, ntree=400)
plot(bag, type='l', main='MSE by ntree for Bagging')
#Bagging by using optimal number of trees
# ntree=60 was presumably read off the plot above -- TODO confirm.
bag1 <- randomForest(RCPC~ ., data=train_data, mtry = ncol(train_data) - 1, importance = TRUE, ntree=60)
rmse_reg(bag1, train_data, "RCPC")
rmse_reg(bag1, test_data, "RCPC")
# Test-set fit plot, error and residual QQ plot.
bag.predict <- predict(bag1,test_data,type="response")
plot(test_data$RCPC, bag.predict, pch="o", col='red',lty=5, main="Bagging: Actual vs Fitted(Test Data)",
xlab = "Actual",
ylab="Predicted")
bag.mse <-mean((bag.predict - test_data$RCPC)^2)
bag.rmse <- sqrt(bag.mse)
residuals.bag <- (test_data$RCPC-bag.predict)
install.packages("car")
library(car)
par(mfrow=c(1,1))
qqPlot(residuals.bag, main = "Bagging: Residual Plot(Test Data)")
# Training-set fit plot, error and residual QQ plot.
bag.predicttrain <- predict(bag1,train_data,type="response")
plot(train_data$RCPC, bag.predicttrain, pch="o", col='red',lty=5, main="Bagging: Actual vs Fitted(train Data)",
xlab = "Actual",
ylab="Predicted")
bag.msetrain <-mean((bag.predicttrain - train_data$RCPC)^2)
bag.rmsetrain <- sqrt(bag.msetrain)
residuals.bagtrain <- (train_data$RCPC-bag.predicttrain)
install.packages("car")
library(car)
par(mfrow=c(1,1))
qqPlot(residuals.bagtrain, main = "Bagging: Residual Plot(Train Data)")
# Random Forest using optimal number of trees
set.seed(123)
rf.mse <- c()
# Grid-search mtry (predictors tried per split) from 1 to all predictors,
# recording the OOB training MSE at the final (60th) tree.
for(i in 1:(ncol(train_data)-1)){
rf.RESE <- randomForest(RCPC~., data=train_data, mtry=i, importance=TRUE, ntree=60)
rf.mse[i] <- rf.RESE$mse[60]
}
plot(rf.mse, main='Training Error by m', xlab='Number of Predictors', ylab='MSE')
#Select final model, 18 predictors per tree.
# mtry=18 was presumably chosen from the plot above -- TODO confirm.
rf.RESE <- randomForest(RCPC~., data=train_data, mtry=18, importance=TRUE, ntree=60)
#Out of bag error
rmse_reg(rf.RESE, test_data, "RCPC")
rmse_reg(rf.RESE, train_data, "RCPC")
#Variable importance for bagging
importance(bag1)
varImpPlot(bag1)
# Variable importance for Random Forest
importance(rf.RESE)
varImpPlot(rf.RESE)
summary(rf.RESE)
par(mfrow=c(2,2))
plot(rf.RESE)
# Test-set fit plot, error and residual QQ plot.
random.predict <- predict(rf.RESE,test_data,type="response")
plot(test_data$RCPC, random.predict, pch="o", col='red',lty=5, main="Random Forest: Actual vs Fitted(Test Data)",
xlab = "Actual",
ylab="Predicted")
random.mse <-mean((random.predict - test_data$RCPC)^2)
random.rmse <- sqrt(random.mse)
residuals.random <- (test_data$RCPC-random.predict)
install.packages("car")
library(car)
par(mfrow=c(1,1))
qqPlot(residuals.random, main = "Random Forest: Residual Plot(Test Data)")
# Training-set fit plot, error and residual QQ plot.
random.predicttrain <- predict(rf.RESE,train_data,type="response")
plot(train_data$RCPC, random.predicttrain, pch="o", col='red',lty=5, main="Random Forest: Actual vs Fitted(train Data)",
xlab = "Actual",
ylab="Predicted")
random.msetrain <-mean((random.predicttrain - train_data$RCPC)^2)
random.rmsetrain <- sqrt(random.msetrain)
residuals.randomtrain <- (train_data$RCPC-random.predicttrain)
install.packages("car")
library(car)
par(mfrow=c(1,1))
qqPlot(residuals.randomtrain, main = "Random Forest: Residual Plot(Train Data)")
# Construct partial dependence plots for final model(BAGGING)
# One PDP per predictor of interest, each evaluated on a 10-point grid
# over the bagged model bag1.
install.packages("pdp")
library(pdp)
p1 <- partial(bag1, pred.var = "HMX65", grid.resolution = 10) %>%
autoplot()
p2 <- partial(bag1, pred.var = "MMXT", grid.resolution = 10) %>%
autoplot()
p3 <- partial(bag1, pred.var = "PRICE",grid.resolution = 10) %>%
autoplot()
p4 <- partial(bag1, pred.var ="GDP",grid.resolution = 10) %>%
autoplot()
p5 <- partial(bag1, pred.var = "VISIB", grid.resolution = 10) %>%
autoplot()
p6 <- partial(bag1, pred.var = "CMN65", grid.resolution = 10) %>%
autoplot()
p7 <- partial(bag1, pred.var = "DSND",grid.resolution = 10) %>%
autoplot()
p8 <- partial(bag1, pred.var ="EMNT",grid.resolution = 10) %>%
autoplot()
p9 <- partial(bag1, pred.var = "EMXT", grid.resolution = 10) %>%
autoplot()
p10 <- partial(bag1, pred.var = "MMNT",grid.resolution = 10) %>%
autoplot()
p11 <- partial(bag1, pred.var ="UNEMPRATE",grid.resolution = 10) %>%
autoplot()
# Display plots side by side
gridExtra::grid.arrange(p1, p2, ncol = 2)
gridExtra::grid.arrange(p3, p4, ncol = 2)
gridExtra::grid.arrange(p5, p6, ncol = 2)
gridExtra::grid.arrange(p7, p8, ncol = 2)
gridExtra::grid.arrange(p9, p10, ncol = 2)
gridExtra::grid.arrange(p11, ncol = 2)
# Persist the entire workspace (all fitted models and error metrics).
save(list = ls(all=T),file = "./Group26_RData(RESE)_FinalData.RData")
| /Data Modeling for Residential Heating Oil.R | no_license | khushbu3apr/Data-Analytics-and-Predictive-Modeling- | R | false | false | 18,271 | r | install.packages("readxl")
library(readxl)
# Load the project workbook: rows 2-229, columns 1-32.
S1<-read_excel("Group26_RData(RESE)_FinalData.xlsx")[2:229, 1:32]
str(S1)
# Columns 3-32 are numeric measurements; columns 1-2 (YEAR, MONTH per
# the boxplots below) are treated as factors.
S1[,3:32] <- lapply(S1[,3:32], as.numeric)
S1[, 1:2] <- lapply(S1[,1:2], as.factor)
str(S1)
# Quick missing-value check across the whole table.
table(is.na(S1))
# NOTE(review): install.packages() on every run is slow and needs a
# network; consider guarding with requireNamespace().
install.packages("Hmisc")
library(Hmisc)
install.packages("MASS")
install.packages("ISLR")
install.packages("glmnet")
install.packages("leaps")
install.packages("ElemStatLearn")
library(ElemStatLearn)
library(glmnet)
library(ISLR)
library(MASS)
library(leaps)
install.packages("nlme")
library(mgcv)
install.packages("earth")
library(earth)
install.packages("tree")
library(tree)
install.packages("randomForest")
library(randomForest)
install.packages("ada")
library(ada)
install.packages("gbm")
library(gbm)
#Checking for normality of data
#Response
# Histogram with kernel density overlay and rug for the response RCPC.
hist(S1$RCPC,prob=T,xlab='',
main='Histogram of CONSUMPTION(RESE)',col='pink')
lines(density(S1$RCPC,na.rm=T))
rug(S1$RCPC)
#Boxplots for Response Vs Year and Month
boxplot(S1$RCPC~I(S1$YEAR), main = "Annual Consumption(RESE) Distribution",col="yellow", xlab = "Years",ylab = "Consumption(RESE)", varwidth = TRUE)
rug(jitter(S1$RCPC),side=2)
abline(h=mean(S1$RCPC,na.rm=T),lty=2)
plot( tapply(S1$RCPC, S1$MONTH, mean), main = "Monthwise Yearly Average Consumption(RESE)", xlab = "Month" , ylab = "Consumption(RESE)", type = "h", col = "Red")
#SCATTERPLOT+LINES THROUGH THE POINTS
plot(S1$RCPC,xlab='')
abline(h=mean(S1$RCPC,na.rm=T),lty=1)
# BUG FIX: was `na_rm=T`, which mean() silently absorbs via `...` and
# ignores, so NA were NOT removed; corrected to `na.rm=T` so the
# mean+1sd reference line matches the mean line above it.
abline(h=mean(S1$RCPC,na.rm=T)+sd(S1$RCPC,na.rm=T))
plot(S1$PRICE, S1$RCPC, main="Scatter plot",
xlab="PRICE", ylab=" Rsidential Consumption ", pch=19)
# Response Vs Temperature Variables
plot(~S1$RCPC+S1$DT00+S1$DX90+S1$EMNT+S1$EMXT+S1$MNTM+S1$MMXT+S1$MMNT+S1$MDPT)
rcorr(cbind(S1$RCPC,S1$DT00,S1$DX90,S1$EMNT,S1$EMXT,S1$MNTM,S1$MMXT,S1$MMNT,S1$MDPT), type = "pearson")
#Remove MNTM
#Response Vs Precipitation Variables
plot(~S1$RCPC+S1$DP01+S1$DP10+S1$EMXP+S1$MNPM)
rcorr(cbind(S1$RCPC,S1$DP01,S1$DP10,S1$EMXP,S1$MNPM), type = "pearson")
# Keep all
#Response Vs Snow Variables
plot(~S1$RCPC+S1$DSND+S1$DSNF+S1$EMSD+S1$EMSF+S1$TMSF+S1$MSND)
rcorr(cbind(S1$RCPC,S1$DSND,S1$DSNF,S1$EMSD,S1$EMSF,S1$TMSF,S1$MSND), type = "pearson")
#Keep all
#Response Vs Cooling and heating days
plot(~S1$RCPC+S1$CMX65+S1$CMN65+S1$HMN65+S1$HMX65)
rcorr(cbind(S1$RCPC,S1$CMX65,S1$CMN65,S1$HMN65,S1$HMX65), type = "pearson")
#Keep all
#Response Vs Wind variables
plot(~S1$RCPC+S1$VISIB+S1$GUST+S1$WDSP+S1$MXSPD)
rcorr(cbind(S1$RCPC,S1$VISIB,S1$GUST,S1$WDSP,S1$MXSPD), type = "pearson")
#Keep all
#Response Vs Socioeconomic Variables
plot(~S1$RCPC+S1$UNEMPRATE+S1$GDP)
rcorr(cbind(S1$RCPC,S1$UNEMPRATE,S1$GDP), type = "pearson")
#Keep all
# Drop column 22 (MNTM per the "Remove MNTM" decision above), then drop
# YEAR/MONTH (columns 1-2) to form the modelling data set.
Final <- S1[,c(-22)]
Data <- Final[, c(-1, -2)]
View(Data)
#Splitting data into training and tesing
# 80/20 random split, seeded for reproducibility.
set.seed(123)
train_idx <- sample(x = 1:nrow(Data), size = floor(0.80*nrow(Data)))
train_data <- Data[train_idx,]
test_data <- Data[-train_idx,]
#Null model MSE
# Baseline: predict the mean of each partition's own response.
null.msetrain<-mean((mean(train_data$RCPC)-train_data$RCPC)^2)
null.rmsetrain<-sqrt(null.msetrain)
null.msetest<-mean((mean(test_data$RCPC)-test_data$RCPC)^2)
null.rmsetest<-sqrt(null.msetest)
#Multiple Linear Regression)
lm.fit <- lm(RCPC ~ ., data=train_data)
summary(lm.fit)
# Reduced model on the training data.
fit2=lm(RCPC ~ PRICE+WDSP+DSND+MXSPD+GDP, data = train_data)
summary(fit2)
# NOTE(review): fit3 is *fitted on the test data* and then evaluated on
# that same test data below, so linear.mse/linear.rmse are in-sample
# errors, not true held-out errors -- confirm this is intended.
fit3=lm(RCPC ~ PRICE+WDSP+DSND+MXSPD+GDP, data = test_data)
summary(fit3)
par(mfrow=c(2,2))
plot(fit2)
par(mfrow=c(2,2))
plot(fit3)
prediction = predict(fit3,newdata=test_data)
prediction
# Training-set MSE/RMSE of fit2.
mean(fit2$residuals^2)
sqrt(mean(fit2$residuals^2))
linear.mse= mean((fit3$residuals)^2)
linear.rmse= sqrt(linear.mse)
print(linear.rmse)
#Cross validation for Multiple Linear Regression
install.packages('caret')
library('caret')
set.seed(123)
# 10-fold CV of the full linear model on the training split...
train.control<-trainControl(method="cv", number=10)
cv_lm<-train(RCPC~., data=train_data, method="lm",
trControl=train.control)
print(cv_lm)
# ...and again on the test split.
train.control<-trainControl(method="cv", number=10)
cv_lm<-train(RCPC~., data=test_data, method="lm",
trControl=train.control)
print(cv_lm)
#Ridge Regression
set.seed(123)
# glmnet needs matrices; column 1 of the data is the response RCPC.
train.X <- as.matrix(train_data[,-1])
train.Y <- as.matrix(train_data[,1])
test.X <- as.matrix(test_data[,-1])
test.Y <- as.matrix(test_data[,1])
library(glmnet)
set.seed(123)
# Cross-validate lambda for the ridge (alpha = 0) penalty.
cv <- cv.glmnet(x=train.X, y=train.Y, family = 'gaussian', alpha = 0)
cv$lambda.min
ridge.fit <- glmnet(x=train.X, y=train.Y, family = 'gaussian', alpha = 0, lambda = cv$lambda.min)
#ridge.fit <- glmnet(x=train.X, y=train.Y, family = 'gaussian', alpha = 0)
ridge.p<- predict(ridge.fit,newx=train.X)
# Ridge: training- and test-set errors, plus actual-vs-predicted plots.
ridge.msetrain<-mean((ridge.p-train.Y)^2)
ridge.rmsetrain<-sqrt(ridge.msetrain)
ridge.pred <- predict(ridge.fit, newx=test.X)
ridge.mse <- mean((ridge.pred - test.Y)^2)
ridge.rmse<-sqrt(ridge.mse)
plot(test.Y, ridge.pred, pch="o", col='black',lty=5, main="LM: Ridge Actual vs Predicted",
xlab = "Actual Sales",
ylab="Predicted Sales")
plot(train.Y, ridge.p, pch="o", col='black',lty=5, main="LM: Ridge Actual vs Predicted",
xlab = "Actual Sales",
ylab="Predicted Sales")
coef(ridge.fit)
#Lasso Regression
# Same workflow with alpha = 1 selecting the L1 (lasso) penalty.
set.seed(123)
cv2 <- cv.glmnet(x=train.X, y=train.Y, family='gaussian', alpha = 1)
cv2$lambda.min
lasso.fit <- glmnet(x=train.X, y=train.Y, family='gaussian', alpha = 1, lambda = cv2$lambda.min)
#lasso.fit <- glmnet(x=train.X, y=train.Y, family = 'gaussian', alpha = 1)
lasso.p<-predict(lasso.fit,newx =train.X)
lasso.msetrain<-mean((lasso.p - train.Y)^2)
lasso.rmsetrain<-sqrt(lasso.msetrain)
plot(train.Y, lasso.p, pch="o", col='black',lty=5, main="LM: Lasso Actual vs Predicted",
xlab = "Actual Sales",
ylab="Predicted Sales")
lasso.pred <- predict(lasso.fit, newx=test.X)
lasso.mse <- mean((lasso.pred - test.Y)^2)
lasso.rmse<-sqrt(lasso.mse)
plot(test.Y, lasso.pred, pch="o", col='black',lty=5, main="LM: Lasso Actual vs Predicted",
xlab = "Actual Sales",
ylab="Predicted Sales")
# Inspect which coefficients the lasso shrank to zero.
coef(lasso.fit)
#Best Subset
library(MASS)
library(ISLR)
library(glmnet)
library(ElemStatLearn)
library(leaps)
set.seed(123)
# Exhaustive search over all subsets up to 21 predictors.
regfit.full= regsubsets(RCPC~., data = train_data, method = "exhaustive", nvmax = 21)
plot(summary(regfit.full)$cp)
# Model size that minimises Mallows' Cp.
num_var <- which.min(summary(regfit.full)$cp)
print(num_var)
summary(regfit.full)
# Positions of selected terms; position 1 ("(Intercept)") coincides with
# the RCPC response column of train_data, so train_data[,feat.cp] keeps
# the response plus the chosen predictors -- assumes column order matches
# the model matrix; TODO confirm.
feat.cp <- which(summary(regfit.full)$which[num_var,] == TRUE)
subset.mod <- lm(RCPC ~ ., data=train_data[,feat.cp])
subset.predtrain <- predict(subset.mod, newdata=train_data[,feat.cp])
subset.msetrain <- mean((subset.predtrain - train.Y)^2)
subset.rmsetrain<-sqrt(subset.msetrain)
subset.pred <- predict(subset.mod, newdata=test_data[,feat.cp])
subset.mse <- mean((subset.pred - test.Y)^2)
subset.rmse<-sqrt(subset.mse)
#Forward subset
# Forward stepwise selection with the same Cp-based model choice.
set.seed(123)
regfit.full=regsubsets(RCPC~., data = train_data, method = "forward", nvmax = 21)
plot(summary(regfit.full)$cp)
num_var <- which.min(summary(regfit.full)$cp)
print(num_var)
summary(regfit.full)
feat.cp <- which(summary(regfit.full)$which[num_var,] == TRUE)
forward.mod <- lm( RCPC~ ., data=train_data[,feat.cp])
forward.predtrain <- predict(forward.mod, newdata=train_data[,feat.cp])
forward.msetrain <- mean((forward.predtrain - train.Y)^2)
forward.rmsetrain<-sqrt(forward.msetrain)
forward.pred <- predict(forward.mod, newdata=test_data[,feat.cp])
forward.mse <- mean((forward.pred - test.Y)^2)
forward.rmse<-sqrt(forward.mse)
#Backward subset
# Backward stepwise selection. nvmax = 21 matches the exhaustive and
# forward searches above; without it regsubsets() stops at its default
# of 8 variables, making the three methods incomparable.
set.seed(123)
regfit.full=regsubsets(RCPC~., data = train_data, method = "backward", nvmax = 21)
plot(summary(regfit.full)$cp)
# Model size that minimises Mallows' Cp.
num_var <- which.min(summary(regfit.full)$cp)
print(num_var)
summary(regfit.full)
# Selected-term positions; position 1 ("(Intercept)") coincides with the
# RCPC response column of train_data, so the subset keeps the response too.
feat.cp <- which(summary(regfit.full)$which[num_var,] == TRUE)
backward.mod <- lm(RCPC ~ ., data=train_data[,feat.cp])
backward.predtrain <- predict(backward.mod, newdata=train_data[,feat.cp])
backward.msetrain <- mean((backward.predtrain - train.Y)^2)
backward.rmsetrain<-sqrt(backward.msetrain)
backward.pred <- predict(backward.mod, newdata=test_data[,feat.cp])
backward.mse <- mean((backward.pred - test.Y)^2)
backward.rmse<-sqrt(backward.mse)
#Generalized Additive Model
# NOTE(review): nlme is installed but mgcv is what gets loaded --
# presumably as a dependency; verify.
install.packages("nlme")
library(mgcv)
# NOTE(review): attach() is an anti-pattern; gam() calls below already
# pass data=train_data, so attach may be redundant.
attach(train_data)
set.seed(123)
# Full GAM: cubic regression splines (bs="cr") with k=3 basis functions
# per smooth term for every predictor.
gam1 <- gam(RCPC ~ s(PRICE, bs="cr", k=3) + s(CMX65, bs="cr",k=3)
+ s(CMN65, bs="cr",k=3) + s(DP01, bs="cr",k=3)
+ s(DP10, bs="cr",k=3) + s(DSND, bs="cr",k=3) + s(DT00, bs="cr",k=3)
+ s(DX90, bs="cr",k=3)
+ s(EMNT, bs="cr",k=3) + s(EMSD, bs="cr",k=3) + s(HMN65 , bs="cr",k=3)
+ s(VISIB, bs="cr",k=3) + s(HMX65, bs="cr",k=3) + s(MXSPD, bs="cr",k=3)
+ s(MMNT, bs="cr",k=3) + s(GUST, bs="cr",k=3) + s(WDSP, bs="cr",k=3)
+ s(UNEMPRATE, bs="cr",k=3) + s(GDP, bs="cr",k=3) + s(DSNF,bs="cr",k=3) +s(EMSF,bs="cr",k=3)
+ s(EMXP, bs="cr",k=3) + s(EMXT,bs="cr",k=3) +s(MNPM,bs="cr",k=3) +s(TMSF,bs="cr",k=3) + s(MMXT,bs="cr",k=3)
+ s(MDPT, bs="cr",k=3) + s(MSND,bs="cr",k=3), data=train_data)
par(mfrow=c(1,2))
gam.check(gam1)
summary(gam1)
par(mfrow=c(2,4))
plot(gam1, se=TRUE)
# Refined GAM: drop the listed smooths and re-enter those variables as
# plain linear terms (demoting smooth terms to linear effects).
gam2 <- update(gam1, .~. -s(PRICE, bs="cr", k=3)
- s(DP01, bs="cr",k=3)
- s(DP10, bs="cr",k=3) - s(DSND, bs="cr",k=3) - s(DT00, bs="cr",k=3)
- s(DX90, bs="cr",k=3)
- s(EMNT, bs="cr",k=3) - s(EMSD, bs="cr",k=3) - s(HMX65, bs="cr",k=3) - s(MXSPD, bs="cr",k=3)
- s(GUST, bs="cr",k=3) - s(WDSP, bs="cr",k=3)
- s(UNEMPRATE, bs="cr",k=3) -s(EMSF,bs="cr",k=3)
- s(EMXT,bs="cr",k=3) -s(MNPM,bs="cr",k=3) -s(TMSF,bs="cr",k=3) - s(MMXT,bs="cr",k=3)
- s(MDPT, bs="cr",k=3) + PRICE+DP01+DP10+DSND+DT00+DX90+EMNT+EMSD+HMX65+MXSPD+GUST+WDSP
+UNEMPRATE+EMSF+EMXT+MNPM+TMSF+MMXT+MDPT, data=train_data)
summary(gam2)
par(mfrow=c(1,1))
plot(gam2)
layout(matrix(c(1:1),1,1,byrow=TRUE))
residuals.gam <- c()
# Training-set residual diagnostics and fit plot for gam2.
gam.predicttrain <- predict(gam2, newdata = train_data, type="response")
residuals.gam <- (train_data$RCPC-gam.predicttrain)
library(car)
qqPlot(residuals.gam,main = "GAM:Residual Plot")
plot(train_data$RCPC, gam.predicttrain, pch="o", col='black',lty=5, main="GAM(TRAIN): Actual vs Fitted",
xlab = "Actual Consumption",
ylab="Predicted Consumption")
gam.msetrain <-mean((gam.predicttrain - train_data$RCPC)^2)
gam.rmsetrain<-sqrt(gam.msetrain)
layout(matrix(c(1:1),1,1,byrow=TRUE))
residuals.gam3 <- c()
# Test-set residual diagnostics and error for gam2.
gam.predict <- predict(gam2 , newdata = test_data, type="response")
residuals.gam3<- (test_data$RCPC-gam.predict)
library(car)
qqPlot(residuals.gam3,main = "GAM:Residual Plot")
plot(test_data$RCPC, gam.predict, pch="o", col='black',lty=5, main="GAM(TEST): Actual vs Fitted",
xlab = "Actual Consumption",
ylab="Predicted Consumption")
gam.mse <- mean((gam.predict - test_data$RCPC)^2)
gam.rmse<-sqrt(gam.mse)
#MARS
library(earth)
set.seed(123)
# MARS model pruned by 10-fold CV repeated 10 times (pmethod="cv").
mars1 <- earth(RCPC~., data=train_data, pmethod="cv", nfold=10, ncross=10)
print(mars1)
summary(mars1)
plot(mars1,which=1)
install.packages("dplyr")
library(dplyr)
# Tuning grid for caret: interaction degree 1-3 crossed with 10 values
# of nprune (max terms after pruning).
hyper_grid <- expand.grid(
degree = 1:3,
nprune = seq(2, 100, length.out = 10) %>% floor()
)
head(hyper_grid)
install.packages("caret")
library(caret)
# 10-fold CV tuning of MARS via caret.
# NOTE(review): cv_mars1 is only inspected below; all predictions/errors
# are computed from mars1, so the tuned model is never used -- confirm
# whether that is intentional.
cv_mars1 <- train(
x = subset(train_data, select = -RCPC),
y = train_data$RCPC,
method = "earth",
metric = "RMSE",
trControl = trainControl(method = "cv", number = 10),
tuneGrid = hyper_grid
)
# View results
cv_mars1$bestTune
ggplot(cv_mars1)
# Training-set fit, error and residual QQ plot for mars1.
mars.predicttrain <- predict(mars1,train_data,type="response")
plot(train_data$RCPC, mars.predicttrain, pch="o", col='black',lty=5, main="MARS(RESE): Actual vs Predicted(Train Data)",
xlab = "Actual",
ylab="Predicted")
mars.msetrain <-mean((mars.predicttrain - train_data$RCPC)^2)
mars.rmsetrain <- sqrt(mars.msetrain)
residuals.mars <- (train_data$RCPC-mars.predicttrain)
install.packages("car")
library(car)
par(mfrow=c(1,1))
qqPlot(residuals.mars, main = "MARS(RESE): Residual Plot(Train Data)")
# Test-set fit, error and residual QQ plot for mars1.
mars.predict <- predict(mars1,test_data,type="response")
plot(test_data$RCPC, mars.predict, pch="o", col='black',lty=5, main="MARS(RESE): Actual vs Fitted(Test Data)",
xlab = "Actual",
ylab="Predicted")
mars.mse <-mean((mars.predict - test_data$RCPC)^2)
mars.rmse <- sqrt(mars.mse)
residuals.mars1 <- (test_data$RCPC-mars.predict)
install.packages("car")
library(car)
par(mfrow=c(1,1))
qqPlot(residuals.mars1, main = "MARS(RESE): Residual Plot(Test Data)")
# Variable importance (trim=FALSE keeps unused predictors in the table).
mars.imp1 = evimp(mars1, trim = FALSE)
print(mars.imp1)
plot(mars.imp1)
plot(mars1)
#Tree Model
library(tree)
set.seed(123)
# NOTE(review): the variables `tree`, `cv.tree` and `prune.tree` shadow
# the package functions of the same names; function-call lookup still
# resolves correctly, but renaming would be clearer.
tree=tree(RCPC~., data=train_data)
summary(tree)
plot(tree)
text(tree,pretty=0)
#Determining optimal size of tree
# Cost-complexity cross-validation: deviance vs tree size.
cv.tree=cv.tree(tree)
plot(cv.tree$size,cv.tree$dev,type='b')
#Prune the tree on the best 6 terminal nodes)
prune.tree=prune.tree(tree,best=6)
plot(prune.tree)
text(prune.tree,pretty=0)
#Random Forest and Bagging
library(randomForest)
set.seed(123)
# Baseline single regression tree for the RMSE comparison below.
tree.random <- tree(RCPC~., train_data)
# Helper function for calculating RMSE
# Root-mean-squared error of a fitted regression model on a data set.
#
# model_obj: any fitted model that supports predict(model, newdata = ...).
# testing:   data frame containing the predictors and the observed response.
# target:    name (string) of the response column in `testing`.
# Returns a single numeric RMSE value; NA in predictions or response
# propagates to the result (no na.rm), matching the original behaviour.
rmse_reg <- function(model_obj, testing = NULL, target = NULL) {
  #Calculates rmse for a regression decision tree
  # Fail fast with a clear message instead of an obscure subscript error
  # when the caller forgets the data or the column name.
  stopifnot(!is.null(testing), !is.null(target), is.character(target))
  yhat <- predict(model_obj, newdata = testing)
  actual <- testing[[target]]
  sqrt(mean((yhat-actual)^2))
}
# Test RMSE of the single-tree baseline.
rmse_reg(tree.random, test_data, "RCPC")
#Bagging
#Check how many trees to fit
set.seed(123)
# Bagging = random forest with mtry equal to all predictors.
bag <- randomForest(RCPC ~ ., data=train_data, mtry = ncol(train_data) - 1, importance = TRUE, ntree=400)
plot(bag, type='l', main='MSE by ntree for Bagging')
#Bagging by using optimal number of trees
# ntree=60 was presumably read off the plot above -- TODO confirm.
bag1 <- randomForest(RCPC~ ., data=train_data, mtry = ncol(train_data) - 1, importance = TRUE, ntree=60)
rmse_reg(bag1, train_data, "RCPC")
rmse_reg(bag1, test_data, "RCPC")
# Test-set fit plot, error and residual QQ plot.
bag.predict <- predict(bag1,test_data,type="response")
plot(test_data$RCPC, bag.predict, pch="o", col='red',lty=5, main="Bagging: Actual vs Fitted(Test Data)",
xlab = "Actual",
ylab="Predicted")
bag.mse <-mean((bag.predict - test_data$RCPC)^2)
bag.rmse <- sqrt(bag.mse)
residuals.bag <- (test_data$RCPC-bag.predict)
install.packages("car")
library(car)
par(mfrow=c(1,1))
qqPlot(residuals.bag, main = "Bagging: Residual Plot(Test Data)")
# Training-set fit plot, error and residual QQ plot.
bag.predicttrain <- predict(bag1,train_data,type="response")
plot(train_data$RCPC, bag.predicttrain, pch="o", col='red',lty=5, main="Bagging: Actual vs Fitted(train Data)",
xlab = "Actual",
ylab="Predicted")
bag.msetrain <-mean((bag.predicttrain - train_data$RCPC)^2)
bag.rmsetrain <- sqrt(bag.msetrain)
residuals.bagtrain <- (train_data$RCPC-bag.predicttrain)
install.packages("car")
library(car)
par(mfrow=c(1,1))
qqPlot(residuals.bagtrain, main = "Bagging: Residual Plot(Train Data)")
# Random Forest using optimal number of trees
set.seed(123)
rf.mse <- c()
# Grid-search mtry (predictors tried per split) from 1 to all predictors,
# recording the OOB training MSE at the final (60th) tree.
for(i in 1:(ncol(train_data)-1)){
rf.RESE <- randomForest(RCPC~., data=train_data, mtry=i, importance=TRUE, ntree=60)
rf.mse[i] <- rf.RESE$mse[60]
}
plot(rf.mse, main='Training Error by m', xlab='Number of Predictors', ylab='MSE')
#Select final model, 18 predictors per tree.
# mtry=18 was presumably chosen from the plot above -- TODO confirm.
rf.RESE <- randomForest(RCPC~., data=train_data, mtry=18, importance=TRUE, ntree=60)
#Out of bag error
rmse_reg(rf.RESE, test_data, "RCPC")
rmse_reg(rf.RESE, train_data, "RCPC")
#Variable importance for bagging
importance(bag1)
varImpPlot(bag1)
# Variable importance for Random Forest
importance(rf.RESE)
varImpPlot(rf.RESE)
summary(rf.RESE)
par(mfrow=c(2,2))
plot(rf.RESE)
# Test-set fit plot, error and residual QQ plot.
random.predict <- predict(rf.RESE,test_data,type="response")
plot(test_data$RCPC, random.predict, pch="o", col='red',lty=5, main="Random Forest: Actual vs Fitted(Test Data)",
xlab = "Actual",
ylab="Predicted")
random.mse <-mean((random.predict - test_data$RCPC)^2)
random.rmse <- sqrt(random.mse)
residuals.random <- (test_data$RCPC-random.predict)
install.packages("car")
library(car)
par(mfrow=c(1,1))
qqPlot(residuals.random, main = "Random Forest: Residual Plot(Test Data)")
# Training-set fit plot, error and residual QQ plot.
random.predicttrain <- predict(rf.RESE,train_data,type="response")
plot(train_data$RCPC, random.predicttrain, pch="o", col='red',lty=5, main="Random Forest: Actual vs Fitted(train Data)",
xlab = "Actual",
ylab="Predicted")
random.msetrain <-mean((random.predicttrain - train_data$RCPC)^2)
random.rmsetrain <- sqrt(random.msetrain)
residuals.randomtrain <- (train_data$RCPC-random.predicttrain)
install.packages("car")
library(car)
par(mfrow=c(1,1))
qqPlot(residuals.randomtrain, main = "Random Forest: Residual Plot(Train Data)")
# Construct partial dependence plots for the final model (bagging)
if (!requireNamespace("pdp", quietly = TRUE)) install.packages("pdp")
library(pdp)

# One partial-dependence plot per predictor of interest.  The original
# repeated the same partial() %>% autoplot() statement eleven times
# (p1 ... p11); build the list programmatically instead.
pdp_vars <- c("HMX65", "MMXT", "PRICE", "GDP", "VISIB", "CMN65",
              "DSND", "EMNT", "EMXT", "MMNT", "UNEMPRATE")
pdp_plots <- lapply(pdp_vars, function(v) {
  autoplot(partial(bag1, pred.var = v, grid.resolution = 10))
})

# Display plots side by side, two per page (the last, odd plot gets its own page)
for (i in seq(1, length(pdp_plots), by = 2)) {
  if (i < length(pdp_plots)) {
    gridExtra::grid.arrange(pdp_plots[[i]], pdp_plots[[i + 1]], ncol = 2)
  } else {
    gridExtra::grid.arrange(pdp_plots[[i]], ncol = 2)
  }
}

# Persist the full workspace for the report
save(list = ls(all = TRUE), file = "./Group26_RData(RESE)_FinalData.RData")
|
stepping.cadence.bands.folder.two.stepping.groups <-
  function(input_folder, output_folder, generate_charts = TRUE){
    #' Processes events files to produce histograms showing the distribution of stepping and
    #' weighted median stepping cadence across two groups of stepping bout duration.
    #' @description Processes every events file found in a folder.  For each file a faceted
    #'     set of histograms is generated showing the duration of stepping within cadence
    #'     bands (each band is 10 steps per minute wide) for stepping bouts lasting from
    #'     10 seconds to 1 minute and for bouts lasting over 1 minute.  The weighted median
    #'     cadence of stepping for each bout duration group is also calculated and marked
    #'     on the histogram.  The weighted median cadences for all processed events files
    #'     are returned as a data.frame.
    #' @param input_folder The filepath for the folder where the events files to be processed are saved
    #' @param output_folder The filepath for the folder where the generated files are to be saved
    #' @param generate_charts Set TRUE if stacked histograms showing the distribution of stepping
    #'     cadences are to be generated for each events file. Default = TRUE
    #' @export
    #' @examples input_folder <- paste(system.file("extdata", "", package = "activPAL"),"/",sep="")
    #' output_folder <- paste(tempdir(),"/",sep="")
    #'
    #' activPAL::stepping.cadence.bands.folder.two.stepping.groups(input_folder,output_folder,TRUE)
    #' # Omitting a value for generate_charts results in the charts being saved in the output folder.
    #' activPAL::stepping.cadence.bands.folder.two.stepping.groups(input_folder,output_folder)

    # Validate both folders before doing any processing.
    if (!valid.folder.path(input_folder)) {
      stop("A valid folder to search for events files has not been provided.")
    }
    if (!valid.folder.path(output_folder)) {
      stop("A valid folder to save the generated output has not been provided.")
    }
    # Bout duration bands in seconds: 10 s - 1 min and 1 min - 24 h.
    band_start <- c(10, 60)
    band_end <- c(60, 86400)
    stepping.cadence.bands.folder(input_folder, band_start, band_end,
                                  output_folder, generate_charts)
  }
stepping.cadence.bands.folder.four.stepping.groups <-
  function(input_folder, output_folder, generate_charts = TRUE){
    #' Processes events files to produce histograms showing the distribution of stepping and
    #' weighted median stepping cadence across four groups of stepping bout duration.
    #' @description Processes every events file found in a folder.  For each file a faceted
    #'     set of histograms is generated showing the duration of stepping within cadence
    #'     bands (each band is 10 steps per minute wide) for stepping bouts lasting
    #'     10 seconds to 1 minute, 1 minute to 5 minutes, 5 minutes to 10 minutes and over
    #'     10 minutes.  The weighted median cadence of stepping for each bout duration
    #'     group is also calculated and marked on the histogram.  The weighted median
    #'     cadences for all processed events files are returned as a data.frame.
    #' @param input_folder The filepath for the folder where the events files to be processed are saved
    #' @param output_folder The filepath for the folder where the generated files are to be saved
    #' @param generate_charts Set TRUE if stacked histograms showing the distribution of stepping cadences are to be generated for each events file
    #' @export
    #' @examples input_folder <- paste(system.file("extdata", "", package = "activPAL"),"/",sep="")
    #' output_folder <- paste(tempdir(),"/",sep="")
    #'
    #' activPAL::stepping.cadence.bands.folder.four.stepping.groups(input_folder,output_folder,TRUE)
    #' # Omitting a value for generate_charts results in the charts being saved in the output folder.
    #' activPAL::stepping.cadence.bands.folder.four.stepping.groups(input_folder,output_folder)

    # Validate both folders before doing any processing.
    if (!valid.folder.path(input_folder)) {
      stop("A valid folder to search for events files has not been provided.")
    }
    if (!valid.folder.path(output_folder)) {
      stop("A valid folder to save the generated output has not been provided.")
    }
    # Bout duration bands in seconds: 10 s - 1 min, 1 - 5 min, 5 - 10 min, 10 min - 24 h.
    band_start <- c(10, 60, 300, 600)
    band_end <- c(60, 300, 600, 86400)
    stepping.cadence.bands.folder(input_folder, band_start, band_end,
                                  output_folder, generate_charts)
  }
stepping.cadence.bands.folder <-
  function(input_folder,lower_bound,upper_bound,output_folder,generate_charts=FALSE){
    # Draw a stacked histogram showing the distribution of stepping cadence and median cadence
    # for different stepping durations, for every events file in input_folder.
    # Returns a data.frame with one row per file and one weighted-median-cadence column per
    # bout-duration group; the same table is written to median_cadence_summary.csv.

    # FIX: the previous pattern "Events*.csv" was a glob used as a regex - the "*"
    # quantified the "s" and the "." matched any character.  Anchor a genuine
    # ".csv" extension instead.
    file_list <- list.files(input_folder,pattern = "Events.*\\.csv$")
    cadence_summary <- data.frame(matrix(ncol = 3, nrow = 0))
    colnames(cadence_summary) <- c("bout_duration", "weighted_median_cadence", "file_id")
    for (i in file_list){
      # File id = the portion of the file name before the word "Event"
      file_name <- substr(i,1,gregexpr("Event",i)[[1]][1]-1)
      events_data <- pre.process.events.file(i,input_folder)
      if(nrow(events_data) > 0){
        # Label each stepping bout with its duration group and cadence
        stepping_summary <- stepping.cadence.bands.file(events_data,lower_bound,upper_bound)
        # Weighted median cadence per group, weighting each bout by its duration
        median_cadence_by_group <- stepping_summary %>% dplyr::group_by(.data$group) %>%
          dplyr::summarise(median_cadence = weighted.median(.data$cadence,.data$interval))
        if(generate_charts){
          stepping.cadence.bands.generate.histogram(stepping_summary,median_cadence_by_group,output_folder,file_name)
        }
        median_cadence_by_group$file_id <- file_name
        cadence_summary <- rbind(cadence_summary,median_cadence_by_group)
      }
    }
    # Move file_id to the front, then widen to one column per bout-duration group
    cadence_summary <- cadence_summary[,c(ncol(cadence_summary),1:(ncol(cadence_summary)-1))]
    cadence_summary <- tidyr::spread(cadence_summary,2,3)
    write.csv(cadence_summary,paste(output_folder,"median_cadence_summary.csv",sep=""),row.names = FALSE)
    return(cadence_summary)
  }
stepping.cadence.bands.file <-
  function(events_file,lower_bound,upper_bound){
    # Computes the stepping cadence of every stepping bout in a single events file
    # and labels each bout with the duration group it falls into.
    # @param events_file pre-processed events data (uses columns activity, interval, steps)
    # @param lower_bound vector of minimum bout durations (seconds), one per group
    # @param upper_bound vector of maximum bout durations (seconds), one per group
    # Returns events_file restricted to stepping bouts, with cadence and group columns added.

    # Keep only stepping events (activity == 2 - assumed activPAL stepping code,
    # TODO confirm) lasting at least the shortest group boundary.
    events_file <- events_file[which(events_file$activity == 2 & events_file$interval >= lower_bound[1]),]
    # Cadence in steps per minute (interval is in seconds)
    events_file$cadence <- events_file$steps / (events_file$interval / 60)
    events_file$group <- ""
    for (i in (1:length(lower_bound))){
      in_group <- which(events_file$interval >= lower_bound[i] & events_file$interval < upper_bound[i])
      if(length(in_group) > 0 ){
        # Convert the maximum and minimum duration into a label.
        # seconds_to_period() renders periods such as "1M 0S"; the gsub chain
        # below expands the single-letter units into words.  NOTE: the order of
        # the substitutions matters - units are expanded first, then zero-valued
        # components are stripped, then singular forms are fixed.
        duration_label <- paste(lubridate::seconds_to_period(lower_bound[i]),"to",lubridate::seconds_to_period(upper_bound[i]))
        duration_label <- gsub("d"," day",duration_label)
        duration_label <- gsub("S"," seconds",duration_label)
        duration_label <- gsub("M"," minutes",duration_label)
        duration_label <- gsub("H"," hours",duration_label)
        duration_label <- gsub(" 0 seconds","",duration_label)
        duration_label <- gsub(" 0 minutes","",duration_label)
        duration_label <- gsub(" 0 hours","",duration_label)
        duration_label <- gsub(" 1 minutes"," 1 minute",duration_label)
        duration_label <- gsub(" 1 hours"," 1 hour",duration_label)
        events_file[in_group,]$group <- duration_label
      }
    }
    return(events_file)
  }
stepping.cadence.bands.generate.histogram <-
  function(events_file,median_cadence_list,output_folder,file_name){
    # Renders and saves a faceted stacked histogram of stepping duration by cadence
    # band (10 steps/min wide), one facet per bout-duration group, with a vertical
    # line marking each group's weighted median cadence.
    # @param events_file output of stepping.cadence.bands.file (cadence + group columns)
    # @param median_cadence_list weighted median cadence for each group
    # @param output_folder folder the generated .png chart is written to
    # @param file_name file id used to build the output file name

    # Facet fill colours / median marker line colours (supports up to five groups)
    box_colour <- c("#d7191c","#fdae61","#ffffbf","#abd9e9","#2c7bb6")
    line_colour <- c("#377eb8","#4daf4a","#984ea3","#ff7f00","#ffff33")
    # Order the group factor levels by bout duration so facets run shortest first
    events_file$group <- factor(events_file$group,
                                levels = unique(events_file[order(events_file$interval),]$group))
    median_cadence_list$group <- factor(median_cadence_list$group,levels = levels(events_file$group))
    # Convert interval from seconds to minutes so the bar heights are stepping minutes
    events_file$interval <- events_file$interval / 60
    output_file <- paste(output_folder,file_name,"-cadence-histogram.png",sep="")
    # Histogram weighted by bout duration, cadence binned into 10 steps/min bands.
    # NOTE(review): vars() is used unqualified below - confirm ggplot2::vars (or
    # dplyr::vars) is imported in the package NAMESPACE.
    graph_data <- ggplot2::ggplot(events_file,ggplot2::aes(x = .data$cadence,fill = .data$group)) +
      ggplot2::geom_histogram(ggplot2::aes(weight = .data$interval),breaks = seq(0, 160, 10)) +
      ggplot2::geom_vline(data = median_cadence_list, ggplot2::aes(xintercept = .data$median_cadence,colour=.data$group),size = 2) +
      ggplot2::scale_x_continuous(breaks = seq(0, 160, 10)) +
      ggplot2::scale_fill_manual("Bout Duration",values = box_colour) +
      ggplot2::scale_color_manual("Bout Duration\nMedian Cadence",values = line_colour) +
      ggplot2::xlab("Stepping Cadence") +
      ggplot2::ylab("Stepping Duration (minutes)") +
      ggplot2::facet_grid(rows = vars(.data$group), scales = "fixed")
    ggplot2::ggsave(filename = output_file, plot = graph_data, width = 10, height = 8)
  }
| /R/stepping.cadence.histogram.R | no_license | cran/activPAL | R | false | false | 9,061 | r | stepping.cadence.bands.folder.two.stepping.groups <-
function(input_folder, output_folder,generate_charts=TRUE){
#' Processes events files to produce histograms showing the distribution of stepping and
#' weighted median stepping cadence across two groups of stepping bout duration.
#' @description Processes a folder of events files and generates a faceted set of histograms
#' for each events file showing the duration of stepping in different cadence bands (each
#' cadence band has a width of 10 steps per minute) for stepping bouts of duration 10
#' seconds to 1 minute and 1 minute plus. The weighted median cadence of stepping at
#' each stepping bout duration is also calculated and indicated on the histogram.
#' The values of the weighted median cadence for each stepping duration across all the
#' processed events files is also returned as a data.frame.
#' @param input_folder The filepath for the folder where the events files to be processed are saved
#' @param output_folder The filepath for the folder where the generated files are to be saved
#' @param generate_charts Set TRUE if stacked histograms showing the distribution of stepping
#' cadences are to be generated for each events file. Default = TRUE
#' @export
#' @examples input_folder <- paste(system.file("extdata", "", package = "activPAL"),"/",sep="")
#' output_folder <- paste(tempdir(),"/",sep="")
#'
#' activPAL::stepping.cadence.bands.folder.two.stepping.groups(input_folder,output_folder,TRUE)
#' # Omitting a value for generate_charts results in the charts being saved in the output folder.
#' activPAL::stepping.cadence.bands.folder.two.stepping.groups(input_folder,output_folder)
if(!valid.folder.path(input_folder)){
stop("A valid folder to search for events files has not been provided.")
}
if(!valid.folder.path(output_folder)){
stop("A valid folder to save the generated output has not been provided.")
}
# set the minimum and maximum duration for each stepping duration
lower_limit <- c(10,60)
upper_limit <- c(60,86400)
stepping.cadence.bands.folder(input_folder,lower_limit,upper_limit,output_folder,generate_charts)
}
stepping.cadence.bands.folder.four.stepping.groups <-
  function(input_folder, output_folder, generate_charts = TRUE){
    #' Processes events files to produce histograms showing the distribution of stepping and
    #' weighted median stepping cadence across four groups of stepping bout duration.
    #' @description Processes every events file found in a folder.  For each file a faceted
    #'     set of histograms is generated showing the duration of stepping within cadence
    #'     bands (each band is 10 steps per minute wide) for stepping bouts lasting
    #'     10 seconds to 1 minute, 1 minute to 5 minutes, 5 minutes to 10 minutes and over
    #'     10 minutes.  The weighted median cadence of stepping for each bout duration
    #'     group is also calculated and marked on the histogram.  The weighted median
    #'     cadences for all processed events files are returned as a data.frame.
    #' @param input_folder The filepath for the folder where the events files to be processed are saved
    #' @param output_folder The filepath for the folder where the generated files are to be saved
    #' @param generate_charts Set TRUE if stacked histograms showing the distribution of stepping cadences are to be generated for each events file
    #' @export
    #' @examples input_folder <- paste(system.file("extdata", "", package = "activPAL"),"/",sep="")
    #' output_folder <- paste(tempdir(),"/",sep="")
    #'
    #' activPAL::stepping.cadence.bands.folder.four.stepping.groups(input_folder,output_folder,TRUE)
    #' # Omitting a value for generate_charts results in the charts being saved in the output folder.
    #' activPAL::stepping.cadence.bands.folder.four.stepping.groups(input_folder,output_folder)

    # Validate both folders before doing any processing.
    if (!valid.folder.path(input_folder)) {
      stop("A valid folder to search for events files has not been provided.")
    }
    if (!valid.folder.path(output_folder)) {
      stop("A valid folder to save the generated output has not been provided.")
    }
    # Bout duration bands in seconds: 10 s - 1 min, 1 - 5 min, 5 - 10 min, 10 min - 24 h.
    band_start <- c(10, 60, 300, 600)
    band_end <- c(60, 300, 600, 86400)
    stepping.cadence.bands.folder(input_folder, band_start, band_end,
                                  output_folder, generate_charts)
  }
stepping.cadence.bands.folder <-
  function(input_folder,lower_bound,upper_bound,output_folder,generate_charts=FALSE){
    # Draw a stacked histogram showing the distribution of stepping cadence and median cadence
    # for different stepping durations, for every events file in input_folder.
    # Returns a data.frame with one row per file and one weighted-median-cadence column per
    # bout-duration group; the same table is written to median_cadence_summary.csv.

    # FIX: the previous pattern "Events*.csv" was a glob used as a regex - the "*"
    # quantified the "s" and the "." matched any character.  Anchor a genuine
    # ".csv" extension instead.
    file_list <- list.files(input_folder,pattern = "Events.*\\.csv$")
    cadence_summary <- data.frame(matrix(ncol = 3, nrow = 0))
    colnames(cadence_summary) <- c("bout_duration", "weighted_median_cadence", "file_id")
    for (i in file_list){
      # File id = the portion of the file name before the word "Event"
      file_name <- substr(i,1,gregexpr("Event",i)[[1]][1]-1)
      events_data <- pre.process.events.file(i,input_folder)
      if(nrow(events_data) > 0){
        # Label each stepping bout with its duration group and cadence
        stepping_summary <- stepping.cadence.bands.file(events_data,lower_bound,upper_bound)
        # Weighted median cadence per group, weighting each bout by its duration
        median_cadence_by_group <- stepping_summary %>% dplyr::group_by(.data$group) %>%
          dplyr::summarise(median_cadence = weighted.median(.data$cadence,.data$interval))
        if(generate_charts){
          stepping.cadence.bands.generate.histogram(stepping_summary,median_cadence_by_group,output_folder,file_name)
        }
        median_cadence_by_group$file_id <- file_name
        cadence_summary <- rbind(cadence_summary,median_cadence_by_group)
      }
    }
    # Move file_id to the front, then widen to one column per bout-duration group
    cadence_summary <- cadence_summary[,c(ncol(cadence_summary),1:(ncol(cadence_summary)-1))]
    cadence_summary <- tidyr::spread(cadence_summary,2,3)
    write.csv(cadence_summary,paste(output_folder,"median_cadence_summary.csv",sep=""),row.names = FALSE)
    return(cadence_summary)
  }
stepping.cadence.bands.file <-
  function(events_file,lower_bound,upper_bound){
    # Computes the stepping cadence of every stepping bout in a single events file
    # and labels each bout with the duration group it falls into.
    # @param events_file pre-processed events data (uses columns activity, interval, steps)
    # @param lower_bound vector of minimum bout durations (seconds), one per group
    # @param upper_bound vector of maximum bout durations (seconds), one per group
    # Returns events_file restricted to stepping bouts, with cadence and group columns added.

    # Keep only stepping events (activity == 2 - assumed activPAL stepping code,
    # TODO confirm) lasting at least the shortest group boundary.
    events_file <- events_file[which(events_file$activity == 2 & events_file$interval >= lower_bound[1]),]
    # Cadence in steps per minute (interval is in seconds)
    events_file$cadence <- events_file$steps / (events_file$interval / 60)
    events_file$group <- ""
    for (i in (1:length(lower_bound))){
      in_group <- which(events_file$interval >= lower_bound[i] & events_file$interval < upper_bound[i])
      if(length(in_group) > 0 ){
        # Convert the maximum and minimum duration into a label.
        # seconds_to_period() renders periods such as "1M 0S"; the gsub chain
        # below expands the single-letter units into words.  NOTE: the order of
        # the substitutions matters - units are expanded first, then zero-valued
        # components are stripped, then singular forms are fixed.
        duration_label <- paste(lubridate::seconds_to_period(lower_bound[i]),"to",lubridate::seconds_to_period(upper_bound[i]))
        duration_label <- gsub("d"," day",duration_label)
        duration_label <- gsub("S"," seconds",duration_label)
        duration_label <- gsub("M"," minutes",duration_label)
        duration_label <- gsub("H"," hours",duration_label)
        duration_label <- gsub(" 0 seconds","",duration_label)
        duration_label <- gsub(" 0 minutes","",duration_label)
        duration_label <- gsub(" 0 hours","",duration_label)
        duration_label <- gsub(" 1 minutes"," 1 minute",duration_label)
        duration_label <- gsub(" 1 hours"," 1 hour",duration_label)
        events_file[in_group,]$group <- duration_label
      }
    }
    return(events_file)
  }
stepping.cadence.bands.generate.histogram <-
  function(events_file,median_cadence_list,output_folder,file_name){
    # Renders and saves a faceted stacked histogram of stepping duration by cadence
    # band (10 steps/min wide), one facet per bout-duration group, with a vertical
    # line marking each group's weighted median cadence.
    # @param events_file output of stepping.cadence.bands.file (cadence + group columns)
    # @param median_cadence_list weighted median cadence for each group
    # @param output_folder folder the generated .png chart is written to
    # @param file_name file id used to build the output file name

    # Facet fill colours / median marker line colours (supports up to five groups)
    box_colour <- c("#d7191c","#fdae61","#ffffbf","#abd9e9","#2c7bb6")
    line_colour <- c("#377eb8","#4daf4a","#984ea3","#ff7f00","#ffff33")
    # Order the group factor levels by bout duration so facets run shortest first
    events_file$group <- factor(events_file$group,
                                levels = unique(events_file[order(events_file$interval),]$group))
    median_cadence_list$group <- factor(median_cadence_list$group,levels = levels(events_file$group))
    # Convert interval from seconds to minutes so the bar heights are stepping minutes
    events_file$interval <- events_file$interval / 60
    output_file <- paste(output_folder,file_name,"-cadence-histogram.png",sep="")
    # Histogram weighted by bout duration, cadence binned into 10 steps/min bands.
    # NOTE(review): vars() is used unqualified below - confirm ggplot2::vars (or
    # dplyr::vars) is imported in the package NAMESPACE.
    graph_data <- ggplot2::ggplot(events_file,ggplot2::aes(x = .data$cadence,fill = .data$group)) +
      ggplot2::geom_histogram(ggplot2::aes(weight = .data$interval),breaks = seq(0, 160, 10)) +
      ggplot2::geom_vline(data = median_cadence_list, ggplot2::aes(xintercept = .data$median_cadence,colour=.data$group),size = 2) +
      ggplot2::scale_x_continuous(breaks = seq(0, 160, 10)) +
      ggplot2::scale_fill_manual("Bout Duration",values = box_colour) +
      ggplot2::scale_color_manual("Bout Duration\nMedian Cadence",values = line_colour) +
      ggplot2::xlab("Stepping Cadence") +
      ggplot2::ylab("Stepping Duration (minutes)") +
      ggplot2::facet_grid(rows = vars(.data$group), scales = "fixed")
    ggplot2::ggsave(filename = output_file, plot = graph_data, width = 10, height = 8)
  }
|
# Applied hierarchical modeling in ecology
# Modeling distribution, abundance and species richness using R and BUGS
# Volume 2: Dynamic and Advanced models
# Marc Kéry & J. Andy Royle
#
# Chapter 10 : INTEGRATED MODELS FOR MULTIPLE TYPES OF DATA
# =========================================================
# Code from proofs dated 2020-08-19
library(AHMbook)
# library(unmarked)
# 10.2 A simulation game to improve your intuition about point, abundance,
# and occurrence patterns
# ========================================================================
# Explore how simPPe's arguments shape the simulated point pattern: landscape
# size, buffer, habitat-covariate variance (variance.X) and scale (theta.X),
# population size (M) and habitat preference (beta).
# Function call with explicit default arguments (requires AHMbook)
str(dat <- simPPe(lscape.size = 150, buffer.width = 25, variance.X = 1,
    theta.X = 10, M = 250, beta = 1, quads.along.side = 6))
# Produce Fig. 10.2
# sample.kind = "Rounding" restores the pre-R-3.6.0 sampler so the published
# figure can be reproduced on current R versions.
set.seed(117, sample.kind="Rounding")
str(dat <- simPPe(lscape.size = 200, buffer.width = 25, variance.X = 1,
    theta.X = 70, M = 200, beta = 1, quads.along.side = 6))
# Smaller study area, fewer individuals (M)
str(dat <- simPPe(lscape.size = 24, buffer.width = 2, variance.X = 1,
    theta.X = 10, M = 50, beta = 1, quads.along.side = 6))
# Stronger habitat heterogeneity (variance.X): more aggregation
str(dat <- simPPe(lscape.size = 24, buffer.width = 2, variance.X = 10,
    theta.X = 10, M = 50, beta = 1, quads.along.side = 6))
# Longer habitat gradient (theta.X)
str(dat <- simPPe(lscape.size = 24, buffer.width = 2, variance.X = 1,
    theta.X = 250, M = 250, beta = 1, quads.along.side = 6))
# No habitat variability (variance.X): homogeneous point process
str(dat <- simPPe(lscape.size = 24, buffer.width = 2, variance.X = 0,
    theta.X = 10, M = 100, beta = 1, quads.along.side = 6))
# No habitat preference (beta): homogeneous point process
str(dat <- simPPe(lscape.size = 24, buffer.width = 2, variance.X = 1,
    theta.X = 10, M = 100, beta = 0, quads.along.side = 6))
# Habitat heterogeneity at very small scale (theta.X) -> (almost)
# homogeneous point process (in spite of strong habitat preference)
str(dat <- simPPe(lscape.size = 1000, buffer.width = 20, variance.X = 1,
    theta.X = 0.001, M = 250, beta = 1, quads.along.side = 6))
# Effect of population size alone
str(simPPe(M = 1)) # This often produces no point at all
str(simPPe(M = 10))
str(simPPe(M = 100))
str(simPPe(M = 1000))
# Effect of quadrat size (quads.along.side) at fixed population size
str(simPPe(M = 20, quads.along.side = 50)) # Lots of small sites
str(simPPe(M = 20, quads.along.side = 10))
str(simPPe(M = 20, quads.along.side = 5))
str(simPPe(M = 20, quads.along.side = 1)) # study area is one single site
| /AHM2_ch10/AHM2_10.2.R | no_license | trashbirdecology/AHM_code | R | false | false | 2,516 | r | # Applied hierarchical modeling in ecology
# Modeling distribution, abundance and species richness using R and BUGS
# Volume 2: Dynamic and Advanced models
# Marc Kéry & J. Andy Royle
#
# Chapter 10 : INTEGRATED MODELS FOR MULTIPLE TYPES OF DATA
# =========================================================
# Code from proofs dated 2020-08-19
library(AHMbook)
# library(unmarked)
# 10.2 A simulation game to improve your intuition about point, abundance,
# and occurrence patterns
# ========================================================================
# Explore how simPPe's arguments shape the simulated point pattern: landscape
# size, buffer, habitat-covariate variance (variance.X) and scale (theta.X),
# population size (M) and habitat preference (beta).
# Function call with explicit default arguments (requires AHMbook)
str(dat <- simPPe(lscape.size = 150, buffer.width = 25, variance.X = 1,
    theta.X = 10, M = 250, beta = 1, quads.along.side = 6))
# Produce Fig. 10.2
# sample.kind = "Rounding" restores the pre-R-3.6.0 sampler so the published
# figure can be reproduced on current R versions.
set.seed(117, sample.kind="Rounding")
str(dat <- simPPe(lscape.size = 200, buffer.width = 25, variance.X = 1,
    theta.X = 70, M = 200, beta = 1, quads.along.side = 6))
# Smaller study area, fewer individuals (M)
str(dat <- simPPe(lscape.size = 24, buffer.width = 2, variance.X = 1,
    theta.X = 10, M = 50, beta = 1, quads.along.side = 6))
# Stronger habitat heterogeneity (variance.X): more aggregation
str(dat <- simPPe(lscape.size = 24, buffer.width = 2, variance.X = 10,
    theta.X = 10, M = 50, beta = 1, quads.along.side = 6))
# Longer habitat gradient (theta.X)
str(dat <- simPPe(lscape.size = 24, buffer.width = 2, variance.X = 1,
    theta.X = 250, M = 250, beta = 1, quads.along.side = 6))
# No habitat variability (variance.X): homogeneous point process
str(dat <- simPPe(lscape.size = 24, buffer.width = 2, variance.X = 0,
    theta.X = 10, M = 100, beta = 1, quads.along.side = 6))
# No habitat preference (beta): homogeneous point process
str(dat <- simPPe(lscape.size = 24, buffer.width = 2, variance.X = 1,
    theta.X = 10, M = 100, beta = 0, quads.along.side = 6))
# Habitat heterogeneity at very small scale (theta.X) -> (almost)
# homogeneous point process (in spite of strong habitat preference)
str(dat <- simPPe(lscape.size = 1000, buffer.width = 20, variance.X = 1,
    theta.X = 0.001, M = 250, beta = 1, quads.along.side = 6))
# Effect of population size alone
str(simPPe(M = 1)) # This often produces no point at all
str(simPPe(M = 10))
str(simPPe(M = 100))
str(simPPe(M = 1000))
# Effect of quadrat size (quads.along.side) at fixed population size
str(simPPe(M = 20, quads.along.side = 50)) # Lots of small sites
str(simPPe(M = 20, quads.along.side = 10))
str(simPPe(M = 20, quads.along.side = 5))
str(simPPe(M = 20, quads.along.side = 1)) # study area is one single site
|
#' @importFrom yaImpute ann
#' @importFrom Rcpp sourceCpp
#' @useDynLib stlplus
# Internal loess smoother used by stlplus.  Fits a local regression of y on x,
# evaluated at the design points m, computing the fit only every `jump` points
# and interpolating in between, and optionally blending a degree-0 fit into the
# series endpoints to stabilise the boundary estimates.  The numerical fit is
# done by c_loess() (compiled, exposed via Rcpp) and interpolation by .interp()
# - both package-internal helpers not visible in this file.
#
# x       : predictor values (defaults to 1:length(y))
# y       : response; may contain NAs
# span    : smoothing window width in observations (forced odd below)
# degree  : local polynomial degree passed to c_loess
# weights : observation weights (default all 1)
# m       : points at which the smooth is actually computed
# y_idx   : logical index of non-missing y values
# noNA    : TRUE when y has no missing values (enables the fast index setup)
# blend   : endpoint blending proportion with a lower-degree fit (0 = none)
# at      : points at which the (possibly interpolated) result is returned
.loess_stlplus <- function(x = NULL, y, span, degree, weights = NULL,
  m = c(1:length(y)), y_idx = !is.na(y), noNA = all(y_idx), blend = 0,
  jump = ceiling(span / 10), at = c(1:length(y))) {
  # Round to the nearest odd integer (used to derive the blend span).
  nextodd <- function(x) {
    x <- round(x)
    x2 <- ifelse(x %% 2 == 0, x + 1, x)
    as.integer(x2)
  }
  # n counts only the non-missing observations actually used in the fit.
  n <- length(y[y_idx])
  if(is.null(x)) x <- c(1:length(y))
  if(is.null(weights)) weights <- rep(1, length(y))
  n_m <- length(m)
  # loess requires an odd span; bump even spans up by one and warn.
  if((span %% 2) == 0) {
    span <- span + 1
    warning(paste("Span must be odd! Changed span from ",
      span - 1, " to ", span, sep = ""))
  }
  # Half-window size.
  s2 <- (span + 1) / 2
  # set up indices in R - easier
  # For each evaluation point, find the window [l_idx, r_idx] of observations
  # used in its local fit, and max_dist = distance to the window's far edge
  # (the loess tricube bandwidth).
  if(noNA) {
    if((diff(range(x))) < span) {
      # Window wider than the data: every fit uses all observations.
      l_idx <- rep(1, n_m)
      r_idx <- rep(n, n_m)
    } else{
      # Clamp the window at the series ends, centred in the interior.
      l_idx <- c(rep(1, length(m[m < s2])), m[m >= s2 & m <= n - s2] - s2 + 1,
        rep(n - span + 1, length(m[m > n - s2])))
      r_idx <- l_idx + span - 1
    }
    aa <- abs(m - x[l_idx])
    bb <- abs(x[r_idx] - m)
    max_dist <- ifelse(aa > bb, aa, bb)
    # max_dist <- apply(cbind(abs(m - x[l_idx]), abs(x[r_idx] - m)), 1, max)
  } else {
    # With missing values, window membership is found via k-nearest neighbours
    # among the observed x positions (kd-tree search from yaImpute).
    span3 <- min(span, n)
    x2 <- x[y_idx]
    # another approach
    a <- yaImpute::ann(ref = as.matrix(x2), target = as.matrix(m), tree.type = "kd",
      k = span3, eps = 0, verbose = FALSE)$knnIndexDist[,1:span3]
    l_idx <- apply(a, 1, min)
    r_idx <- apply(a, 1, max)
    max_dist <- apply(cbind(abs(m - x2[l_idx]), abs(x2[r_idx] - m)), 1, max)
  }
  # NOTE(review): widen the bandwidth when the span exceeds the data length;
  # this mirrors the standard loess adjustment for span > 1 - confirm against
  # the C side.
  if(span >= n)
    # max_dist <- max_dist * (span / n)
    max_dist <- max_dist + (span - n) / 2
  # Compiled loess fit at the design points m (l_idx converted to 0-based).
  out <- c_loess(x[y_idx], y[y_idx], degree, span, weights[y_idx],
    m, l_idx - 1, as.double(max_dist))
  res1 <- out$result
  # do interpolation
  # When the fit was only computed every `jump` points, fill in the values at
  # `at` using the fitted values and slopes.
  if(jump > 1)
    res1 <- .interp(m, out$result, out$slope, at)
  # res1 <- approx(x = m, y = out$result, xout = at)$y
  # --- Endpoint blending -------------------------------------------------
  # Blend a degree-0 fit into the first/last n.b points to reduce boundary
  # variance of the degree >= 1 fit.
  if(blend > 0 && blend <= 1 && degree >= 1) {
    # Span of the auxiliary degree-0 fit depends on the main fit's degree.
    if(degree == 2)
      sp0 <- nextodd((span + 1) / 2)
    if(degree == 1)
      sp0 <- span
    n.b <- as.integer(span / 2)
    blend <- 1 - blend # originally programmed backwards - easier to fix this way
    # indices for left and right blending points
    # take into account if n_m is too small
    mid <- median(m)
    bl_idx <- m <= n.b + jump & m < mid
    br_idx <- m >= n - n.b - jump + 1 & m >= mid
    left <- m[bl_idx]
    right <- m[br_idx]
    bl_idx_interp <- at <= max(left)
    br_idx_interp <- at >= min(right)
    left_interp <- at[bl_idx_interp]
    right_interp <- at[br_idx_interp]
    # left_interp <- at[bl_idx_interp]
    # right_interp <- at[br_idx_interp]
    l_idx2 <- l_idx[bl_idx | br_idx]
    r_idx2 <- r_idx[bl_idx | br_idx]
    max_dist2 <- max_dist[bl_idx | br_idx]
    m2 <- c(left, right)
    n_m2 <- length(m2)
    # speed this up later by only getting the loess smooth at the tails.
    # right now, a lot of unnecessary calculation is done at the interior
    # where blending doesn't matter
    # Auxiliary degree-0 fit at the blending points only.
    tmp <- c_loess(x[y_idx], y[y_idx], 0, sp0, weights[y_idx],
      m2, l_idx2-1, max_dist2)
    if(jump > 1) {
      # Interpolate the auxiliary fit at the requested output points:
      # first length(left) entries of tmp belong to the left tail, the rest
      # to the right tail.
      res2_left <- .interp(left,
        head(tmp$result, length(left)),
        head(tmp$slope, length(left)), left_interp)
      res2_right <- .interp(right,
        tail(tmp$result, length(right)),
        tail(tmp$slope, length(right)), right_interp)
    } else {
      res2_left <- head(tmp$result, length(left))
      res2_right <- tail(tmp$result, length(right))
    }
    # res2 <- approx(x = m, y = tmp$result, xout = at)$y
    # Blending weights ramp linearly from `blend` at the series edge to 1 at
    # the interior boundary of the blend region, clamped to [blend, 1].
    p.left <- ((1 - blend) / (n.b - 1)) * (left_interp - 1) + blend
    p.right <- ((blend - 1) / (n.b - 1)) * (right_interp - (n - n.b + 1)) + 1
    p.left[p.left < blend] <- blend
    p.left[p.left > 1] <- 1
    p.right[p.right < blend] <- blend
    p.right[p.right > 1] <- 1
    # Convex combination of the main and auxiliary fits in the tails.
    res1[bl_idx_interp] <- res1[bl_idx_interp] * p.left + res2_left * (1 - p.left)
    res1[br_idx_interp] <- res1[br_idx_interp] * p.right + res2_right * (1 - p.right)
    # xxx <- x[y_idx]
    # yyy <- y[y_idx]
    # tmp2 <- predict(loess(yyy ~ xxx, deg = 0, span=(sp0 + 0.00000001) / length(yyy), control = loess.control(surface = "direct")), newdata = m2)
    # tmp3 <- predict(loess(yyy ~ xxx, deg = degree, span=(span + 0.00000001) / length(yyy), control = loess.control(surface = "direct")), newdata = m)
  }
  res1
}
| /stlplus/R/loess_stl.R | permissive | akhikolla/TestedPackages-NoIssues | R | false | false | 4,363 | r | #' @importFrom yaImpute ann
#' @importFrom Rcpp sourceCpp
#' @useDynLib stlplus
# Internal loess smoother used by stlplus.  Fits a local regression of y on x,
# evaluated at the design points m, computing the fit only every `jump` points
# and interpolating in between, and optionally blending a degree-0 fit into the
# series endpoints to stabilise the boundary estimates.  The numerical fit is
# done by c_loess() (compiled, exposed via Rcpp) and interpolation by .interp()
# - both package-internal helpers not visible in this file.
#
# x       : predictor values (defaults to 1:length(y))
# y       : response; may contain NAs
# span    : smoothing window width in observations (forced odd below)
# degree  : local polynomial degree passed to c_loess
# weights : observation weights (default all 1)
# m       : points at which the smooth is actually computed
# y_idx   : logical index of non-missing y values
# noNA    : TRUE when y has no missing values (enables the fast index setup)
# blend   : endpoint blending proportion with a lower-degree fit (0 = none)
# at      : points at which the (possibly interpolated) result is returned
.loess_stlplus <- function(x = NULL, y, span, degree, weights = NULL,
  m = c(1:length(y)), y_idx = !is.na(y), noNA = all(y_idx), blend = 0,
  jump = ceiling(span / 10), at = c(1:length(y))) {
  # Round to the nearest odd integer (used to derive the blend span).
  nextodd <- function(x) {
    x <- round(x)
    x2 <- ifelse(x %% 2 == 0, x + 1, x)
    as.integer(x2)
  }
  # n counts only the non-missing observations actually used in the fit.
  n <- length(y[y_idx])
  if(is.null(x)) x <- c(1:length(y))
  if(is.null(weights)) weights <- rep(1, length(y))
  n_m <- length(m)
  # loess requires an odd span; bump even spans up by one and warn.
  if((span %% 2) == 0) {
    span <- span + 1
    warning(paste("Span must be odd! Changed span from ",
      span - 1, " to ", span, sep = ""))
  }
  # Half-window size.
  s2 <- (span + 1) / 2
  # set up indices in R - easier
  # For each evaluation point, find the window [l_idx, r_idx] of observations
  # used in its local fit, and max_dist = distance to the window's far edge
  # (the loess tricube bandwidth).
  if(noNA) {
    if((diff(range(x))) < span) {
      # Window wider than the data: every fit uses all observations.
      l_idx <- rep(1, n_m)
      r_idx <- rep(n, n_m)
    } else{
      # Clamp the window at the series ends, centred in the interior.
      l_idx <- c(rep(1, length(m[m < s2])), m[m >= s2 & m <= n - s2] - s2 + 1,
        rep(n - span + 1, length(m[m > n - s2])))
      r_idx <- l_idx + span - 1
    }
    aa <- abs(m - x[l_idx])
    bb <- abs(x[r_idx] - m)
    max_dist <- ifelse(aa > bb, aa, bb)
    # max_dist <- apply(cbind(abs(m - x[l_idx]), abs(x[r_idx] - m)), 1, max)
  } else {
    # With missing values, window membership is found via k-nearest neighbours
    # among the observed x positions (kd-tree search from yaImpute).
    span3 <- min(span, n)
    x2 <- x[y_idx]
    # another approach
    a <- yaImpute::ann(ref = as.matrix(x2), target = as.matrix(m), tree.type = "kd",
      k = span3, eps = 0, verbose = FALSE)$knnIndexDist[,1:span3]
    l_idx <- apply(a, 1, min)
    r_idx <- apply(a, 1, max)
    max_dist <- apply(cbind(abs(m - x2[l_idx]), abs(x2[r_idx] - m)), 1, max)
  }
  # NOTE(review): widen the bandwidth when the span exceeds the data length;
  # this mirrors the standard loess adjustment for span > 1 - confirm against
  # the C side.
  if(span >= n)
    # max_dist <- max_dist * (span / n)
    max_dist <- max_dist + (span - n) / 2
  # Compiled loess fit at the design points m (l_idx converted to 0-based).
  out <- c_loess(x[y_idx], y[y_idx], degree, span, weights[y_idx],
    m, l_idx - 1, as.double(max_dist))
  res1 <- out$result
  # do interpolation
  # When the fit was only computed every `jump` points, fill in the values at
  # `at` using the fitted values and slopes.
  if(jump > 1)
    res1 <- .interp(m, out$result, out$slope, at)
  # res1 <- approx(x = m, y = out$result, xout = at)$y
  # --- Endpoint blending -------------------------------------------------
  # Blend a degree-0 fit into the first/last n.b points to reduce boundary
  # variance of the degree >= 1 fit.
  if(blend > 0 && blend <= 1 && degree >= 1) {
    # Span of the auxiliary degree-0 fit depends on the main fit's degree.
    if(degree == 2)
      sp0 <- nextodd((span + 1) / 2)
    if(degree == 1)
      sp0 <- span
    n.b <- as.integer(span / 2)
    blend <- 1 - blend # originally programmed backwards - easier to fix this way
    # indices for left and right blending points
    # take into account if n_m is too small
    mid <- median(m)
    bl_idx <- m <= n.b + jump & m < mid
    br_idx <- m >= n - n.b - jump + 1 & m >= mid
    left <- m[bl_idx]
    right <- m[br_idx]
    bl_idx_interp <- at <= max(left)
    br_idx_interp <- at >= min(right)
    left_interp <- at[bl_idx_interp]
    right_interp <- at[br_idx_interp]
    # left_interp <- at[bl_idx_interp]
    # right_interp <- at[br_idx_interp]
    l_idx2 <- l_idx[bl_idx | br_idx]
    r_idx2 <- r_idx[bl_idx | br_idx]
    max_dist2 <- max_dist[bl_idx | br_idx]
    m2 <- c(left, right)
    n_m2 <- length(m2)
    # speed this up later by only getting the loess smooth at the tails.
    # right now, a lot of unnecessary calculation is done at the interior
    # where blending doesn't matter
    # Auxiliary degree-0 fit at the blending points only.
    tmp <- c_loess(x[y_idx], y[y_idx], 0, sp0, weights[y_idx],
      m2, l_idx2-1, max_dist2)
    if(jump > 1) {
      # Interpolate the auxiliary fit at the requested output points:
      # first length(left) entries of tmp belong to the left tail, the rest
      # to the right tail.
      res2_left <- .interp(left,
        head(tmp$result, length(left)),
        head(tmp$slope, length(left)), left_interp)
      res2_right <- .interp(right,
        tail(tmp$result, length(right)),
        tail(tmp$slope, length(right)), right_interp)
    } else {
      res2_left <- head(tmp$result, length(left))
      res2_right <- tail(tmp$result, length(right))
    }
    # res2 <- approx(x = m, y = tmp$result, xout = at)$y
    # Blending weights ramp linearly from `blend` at the series edge to 1 at
    # the interior boundary of the blend region, clamped to [blend, 1].
    p.left <- ((1 - blend) / (n.b - 1)) * (left_interp - 1) + blend
    p.right <- ((blend - 1) / (n.b - 1)) * (right_interp - (n - n.b + 1)) + 1
    p.left[p.left < blend] <- blend
    p.left[p.left > 1] <- 1
    p.right[p.right < blend] <- blend
    p.right[p.right > 1] <- 1
    # Convex combination of the main and auxiliary fits in the tails.
    res1[bl_idx_interp] <- res1[bl_idx_interp] * p.left + res2_left * (1 - p.left)
    res1[br_idx_interp] <- res1[br_idx_interp] * p.right + res2_right * (1 - p.right)
    # xxx <- x[y_idx]
    # yyy <- y[y_idx]
    # tmp2 <- predict(loess(yyy ~ xxx, deg = 0, span=(sp0 + 0.00000001) / length(yyy), control = loess.control(surface = "direct")), newdata = m2)
    # tmp3 <- predict(loess(yyy ~ xxx, deg = degree, span=(span + 0.00000001) / length(yyy), control = loess.control(surface = "direct")), newdata = m)
  }
  res1
}
|
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# Usage: R --vanilla --args graphfile G tol 1e-8 maxiter 100 < HITS.R
#parseCommandArgs()
# JUnit test class: dml.test.integration.applications.HITSTest.java
# command line invocation assuming $HITS_HOME is set to the home of the R script
# Rscript $HITS_HOME/HITSTest.R $HITS_HOME/in/ 2 0.000001 $HITS_HOME/expected/
# HITS (Hyperlink-Induced Topic Search) reference implementation in R.
# Computes hub and authority scores of a graph by power iteration, i.e. the
# leading left/right singular vectors of the adjacency matrix G.
# args: [1] input directory, [2] max iterations, [3] tolerance, [4] output directory
args <- commandArgs(TRUE)
library("Matrix")
maxiter = as.integer(args[2]);
tol = as.double(args[3]);
# Read the sparse adjacency matrix in MatrixMarket format
G = readMM(paste(args[1], "G.mtx", sep=""));
# Initialize both score vectors from the (rounded) matrix itself,
# matching the companion DML test script's initialization
authorities = round(G);
hubs = authorities
#N = nrow(G)
#D = ncol(G)
# HITS = power iterations to compute leading left/right singular vectors
#authorities = matrix(1.0/N,N,1)
#hubs = matrix(1.0/N,N,1)
converge = FALSE
iter = 0
while(!converge) {
# One power-iteration step: good hubs point at good authorities and vice versa
hubs_old = hubs
hubs = G %*% authorities
authorities_old = authorities
authorities = t(G) %*% hubs
# Normalize by the max element to keep scores bounded across iterations
hubs = hubs/max(hubs)
authorities = authorities/max(authorities)
# Squared-error change between successive iterates
delta_hubs = sum((hubs - hubs_old)^2)
delta_authorities = sum((authorities - authorities_old)^2)
# Stop when both deltas fall below tol, or after maxiter iterations
# (note: & binds tighter than |, so this reads (A & B) | C as intended)
converge = ((abs(delta_hubs) < tol) & (abs(delta_authorities) < tol) | (iter>maxiter))
iter = iter + 1
print(paste("Iterations :", iter, " delta_hubs :", delta_hubs, " delta_authorities :", delta_authorities))
}
# Write the final score vectors in MatrixMarket format
writeMM(as(hubs,"CsparseMatrix"),paste(args[4], "hubs", sep=""));
writeMM(as(authorities,"CsparseMatrix"),paste(args[4], "authorities",sep=""));
| /src/test/scripts/applications/hits/HITS.R | permissive | apache/systemds | R | false | false | 2,279 | r | #-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# Usage: R --vanilla --args graphfile G tol 1e-8 maxiter 100 < HITS.R
#parseCommandArgs()
# JUnit test class: dml.test.integration.applications.HITSTest.java
# command line invocation assuming $HITS_HOME is set to the home of the R script
# Rscript $HITS_HOME/HITSTest.R $HITS_HOME/in/ 2 0.000001 $HITS_HOME/expected/
# HITS (Hyperlink-Induced Topic Search) reference implementation in R.
# Computes hub and authority scores of a graph by power iteration, i.e. the
# leading left/right singular vectors of the adjacency matrix G.
# args: [1] input directory, [2] max iterations, [3] tolerance, [4] output directory
args <- commandArgs(TRUE)
library("Matrix")
maxiter = as.integer(args[2]);
tol = as.double(args[3]);
# Read the sparse adjacency matrix in MatrixMarket format
G = readMM(paste(args[1], "G.mtx", sep=""));
# Initialize both score vectors from the (rounded) matrix itself,
# matching the companion DML test script's initialization
authorities = round(G);
hubs = authorities
#N = nrow(G)
#D = ncol(G)
# HITS = power iterations to compute leading left/right singular vectors
#authorities = matrix(1.0/N,N,1)
#hubs = matrix(1.0/N,N,1)
converge = FALSE
iter = 0
while(!converge) {
# One power-iteration step: good hubs point at good authorities and vice versa
hubs_old = hubs
hubs = G %*% authorities
authorities_old = authorities
authorities = t(G) %*% hubs
# Normalize by the max element to keep scores bounded across iterations
hubs = hubs/max(hubs)
authorities = authorities/max(authorities)
# Squared-error change between successive iterates
delta_hubs = sum((hubs - hubs_old)^2)
delta_authorities = sum((authorities - authorities_old)^2)
# Stop when both deltas fall below tol, or after maxiter iterations
# (note: & binds tighter than |, so this reads (A & B) | C as intended)
converge = ((abs(delta_hubs) < tol) & (abs(delta_authorities) < tol) | (iter>maxiter))
iter = iter + 1
print(paste("Iterations :", iter, " delta_hubs :", delta_hubs, " delta_authorities :", delta_authorities))
}
# Write the final score vectors in MatrixMarket format
writeMM(as(hubs,"CsparseMatrix"),paste(args[4], "hubs", sep=""));
writeMM(as(authorities,"CsparseMatrix"),paste(args[4], "authorities",sep=""));
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/postset.R
\name{post_set}
\alias{post_set}
\title{Postset}
\usage{
post_set(PN, node)
}
\arguments{
\item{PN}{A Petri Net}
\item{node}{A place or transition in the Petri Net}
}
\description{
Get the postset of a transition or place in a Petri Net
}
| /man/post_set.Rd | no_license | gertjanssenswillen/petrinetR | R | false | true | 328 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/postset.R
\name{post_set}
\alias{post_set}
\title{Postset}
\usage{
post_set(PN, node)
}
\arguments{
\item{PN}{A Petri Net}
\item{node}{A place or transition in the Petri Net}
}
\description{
Get the postset of a transition or place in a Petri Net
}
|
# colAUC calculates for a vector with true values the Area Under the ROC Curve (AUC)
# for a matrix of samples.
# Matrix rows contain samples while the columns contain features/variables.
# The function is used to calculate different multiclass AUC measures AU1P, AU1U, AUNP, AUNU,
# following the definition by Ferri et al.:
# https://www.math.ucdavis.edu/~saito/data/roc/ferri-class-perf-metrics.pdf
#
# Args:
#   samples: numeric vector or matrix; rows = observations, columns = features.
#   truth:   vector of class labels, one per row of 'samples' (>= 2 classes required).
# Returns:
#   A matrix of pairwise AUC values in [0.5, 1]: one row per class pair
#   ("A vs. B") and one column per feature.
colAUC = function(samples, truth) {
  y = as.factor(truth)
  X = as.matrix(samples)
  # A single observation arrives as a 1-row matrix; transpose it to a column
  if (nrow(X) == 1)
    X = t(X)
  nr = nrow(X)
  nc = ncol(X)
  ny = table(y)
  ul = as.factor(rownames(ny))
  nl = length(ny)
  if (nl <= 1)
    stop("colAUC: List of labels 'y' have to contain at least 2 class labels.")
  if (!is.numeric(X))
    stop("colAUC: 'X' must be numeric")
  if (nr != length(y))
    stop("colAUC: length(y) and nrow(X) must be the same")
  # All unordered pairs of class indices; one AUC per pair per feature.
  # (The unused 'l' label matrix from the original has been removed.)
  per = t(utils::combn(seq_len(nl), 2))
  np = nrow(per)
  auc = matrix(0.5, np, nc)
  rownames(auc) = paste(ul[per[, 1]], " vs. ", ul[per[, 2]], sep = "")
  colnames(auc) = colnames(X)
  # Wilcoxon rank-sum AUC: precompute the row indices of each class
  idxl = vector(mode = "list", length = nl)
  for (i in seq_len(nl)) idxl[[i]] = which(y == ul[i])
  for (j in seq_len(nc)) {
    for (i in seq_len(np)) {
      c1 = per[i, 1]
      c2 = per[i, 2]
      n1 = as.numeric(ny[c1])
      n2 = as.numeric(ny[c2])
      if (n1 > 0 && n2 > 0) {  # scalar guard: use short-circuit && instead of &
        r = rank(c(X[idxl[[c1]], j], X[idxl[[c2]], j]))
        # AUC = (rank-sum of class c1 - minimal possible rank-sum) / (n1 * n2)
        auc[i, j] = (sum(r[seq_len(n1)]) - n1 * (n1 + 1) / 2) / (n1 * n2)
      }
    }
  }
  # Direction-independent AUC: fold values below 0.5 up into [0.5, 1]
  auc = pmax(auc, 1 - auc)
  return(auc)
}
| /R/Measure_colAUC.R | no_license | fabulthuis/mlr | R | false | false | 1,543 | r | # colAUC calculates for a vector with true values the Area Under the ROC Curve (AUC) for a matrix of samples.
# Matrix rows contain samples while the columns contain features/variables.
# The function is used to calculate different multiclass AUC measures AU1P, AU1U, AUNP, AUNU,
# following the definition by Ferri et al.:
# https://www.math.ucdavis.edu/~saito/data/roc/ferri-class-perf-metrics.pdf
colAUC = function(samples, truth) {
y = as.factor(truth)
X = as.matrix(samples)
if (nrow(X) == 1)
X = t(X)
nr = nrow(X)
nc = ncol(X)
ny = table(y)
ul = as.factor(rownames(ny))
nl = length(ny)
if (nl <= 1)
stop("colAUC: List of labels 'y' have to contain at least 2 class labels.")
if (!is.numeric(X))
stop("colAUC: 'X' must be numeric")
if (nr != length(y))
stop("colAUC: length(y) and nrow(X) must be the same")
l = matrix(rep(ul, each = nr), nr, nl)
per = t(utils::combn(1:nl, 2))
np = nrow(per)
auc = matrix(0.5, np, nc)
rownames(auc) = paste(ul[per[, 1]], " vs. ", ul[per[, 2]], sep = "")
colnames(auc) = colnames(X)
# Wilcoxon AUC
idxl = vector(mode = "list", length = nl)
for (i in 1:nl) idxl[[i]] = which(y == ul[i])
for (j in 1:nc) {
for (i in 1:np) {
c1 = per[i, 1]
c2 = per[i, 2]
n1 = as.numeric(ny[c1])
n2 = as.numeric(ny[c2])
if (n1 > 0 & n2 > 0) {
r = rank(c(X[idxl[[c1]], j], X[idxl[[c2]], j]))
auc[i, j] = (sum(r[1:n1]) - n1 * (n1 + 1) / 2) / (n1 * n2)
}
}
}
auc = pmax(auc, 1 - auc)
return(auc)
}
|
# Define Ornstein-Uhlenbeck parameters
# eq_price: long-run equilibrium price; sigma_r: volatility of the innovations
eq_price <- 1.0; sigma_r <- 0.02
# the_ta: strength of mean reversion; len_gth: number of simulated periods
the_ta <- 0.01; len_gth <- 1000
# Discretized OU recursion: p[i] = (1 - theta)*p[i-1] + theta*eq_price + eps[i]
drif_t <- the_ta*eq_price
theta_1 <- 1-the_ta
# Simulate Ornstein-Uhlenbeck process
in_nov <- sigma_r*rnorm(len_gth)
price_s <- numeric(len_gth)
# NOTE(review): the path is seeded with the first innovation, not with
# eq_price, so the process starts far below its equilibrium level
price_s[1] <- in_nov[1]
for (i in 2:len_gth) {
price_s[i] <- theta_1*price_s[i-1] +
in_nov[i] + drif_t
} # end for
plot(price_s, type="l",
xlab="periods", ylab="prices",
main="Ornstein-Uhlenbeck process")
legend("topright",
title=paste(c(paste0("sigma_r = ", sigma_r),
paste0("eq_price = ", eq_price),
paste0("the_ta = ", the_ta)),
collapse="\n"),
legend="", cex=0.8,
inset=0.1, bg="white", bty="n")
abline(h=eq_price, col='red', lwd=2)
re_turns <- rutils::diff_it(price_s)
lag_price <- rutils::lag_it(price_s)
for_mula <- re_turns ~ lag_price
l_m <- lm(for_mula)
summary(l_m)
# Plot regression
plot(for_mula, main="OU Returns Versus Lagged Prices")
abline(l_m, lwd=2, col="red")
# volatility parameter
c(sigma_r, sd(re_turns))
# Extract OU parameters from regression
co_eff <- summary(l_m)$coefficients
# theta strength of mean reversion
round(co_eff[2, ], 3)
# Equilibrium price
co_eff[1, 1]/co_eff[2, 1]
# Parameter and t-values
co_eff <- cbind(c(the_ta*eq_price, the_ta),
co_eff[, 1:2])
rownames(co_eff) <- c("drift", "theta")
round(co_eff, 3)
# Simulate Schwartz process
re_turns <- numeric(len_gth)
price_s <- numeric(len_gth)
price_s[1] <- eq_price
set.seed(1121) # Reset random numbers
for (i in 2:len_gth) {
re_turns[i] <- the_ta*(eq_price - price_s[i-1]) +
sigma_r*rnorm(1)
price_s[i] <- price_s[i-1] * exp(re_turns[i])
} # end for
plot(price_s, type="l",
xlab="periods", ylab="prices",
main="Log-normal Ornstein-Uhlenbeck process")
legend("topright",
title=paste(c(paste0("sigma_r = ", sigma_r),
paste0("eq_price = ", eq_price),
paste0("the_ta = ", the_ta)),
collapse="\n"),
legend="", cex=0.8,
inset=0.12, bg="white", bty="n")
abline(h=eq_price, col='red', lwd=2)
# Verify that rtools are working properly:
devtools::find_rtools()
devtools::has_devel()
# Load package Rcpp
library(Rcpp)
# Get documentation for package Rcpp
# Get short description
packageDescription("Rcpp")
# Load help page
help(package="Rcpp")
# list all datasets in "Rcpp"
data(package="Rcpp")
# list all objects in "Rcpp"
ls("package:Rcpp")
# Remove Rcpp from search path
detach("package:Rcpp")
# Define Rcpp function
Rcpp::cppFunction("
int times_two(int x)
{ return 2 * x;}
") # end cppFunction
# Run Rcpp function
times_two(3)
# Source Rcpp functions from file
Rcpp::sourceCpp(file="C:/Develop/lecture_slides/scripts/mult_rcpp.cpp")
# Multiply two numbers
mult_rcpp(2, 3)
mult_rcpp(1:3, 6:4)
# Multiply two vectors
mult_vec_rcpp(2, 3)
mult_vec_rcpp(1:3, 6:4)
# Define Rcpp function with loop
Rcpp::cppFunction("
double inner_mult(NumericVector x, NumericVector y) {
int x_size = x.size();
int y_size = y.size();
if (x_size != y_size) {
return 0;
} else {
double total = 0;
for(int i = 0; i < x_size; ++i) {
total += x[i] * y[i];
}
return total;
}
}") # end cppFunction
# Run Rcpp function
inner_mult(1:3, 6:4)
inner_mult(1:3, 6:3)
# Define Rcpp Sugar function with loop
Rcpp::cppFunction("
double inner_mult_sugar(NumericVector x, NumericVector y) {
return sum(x * y);
}") # end cppFunction
# Run Rcpp Sugar function
inner_mult_sugar(1:3, 6:4)
inner_mult_sugar(1:3, 6:3)
# Define R function with loop
# Compute the inner (dot) product of x and y with an explicit scalar loop.
# Deliberately kept as a loop (not sum(x * y)) to serve as the slow pure-R
# baseline in the microbenchmark against the Rcpp versions above.
#
# Args:
#   x, y: numeric vectors; elements beyond length(x) in y are ignored,
#         and if y is shorter than x the result is NA (out-of-range index).
# Returns: the scalar sum of elementwise products (0 for empty x).
inner_mult_r <- function(x, y) {
  to_tal <- 0
  # seq_along() instead of 1:NROW(x): avoids the 1:0 trap on empty input
  for (i in seq_along(x)) {
    to_tal <- to_tal + x[i] * y[i]
  }
  to_tal
} # end inner_mult_r
# Run R function
inner_mult_r(1:3, 6:4)
inner_mult_r(1:3, 6:3)
# Compare speed of Rcpp and R
library(microbenchmark)
summary(microbenchmark(
pure_r=inner_mult_r(1:10000, 1:10000),
inner_r=1:10000 %*% 1:10000,
r_cpp=inner_mult(1:10000, 1:10000),
r_cpp_sugar=inner_mult_sugar(1:10000, 1:10000),
times=10))[, c(1, 4, 5)]
# Define Ornstein-Uhlenbeck function in R
# Simulate a log-normal Ornstein-Uhlenbeck (Schwartz) price process.
# Each step draws one innovation with rnorm(1) -- one draw per period, in
# order -- so a given set.seed() reproduces exactly the same path as the
# Rcpp twin sim_ou_rcpp() fed the same innovations.
#
# Args:
#   len_gth:  number of periods to simulate (assumed >= 2).
#   eq_price: equilibrium price the process reverts to (also the start value).
#   vol_at:   volatility of the log-return innovations.
#   the_ta:   strength of mean reversion.
# Returns: numeric vector of simulated prices of length len_gth.
sim_ou <- function(len_gth=1000, eq_price=5.0,
             vol_at=0.01, the_ta=0.01) {
  rets <- numeric(len_gth)
  path <- numeric(len_gth)
  path[1] <- eq_price
  for (step in 2:len_gth) {
    # mean-reverting drift plus one fresh Gaussian innovation
    rets[step] <- the_ta*(eq_price - path[step-1]) + vol_at*rnorm(1)
    # log-normal update: prices stay positive
    path[step] <- path[step-1] * exp(rets[step])
  } # end for
  path
} # end sim_ou
# Simulate Ornstein-Uhlenbeck process in R
eq_price <- 5.0; sigma_r <- 0.01
the_ta <- 0.01; len_gth <- 1000
set.seed(1121) # Reset random numbers
ou_sim <- sim_ou(len_gth=len_gth, eq_price=eq_price, vol_at=sigma_r, the_ta=the_ta)
# Define Ornstein-Uhlenbeck function in Rcpp
Rcpp::cppFunction("
NumericVector sim_ou_rcpp(double eq_price,
double vol_at,
double the_ta,
NumericVector in_nov) {
int len_gth = in_nov.size();
NumericVector price_s(len_gth);
NumericVector re_turns(len_gth);
price_s[0] = eq_price;
for (int it = 1; it < len_gth; it++) {
re_turns[it] = the_ta*(eq_price - price_s[it-1]) + vol_at*in_nov[it-1];
price_s[it] = price_s[it-1] * exp(re_turns[it]);
} // end for
return price_s;
}") # end cppFunction
# Simulate Ornstein-Uhlenbeck process in Rcpp
set.seed(1121) # Reset random numbers
ou_sim_rcpp <- sim_ou_rcpp(eq_price=eq_price,
vol_at=sigma_r,
the_ta=the_ta,
in_nov=rnorm(len_gth))
all.equal(ou_sim, ou_sim_rcpp)
# Compare speed of Rcpp and R
library(microbenchmark)
summary(microbenchmark(
pure_r=sim_ou(len_gth=len_gth, eq_price=eq_price, vol_at=sigma_r, the_ta=the_ta),
r_cpp=sim_ou_rcpp(eq_price=eq_price, vol_at=sigma_r, the_ta=the_ta, in_nov=rnorm(len_gth)),
times=10))[, c(1, 4, 5)]
# Source Rcpp function for Ornstein-Uhlenbeck process from file
Rcpp::sourceCpp(file="C:/Develop/lecture_slides/scripts/sim_ou.cpp")
# Simulate Ornstein-Uhlenbeck process in Rcpp
set.seed(1121) # Reset random numbers
ou_sim_rcpp <- sim_ou_rcpp(eq_price=eq_price,
vol_at=sigma_r,
the_ta=the_ta,
in_nov=rnorm(len_gth))
all.equal(ou_sim, ou_sim_rcpp)
# Compare speed of Rcpp and R
library(microbenchmark)
summary(microbenchmark(
pure_r=sim_ou(len_gth=len_gth, eq_price=eq_price, vol_at=sigma_r, the_ta=the_ta),
r_cpp=sim_ou_rcpp(eq_price=eq_price, vol_at=sigma_r, the_ta=the_ta, in_nov=rnorm(len_gth)),
times=10))[, c(1, 4, 5)]
# Calculate uniformly distributed pseudo-random sequence
# Generate a pseudo-random sequence that is uniformly distributed on [0, 1].
# Iterates the chaotic logistic map x -> 4x(1-x) from the seed, then applies
# the arcsine transform acos(1-2x)/pi, which maps the logistic map's
# stationary (arcsine) distribution onto the uniform distribution.
#
# Args:
#   see_d:   starting value in (0, 1) for the logistic map.
#   len_gth: number of values to generate (>= 1).
# Returns: numeric vector of len_gth values in [0, 1].
uni_form <- function(see_d, len_gth=10) {
  stopifnot(len_gth >= 1)
  out_put <- numeric(len_gth)
  out_put[1] <- see_d
  # guard the loop: the original 2:len_gth errored when len_gth == 1
  if (len_gth > 1) {
    for (i in 2:len_gth) {
      out_put[i] <- 4*out_put[i-1]*(1-out_put[i-1])
    } # end for
  }
  acos(1-2*out_put)/pi
} # end uni_form
# Source Rcpp functions from file
Rcpp::sourceCpp(file="C:/Develop/lecture_slides/scripts/uni_form.cpp")
# Microbenchmark Rcpp code
library(microbenchmark)
summary(microbenchmark(
pure_r=runif(1e5),
r_loop=uni_form(0.3, 1e5),
r_cpp=uniform_rcpp(0.3, 1e5),
times=10))[, c(1, 4, 5)]
library(RcppArmadillo)
# Source Rcpp functions from file
Rcpp::sourceCpp(file="C:/Develop/lecture_slides/scripts/armadillo_functions.cpp")
vec1 <- runif(1e5)
vec2 <- runif(1e5)
vec_in(vec1, vec2)
vec1 %*% vec2
# Microbenchmark RcppArmadillo code
summary(microbenchmark(
vec_in=vec_in(vec1, vec2),
r_code=(vec1 %*% vec2),
times=100))[, c(1, 4, 5)] # end microbenchmark summary
# Microbenchmark shows:
# vec_in() is several times faster than %*%, especially for longer vectors.
# expr mean median
# 1 vec_in 110.7067 110.4530
# 2 r_code 585.5127 591.3575
# Source Rcpp functions from file
Rcpp::sourceCpp(file="C:/Develop/lecture_slides/scripts/sim_arima.cpp")
# Define AR(2) coefficients
co_eff <- c(0.9, 0.09)
len_gth <- 1e4
set.seed(1121)
in_nov <- rnorm(len_gth)
# Simulate ARIMA using filter()
arima_filter <- filter(x=in_nov,
filter=co_eff, method="recursive")
# Simulate ARIMA using sim_arima()
ari_ma <- sim_arima(in_nov, rev(co_eff))
all.equal(drop(ari_ma),
as.numeric(arima_filter))
# Microbenchmark RcppArmadillo code
summary(microbenchmark(
filter=filter(x=in_nov, filter=co_eff, method="recursive"),
sim_arima=sim_arima(in_nov, rev(co_eff)),
times=100))[, c(1, 4, 5)] # end microbenchmark summary
library(RcppArmadillo)
# Source Rcpp functions from file
Rcpp::sourceCpp(file="C:/Develop/lecture_slides/scripts/armadillo_functions.cpp")
mat_rix <- matrix(runif(1e5), nc=1e3)
# De-mean using apply()
new_mat <- apply(mat_rix, 2,
function(x) (x-mean(x)))
# De-mean using demean_mat()
demean_mat(mat_rix)
all.equal(new_mat, mat_rix)
# Microbenchmark RcppArmadillo code
summary(microbenchmark(
demean_mat=demean_mat(mat_rix),
apply=(apply(mat_rix, 2, mean)),
times=100))[, c(1, 4, 5)] # end microbenchmark summary
# Microbenchmark shows:
# Demean_mat() is over 70 times faster than apply()
# expr mean median
# 1 demean_mat 127.7539 125.604
# 2 apply 10781.7534 9291.674
# Perform matrix inversion
# Create random positive semi-definite matrix
mat_rix <- matrix(runif(25), nc=5)
mat_rix <- t(mat_rix) %*% mat_rix
# Invert the matrix
matrix_inv <- solve(mat_rix)
inv_mat(mat_rix)
all.equal(inv_mat, mat_rix)
# Microbenchmark RcppArmadillo code
library(microbenchmark)
summary(microbenchmark(
inv_mat=inv_mat(mat_rix),
solve=solve(mat_rix),
times=100))[, c(1, 4, 5)] # end microbenchmark summary
# Microbenchmark shows:
# inv_mat() is over 10 times faster than solve()
# expr mean median
# 1 inv_mat 3.42669 2.933
# 2 solve 32.00254 31.280
library(RcppArmadillo)
# Source Rcpp functions from file
Rcpp::sourceCpp("C:/Develop/lecture_slides/scripts/calc_weights.cpp")
# Create random matrix of returns
mat_rix <- matrix(rnorm(300), nc=5)
# Regularized inverse of covariance matrix
max_eigen <- 4
ei_gen <- eigen(cov(mat_rix))
cov_inv <- ei_gen$vectors[, 1:max_eigen] %*%
(t(ei_gen$vectors[, 1:max_eigen]) / ei_gen$values[1:max_eigen])
# Regularized inverse using RcppArmadillo
cov_inv_arma <- calc_inv(mat_rix, max_eigen)
all.equal(cov_inv, cov_inv_arma)
# Microbenchmark RcppArmadillo code
library(microbenchmark)
summary(microbenchmark(
pure_r={
ei_gen <- eigen(cov(mat_rix))
ei_gen$vectors[, 1:max_eigen] %*%
(t(ei_gen$vectors[, 1:max_eigen]) / ei_gen$values[1:max_eigen])
},
r_cpp=calc_inv(mat_rix, max_eigen),
times=100))[, c(1, 4, 5)] # end microbenchmark summary
# futures contracts codes
future_s <- rbind(c("S&P500 index", "ES"),
c("10yr Treasury", "ZN"),
c("VIX index", "VX"),
c("Gold", "GC"),
c("Oil", "CL"),
c("Euro FX", "EC"),
c("Swiss franc", "SF"),
c("Japanese Yen", "JY"))
colnames(future_s) <- c("Futures contract", "Code")
print(xtable::xtable(future_s), comment=FALSE, size="scriptsize", include.rownames=FALSE, latex.environments="flushleft")
# Monthly futures contract codes
month_codes <- cbind(c("January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"),
c("F", "G", "H", "J", "K", "M", "N", "Q", "U", "V", "X", "Z"))
colnames(month_codes) <- c("Month", "Code")
print(xtable::xtable(month_codes), comment=FALSE, size="scriptsize", include.rownames=FALSE, latex.environments="flushright")
# futures contracts codes
future_s <- rbind(c("S&P500 index", "SP", "ES"),
c("10yr Treasury", "ZN", "ZN"),
c("VIX index", "VX", "delisted"),
c("Gold", "GC", "YG"),
c("Oil", "CL", "QM"),
c("Euro FX", "EC", "E7"),
c("Swiss franc", "SF", "MSF"),
c("Japanese Yen", "JY", "J7"))
colnames(future_s) <- c("Futures contract", "Standard", "E-mini")
print(xtable::xtable(future_s), comment=FALSE, size="scriptsize", include.rownames=FALSE, latex.environments="flushleft")
# Load data for S&P Emini futures June 2019 contract
sym_bol <- "ES"
dir_name <- "C:/Develop/data/ib_data"
file_name <- file.path(dir_name, paste0(sym_bol, ".csv"))
# Read a data table from CSV file
price_s <- data.table::fread(file_name)
# Coerce price_s into data frame
data.table::setDF(price_s)
# Or
# price_s <- data.table:::as.data.frame.data.table(
# data.table::fread(file_name))
# first column of price_s is a numeric date-time
tail(price_s)
# Coerce price_s into xts series
price_s <- xts::xts(price_s[, 2:6],
order.by=as.Date(as.POSIXct.numeric(price_s[, 1],
tz="America/New_York",
origin="1970-01-01")))
colnames(price_s) <- c("Open", "High", "Low", "Close", "Volume")
tail(price_s)
# Plot OHLC data in x11 window
x11(width=5, height=4) # Open x11 for plotting
par(mar=c(5, 5, 2, 1), oma=c(0, 0, 0, 0))
chart_Series(x=price_s, TA="add_Vo()",
name="S&P500 futures")
# Plot dygraph
dygraphs::dygraph(price_s[, 1:4], main="OHLC prices") %>%
dyCandlestick()
# Load ESU8 data
dir_name <- "C:/Develop/data/ib_data"
file_name <- file.path(dir_name, "ESU8.csv")
ES_U8 <- data.table::fread(file_name)
data.table::setDF(ES_U8)
ES_U8 <- xts::xts(ES_U8[, 2:6],
order.by=as.Date(as.POSIXct.numeric(ES_U8[, 1],
tz="America/New_York", origin="1970-01-01")))
colnames(ES_U8) <- c("Open", "High", "Low", "Close", "Volume")
# Load ESM8 data
file_name <- file.path(dir_name, "ESM8.csv")
ES_M8 <- data.table::fread(file_name)
data.table::setDF(ES_M8)
ES_M8 <- xts::xts(ES_M8[, 2:6],
order.by=as.Date(as.POSIXct.numeric(ES_M8[, 1],
tz="America/New_York", origin="1970-01-01")))
colnames(ES_M8) <- c("Open", "High", "Low", "Close", "Volume")
x11(width=6, height=5) # Open x11 for plotting
# Plot last month of ESU8 and ESM8 volume data
en_d <- end(ES_M8)
star_t <- (en_d - 30)
vol_ume <- cbind(Vo(ES_U8),
Vo(ES_M8))[paste0(star_t, "/", en_d)]
colnames(vol_ume) <- c("ESU8", "ESM8")
col_ors <- c("blue", "green")
plot(vol_ume, col=col_ors, lwd=3, major.ticks="days",
format.labels="%b-%d", observation.based=TRUE,
main="Volumes of ESU8 and ESM8 futures")
legend("topleft", legend=colnames(vol_ume), col=col_ors,
title=NULL, bty="n", lty=1, lwd=6, inset=0.1, cex=0.7)
# Find date when ESU8 volume exceeds ESM8
exceed_s <- (vol_ume[, "ESU8"] > vol_ume[, "ESM8"])
in_dex <- match(TRUE, exceed_s)
# in_dex <- min(which(exceed_s))
# Scale the ES_M8 prices
in_dex <- index(exceed_s[in_dex])
fac_tor <- as.numeric(Cl(ES_U8[in_dex])/Cl(ES_M8[in_dex]))
ES_M8[, 1:4] <- fac_tor*ES_M8[, 1:4]
# Calculate continuous contract prices
chain_ed <- rbind(ES_M8[index(ES_M8) < in_dex],
ES_U8[index(ES_U8) >= in_dex])
# Or
# Chain_ed <- rbind(ES_M8[paste0("/", in_dex-1)],
# ES_U8[paste0(in_dex, "/")])
# Plot continuous contract prices
chart_Series(x=chain_ed["2018"], TA="add_Vo()",
name="S&P500 chained futures")
# Download VIX index data from CBOE
vix_index <- data.table::fread("http://www.cboe.com/publish/scheduledtask/mktdata/datahouse/vixcurrent.csv", skip=1)
class(vix_index)
dim(vix_index)
tail(vix_index)
sapply(vix_index, class)
vix_index <- xts(vix_index[, -1],
order.by=as.Date(vix_index$Date, format="%m/%d/%Y"))
colnames(vix_index) <- c("Open", "High", "Low", "Close")
# Save the VIX data to binary file
load(file="C:/Develop/data/ib_data/vix_cboe.RData")
ls(vix_env)
vix_env$vix_index <- vix_index
ls(vix_env)
save(vix_env, file="C:/Develop/data/ib_data/vix_cboe.RData")
# Plot OHLC data in x11 window
chart_Series(x=vix_index["2018"], name="VIX Index")
# Plot dygraph
dygraphs::dygraph(vix_index, main="VIX Index") %>%
dyCandlestick()
# Read CBOE monthly futures expiration dates
date_s <- read.csv(
file="C:/Develop/data/vix_data/vix_dates.csv",
stringsAsFactors=FALSE)
date_s <- as.Date(date_s[, 1])
year_s <- format(date_s, format="%Y")
year_s <- substring(year_s, 4)
# Monthly futures contract codes
month_codes <-
c("F", "G", "H", "J", "K", "M",
"N", "Q", "U", "V", "X", "Z")
sym_bols <- paste0("VX", month_codes, year_s)
date_s <- as.data.frame(date_s)
colnames(date_s) <- "exp_dates"
rownames(date_s) <- sym_bols
# Write dates to CSV file, with row names
write.csv(date_s, row.names=TRUE,
file="C:/Develop/data/vix_data/vix_futures.csv")
# Read back CBOE futures expiration dates
date_s <- read.csv(file="C:/Develop/data/vix_data/vix_futures.csv",
stringsAsFactors=FALSE, row.names=1)
date_s[, 1] <- as.Date(date_s[, 1])
# Load VIX futures data from binary file
load(file="C:/Develop/data/vix_data/vix_cboe.RData")
# Get all VIX futures for 2018 except January
sym_bols <- ls(vix_env)
sym_bols <- sym_bols[grep("*8", sym_bols)]
sym_bols <- sym_bols[2:9]
# Specify dates for curves
low_vol <- as.Date("2018-01-11")
hi_vol <- as.Date("2018-02-05")
# Extract all VIX futures prices on the dates
curve_s <- lapply(sym_bols, function(sym_bol) {
x_ts <- get(x=sym_bol, envir=vix_env)
Cl(x_ts[c(low_vol, hi_vol)])
}) # end lapply
curve_s <- rutils::do_call(cbind, curve_s)
colnames(curve_s) <- sym_bols
curve_s <- t(coredata(curve_s))
colnames(curve_s) <- c("Contango 01/11/2018",
"Backwardation 02/05/2018")
x11(width=7, height=5)
par(mar=c(3, 2, 1, 1), oma=c(0, 0, 0, 0))
plot(curve_s[, 1], type="l", lty=1, col="blue", lwd=3,
xaxt="n", xlab="", ylab="", ylim=range(curve_s),
main="VIX Futures Curves")
axis(1, at=(1:NROW(curve_s)), labels=rownames(curve_s))
lines(curve_s[, 2], lty=1, lwd=3, col="red")
legend(x="topright", legend=colnames(curve_s),
inset=0.05, cex=1.0, bty="n",
col=c("blue", "red"), lwd=6, lty=1)
# Load VIX futures data from binary file
load(file="C:/Develop/data/vix_data/vix_cboe.RData")
# Read CBOE futures expiration dates
date_s <- read.csv(file="C:/Develop/data/vix_data/vix_futures.csv",
stringsAsFactors=FALSE, row.names=1)
sym_bols <- rownames(date_s)
date_s <- as.Date(date_s[, 1])
# Interpolate a constant-maturity 30-day VIX futures price for a given date,
# using the two futures contracts whose expirations bracket (today + 30d).
# Relies on date_s (expiration dates), sym_bols, and vix_env loaded above.
to_day <- as.Date("2018-05-07")
maturi_ty <- (to_day + 30)
# Find neighboring futures contracts
# match(TRUE, ...) gives the first expiration strictly after the target
# maturity; the previous one is the front contract
in_dex <- match(TRUE, date_s > maturi_ty)
front_date <- date_s[in_dex-1]
back_date <- date_s[in_dex]
front_symbol <- sym_bols[in_dex-1]
back_symbol <- sym_bols[in_dex]
# Closing prices of the two bracketing contracts on to_day
front_price <- get(x=front_symbol, envir=vix_env)
front_price <- as.numeric(Cl(front_price[to_day]))
back_price <- get(x=back_symbol, envir=vix_env)
back_price <- as.numeric(Cl(back_price[to_day]))
# Calculate the constant maturity 30-day futures price
# ra_tio is the time-weight of the back contract: 0 at the front expiry,
# 1 at the back expiry
ra_tio <- as.numeric(maturi_ty - front_date) /
as.numeric(back_date - front_date)
pric_e <- (ra_tio*back_price + (1-ra_tio)*front_price)
library(HighFreq)
x11(width=5, height=3) # Open x11 for plotting
# Load VIX futures data from binary file
load(file="C:/Develop/data/vix_data/vix_cboe.RData")
# Plot VIX and SVXY data in x11 window
plot_theme <- chart_theme()
plot_theme$col$line.col <- "blue"
chart_Series(x=Cl(vix_env$vix_index["2007/"]),
theme=plot_theme, name="VIX Index")
chart_Series(x=Cl(rutils::etf_env$VTI["2007/"]),
theme=plot_theme, name="VTI ETF")
chart_Series(x=Cl(vix_env$vix_index["2017/2018"]),
theme=plot_theme, name="VIX Index")
chart_Series(x=Cl(rutils::etf_env$SVXY["2017/2018"]),
theme=plot_theme, name="SVXY ETF")
# Install package IBrokers
install.packages("IBrokers")
# Load package IBrokers
library(IBrokers)
# Get documentation for package IBrokers
# Get short description
packageDescription("IBrokers")
# Load help page
help(package="IBrokers")
# List all datasets in "IBrokers"
data(package="IBrokers")
# List all objects in "IBrokers"
ls("package:IBrokers")
# Remove IBrokers from search path
detach("package:IBrokers")
# Install package IBrokers2
devtools::install_github(repo="algoquant/IBrokers2")
# Connect to Interactive Brokers TWS
ib_connect <- IBrokers::twsConnect(port=7497)
# Or connect to IB Gateway
# Ib_connect <- ibgConnect(port=4002)
# Check connection
IBrokers::isConnected(ib_connect)
# Close the Interactive Brokers API connection
IBrokers::twsDisconnect(ib_connect)
# Connect to Interactive Brokers TWS
ib_connect <- IBrokers::twsConnect(port=7497)
# Or connect to IB Gateway
# Ib_connect <- ibgConnect(port=4002)
# Download account information from IB
ac_count <- "DU1215081"
ib_account <- IBrokers::reqAccountUpdates(conn=ib_connect,
acctCode=ac_count)
# Extract account balances
balance_s <- ib_account[[1]]
balance_s$AvailableFunds
# Extract contract names, net positions, and profits and losses
IBrokers::twsPortfolioValue(ib_account)
# Close the Interactive Brokers API connection
IBrokers::twsDisconnect(ib_connect)
# Define AAPL stock contract (object)
con_tract <- IBrokers::twsEquity("AAPL", primary="SMART")
# Define CHF currency contract
con_tract <- IBrokers::twsCurrency("CHF", currency="USD")
# Define S&P Emini future June 2019 contract
con_tract <- IBrokers::twsFuture(symbol="ES",
exch="GLOBEX", expiry="201906")
# Define 10yr Treasury future June 2019 contract
con_tract <- IBrokers::twsFuture(symbol="ZN",
exch="ECBOT", expiry="201906")
# Define euro currency future June 2019 contract
con_tract <- IBrokers::twsFuture(symbol="EUR",
exch="GLOBEX", expiry="201906")
# Define Gold future June 2019 contract
con_tract <- IBrokers::twsFuture(symbol="GC",
exch="NYMEX", expiry="201906")
# Define Oil future January 2019 contract
con_tract <- IBrokers::twsFuture(symbol="QM",
exch="NYMEX", expiry="201901")
# Test if contract object is correct
IBrokers::is.twsContract(con_tract)
# Get list with instrument information
IBrokers::reqContractDetails(conn=ib_connect, Contract=con_tract)
# Install the package twsInstrument
install.packages("twsInstrument", repos="http://r-forge.r-project.org")
# Define euro future using getContract() and Conid
con_tract <- twsInstrument::getContract("317631411")
# Get list with instrument information
IBrokers::reqContractDetails(conn=ib_connect, Contract=con_tract)
# Define VIX monthly and weekly futures June 2019 contract
sym_bol <- "VIX"
con_tract <- IBrokers::twsFuture(symbol=sym_bol,
exch="CFE", expiry="201906")
# Define VIX monthly futures June 2019 contract
con_tract <- IBrokers::twsFuture(symbol=sym_bol,
local="VXV8", exch="CFE", expiry="201906")
# Define VIX weekly futures October 3rd 2018 contract
con_tract <- IBrokers::twsFuture(symbol=sym_bol,
local="VX40V8", exch="CFE", expiry="201906")
# Get list with instrument information
IBrokers::reqContractDetails(conn=ib_connect,
Contract=con_tract)
# Define S&P Emini futures June 2019 contract
sym_bol <- "ES"
con_tract <- IBrokers::twsFuture(symbol=sym_bol,
exch="GLOBEX", expiry="201906")
# Open file for data download
dir_name <- "C:/Develop/data/ib_data"
dir.create(dir_name)
file_name <- file.path(dir_name, paste0(sym_bol, "_201906.csv"))
file_connect <- file(file_name, open="w")
# Connect to Interactive Brokers TWS
ib_connect <- IBrokers::twsConnect(port=7497)
# Write header to file
cat(paste(paste(sym_bol, c("Index", "Open", "High", "Low", "Close", "Volume", "WAP", "Count"), sep="."), collapse=","), "\n", file=file_connect)
# Download historical data to file
IBrokers::reqHistoricalData(conn=ib_connect,
Contract=con_tract,
barSize="1 day", duration="6 M",
file=file_connect)
# Close data file
close(file_connect)
# Close the Interactive Brokers API connection
IBrokers::twsDisconnect(ib_connect)
# Define IB contract objects for stock symbols
sym_bols <- c("AAPL", "F", "MSFT")
con_tracts <- lapply(sym_bols, IBrokers::twsEquity, primary="SMART")
names(con_tracts) <- sym_bols
# Open file connections for data download
dir_name <- "C:/Develop/data/ib_data"
file_names <- file.path(dir_name, paste0(sym_bols, format(Sys.time(), format="_%m_%d_%Y_%H_%M"), ".csv"))
file_connects <- lapply(file_names, function(file_name) file(file_name, open="w"))
# Connect to Interactive Brokers TWS
ib_connect <- IBrokers::twsConnect(port=7497)
# Download historical 1-minute bar data to files
for (it in 1:NROW(sym_bols)) {
sym_bol <- sym_bols[it]
file_connect <- file_connects[[it]]
con_tract <- con_tracts[[it]]
cat("Downloading data for: ", sym_bol, "\n")
# Write header to file
cat(paste(paste(sym_bol, c("Index", "Open", "High", "Low", "Close", "Volume", "WAP", "XTRA", "Count"), sep="."), collapse=","), "\n", file=file_connect)
IBrokers::reqHistoricalData(conn=ib_connect,
Contract=con_tract,
barSize="1 min", duration="2 D",
file=file_connect)
Sys.sleep(10) # 10s pause to avoid IB pacing violation
} # end for
# Close all the open bar-data file connections from the loop above
for (file_connect in file_connects) close(file_connect)
# Close the Interactive Brokers API connection
IBrokers::twsDisconnect(ib_connect)
# Define S&P Emini futures June 2018 contract
# include_expired="1" is required because the ESM8 contract has
# already expired at the time of the historical request
sym_bol <- "ES"
con_tract <- IBrokers::twsFuture(symbol=sym_bol,
  include_expired="1",
  exch="GLOBEX", expiry="201806")
# Open file connection for ESM8 data download
# NOTE(review): dir_name must already be defined earlier in the script
file_name <- file.path(dir_name, paste0(sym_bol, "M8.csv"))
file_connect <- file(file_name, open="w")
# Connect to Interactive Brokers TWS
# Port 7497 is presumably the TWS paper-trading API port - confirm
ib_connect <- IBrokers::twsConnect(port=7497)
# Download 2 years of daily historical bars directly into the file
IBrokers::reqHistoricalData(conn=ib_connect,
                Contract=con_tract,
                barSize="1 day", duration="2 Y",
                file=file_connect)
# Close data file
close(file_connect)
# Close the Interactive Brokers API connection
IBrokers::twsDisconnect(ib_connect)
# Load OHLC data and coerce it into xts series
price_s <- data.table::fread(file_name)
# Convert the data.table to a plain data frame in place so that
# base-R column indexing below works as expected
data.table::setDF(price_s)
# Column 1 holds a numeric Unix timestamp; columns 2:6 hold OHLCV
price_s <- xts::xts(price_s[, 2:6],
  order.by=as.Date(as.POSIXct.numeric(price_s[, 1],
    tz="America/New_York", origin="1970-01-01")))
colnames(price_s) <- c("Open", "High", "Low", "Close", "Volume")
# Plot OHLC data in x11 window
chart_Series(x=price_s, TA="add_Vo()",
       name="S&P500 ESM8 futures")
# Plot dygraph
dygraphs::dygraph(price_s[, 1:4], main="S&P500 ESM8 futures") %>%
  dyCandlestick()
# Define S&P Emini futures June 2019 contract
sym_bol <- "ES"
con_tract <- IBrokers::twsFuture(symbol=sym_bol,
  exch="GLOBEX", expiry="201906")
# Open file connection for data download
dir_name <- "C:/Develop/data/ib_data"
# Dir.create(dir_name)
file_name <- file.path(dir_name, paste0(sym_bol, "_taq_live.csv"))
file_connect <- file(file_name, open="w")
# Connect to Interactive Brokers TWS
ib_connect <- IBrokers::twsConnect(port=7497)
# Stream live tick (TAQ) data into the file; this call blocks until
# interrupted, after which the connections below are cleaned up
IBrokers::reqMktData(conn=ib_connect,
         Contract=con_tract,
         eventWrapper=eWrapper.MktData.CSV(1),
         file=file_connect)
# Close data file
close(file_connect)
# Close the Interactive Brokers API connection
IBrokers::twsDisconnect(ib_connect)
# Define S&P Emini futures June 2019 contract
sym_bol <- "ES"
con_tract <- IBrokers::twsFuture(symbol=sym_bol,
  exch="GLOBEX", expiry="201906")
# Open file connection for data download
dir_name <- "C:/Develop/data/ib_data"
# Dir.create(dir_name)
file_name <- file.path(dir_name, paste0(sym_bol, "_ohlc_live.csv"))
file_connect <- file(file_name, open="w")
# Connect to Interactive Brokers TWS
ib_connect <- IBrokers::twsConnect(port=7497)
# Stream live 1-second OHLC bars into the file (blocks until stopped)
IBrokers::reqRealTimeBars(conn=ib_connect,
              Contract=con_tract, barSize="1",
              eventWrapper=eWrapper.RealTimeBars.CSV(1),
              file=file_connect)
# Close the Interactive Brokers API connection
IBrokers::twsDisconnect(ib_connect)
# Close data file
close(file_connect)
# Load the live OHLC bar data written by reqRealTimeBars() and
# coerce it into an xts time series.
library(data.table)
price_s <- data.table::fread(file_name)
# Select the value columns V2:V6 by name with with=FALSE.
# Without with=FALSE, data.table evaluates j as an expression and
# returns the character vector paste0("V", 2:6) itself instead of
# the columns it names, so xts() would receive no data.
price_s <- xts::xts(price_s[, paste0("V", 2:6), with=FALSE],
  # V1 holds the numeric Unix timestamp of each bar; call the
  # as.POSIXct() generic instead of the S3 method directly
  as.POSIXct(as.numeric(price_s[, V1]), tz="America/New_York", origin="1970-01-01"))
colnames(price_s) <- c("Open", "High", "Low", "Close", "Volume")
# Plot OHLC data in x11 window
x11()
chart_Series(x=price_s, TA="add_Vo()",
       name="S&P500 ESM9 futures")
# Plot dygraph
library(dygraphs)
dygraphs::dygraph(price_s[, 1:4], main="S&P500 ESM9 futures") %>%
  dyCandlestick()
library(IBrokers)
# Define list of S&P futures and 10yr Treasury contracts
con_tracts <- list(ES=IBrokers::twsFuture(symbol="ES", exch="GLOBEX", expiry="201906"),
  ZN=IBrokers::twsFuture(symbol="ZN", exch="ECBOT", expiry="201906"))
# Open one file connection per contract, with a timestamped name so
# repeated runs don't overwrite earlier downloads
dir_name <- "C:/Develop/data/ib_data"
file_names <- file.path(dir_name, paste0(c("ES_", "ZN_"), format(Sys.time(), format="%m_%d_%Y_%H_%M"), ".csv"))
file_connects <- lapply(file_names, function(file_name) file(file_name, open="w"))
# Connect to Interactive Brokers TWS
ib_connect <- IBrokers::twsConnect(port=7497)
# Stream live 1-second bars for both contracts into their files,
# including data outside regular trading hours (useRTH=FALSE);
# the wrapper is sized to the number of contracts
IBrokers::reqRealTimeBars(conn=ib_connect,
              Contract=con_tracts,
              barSize="1", useRTH=FALSE,
              eventWrapper=eWrapper.RealTimeBars.CSV(NROW(con_tracts)),
              file=file_connects)
# Close the Interactive Brokers API connection
IBrokers::twsDisconnect(ib_connect)
# Close data files
for (file_connect in file_connects)
  close(file_connect)
library(data.table)
# Load ES futures June 2019 contract and coerce it into xts series.
price_s <- data.table::fread(file_names[1])
# Select columns V2:V6 by name with with=FALSE; without it,
# data.table evaluates j and returns the character vector
# paste0("V", 2:6) itself instead of the columns it names
price_s <- xts::xts(price_s[, paste0("V", 2:6), with=FALSE],
  # V1 holds the numeric Unix timestamp; use the as.POSIXct() generic
  as.POSIXct(as.numeric(price_s[, V1]), tz="America/New_York", origin="1970-01-01"))
colnames(price_s) <- c("Open", "High", "Low", "Close", "Volume")
# Plot dygraph
library(dygraphs)
dygraphs::dygraph(price_s[, 1:4], main="S&P500 ESM9 futures") %>%
  dyCandlestick()
# Load ZN 10yr Treasury futures June 2019 contract (same fix)
price_s <- data.table::fread(file_names[2])
price_s <- xts::xts(price_s[, paste0("V", 2:6), with=FALSE],
  as.POSIXct(as.numeric(price_s[, V1]), tz="America/New_York", origin="1970-01-01"))
colnames(price_s) <- c("Open", "High", "Low", "Close", "Volume")
# Plot dygraph
dygraphs::dygraph(price_s[, 1:4], main="ZN 10yr Treasury futures") %>%
  dyCandlestick()
# Examples of contract definitions.
# NOTE(review): each assignment overwrites con_tract, so only the
# last one (the 6JZ8 JPY future) is used by the order code below.
# Define S&P Emini future June 2019 contract
con_tract <- IBrokers::twsFuture(symbol="ES", exch="GLOBEX", expiry="201906")
# Define euro currency contract EUR.USD
con_tract <- IBrokers::twsCurrency("EUR", currency="USD")
# Define euro currency E-mini futures June 2019 contract E7Z8
con_tract <- IBrokers::twsFuture(symbol="E7", exch="GLOBEX", expiry="201906")
# Define Japanese yen currency contract JPY.USD
con_tract <- IBrokers::twsCurrency("JPY", currency="USD")
# Define Japanese yen currency E-mini futures June 2019 contract J7Z8
con_tract <- IBrokers::twsFuture(symbol="J7", exch="GLOBEX", expiry="201906")
# Define Japanese yen currency futures June 2019 contract 6JZ8
con_tract <- IBrokers::twsFuture(symbol="JPY", exch="GLOBEX", expiry="201906")
# Connect to Interactive Brokers TWS
ib_connect <- IBrokers::twsConnect(port=7497)
IBrokers::reqContractDetails(conn=ib_connect, Contract=con_tract)
# Request trade order ID (every order needs a fresh ID from TWS)
order_id <- IBrokers::reqIds(ib_connect)
# Create buy market order object
ib_order <- IBrokers::twsOrder(order_id,
  orderType="MKT", action="BUY", totalQuantity=1)
# Place trade order - this submits a live order to TWS
IBrokers::placeOrder(ib_connect, con_tract, ib_order)
# Execute sell market order
order_id <- IBrokers::reqIds(ib_connect)
ib_order <- IBrokers::twsOrder(order_id,
  orderType="MKT", action="SELL", totalQuantity=1)
IBrokers::placeOrder(ib_connect, con_tract, ib_order)
# Execute buy market order
order_id <- IBrokers::reqIds(ib_connect)
ib_order <- IBrokers::twsOrder(order_id,
  orderType="MKT", action="BUY", totalQuantity=1)
IBrokers::placeOrder(ib_connect, con_tract, ib_order)
# Request trade order ID
order_id <- IBrokers::reqIds(ib_connect)
# Create buy limit order object (lmtPrice passed as a string,
# which IBrokers accepts for the API message)
ib_order <- IBrokers::twsOrder(order_id, orderType="LMT",
  lmtPrice="1.1511", action="BUY", totalQuantity=1)
# Place trade order
IBrokers::placeOrder(ib_connect, con_tract, ib_order)
# Cancel trade order
IBrokers::cancelOrder(ib_connect, order_id)
# Execute sell limit order
order_id <- IBrokers::reqIds(ib_connect)
ib_order <- IBrokers::twsOrder(order_id, orderType="LMT",
  lmtPrice="1.1512", action="SELL", totalQuantity=1)
IBrokers::placeOrder(ib_connect, con_tract, ib_order)
# Cancel trade order
IBrokers::cancelOrder(ib_connect, order_id)
# Close the Interactive Brokers API connection
IBrokers::twsDisconnect(ib_connect)
# Constructor for a custom IBrokers event wrapper. For every incoming
# real-time bar it: writes the bar to file, prints a counter and the
# OHLCV fields to the console, cancels the previous pair of limit
# orders, and re-quotes a new symmetric pair around the bar.
# n is the number of instruments (one data slot/file per instrument).
# NOTE(review): relies on globals from the enclosing script:
# eWrapper_new(), ib_connect and con_tract, plus the "count_er",
# "buy_id" and "sell_id" slots of the wrapper's data environment -
# confirm these are initialized before the wrapper is used.
eWrapper_realtimebars <- function(n = 1) {
eW <- eWrapper_new(NULL)
# eW <- IBrokers::eWrapper(NULL)
# Preallocate one empty 7-column xts row per instrument to hold the
# latest bar (Open, High, Low, Close, Volume, WAP, Count)
eW$assign.Data("data", rep(list(structure(.xts(matrix(rep(NA_real_, 7), ncol = 7), 0), .Dimnames = list(NULL, c("Open", "High", "Low", "Close", "Volume", "WAP", "Count")))), n))
# Callback invoked by IBrokers for each REAL_TIME_BARS message.
# msg layout (from the cat() below): msg[2]=request id, msg[3]=time,
# msg[4:10]=Open, High, Low, Close, Volume, WAP, Count
eW$realtimeBars <- function(curMsg, msg, timestamp, file, ...) {
  id <- as.numeric(msg[2])
  file <- file[[id]]
  data <- eW$get.Data("data")
  attr(data[[id]], "index") <- as.numeric(msg[3])
  nr.data <- NROW(data[[id]])
  # Write to file
  cat(paste(msg[3], msg[4], msg[5], msg[6], msg[7], msg[8], msg[9], msg[10], sep = ","), "\n", file = file, append = TRUE)
  # Write to console
  # eW$count_er <- eW$count_er + 1
  eW$assign.Data("count_er", eW$get.Data("count_er")+1)
  cat(paste0("count_er=", eW$get.Data("count_er"), "\tOpen=", msg[4], "\tHigh=", msg[5], "\tLow=", msg[6], "\tClose=", msg[7], "\tVolume=", msg[8]), "\n")
  # cat(paste0("Open=", msg[4], "\tHigh=", msg[5], "\tLow=", msg[6], "\tClose=", msg[7], "\tVolume=", msg[8]), "\n")
  #Trade
  # Cancel previous trade orders (ids are 0 before the first bar)
  buy_id <- eW$get.Data("buy_id")
  sell_id <- eW$get.Data("sell_id")
  if (buy_id>0) IBrokers::cancelOrder(ib_connect, buy_id)
  if (sell_id>0) IBrokers::cancelOrder(ib_connect, sell_id)
  # Execute buy limit order 0.25 below the bar's Low (msg[6])
  # NOTE(review): msg[6]-0.25 assumes msg fields arrive as numeric;
  # if they are character strings this arithmetic errors - confirm
  buy_id <- IBrokers::reqIds(ib_connect)
  buy_order <- IBrokers::twsOrder(buy_id, orderType="LMT",
    lmtPrice=msg[6]-0.25, action="BUY", totalQuantity=1)
  IBrokers::placeOrder(ib_connect, con_tract, buy_order)
  # Execute sell limit order 0.25 above the bar's High (msg[5])
  sell_id <- IBrokers::reqIds(ib_connect)
  sell_order <- IBrokers::twsOrder(sell_id, orderType="LMT",
    lmtPrice=msg[5]+0.25, action="SELL", totalQuantity=1)
  IBrokers::placeOrder(ib_connect, con_tract, sell_order)
  # Remember the new order ids so the next bar can cancel them
  eW$assign.Data("buy_id", buy_id)
  eW$assign.Data("sell_id", sell_id)
  #Trade finished
  # Store the latest bar values in the wrapper's data slot
  data[[id]][nr.data, 1:7] <- as.numeric(msg[4:10])
  eW$assign.Data("data", data)
  c(curMsg, msg)
} # end eW$realtimeBars
return(eW)
} # end eWrapper_realtimebars
# Install package data.table
install.packages("data.table")
# Load package data.table
library(data.table)
# get documentation for package data.table
# get short description
packageDescription("data.table")
# Load help page
help(package="data.table")
# List all datasets in "data.table"
data(package="data.table")
# List all objects in "data.table"
ls("package:data.table")
# Remove data.table from search path
detach("package:data.table")
# Create a data table with three columns of random permutations of 1:7
library(data.table)
data_table <- data.table::data.table(
  col1=sample(7), col2=sample(7), col3=sample(7))
# Print data_table
class(data_table); data_table
# column referenced without quotes (data.table evaluates j inside the table)
data_table[, col2]
# row referenced without a following comma (selects row 2, not column 2)
data_table[2]
# Print option "datatable.print.nrows"
getOption("datatable.print.nrows")
options(datatable.print.nrows=10)
getOption("datatable.print.nrows")
# Number of rows in data_table
NROW(data_table)
# Or
data_table[, NROW(col1)]
# Or use the special .N symbol (row count of the current group)
data_table[, .N]
# microbenchmark speed of data.table syntax
library(microbenchmark)
summary(microbenchmark(
  dt=data_table[, .N],
  pure_r=NROW(data_table),
  times=10))[, c(1, 4, 5)] # end microbenchmark summary
# Read a data table from CSV file
dir_name <- "C:/Develop/lecture_slides/data/"
file_name <- file.path(dir_name, "weather_delays14.csv")
data_table <- data.table::fread(file_name)
class(data_table); dim(data_table)
data_table
# fread() reads the same data as read.csv()
# (setDF() converts to a data frame so all.equal() compares like with like)
all.equal(read.csv(file_name, stringsAsFactors=FALSE),
  setDF(data.table::fread(file_name)))
# fread() is much faster than read.csv()
summary(microbenchmark(
  pure_r=read.csv(file_name),
  fread=setDF(data.table::fread(file_name)),
  times=10))[, c(1, 4, 5)] # end microbenchmark summary
# Write data table to file in different ways
data.table::fwrite(data_table, file="data_table.csv")
write.csv(data_table, file="data_table2.csv")
# NOTE(review): cat(unlist(...)) writes a flat stream of values,
# not a CSV table - it is only a lower bound for the benchmark
cat(unlist(data_table), file="data_table3.csv")
# microbenchmark speed of data.table::fwrite()
library(microbenchmark)
summary(microbenchmark(
  fwrite=data.table::fwrite(data_table, file="data_table.csv"),
  write_csv=write.csv(data_table, file="data_table2.csv"),
  cat=cat(unlist(data_table), file="data_table3.csv"),
  times=10))[, c(1, 4, 5)] # end microbenchmark summary
# Select first five rows of data_table
data_table[1:5]
# Select rows with JFK flights (i is evaluated inside the table)
jfk_flights <- data_table[origin=="JFK"]
# Select rows JFK flights in June
jfk_flights <- data_table[origin=="JFK" & month==6]
# Select rows without JFK flights
jfk_flights <- data_table[!(origin=="JFK")]
# Select flights with carrier_delay
data_table[carrier_delay > 0]
# Select column of data_table and return a vector
head(data_table[, origin])
# Select column of data_table and return a data_table, not vector
# (.( ) is a data.table alias for list( ))
head(data_table[, list(origin)])
head(data_table[, .(origin)])
# Select two columns of data_table
data_table[, list(origin, month)]
data_table[, .(origin, month)]
# The ..prefix looks up column_s in the calling scope, not as a column
column_s <- c("origin", "month")
data_table[, ..column_s]
# NOTE(review): here the unnamed third argument is by, so this
# returns month grouped by origin - confirm this is the intent
data_table[, month, origin]
# Select two columns and rename them
data_table[, .(orig=origin, mon=month)]
# Select all columns except origin
head(data_table[, !c("origin")])
head(data_table[, -c("origin")])
# Select flights with positive carrier_delay
data_table[carrier_delay > 0]
# Number of flights with carrier_delay
data_table[, sum(carrier_delay > 0)]
# Or standard R commands
sum(data_table[, carrier_delay > 0])
# microbenchmark speed of data.table syntax
summary(microbenchmark(
  dt=data_table[, sum(carrier_delay > 0)],
  pure_r=sum(data_table[, carrier_delay > 0]),
  times=10))[, c(1, 4, 5)] # end microbenchmark summary
# Average carrier_delay
data_table[, mean(carrier_delay)]
# Average carrier_delay and aircraft_delay
data_table[, .(carrier=mean(carrier_delay),
  aircraft=mean(aircraft_delay))]
# Average aircraft_delay from JFK
data_table[origin=="JFK", mean(aircraft_delay)]
# Number of flights from JFK
data_table[origin=="JFK", NROW(aircraft_delay)]
# Or
data_table[origin=="JFK", .N]
# Number of flights from each airport
data_table[, .N, by=origin]
# Same, but add names to output
data_table[, .(flights=.N), by=.(airport=origin)]
# Number of AA flights from each airport
data_table[carrier=="AA", .(flights=.N),
  by=.(airport=origin)]
# Number of flights from each airport and airline
data_table[, .(flights=.N),
  by=.(airport=origin, airline=carrier)]
# Average aircraft_delay
data_table[, mean(aircraft_delay)]
# Average aircraft_delay from JFK
data_table[origin=="JFK", mean(aircraft_delay)]
# Average aircraft_delay from each airport
data_table[, .(delay=mean(aircraft_delay)),
  by=.(airport=origin)]
# Average and max delays from each airport and month
data_table[, .(mean_delay=mean(aircraft_delay), max_delay=max(aircraft_delay)),
  by=.(airport=origin, month=month)]
# Same but keyby= also sorts the output by the grouping columns
data_table[, .(mean_delay=mean(aircraft_delay), max_delay=max(aircraft_delay)),
  keyby=.(airport=origin, month=month)]
# Sort ascending by origin, then descending by dest
order_table <- data_table[order(origin, -dest)]
order_table
# Intentional error demo: origin and dest are only visible inside
# the data.table frame, so base order() cannot find them
order(origin, -dest)
# Sort data_table by reference (modifies data_table in place, no copy)
setorder(data_table, origin, -dest)
all.equal(data_table, order_table)
# setorder() is much faster than order()
summary(microbenchmark(
  order=data_table[order(origin, -dest)],
  setorder=setorder(data_table, origin, -dest),
  times=10))[, c(1, 4, 5)] # end microbenchmark summary
# Average aircraft_delay by month
order_table[, .(mean_delay=mean(aircraft_delay)),
  by=.(month=month)]
# Chained brackets to sort output by month
order_table[, .(mean_delay=mean(aircraft_delay)),
  by=.(month=month)][order(month)]
# Select weather_delay and aircraft_delay in two different ways
data_table[1:7, .SD,
  .SDcols=c("weather_delay", "aircraft_delay")]
data_table[1:7, .(weather_delay, aircraft_delay)]
# Calculate mean of weather_delay and aircraft_delay
data_table[, sapply(.SD, mean),
  .SDcols=c("weather_delay", "aircraft_delay")]
sapply(data_table[, .SD,
  .SDcols=c("weather_delay", "aircraft_delay")], mean)
# Return origin and dest, then all other columns
data_table[1:7, .SD, by=.(origin, dest)]
# Return origin and dest, then weather_delay and aircraft_delay columns
# Fix: .SDcols must be a single character vector; the original passed
# "aircraft_delay" as a separate unnamed argument, which [.data.table
# matched to keyby and rejected (by and keyby conflict)
data_table[1:7, .SD,
  by=.(origin, dest),
  .SDcols=c("weather_delay", "aircraft_delay")]
# Return first two rows from each month
data_table[, head(.SD, 2), by=.(month)]
data_table[, head(.SD, 2), by=.(month),
  .SDcols=c("weather_delay", "aircraft_delay")]
# Calculate mean of weather_delay and aircraft_delay, grouped by origin
data_table[, lapply(.SD, mean),
  by=.(origin),
  .SDcols=c("weather_delay", "aircraft_delay")]
# Or simply
data_table[, .(weather_delay=mean(weather_delay),
  aircraft_delay=mean(aircraft_delay)),
  by=.(origin)]
# Add tot_delay column (:= assigns by reference, no copy of the table)
data_table[, tot_delay := (carrier_delay + aircraft_delay)]
head(data_table, 4)
# Delete tot_delay column (assigning NULL removes it by reference)
data_table[, tot_delay := NULL]
# Add max_delay column grouped by origin and dest
data_table[, max_delay := max(aircraft_delay),
  by=.(origin, dest)]
data_table[, max_delay := NULL]
# Add date and tot_delay columns in one call
data_table[, c("date", "tot_delay") :=
  list(paste(month, day, year, sep="/"),
    (carrier_delay + aircraft_delay))]
# Modify select rows of tot_delay column
data_table[month == 12, tot_delay := carrier_delay]
data_table[, c("date", "tot_delay") := NULL]
# Add several columns computed over .SD
data_table[, c("max_carrier", "max_aircraft") :=
  lapply(.SD, max),
  by=.(origin, dest),
  .SDcols=c("carrier_delay", "aircraft_delay")]
data_table[, c("max_carrier", "max_aircraft") := NULL]
# Modifying by reference is much faster than standard R
summary(microbenchmark(
  dt=data_table[, tot_delay := (carrier_delay + aircraft_delay)],
  pure_r=(data_table[, "tot_delay"] <- data_table[, "carrier_delay"] + data_table[, "aircraft_delay"]),
  times=10))[, c(1, 4, 5)] # end microbenchmark summary
# Add a key based on the "origin" column
# (setkey sorts the table by the key column, by reference)
setkey(data_table, origin)
haskey(data_table)
key(data_table)
# Select rows with LGA using the key (binary search, not a scan)
data_table["LGA"]
all.equal(data_table["LGA"],
  data_table[origin == "LGA"])
# Select rows with LGA and JFK using the key
data_table[c("LGA", "JFK")]
# Add a key based on the "origin" and "dest" columns
setkey(data_table, origin, dest)
key(data_table)
# A plain character vector matches the first key column (origin) only
data_table[c("JFK", "MIA")]
# A .( ) list matches the key columns pairwise: origin JFK, dest MIA
data_table[.("JFK", "MIA")]
all.equal(data_table[.("JFK", "MIA")],
  data_table[origin == "JFK" & dest == "MIA"])
# Selecting rows using a key is much faster than standard R
summary(microbenchmark(
  with_key=data_table[.("JFK", "MIA")],
  standard_r=data_table[origin == "JFK" & dest == "MIA"],
  times=10))[, c(1, 4, 5)] # end microbenchmark summary
# Create data frame and coerce it to data table
data_table <- data.frame(
  col1=sample(7), col2=sample(7), col3=sample(7))
class(data_table); data_table
# setDT() converts in place (no copy), unlike as.data.table()
data.table::setDT(data_table)
class(data_table); data_table
# Coerce data_table into data frame
data.table::setDF(data_table)
class(data_table); data_table
# Or via the (internal) S3 method, which does copy
data_table <- data.table:::as.data.frame.data.table(data_table)
# SetDF() is much faster than as.data.frame()
summary(microbenchmark(
  as.data.frame=data.table:::as.data.frame.data.table(data_table),
  setDF=data.table::setDF(data_table),
  times=10))[, c(1, 4, 5)] # end microbenchmark summary
# Coerce xts to a data frame
price_s <- rutils::etf_env$VTI
class(price_s); head(price_s)
price_s <- as.data.frame(price_s)
class(price_s); head(price_s)
# Coerce data frame to a data table, keeping the dates as a column
data.table::setDT(price_s, keep.rownames=TRUE)
class(price_s); head(price_s)
# Dates are coerced to strings (row names were character)
sapply(price_s, class)
# Coerce xts directly to a data table
data_table <- as.data.table(rutils::etf_env$VTI,
  keep.rownames=TRUE)
class(data_table); head(data_table)
# Dates are not coerced to strings
sapply(data_table, class)
all.equal(price_s, data_table, check.attributes=FALSE)
# Install package fst
install.packages("fst")
# Load package fst
library(fst)
# get documentation for package fst
# get short description
packageDescription("fst")
# Load help page
help(package="fst")
# List all datasets in "fst"
data(package="fst")
# List all objects in "fst"
ls("package:fst")
# Remove fst from search path
detach("package:fst")
# Read a data frame from CSV file
dir_name <- "C:/Develop/lecture_slides/data/"
file_name <- file.path(dir_name, "weather_delays14.csv")
# Fix: the original called setDF(data_frame) without ever creating
# data_frame - read the file with fread() first, then convert the
# data.table to a plain data frame in place
data_frame <- data.table::fread(file_name)
data.table::setDF(data_frame)
class(data_frame); dim(data_frame)
# Write data frame to .fst file in different ways
fst::write_fst(data_frame, path="data_frame.fst")
write.csv(data_frame, file="data_frame2.csv")
# microbenchmark speed of fst::write_fst()
library(microbenchmark)
# Fix: benchmark writes to "data_frame.fst" (the original wrote fst
# binary data into "data_frame.csv", clobbering a CSV name and
# leaving the .fst file read below stale)
summary(microbenchmark(
  fst=fst::write_fst(data_frame, path="data_frame.fst"),
  write_csv=write.csv(data_frame, file="data_frame2.csv"),
  cat=cat(unlist(data_frame), file="data_frame3.csv"),
  times=10))[, c(1, 4, 5)] # end microbenchmark summary
# fst::read_fst() reads the same data as read.csv()
all.equal(read.csv(file_name, stringsAsFactors=FALSE),
  fst::read_fst("data_frame.fst"))
# fst::read_fst() is 10 times faster than read.csv()
summary(microbenchmark(
  fst=fst::read_fst("data_frame.fst"),
  read_csv=read.csv(file_name),
  times=10))[, c(1, 4, 5)] # end microbenchmark summary
# Coerce TAQ xts to a data frame
library(HighFreq)
t_aq <- HighFreq::SPY_TAQ
t_aq <- as.data.frame(t_aq)
class(t_aq)
# Coerce data frame to a data table
data.table::setDT(t_aq, keep.rownames=TRUE)
class(t_aq); head(t_aq)
# Get memory size of data table
format(object.size(t_aq), units="MB")
# Save data table to .fst file
fst::write_fst(t_aq, path="C:/Develop/data/taq.fst")
# Create reference to .fst file similar to a data frame
# (an fst_table is a lazy proxy; data stays on disk until subset)
fs_t <- fst::fst("C:/Develop/data/taq.fst")
class(fs_t)
# Memory size of reference to .fst is very small
format(object.size(fs_t), units="MB")
# Get sizes of all objects in workspace
sort(sapply(mget(ls()), object.size))
# reference to .fst can be treated similar to a data table
dim(t_aq); dim(fs_t)
fst:::print.fst_table(fs_t)
# Subset reference to .fst just like a data table
# (only the requested rows are read from disk)
fs_t[1e4:(1e4+5), ]
library(rutils) # Load package rutils
etf_env <- new.env() # new environment for data
# Download data for sym_bols into etf_env from Alpha Vantage
# NOTE(review): sym_bols must be defined earlier in the script, and
# the api.key is hard-coded here - should be moved to an env variable
getSymbols.av(sym_bols, adjust=TRUE, env=etf_env,
  output.size="full", api.key="T7JPW54ES8G75310")
# getSymbols(sym_bols, env=etf_env, adjust=TRUE, from="2005-01-03")
library(rutils) # Load package rutils
ls(etf_env) # List files in etf_env
# get class of object in etf_env
class(get(x=sym_bols[1], envir=etf_env))
# Another way
class(etf_env$VTI)
colnames(etf_env$VTI)
head(etf_env$VTI, 3)
# get class of all objects in etf_env
eapply(etf_env, class)
# get class of all objects in R workspace
lapply(ls(), function(ob_ject) class(get(ob_ject)))
library(rutils) # Load package rutils
# Check of object is an OHLC time series
is.OHLC(etf_env$VTI)
# Adjust single OHLC object using its name
etf_env$VTI <- adjustOHLC(etf_env$VTI,
            use.Adjusted=TRUE)
# Adjust OHLC object using string as name
assign(sym_bols[1], adjustOHLC(
    get(x=sym_bols[1], envir=etf_env),
    use.Adjusted=TRUE),
  envir=etf_env)
# Adjust objects in environment using vector of strings
for (sym_bol in ls(etf_env)) {
  assign(sym_bol,
   adjustOHLC(get(sym_bol, envir=etf_env),
            use.Adjusted=TRUE),
   envir=etf_env)
} # end for
library(rutils) # Load package rutils
# Extract and cbind all data, subset by symbols
price_s <- rutils::do_call(cbind,
    as.list(etf_env)[sym_bols])
# Or
# price_s <- do.call(cbind,
#     as.list(etf_env)[sym_bols])
# Extract and cbind adjusted prices, subset by symbols
price_s <- rutils::do_call(cbind,
    lapply(as.list(etf_env)[sym_bols], Ad))
# Same, but works only for OHLC series
price_s <- rutils::do_call(cbind,
    eapply(etf_env, Ad)[sym_bols])
# Drop ".Adjusted" from colnames by splitting on the dot and
# keeping only the ticker part
colnames(price_s) <-
  sapply(colnames(price_s),
    function(col_name)
      strsplit(col_name, split="[.]")[[1]],
    USE.NAMES=FALSE)[1, ]
head(price_s[, 1:2], 3)
# Which objects in global environment are class xts?
unlist(eapply(globalenv(), is.xts))
# Save xts to csv file
write.zoo(price_s,
     file="etf_series.csv", sep=",")
# Copy price_s into etf_env and save to .RData file
assign("price_s", price_s, envir=etf_env)
save(etf_env, file="etf_data.RData")
# Extract VTI prices
vt_i <- etf_env$price_s[ ,"VTI"]
vt_i <- na.omit(vt_i)
# Calculate percentage returns "by hand":
# build a one-period lagged series, repeating the first value so the
# first return is zero, then compute (p - lag(p))/lag(p)
vti_lag <- as.numeric(vt_i)
vti_lag <- c(vti_lag[1], vti_lag[-NROW(vti_lag)])
vti_lag <- xts(vti_lag, index(vt_i))
vti_returns <- (vt_i-vti_lag)/vti_lag
# Calculate percentage returns using dailyReturn()
daily_returns <- quantmod::dailyReturn(vt_i)
head(cbind(daily_returns, vti_returns))
all.equal(daily_returns, vti_returns, check.attributes=FALSE)
# Calculate returns for all prices in etf_env$price_s
# (na.omit per column because the series have different histories)
re_turns <- lapply(etf_env$price_s, function(x_ts) {
  daily_returns <- quantmod::dailyReturn(na.omit(x_ts))
  colnames(daily_returns) <- names(x_ts)
  daily_returns
})  # end lapply
# "re_turns" is a list of xts
class(re_turns)
class(re_turns[[1]])
# Flatten list of xts into a single xts
re_turns <- rutils::do_call(cbind, re_turns)
class(re_turns)
dim(re_turns)
# Copy re_turns into etf_env and save to .RData file
assign("re_turns", re_turns, envir=etf_env)
save(etf_env, file="etf_data.RData")
library(quantmod)
start_date <- "2012-05-10"; end_date <- "2013-11-20"
# Select all objects in environment, subset each by the date range,
# and return as a new environment
new_env <- as.environment(eapply(etf_env, "[",
    paste(start_date, end_date, sep="/")))
# Select only sym_bols in environment and return as environment
new_env <- as.environment(
  lapply(as.list(etf_env)[sym_bols], "[",
   paste(start_date, end_date, sep="/")))
# Extract and cbind adjusted prices and return to environment
assign("price_s", rutils::do_call(cbind,
         lapply(ls(etf_env), function(sym_bol) {
           x_ts <- Ad(get(sym_bol, etf_env))
           colnames(x_ts) <- sym_bol
           x_ts
         })), envir=new_env)
# get sizes of OHLC xts series in etf_env
sapply(mget(sym_bols, envir=etf_env), object.size)
# Extract and cbind adjusted prices and return to environment
# col_name() strips the ".Adjusted" suffix, keeping only the ticker
col_name <- function(x_ts)
  strsplit(colnames(x_ts), split="[.]")[[1]][1]
assign("price_s", rutils::do_call(cbind,
         lapply(mget(etf_env$sym_bols, envir=etf_env),
                function(x_ts) {
                  x_ts <- Ad(x_ts)
                  colnames(x_ts) <- col_name(x_ts)
                  x_ts
         })), envir=new_env)
# Load data frame of S&P500 constituents from CSV file
sp_500 <- read.csv(file="C:/Develop/lecture_slides/data/sp500_WRDS_08-30-17.csv", stringsAsFactors=FALSE)
# Inspect data frame of S&P500 constituents
dim(sp_500)
colnames(sp_500)
# Extract tickers from the column co_tic
sym_bols <- sp_500$co_tic
# Get duplicate tickers (tickers appearing more than once)
ta_ble <- table(sym_bols)
dupli_cate <- ta_ble[ta_ble>1]
dupli_cate <- names(dupli_cate)
# Get duplicate records (rows) of sp_500
sp_500[sym_bols %in% dupli_cate, ]
# Get unique tickers
sym_bols <- unique(sym_bols)
# Find index of ticker "BRK.B"
which(sym_bols=="BRK.B")
# Remove "BRK.B" and later download it separately
# (its ticker format differs between data vendors)
sym_bols <- sym_bols[-which(sym_bols=="BRK.B")]
# Load package rutils
library(rutils)
# Create new environment for data
env_sp500 <- new.env()
# Boolean vector of symbols already downloaded
down_loaded <- sym_bols %in% ls(env_sp500)
# Download in while loop from Tiingo and copy into environment.
# The loop retries failed symbols up to 5 times; tryCatch() keeps one
# bad symbol from aborting the whole batch.
# NOTE(review): the api.key is hard-coded - move it to an env variable
at_tempt <- 0 # number of download attempts
while (((sum(!down_loaded)) > 0) & (at_tempt<5)) {
# Download data and copy it into environment
  at_tempt <- at_tempt + 1
  cat("Download attempt = ", at_tempt, "\n")
  for (sym_bol in sym_bols[!down_loaded]) {
    cat("processing: ", sym_bol, "\n")
    tryCatch(  # With error handler
  getSymbols(sym_bol, src="tiingo", adjust=TRUE, auto.assign=TRUE,
       from="1990-01-01", env=env_sp500, api.key="d84fc2a9c5bde2d68e33034f65a838092c6b9f10"),
  # Error handler captures error condition
  error=function(error_cond) {
    print(paste("error handler: ", error_cond))
  },  # end error handler
  finally=print(paste("sym_bol=", sym_bol))
    )  # end tryCatch
  }  # end for
# Update vector of symbols already downloaded
  down_loaded <- sym_bols %in% ls(env_sp500)
  Sys.sleep(10) # Wait 10 seconds until next attempt
}  # end while
class(env_sp500$AAPL)
class(index(env_sp500$AAPL))
library(quantmod)
# Rename "LOW" colnames to "LO_WES"
# NOTE(review): presumably to avoid confusion with the OHLC "Low"
# column name - confirm
colnames(env_sp500$LOW) <- paste("LO_WES",
  sapply(strsplit(colnames(env_sp500$LOW), split="[.]"),
    function(col_name) col_name[2]), sep=".")
env_sp500$LO_WES <- env_sp500$LOW[, unique(colnames(env_sp500$LOW))]
rm(LOW, envir=env_sp500)
chart_Series(x=env_sp500$LO_WES["2017-06/"],
       TA="add_Vo()", name="LOWES stock")
# Download "BRK.B" separately with auto.assign=FALSE
# (Tiingo uses "BRK-B", and "BRK.B"/"BRK-B" are not valid R names)
BRK_B <- getSymbols("BRK-B", auto.assign=FALSE, src="tiingo", adjust=TRUE, from="1990-01-01", api.key="j84ac2b9c5bde2d68e33034f65d838092c6c9f10")
colnames(BRK_B) <- paste("BRK_B",
  sapply(strsplit(colnames(BRK_B), split="[.]"),
    function(col_name) col_name[2]), sep=".")
env_sp500$BRK_B <- BRK_B
# Rename "BF-B" colnames to "BF_B"
colnames(env_sp500$"BF-B") <- paste("BF_B",
  sapply(strsplit(colnames(env_sp500$"BF-B"), split="[.]"),
    function(col_name) col_name[2]), sep=".")
# Drop the names attribute left behind by sapply()
names(colnames(env_sp500$"BF-B")) <- NULL
env_sp500$BF_B <- env_sp500$"BF-B"
rm("BF-B", envir=env_sp500)
class(env_sp500$AAPL)
# The date-time index is class POSIXct not Date
class(index(env_sp500$AAPL))
# Coerce time indices from class POSIXct to class Date
# (daily data doesn't need intraday timestamps)
for (sym_bol in ls(env_sp500)) {
  x_ts <- get(sym_bol, envir=env_sp500)
  index(x_ts) <- as.Date(index(x_ts))
  assign(sym_bol, x_ts, envir=env_sp500)
}  # end for
class(index(env_sp500$AAPL))
# Save the environment to compressed .RData file
dir_name <- "C:/Develop/lecture_slides/data/"
save(env_sp500, file=paste0(dir_name, "sp500.RData"))
# Save the ETF prices into CSV files, one file per symbol
dir_name <- "C:/Develop/lecture_slides/data/SP500/"
for (sym_bol in ls(env_sp500)) {
  # Fix: look up the series by the name held in sym_bol.
  # The original env_sp500$sym_bol looked up the literal name
  # "sym_bol" (which doesn't exist), so write.zoo() received NULL.
  zoo::write.zoo(get(sym_bol, envir=env_sp500), file=paste0(dir_name, sym_bol, ".csv"))
}  # end for
# Or using lapply()
file_names <- lapply(ls(env_sp500), function(sym_bol) {
  x_ts <- get(sym_bol, envir=env_sp500)
  zoo::write.zoo(x_ts, file=paste0(dir_name, sym_bol, ".csv"))
  sym_bol
})  # end lapply
unlist(file_names)
# Or using eapply() and data.table::fwrite()
# NOTE(review): rutils::get_name() is a project helper, presumably
# stripping the ".Open" suffix from the first column name - confirm
file_names <- eapply(env_sp500 , function(x_ts) {
  file_name <- rutils::get_name(colnames(x_ts)[1])
  data.table::fwrite(data.table::as.data.table(x_ts), file=paste0(dir_name, file_name, ".csv"))
  file_name
})  # end eapply
unlist(file_names)
# Load the environment from compressed .RData file
dir_name <- "C:/Develop/lecture_slides/data/"
load(file=paste0(dir_name, "sp500.RData"))
# Get all the .csv file names in the directory
dir_name <- "C:/Develop/lecture_slides/data/SP500/"
file_names <- Sys.glob(paste0(dir_name, "*.csv"))
# Create new environment for data
env_sp500 <- new.env()
# Read each CSV into an xts series; the symbol name is recovered
# from the first column name (e.g. "AAPL.Open" -> "AAPL")
for (file_name in file_names) {
  x_ts <- xts::as.xts(zoo::read.csv.zoo(file_name))
  sym_bol <- strsplit(colnames(x_ts), split="[.]")[[1]][1]
  assign(sym_bol, x_ts, envir=env_sp500)
}  # end for
# Or using fread() (faster; first column holds the dates)
for (file_name in file_names) {
  x_ts <- data.table::fread(file_name)
  data.table::setDF(x_ts)
  x_ts <- xts::xts(x_ts[, -1], as.Date(x_ts[, 1]))
  sym_bol <- strsplit(colnames(x_ts), split="[.]")[[1]][1]
  assign(sym_bol, x_ts, envir=env_sp500)
}  # end for
# Remove all files from environment(if necessary)
rm(list=ls(env_sp500), envir=env_sp500)
# Download in while loop from Alpha Vantage and copy into environment
# (same retry pattern as the Tiingo loop above, up to 10 attempts)
down_loaded <- sym_bols %in% ls(env_sp500)
at_tempt <- 0
while (((sum(!down_loaded)) > 0) & (at_tempt<10)) {
# Download data and copy it into environment
  at_tempt <- at_tempt + 1
  for (sym_bol in sym_bols[!down_loaded]) {
    cat("processing: ", sym_bol, "\n")
    tryCatch(  # With error handler
  getSymbols(sym_bol, src="av", adjust=TRUE, auto.assign=TRUE, env=env_sp500,
       output.size="full", api.key="T7JPW54ES8G75310"),
  # error handler captures error condition
  error=function(error_cond) {
    print(paste("error handler: ", error_cond))
  },  # end error handler
  finally=print(paste("sym_bol=", sym_bol))
    )  # end tryCatch
  }  # end for
# Update vector of symbols already downloaded
  down_loaded <- sym_bols %in% ls(env_sp500)
  Sys.sleep(10) # Wait 10 seconds until next attempt
}  # end while
# Adjust all OHLC prices in environment for splits and dividends
for (sym_bol in ls(env_sp500)) {
  assign(sym_bol,
   adjustOHLC(get(x=sym_bol, envir=env_sp500), use.Adjusted=TRUE),
   envir=env_sp500)
}  # end for
library(rutils) # Load package rutils
# Assign name SP500 to ^GSPC symbol
# (setSymbolLookup maps a valid R name to the vendor ticker)
setSymbolLookup(
  SP500=list(name="^GSPC", src="yahoo"))
getSymbolLookup()
# view and clear options
options("getSymbols.sources")
options(getSymbols.sources=NULL)
# Download S&P500 prices into etf_env
getSymbols("SP500", env=etf_env,
    adjust=TRUE, auto.assign=TRUE, from="1990-01-01")
chart_Series(x=etf_env$SP500["2016/"],
       TA="add_Vo()",
       name="S&P500 index")
library(rutils) # Load package rutils
# Assign name DJIA to ^DJI symbol
setSymbolLookup(
  DJIA=list(name="^DJI", src="yahoo"))
getSymbolLookup()
# view and clear options
options("getSymbols.sources")
options(getSymbols.sources=NULL)
# Download DJIA prices into etf_env
getSymbols("DJIA", env=etf_env,
    adjust=TRUE, auto.assign=TRUE, from="1990-01-01")
chart_Series(x=etf_env$DJIA["2016/"],
       TA="add_Vo()",
       name="DJIA index")
library(rutils) # Load package rutils
library(RCurl) # Load package RCurl
library(XML) # Load package XML
# Download text data from URL
sp_500 <- getURL(
"https://en.wikipedia.org/wiki/List_of_S%26P_500_companies")
# Extract tables from the text data
sp_500 <- readHTMLTable(sp_500,
stringsAsFactors=FALSE)
str(sp_500)
# Extract colnames of data frames
lapply(sp_500, colnames)
# Extract S&P500 constituents
sp_500 <- sp_500[[1]]
head(sp_500)
# Create valid R names from symbols containing "-" or "."characters
sp_500$names <- gsub("-", "_", sp_500$Ticker)
sp_500$names <- gsub("[.]", "_", sp_500$names)
# Write data frame of S&P500 constituents to CSV file
write.csv(sp_500,
file="C:/Develop/lecture_slides/data/sp500_Yahoo.csv",
row.names=FALSE)
library(rutils) # Load package rutils
# Load data frame of S&P500 constituents from CSV file
sp_500 <- read.csv(file="C:/Develop/lecture_slides/data/sp500_Yahoo.csv",
         stringsAsFactors=FALSE)
# Register a getSymbols() alias for each R-safe name pointing at its ticker.
# seq_len() is safe when the data frame is empty (1:NROW() would loop over
# c(1, 0) and index out of bounds).
for (in_dex in seq_len(NROW(sp_500))) {
  cat("processing: ", sp_500$Ticker[in_dex], "\n")
  setSymbolLookup(structure(
    list(list(name=sp_500$Ticker[in_dex])),
    names=sp_500$names[in_dex]))
} # end for
env_sp500 <- new.env() # new environment for data
# Remove all objects from the environment (if necessary)
rm(list=ls(env_sp500), envir=env_sp500)
# Download data and copy it into environment (vectorized call)
rutils::get_symbols(sp_500$names,
  env_out=env_sp500, start_date="1990-01-01")
# Or download in a loop, one symbol at a time
for (sym_bol in sp_500$names) {
  cat("processing: ", sym_bol, "\n")
  rutils::get_symbols(sym_bol,
    env_out=env_sp500, start_date="1990-01-01")
} # end for
save(env_sp500, file="C:/Develop/lecture_slides/data/sp500.RData")
chart_Series(x=env_sp500$BRK_B["2016/"], TA="add_Vo()",
       name="BRK-B stock")
library(quantmod)
# Download U.S. unemployment rate data from FRED
unemp_rate <- getSymbols("UNRATE",
            auto.assign=FALSE,
            src="FRED")
# Plot U.S. unemployment rate data
chart_Series(unemp_rate["1990/"],
       name="U.S. unemployment rate")
library(rutils) # Load package rutils
install.packages("devtools")
library(devtools)
# Install package Quandl from github
install_github("quandl/R-package")
library(Quandl) # Load package Quandl
# Register Quandl API key
# NOTE(review): hard-coded API key committed to source - rotate the key and
# load it from an environment variable or config file instead
Quandl.api_key("pVJi9Nv3V8CD3Js5s7Qx")
# Get short description
packageDescription("Quandl")
# Load help page
help(package="Quandl")
# Remove Quandl from search path
detach("package:Quandl")
library(rutils) # Load package rutils
# Download EOD AAPL prices from WIKI free database
price_s <- Quandl(code="WIKI/AAPL",
  type="xts", start_date="1990-01-01")
x11(width=14, height=7)
chart_Series(price_s["2016", 1:4],
  name="AAPL OHLC prices")
# Add trade volume in extra panel
add_TA(price_s["2016", 5])
# Download euro currency rates
price_s <- Quandl(code="BNP/USDEUR",
  start_date="2013-01-01",
  end_date="2013-12-01", type="xts")
# Download multiple time series
price_s <- Quandl(code=c("NSE/OIL", "WIKI/AAPL"),
  start_date="2013-01-01", type="xts")
# Download AAPL gross profits
prof_it <- Quandl("RAYMOND/AAPL_GROSS_PROFIT_Q",
  type="xts")
chart_Series(prof_it, name="AAPL gross profits")
# Download Hurst time series
price_s <- Quandl(code="PE/AAPL_HURST",
  start_date="2013-01-01", type="xts")
chart_Series(price_s["2016/", 1],
  name="AAPL Hurst")
library(rutils) # Load package rutils
# Load S&P500 stock Quandl codes
sp_500 <- read.csv(
  file="C:/Develop/lecture_slides/data/sp500_quandl.csv",
  stringsAsFactors=FALSE)
# Replace "-" with "_" in symbols
sp_500$free_code <-
  gsub("-", "_", sp_500$free_code)
head(sp_500)
# Vector of symbols in sp_500 frame
tick_ers <- gsub("-", "_", sp_500$ticker)
# Or: split codes like "WIKI/AAPL" and keep the part after the slash
tick_ers <- matrix(unlist(
  strsplit(sp_500$free_code, split="/"),
  use.names=FALSE), ncol=2, byrow=TRUE)[, 2]
# Or
tick_ers <- do_call_rbind(
  strsplit(sp_500$free_code, split="/"))[, 2]
library(rutils) # Load package rutils
env_sp500 <- new.env() # new environment for data
# Remove all objects (if necessary)
rm(list=ls(env_sp500), envir=env_sp500)
# Boolean vector of symbols already downloaded
down_loaded <- tick_ers %in% ls(env_sp500)
# Download data and copy it into environment
for (tick_er in tick_ers[!down_loaded]) {
  cat("processing: ", tick_er, "\n")
  # Drop the first 7 columns, keeping the 5 adjusted OHLCV columns
  # NOTE(review): assumes WIKI's 12-column layout - confirm against the feed
  da_ta <- Quandl(code=paste0("WIKI/", tick_er),
            start_date="1990-01-01",
            type="xts")[, -(1:7)]
  colnames(da_ta) <- paste(tick_er,
    c("Open", "High", "Low", "Close", "Volume"), sep=".")
  assign(tick_er, da_ta, envir=env_sp500)
} # end for
save(env_sp500, file="C:/Develop/lecture_slides/data/sp500.RData")
chart_Series(x=env_sp500$XOM["2016/"], TA="add_Vo()",
       name="XOM stock")
library(rutils)
library(Quandl)
# Register Quandl API key
Quandl.api_key("pVJi9Nv3V8CD3Js5s7Qx")
# Download E-mini S&P500 futures prices (continuous front contract)
price_s <- Quandl(code="CHRIS/CME_ES1",
  type="xts", start_date="1990-01-01")
price_s <- price_s[, c("Open", "High", "Low", "Last", "Volume")]
colnames(price_s)[4] <- "Close"
# Plot the prices
x11(width=5, height=4) # Open x11 for plotting
chart_Series(x=price_s["2008-06/2009-06"],
  TA="add_Vo()",
  name="S&P500 Futures")
# Plot dygraph (drop the Volume column for the candlestick plot)
dygraphs::dygraph(price_s["2008-06/2009-06", -5],
  main="S&P500 Futures") %>%
  dyCandlestick()
# Read the CBOE futures expiration dates and codes (row names = symbols)
date_s <- read.csv(file="C:/Develop/lecture_slides/data/futures_expiration_dates_codes.csv",
  stringsAsFactors=FALSE, row.names=1)
dir_name <- "C:/Develop/data/vix_data"
dir.create(dir_name)
sym_bols <- rownames(date_s)
file_names <- file.path(dir_name, paste0(sym_bols, ".csv"))
log_file <- file.path(dir_name, "log_file.txt")
cboe_url <- "https://markets.cboe.com/us/futures/market_statistics/historical_data/products/csv/VX/"
url_s <- paste0(cboe_url, date_s[, 1])
# Download the files in a loop, appending warnings and errors to log_file.
# FIX: the error handler and the finally clause previously passed append=TRUE
# without file=log_file, so their output went to the console (where append is
# ignored) instead of the log, inconsistent with the warning handler.
for (it in seq_along(url_s)) {
  tryCatch( # Warning and error handler
    download.file(url_s[it],
            destfile=file_names[it], quiet=TRUE),
    # Warning handler captures warning condition
    warning=function(warning_cond) {
      cat(paste("warning handler: ", warning_cond, "\n"), file=log_file, append=TRUE)
    }, # end warning handler
    # Error handler captures error condition
    error=function(error_cond) {
      cat(paste("error handler: ", error_cond, "\n"), file=log_file, append=TRUE)
    }, # end error handler
    # Always record which file was processed
    finally=cat(paste("Processing file name =", file_names[it], "\n"), file=log_file, append=TRUE)
  ) # end tryCatch
} # end for
# Create new environment for data
vix_env <- new.env()
# Download VIX futures data from the CBOE Futures Exchange (CFE)
# NOTE(review): comment said months 6-8 but Months=1:12 downloads the whole
# year - confirm which was intended
library(qmao)
quantmod::getSymbols("VX", Months=1:12,
  Years=2018, src="cfe", auto.assign=TRUE, env=vix_env)
# Or use qmao directly for just the months 6, 7, and 8
qmao::getSymbols.cfe(Symbols="VX",
  Months=6:8, Years=2018, env=vix_env,
  verbose=FALSE, auto.assign=TRUE)
# Calculate the classes of all the objects
# In the environment vix_env
unlist(eapply(vix_env,
  function(x) {class(x)[1]}))
class(vix_env$VX_M18)
colnames(vix_env$VX_M18)
# Save the data to a binary file called "vix_cboe.RData".
save(vix_env,
  file="C:/Develop/data/vix_data/vix_cboe.RData")
| /FRE7241_Lecture_7.R | no_license | Williamqn/lecture_slides | R | false | false | 64,742 | r | # Define Ornstein-Uhlenbeck parameters
# Define Ornstein-Uhlenbeck parameters
eq_price <- 1.0; sigma_r <- 0.02
the_ta <- 0.01; len_gth <- 1000
drif_t <- the_ta*eq_price
theta_1 <- 1-the_ta
# Simulate Ornstein-Uhlenbeck process: AR(1) recursion
# p[i] = (1-theta)*p[i-1] + innovation + theta*eq_price
in_nov <- sigma_r*rnorm(len_gth)
price_s <- numeric(len_gth)
price_s[1] <- in_nov[1]
for (i in 2:len_gth) {
  price_s[i] <- theta_1*price_s[i-1] +
    in_nov[i] + drif_t
} # end for
plot(price_s, type="l",
     xlab="periods", ylab="prices",
     main="Ornstein-Uhlenbeck process")
legend("topright",
 title=paste(c(paste0("sigma_r = ", sigma_r),
   paste0("eq_price = ", eq_price),
   paste0("the_ta = ", the_ta)),
   collapse="\n"),
 legend="", cex=0.8,
 inset=0.1, bg="white", bty="n")
abline(h=eq_price, col='red', lwd=2)
# Recover the OU parameters by regressing returns on lagged prices
re_turns <- rutils::diff_it(price_s)
lag_price <- rutils::lag_it(price_s)
for_mula <- re_turns ~ lag_price
l_m <- lm(for_mula)
summary(l_m)
# Plot regression
plot(for_mula, main="OU Returns Versus Lagged Prices")
abline(l_m, lwd=2, col="red")
# Volatility parameter (theoretical versus realized)
c(sigma_r, sd(re_turns))
# Extract OU parameters from regression
co_eff <- summary(l_m)$coefficients
# theta strength of mean reversion (negative of slope coefficient)
round(co_eff[2, ], 3)
# Equilibrium price = intercept / (-slope)
co_eff[1, 1]/co_eff[2, 1]
# Parameter and t-values
co_eff <- cbind(c(the_ta*eq_price, the_ta),
          co_eff[, 1:2])
rownames(co_eff) <- c("drift", "theta")
round(co_eff, 3)
# Simulate Schwartz process (log-normal OU: mean-reverting log-returns)
re_turns <- numeric(len_gth)
price_s <- numeric(len_gth)
price_s[1] <- eq_price
set.seed(1121) # Reset random numbers
for (i in 2:len_gth) {
  re_turns[i] <- the_ta*(eq_price - price_s[i-1]) +
    sigma_r*rnorm(1)
  price_s[i] <- price_s[i-1] * exp(re_turns[i])
} # end for
plot(price_s, type="l",
     xlab="periods", ylab="prices",
     main="Log-normal Ornstein-Uhlenbeck process")
legend("topright",
 title=paste(c(paste0("sigma_r = ", sigma_r),
   paste0("eq_price = ", eq_price),
   paste0("the_ta = ", the_ta)),
   collapse="\n"),
 legend="", cex=0.8,
 inset=0.12, bg="white", bty="n")
abline(h=eq_price, col='red', lwd=2)
# Verify that rtools are working properly:
devtools::find_rtools()
devtools::has_devel()
# Load package Rcpp
library(Rcpp)
# Get documentation for package Rcpp
# Get short description
packageDescription("Rcpp")
# Load help page
help(package="Rcpp")
# List all datasets in "Rcpp"
data(package="Rcpp")
# List all objects in "Rcpp"
ls("package:Rcpp")
# Remove Rcpp from search path
detach("package:Rcpp")
# Define and compile an inline Rcpp function
Rcpp::cppFunction("
  int times_two(int x)
    { return 2 * x;}
  ") # end cppFunction
# Run Rcpp function
times_two(3)
# Source Rcpp functions from file
Rcpp::sourceCpp(file="C:/Develop/lecture_slides/scripts/mult_rcpp.cpp")
# Multiply two numbers
mult_rcpp(2, 3)
mult_rcpp(1:3, 6:4)
# Multiply two vectors
mult_vec_rcpp(2, 3)
mult_vec_rcpp(1:3, 6:4)
# Define Rcpp inner-product function with an explicit C++ loop;
# returns 0 if the vectors have different lengths
Rcpp::cppFunction("
  double inner_mult(NumericVector x, NumericVector y) {
    int x_size = x.size();
    int y_size = y.size();
    if (x_size != y_size) {
      return 0;
    } else {
      double total = 0;
      for(int i = 0; i < x_size; ++i) {
        total += x[i] * y[i];
      }
      return total;
    }
  }") # end cppFunction
# Run Rcpp function
inner_mult(1:3, 6:4)
inner_mult(1:3, 6:3)
# Define Rcpp Sugar function: vectorized expression instead of a loop
Rcpp::cppFunction("
  double inner_mult_sugar(NumericVector x, NumericVector y) {
    return sum(x * y);
  }") # end cppFunction
# Run Rcpp Sugar function
inner_mult_sugar(1:3, 6:4)
inner_mult_sugar(1:3, 6:3)
# Define R function with loop
inner_mult_r <- function(x, y) {
  # Inner (dot) product of two numeric vectors, computed with an explicit
  # loop as a pure-R benchmark baseline for the Rcpp implementations.
  #   x, y : numeric vectors; only the first length(x) elements of y are used
  # Returns the scalar sum of elementwise products (0 for empty x).
  # seq_along() avoids the 1:NROW(x) trap, which loops over c(1, 0) and
  # returns NA or numeric(0) when x is empty.
  to_tal <- 0
  for (i in seq_along(x)) {
    to_tal <- to_tal + x[i] * y[i]
  }
  to_tal
} # end inner_mult_r
# Run R function
inner_mult_r(1:3, 6:4)
inner_mult_r(1:3, 6:3)
# Compare speed of the pure-R loop, R's built-in %*%, and the Rcpp versions
library(microbenchmark)
summary(microbenchmark(
  pure_r=inner_mult_r(1:10000, 1:10000),
  inner_r=1:10000 %*% 1:10000,
  r_cpp=inner_mult(1:10000, 1:10000),
  r_cpp_sugar=inner_mult_sugar(1:10000, 1:10000),
  times=10))[, c(1, 4, 5)]
# Define Ornstein-Uhlenbeck function in R
sim_ou <- function(len_gth=1000, eq_price=5.0,
                   vol_at=0.01, the_ta=0.01) {
  # Simulate a log-normal Ornstein-Uhlenbeck (Schwartz) price process in R.
  #   len_gth  : number of simulated periods (>= 1)
  #   eq_price : equilibrium (long-run mean) price; also the starting price
  #   vol_at   : volatility of the log-return innovations
  #   the_ta   : strength of mean reversion
  # Returns a numeric vector of len_gth simulated prices.
  # The innovations are drawn up-front with a single rnorm() call, which
  # consumes the same RNG stream as the original per-iteration rnorm(1)
  # calls, so results are identical for a given seed.
  # seq_len(len_gth-1)+1 makes the loop empty for len_gth == 1, whereas the
  # original 2:len_gth produced c(2, 1) and failed with a zero-length
  # replacement error.
  re_turns <- numeric(len_gth)
  price_s <- numeric(len_gth)
  price_s[1] <- eq_price
  in_nov <- vol_at*rnorm(len_gth-1)
  for (i in seq_len(len_gth-1) + 1) {
    re_turns[i] <- the_ta*(eq_price - price_s[i-1]) + in_nov[i-1]
    price_s[i] <- price_s[i-1] * exp(re_turns[i])
  } # end for
  price_s
} # end sim_ou
# Simulate Ornstein-Uhlenbeck process in R
eq_price <- 5.0; sigma_r <- 0.01
the_ta <- 0.01; len_gth <- 1000
set.seed(1121) # Reset random numbers
ou_sim <- sim_ou(len_gth=len_gth, eq_price=eq_price, vol_at=sigma_r, the_ta=the_ta)
# Define the same Ornstein-Uhlenbeck simulation in Rcpp; the innovations are
# passed in as a vector so the RNG stream matches the R version exactly
Rcpp::cppFunction("
NumericVector sim_ou_rcpp(double eq_price,
                    double vol_at,
                    double the_ta,
                    NumericVector in_nov) {
  int len_gth = in_nov.size();
  NumericVector price_s(len_gth);
  NumericVector re_turns(len_gth);
  price_s[0] = eq_price;
  for (int it = 1; it < len_gth; it++) {
    re_turns[it] = the_ta*(eq_price - price_s[it-1]) + vol_at*in_nov[it-1];
    price_s[it] = price_s[it-1] * exp(re_turns[it]);
  } // end for
  return price_s;
}") # end cppFunction
# Simulate Ornstein-Uhlenbeck process in Rcpp
set.seed(1121) # Reset random numbers
ou_sim_rcpp <- sim_ou_rcpp(eq_price=eq_price,
  vol_at=sigma_r,
  the_ta=the_ta,
  in_nov=rnorm(len_gth))
all.equal(ou_sim, ou_sim_rcpp)
# Compare speed of Rcpp and R
library(microbenchmark)
summary(microbenchmark(
  pure_r=sim_ou(len_gth=len_gth, eq_price=eq_price, vol_at=sigma_r, the_ta=the_ta),
  r_cpp=sim_ou_rcpp(eq_price=eq_price, vol_at=sigma_r, the_ta=the_ta, in_nov=rnorm(len_gth)),
  times=10))[, c(1, 4, 5)]
# Source the same Rcpp function for the Ornstein-Uhlenbeck process from file
Rcpp::sourceCpp(file="C:/Develop/lecture_slides/scripts/sim_ou.cpp")
# Simulate Ornstein-Uhlenbeck process in Rcpp
set.seed(1121) # Reset random numbers
ou_sim_rcpp <- sim_ou_rcpp(eq_price=eq_price,
  vol_at=sigma_r,
  the_ta=the_ta,
  in_nov=rnorm(len_gth))
all.equal(ou_sim, ou_sim_rcpp)
# Compare speed of Rcpp and R
library(microbenchmark)
summary(microbenchmark(
  pure_r=sim_ou(len_gth=len_gth, eq_price=eq_price, vol_at=sigma_r, the_ta=the_ta),
  r_cpp=sim_ou_rcpp(eq_price=eq_price, vol_at=sigma_r, the_ta=the_ta, in_nov=rnorm(len_gth)),
  times=10))[, c(1, 4, 5)]
# Calculate uniformly distributed pseudo-random sequence
uni_form <- function(see_d, len_gth=10) {
  # Deterministic uniformly-distributed pseudo-random sequence:
  # iterate the chaotic logistic map x -> 4*x*(1-x) from the seed, then
  # apply the arcsine transform acos(1-2*x)/pi, which maps the logistic
  # map's invariant (arcsine) distribution to uniform on [0, 1].
  #   see_d   : starting value in (0, 1)
  #   len_gth : number of values to generate (>= 1)
  # Returns a numeric vector of len_gth values in [0, 1].
  # seq_len(len_gth-1)+1 makes the loop empty for len_gth == 1, whereas the
  # original 2:len_gth produced c(2, 1) and failed with a zero-length
  # replacement error.
  out_put <- numeric(len_gth)
  out_put[1] <- see_d
  for (i in seq_len(len_gth-1) + 1) {
    out_put[i] <- 4*out_put[i-1]*(1-out_put[i-1])
  } # end for
  acos(1-2*out_put)/pi
} # end uni_form
# Source Rcpp functions from file
Rcpp::sourceCpp(file="C:/Develop/lecture_slides/scripts/uni_form.cpp")
# Microbenchmark the logistic-map generator: R's runif, the R loop, and Rcpp
library(microbenchmark)
summary(microbenchmark(
  pure_r=runif(1e5),
  r_loop=uni_form(0.3, 1e5),
  r_cpp=uniform_rcpp(0.3, 1e5),
  times=10))[, c(1, 4, 5)]
library(RcppArmadillo)
# Source Rcpp functions from file
Rcpp::sourceCpp(file="C:/Develop/lecture_slides/scripts/armadillo_functions.cpp")
vec1 <- runif(1e5)
vec2 <- runif(1e5)
vec_in(vec1, vec2)
vec1 %*% vec2
# Microbenchmark RcppArmadillo code
summary(microbenchmark(
  vec_in=vec_in(vec1, vec2),
  r_code=(vec1 %*% vec2),
  times=100))[, c(1, 4, 5)] # end microbenchmark summary
# Microbenchmark shows:
# vec_in() is several times faster than %*%, especially for longer vectors.
#     expr     mean   median
# 1 vec_in 110.7067 110.4530
# 2 r_code 585.5127 591.3575
# Source Rcpp functions from file
Rcpp::sourceCpp(file="C:/Develop/lecture_slides/scripts/sim_arima.cpp")
# Define AR(2) coefficients
co_eff <- c(0.9, 0.09)
len_gth <- 1e4
set.seed(1121)
in_nov <- rnorm(len_gth)
# Simulate ARIMA using filter()
arima_filter <- filter(x=in_nov,
  filter=co_eff, method="recursive")
# Simulate ARIMA using sim_arima(); coefficients reversed to match its
# expected ordering
ari_ma <- sim_arima(in_nov, rev(co_eff))
all.equal(drop(ari_ma),
  as.numeric(arima_filter))
# Microbenchmark RcppArmadillo code
summary(microbenchmark(
  filter=filter(x=in_nov, filter=co_eff, method="recursive"),
  sim_arima=sim_arima(in_nov, rev(co_eff)),
  times=100))[, c(1, 4, 5)] # end microbenchmark summary
library(RcppArmadillo)
# Source Rcpp functions from file
Rcpp::sourceCpp(file="C:/Develop/lecture_slides/scripts/armadillo_functions.cpp")
mat_rix <- matrix(runif(1e5), nc=1e3)
# De-mean using apply()
new_mat <- apply(mat_rix, 2,
  function(x) (x-mean(x)))
# De-mean using demean_mat()
# NOTE(review): comparing mat_rix (not the return value) to new_mat suggests
# demean_mat() modifies its argument in place (Armadillo by reference) -
# confirm against demean_mat()'s C++ source
demean_mat(mat_rix)
all.equal(new_mat, mat_rix)
# Microbenchmark RcppArmadillo code
# NOTE(review): the apply() branch computes only column means, not the full
# de-meaning done by demean_mat(), so the timings are not like-for-like
summary(microbenchmark(
  demean_mat=demean_mat(mat_rix),
  apply=(apply(mat_rix, 2, mean)),
  times=100))[, c(1, 4, 5)] # end microbenchmark summary
# Microbenchmark shows:
# demean_mat() is over 70 times faster than apply()
#         expr       mean   median
# 1 demean_mat   127.7539  125.604
# 2      apply 10781.7534 9291.674
# Perform matrix inversion
# Create a random positive semi-definite matrix (t(A) %*% A is symmetric,
# and almost surely positive-definite for random uniform entries)
mat_rix <- matrix(runif(25), nc=5)
mat_rix <- t(mat_rix) %*% mat_rix
# Invert the matrix
matrix_inv <- solve(mat_rix)
# Verify the RcppArmadillo inverse agrees with solve().
# FIX: the original ran all.equal(inv_mat, mat_rix), comparing the FUNCTION
# object inv_mat itself to the matrix (always a mismatch), and discarded the
# result of the inv_mat() call.
# NOTE(review): if inv_mat() inverts its argument in place (by reference),
# compare matrix_inv to mat_rix after the call instead - confirm against
# inv_mat()'s C++ source.
all.equal(matrix_inv, inv_mat(mat_rix))
# Microbenchmark RcppArmadillo code
library(microbenchmark)
summary(microbenchmark(
  inv_mat=inv_mat(mat_rix),
  solve=solve(mat_rix),
  times=100))[, c(1, 4, 5)] # end microbenchmark summary
# Microbenchmark shows:
# inv_mat() is over 10 times faster than solve()
#      expr     mean median
# 1 inv_mat  3.42669  2.933
# 2   solve 32.00254 31.280
library(RcppArmadillo)
# Source Rcpp functions from file
Rcpp::sourceCpp("C:/Develop/lecture_slides/scripts/calc_weights.cpp")
# Create random matrix of returns
mat_rix <- matrix(rnorm(300), nc=5)
# Regularized inverse of covariance matrix: keep only the top max_eigen
# eigenvectors and reconstruct the inverse from the truncated eigendecomposition
max_eigen <- 4
ei_gen <- eigen(cov(mat_rix))
cov_inv <- ei_gen$vectors[, 1:max_eigen] %*%
  (t(ei_gen$vectors[, 1:max_eigen]) / ei_gen$values[1:max_eigen])
# Regularized inverse using RcppArmadillo
cov_inv_arma <- calc_inv(mat_rix, max_eigen)
all.equal(cov_inv, cov_inv_arma)
# Microbenchmark RcppArmadillo code
library(microbenchmark)
summary(microbenchmark(
  pure_r={
    ei_gen <- eigen(cov(mat_rix))
    ei_gen$vectors[, 1:max_eigen] %*%
      (t(ei_gen$vectors[, 1:max_eigen]) / ei_gen$values[1:max_eigen])
  },
  r_cpp=calc_inv(mat_rix, max_eigen),
  times=100))[, c(1, 4, 5)] # end microbenchmark summary
# Table of futures contract root codes
future_s <- rbind(c("S&P500 index", "ES"),
            c("10yr Treasury", "ZN"),
            c("VIX index", "VX"),
            c("Gold", "GC"),
            c("Oil", "CL"),
            c("Euro FX", "EC"),
            c("Swiss franc", "SF"),
            c("Japanese Yen", "JY"))
colnames(future_s) <- c("Futures contract", "Code")
print(xtable::xtable(future_s), comment=FALSE, size="scriptsize", include.rownames=FALSE, latex.environments="flushleft")
# Monthly futures contract codes (January=F ... December=Z)
month_codes <- cbind(c("January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"),
  c("F", "G", "H", "J", "K", "M", "N", "Q", "U", "V", "X", "Z"))
colnames(month_codes) <- c("Month", "Code")
print(xtable::xtable(month_codes), comment=FALSE, size="scriptsize", include.rownames=FALSE, latex.environments="flushright")
# Table of standard versus E-mini futures contract codes
future_s <- rbind(c("S&P500 index", "SP", "ES"),
            c("10yr Treasury", "ZN", "ZN"),
            c("VIX index", "VX", "delisted"),
            c("Gold", "GC", "YG"),
            c("Oil", "CL", "QM"),
            c("Euro FX", "EC", "E7"),
            c("Swiss franc", "SF", "MSF"),
            c("Japanese Yen", "JY", "J7"))
colnames(future_s) <- c("Futures contract", "Standard", "E-mini")
print(xtable::xtable(future_s), comment=FALSE, size="scriptsize", include.rownames=FALSE, latex.environments="flushleft")
# Load data for S&P Emini futures June 2019 contract
sym_bol <- "ES"
dir_name <- "C:/Develop/data/ib_data"
file_name <- file.path(dir_name, paste0(sym_bol, ".csv"))
# Read a data table from CSV file
price_s <- data.table::fread(file_name)
# Coerce price_s into data frame (in place)
data.table::setDF(price_s)
# Or
# price_s <- data.table:::as.data.frame.data.table(
#   data.table::fread(file_name))
# First column of price_s is a numeric date-time (seconds since epoch)
tail(price_s)
# Coerce price_s into xts series
price_s <- xts::xts(price_s[, 2:6],
  order.by=as.Date(as.POSIXct.numeric(price_s[, 1],
    tz="America/New_York",
    origin="1970-01-01")))
colnames(price_s) <- c("Open", "High", "Low", "Close", "Volume")
tail(price_s)
# Plot OHLC data in x11 window
x11(width=5, height=4) # Open x11 for plotting
par(mar=c(5, 5, 2, 1), oma=c(0, 0, 0, 0))
chart_Series(x=price_s, TA="add_Vo()",
       name="S&P500 futures")
# Plot dygraph
dygraphs::dygraph(price_s[, 1:4], main="OHLC prices") %>%
  dyCandlestick()
# Load ESU8 (September 2018) data
dir_name <- "C:/Develop/data/ib_data"
file_name <- file.path(dir_name, "ESU8.csv")
ES_U8 <- data.table::fread(file_name)
data.table::setDF(ES_U8)
ES_U8 <- xts::xts(ES_U8[, 2:6],
  order.by=as.Date(as.POSIXct.numeric(ES_U8[, 1],
    tz="America/New_York", origin="1970-01-01")))
colnames(ES_U8) <- c("Open", "High", "Low", "Close", "Volume")
# Load ESM8 (June 2018) data
file_name <- file.path(dir_name, "ESM8.csv")
ES_M8 <- data.table::fread(file_name)
data.table::setDF(ES_M8)
ES_M8 <- xts::xts(ES_M8[, 2:6],
  order.by=as.Date(as.POSIXct.numeric(ES_M8[, 1],
    tz="America/New_York", origin="1970-01-01")))
colnames(ES_M8) <- c("Open", "High", "Low", "Close", "Volume")
x11(width=6, height=5) # Open x11 for plotting
# Plot last month of ESU8 and ESM8 volume data
en_d <- end(ES_M8)
star_t <- (en_d - 30)
vol_ume <- cbind(Vo(ES_U8),
  Vo(ES_M8))[paste0(star_t, "/", en_d)]
colnames(vol_ume) <- c("ESU8", "ESM8")
col_ors <- c("blue", "green")
plot(vol_ume, col=col_ors, lwd=3, major.ticks="days",
     format.labels="%b-%d", observation.based=TRUE,
     main="Volumes of ESU8 and ESM8 futures")
legend("topleft", legend=colnames(vol_ume), col=col_ors,
 title=NULL, bty="n", lty=1, lwd=6, inset=0.1, cex=0.7)
# Find the roll date: first day when ESU8 volume exceeds ESM8
exceed_s <- (vol_ume[, "ESU8"] > vol_ume[, "ESM8"])
in_dex <- match(TRUE, exceed_s)
# in_dex <- min(which(exceed_s))
# Scale the ES_M8 prices so the two contracts meet at the roll date
# (in_dex is reused here: first an integer position, then the roll date)
in_dex <- index(exceed_s[in_dex])
fac_tor <- as.numeric(Cl(ES_U8[in_dex])/Cl(ES_M8[in_dex]))
ES_M8[, 1:4] <- fac_tor*ES_M8[, 1:4]
# Calculate continuous contract prices: ESM8 before the roll, ESU8 after
chain_ed <- rbind(ES_M8[index(ES_M8) < in_dex],
  ES_U8[index(ES_U8) >= in_dex])
# Or
# Chain_ed <- rbind(ES_M8[paste0("/", in_dex-1)],
#   ES_U8[paste0(in_dex, "/")])
# Plot continuous contract prices
chart_Series(x=chain_ed["2018"], TA="add_Vo()",
       name="S&P500 chained futures")
# Download VIX index data from CBOE
vix_index <- data.table::fread("http://www.cboe.com/publish/scheduledtask/mktdata/datahouse/vixcurrent.csv", skip=1)
class(vix_index)
dim(vix_index)
tail(vix_index)
sapply(vix_index, class)
vix_index <- xts(vix_index[, -1],
  order.by=as.Date(vix_index$Date, format="%m/%d/%Y"))
colnames(vix_index) <- c("Open", "High", "Low", "Close")
# Save the VIX data into the vix_env environment in the binary file
load(file="C:/Develop/data/ib_data/vix_cboe.RData")
ls(vix_env)
vix_env$vix_index <- vix_index
ls(vix_env)
save(vix_env, file="C:/Develop/data/ib_data/vix_cboe.RData")
# Plot OHLC data in x11 window
chart_Series(x=vix_index["2018"], name="VIX Index")
# Plot dygraph
dygraphs::dygraph(vix_index, main="VIX Index") %>%
  dyCandlestick()
# Read CBOE monthly futures expiration dates
date_s <- read.csv(
  file="C:/Develop/data/vix_data/vix_dates.csv",
  stringsAsFactors=FALSE)
date_s <- as.Date(date_s[, 1])
year_s <- format(date_s, format="%Y")
# Keep only the last digit of the year (e.g. "2018" -> "8"), matching the
# CBOE one-digit year code convention
year_s <- substring(year_s, 4)
# Monthly futures contract codes (January=F ... December=Z)
month_codes <-
  c("F", "G", "H", "J", "K", "M",
    "N", "Q", "U", "V", "X", "Z")
# NOTE(review): paste0 recycles month_codes against year_s - assumes the
# dates are one per month in calendar order; confirm against vix_dates.csv
sym_bols <- paste0("VX", month_codes, year_s)
date_s <- as.data.frame(date_s)
colnames(date_s) <- "exp_dates"
rownames(date_s) <- sym_bols
# Write dates to CSV file, with row names
write.csv(date_s, row.names=TRUE,
  file="C:/Develop/data/vix_data/vix_futures.csv")
# Load VIX futures data from binary file
load(file="C:/Develop/data/vix_data/vix_cboe.RData")
# Get all VIX futures for 2018 except January
sym_bols <- ls(vix_env)
sym_bols <- sym_bols[grep("*8", sym_bols)]
sym_bols <- sym_bols[2:9]
# Specify dates for curves (a calm day and a volatile day)
low_vol <- as.Date("2018-01-11")
hi_vol <- as.Date("2018-02-05")
# Extract all VIX futures closing prices on the two dates
curve_s <- lapply(sym_bols, function(sym_bol) {
  x_ts <- get(x=sym_bol, envir=vix_env)
  Cl(x_ts[c(low_vol, hi_vol)])
}) # end lapply
curve_s <- rutils::do_call(cbind, curve_s)
colnames(curve_s) <- sym_bols
# Transpose: one row per contract, one column per date
curve_s <- t(coredata(curve_s))
colnames(curve_s) <- c("Contango 01/11/2018",
                 "Backwardation 02/05/2018")
x11(width=7, height=5)
par(mar=c(3, 2, 1, 1), oma=c(0, 0, 0, 0))
plot(curve_s[, 1], type="l", lty=1, col="blue", lwd=3,
     xaxt="n", xlab="", ylab="", ylim=range(curve_s),
     main="VIX Futures Curves")
axis(1, at=(1:NROW(curve_s)), labels=rownames(curve_s))
lines(curve_s[, 2], lty=1, lwd=3, col="red")
legend(x="topright", legend=colnames(curve_s),
 inset=0.05, cex=1.0, bty="n",
 col=c("blue", "red"), lwd=6, lty=1)
# Load VIX futures data from binary file
load(file="C:/Develop/data/vix_data/vix_cboe.RData")
# Read CBOE futures expiration dates
date_s <- read.csv(file="C:/Develop/data/vix_data/vix_futures.csv",
  stringsAsFactors=FALSE, row.names=1)
sym_bols <- rownames(date_s)
date_s <- as.Date(date_s[, 1])
to_day <- as.Date("2018-05-07")
maturi_ty <- (to_day + 30)
# Find the expirations straddling the 30-day maturity date
# NOTE(review): match() gives NA if no expiration is after maturi_ty, and
# in_dex-1 gives an empty subscript if the first expiration is already after
# it - both edge cases are unhandled here
in_dex <- match(TRUE, date_s > maturi_ty)
front_date <- date_s[in_dex-1]
back_date <- date_s[in_dex]
front_symbol <- sym_bols[in_dex-1]
back_symbol <- sym_bols[in_dex]
front_price <- get(x=front_symbol, envir=vix_env)
front_price <- as.numeric(Cl(front_price[to_day]))
back_price <- get(x=back_symbol, envir=vix_env)
back_price <- as.numeric(Cl(back_price[to_day]))
# Calculate the constant maturity 30-day futures price by linear
# interpolation in time between the front and back contracts
ra_tio <- as.numeric(maturi_ty - front_date) /
  as.numeric(back_date - front_date)
pric_e <- (ra_tio*back_price + (1-ra_tio)*front_price)
library(HighFreq)
x11(width=5, height=3) # Open x11 for plotting
# Load VIX futures data from binary file
load(file="C:/Develop/data/vix_data/vix_cboe.RData")
# Plot VIX, VTI, and SVXY close prices with a blue line theme
plot_theme <- chart_theme()
plot_theme$col$line.col <- "blue"
chart_Series(x=Cl(vix_env$vix_index["2007/"]),
  theme=plot_theme, name="VIX Index")
chart_Series(x=Cl(rutils::etf_env$VTI["2007/"]),
  theme=plot_theme, name="VTI ETF")
chart_Series(x=Cl(vix_env$vix_index["2017/2018"]),
  theme=plot_theme, name="VIX Index")
chart_Series(x=Cl(rutils::etf_env$SVXY["2017/2018"]),
  theme=plot_theme, name="SVXY ETF")
# Install package IBrokers
install.packages("IBrokers")
# Load package IBrokers
library(IBrokers)
# Get documentation for package IBrokers
# Get short description
packageDescription("IBrokers")
# Load help page
help(package="IBrokers")
# List all datasets in "IBrokers"
data(package="IBrokers")
# List all objects in "IBrokers"
ls("package:IBrokers")
# Remove IBrokers from search path
detach("package:IBrokers")
# Install package IBrokers2
devtools::install_github(repo="algoquant/IBrokers2")
# Connect to Interactive Brokers TWS (default paper-trading port 7497)
ib_connect <- IBrokers::twsConnect(port=7497)
# Or connect to IB Gateway
# Ib_connect <- ibgConnect(port=4002)
# Check connection
IBrokers::isConnected(ib_connect)
# Close the Interactive Brokers API connection
IBrokers::twsDisconnect(ib_connect)
# Connect to Interactive Brokers TWS
ib_connect <- IBrokers::twsConnect(port=7497)
# Or connect to IB Gateway
# Ib_connect <- ibgConnect(port=4002)
# Download account information from IB (paper-trading account code)
ac_count <- "DU1215081"
ib_account <- IBrokers::reqAccountUpdates(conn=ib_connect,
  acctCode=ac_count)
# Extract account balances (first list element holds the account values)
balance_s <- ib_account[[1]]
balance_s$AvailableFunds
# Extract contract names, net positions, and profits and losses
IBrokers::twsPortfolioValue(ib_account)
# Close the Interactive Brokers API connection
IBrokers::twsDisconnect(ib_connect)
# Define AAPL stock contract (object)
con_tract <- IBrokers::twsEquity("AAPL", primary="SMART")
# Define CHF currency contract
con_tract <- IBrokers::twsCurrency("CHF", currency="USD")
# Define S&P Emini future June 2019 contract
con_tract <- IBrokers::twsFuture(symbol="ES",
  exch="GLOBEX", expiry="201906")
# Define 10yr Treasury future June 2019 contract
con_tract <- IBrokers::twsFuture(symbol="ZN",
  exch="ECBOT", expiry="201906")
# Define euro currency future June 2019 contract
con_tract <- IBrokers::twsFuture(symbol="EUR",
  exch="GLOBEX", expiry="201906")
# Define Gold future June 2019 contract
con_tract <- IBrokers::twsFuture(symbol="GC",
  exch="NYMEX", expiry="201906")
# Define Oil future January 2019 contract
con_tract <- IBrokers::twsFuture(symbol="QM",
  exch="NYMEX", expiry="201901")
# Test if contract object is correct
IBrokers::is.twsContract(con_tract)
# Get list with instrument information
IBrokers::reqContractDetails(conn=ib_connect, Contract=con_tract)
# Install the package twsInstrument
install.packages("twsInstrument", repos="http://r-forge.r-project.org")
# Define euro future using getContract() and the IB contract id (Conid)
con_tract <- twsInstrument::getContract("317631411")
# Get list with instrument information
IBrokers::reqContractDetails(conn=ib_connect, Contract=con_tract)
# Define VIX monthly and weekly futures June 2019 contract
sym_bol <- "VIX"
con_tract <- IBrokers::twsFuture(symbol=sym_bol,
  exch="CFE", expiry="201906")
# Define VIX monthly futures June 2019 contract (by local symbol)
con_tract <- IBrokers::twsFuture(symbol=sym_bol,
  local="VXV8", exch="CFE", expiry="201906")
# Define VIX weekly futures October 3rd 2018 contract (by local symbol)
con_tract <- IBrokers::twsFuture(symbol=sym_bol,
  local="VX40V8", exch="CFE", expiry="201906")
# Get list with instrument information
IBrokers::reqContractDetails(conn=ib_connect,
  Contract=con_tract)
# Define S&P Emini futures June 2019 contract
sym_bol <- "ES"
con_tract <- IBrokers::twsFuture(symbol=sym_bol,
  exch="GLOBEX", expiry="201906")
# Open file for data download
dir_name <- "C:/Develop/data/ib_data"
dir.create(dir_name)
file_name <- file.path(dir_name, paste0(sym_bol, "_201906.csv"))
file_connect <- file(file_name, open="w")
# Connect to Interactive Brokers TWS
ib_connect <- IBrokers::twsConnect(port=7497)
# Write CSV header row to file before streaming the bar data
cat(paste(paste(sym_bol, c("Index", "Open", "High", "Low", "Close", "Volume", "WAP", "Count"), sep="."), collapse=","), "\n", file=file_connect)
# Download 6 months of daily historical bars to file
IBrokers::reqHistoricalData(conn=ib_connect,
  Contract=con_tract,
  barSize="1 day", duration="6 M",
  file=file_connect)
# Close data file
close(file_connect)
# Close the Interactive Brokers API connection
IBrokers::twsDisconnect(ib_connect)
# Define IB contract objects for stock symbols
sym_bols <- c("AAPL", "F", "MSFT")
con_tracts <- lapply(sym_bols, IBrokers::twsEquity, primary="SMART")
names(con_tracts) <- sym_bols
# Open file connections for data download (one time-stamped CSV per symbol)
dir_name <- "C:/Develop/data/ib_data"
file_names <- file.path(dir_name, paste0(sym_bols, format(Sys.time(), format="_%m_%d_%Y_%H_%M"), ".csv"))
file_connects <- lapply(file_names, function(file_name) file(file_name, open="w"))
# Connect to Interactive Brokers TWS
ib_connect <- IBrokers::twsConnect(port=7497)
# Download historical 1-minute bar data to files.
# seq_along() is safe if sym_bols is ever empty (1:NROW() would loop twice).
for (it in seq_along(sym_bols)) {
  sym_bol <- sym_bols[it]
  file_connect <- file_connects[[it]]
  con_tract <- con_tracts[[it]]
  cat("Downloading data for: ", sym_bol, "\n")
  # Write CSV header row to file before streaming the bar data
  cat(paste(paste(sym_bol, c("Index", "Open", "High", "Low", "Close", "Volume", "WAP", "XTRA", "Count"), sep="."), collapse=","), "\n", file=file_connect)
  IBrokers::reqHistoricalData(conn=ib_connect,
    Contract=con_tract,
    barSize="1 min", duration="2 D",
    file=file_connect)
  Sys.sleep(10) # 10s pause to avoid IB pacing violation
} # end for
# Close data files
for (file_connect in file_connects) close(file_connect)
# Close the Interactive Brokers API connection
IBrokers::twsDisconnect(ib_connect)
# Define S&P Emini futures June 2018 contract
# (include_expired="1" is required to query an already-expired contract)
sym_bol <- "ES"
con_tract <- IBrokers::twsFuture(symbol=sym_bol,
  include_expired="1",
  exch="GLOBEX", expiry="201806")
# Open file connection for ESM8 data download
file_name <- file.path(dir_name, paste0(sym_bol, "M8.csv"))
file_connect <- file(file_name, open="w")
# Connect to Interactive Brokers TWS
ib_connect <- IBrokers::twsConnect(port=7497)
# Download 2 years of daily historical bars to file
IBrokers::reqHistoricalData(conn=ib_connect,
  Contract=con_tract,
  barSize="1 day", duration="2 Y",
  file=file_connect)
# Close data file
close(file_connect)
# Close the Interactive Brokers API connection
IBrokers::twsDisconnect(ib_connect)
# Load OHLC data and coerce it into xts series
price_s <- data.table::fread(file_name)
data.table::setDF(price_s)
price_s <- xts::xts(price_s[, 2:6],
  order.by=as.Date(as.POSIXct.numeric(price_s[, 1],
    tz="America/New_York", origin="1970-01-01")))
colnames(price_s) <- c("Open", "High", "Low", "Close", "Volume")
# Plot OHLC data in x11 window
chart_Series(x=price_s, TA="add_Vo()",
       name="S&P500 ESM8 futures")
# Plot dygraph
dygraphs::dygraph(price_s[, 1:4], main="S&P500 ESM8 futures") %>%
  dyCandlestick()
# Define S&P Emini futures June 2019 contract
sym_bol <- "ES"
con_tract <- IBrokers::twsFuture(symbol=sym_bol,
  exch="GLOBEX", expiry="201906")
# Open file connection for data download
dir_name <- "C:/Develop/data/ib_data"
# Dir.create(dir_name)
file_name <- file.path(dir_name, paste0(sym_bol, "_taq_live.csv"))
file_connect <- file(file_name, open="w")
# Connect to Interactive Brokers TWS
ib_connect <- IBrokers::twsConnect(port=7497)
# Stream live TAQ (trade-and-quote) market data to file
# (blocks until interrupted)
IBrokers::reqMktData(conn=ib_connect,
  Contract=con_tract,
  eventWrapper=eWrapper.MktData.CSV(1),
  file=file_connect)
# Close data file
close(file_connect)
# Close the Interactive Brokers API connection
IBrokers::twsDisconnect(ib_connect)
# Define S&P Emini futures June 2019 contract
sym_bol <- "ES"
con_tract <- IBrokers::twsFuture(symbol=sym_bol,
  exch="GLOBEX", expiry="201906")
# Open file connection for data download
dir_name <- "C:/Develop/data/ib_data"
# Dir.create(dir_name)
file_name <- file.path(dir_name, paste0(sym_bol, "_ohlc_live.csv"))
file_connect <- file(file_name, open="w")
# Connect to Interactive Brokers TWS
ib_connect <- IBrokers::twsConnect(port=7497)
# Stream live 1-second OHLC bars to file (blocks until interrupted)
IBrokers::reqRealTimeBars(conn=ib_connect,
  Contract=con_tract, barSize="1",
  eventWrapper=eWrapper.RealTimeBars.CSV(1),
  file=file_connect)
# Close the Interactive Brokers API connection
IBrokers::twsDisconnect(ib_connect)
# Close data file
close(file_connect)
# Load OHLC data and coerce it into xts series
# (the headerless CSV gets default column names V1..V6)
library(data.table)
price_s <- data.table::fread(file_name)
price_s <- xts::xts(price_s[, paste0("V", 2:6)],
  as.POSIXct.numeric(as.numeric(price_s[, V1]), tz="America/New_York", origin="1970-01-01"))
colnames(price_s) <- c("Open", "High", "Low", "Close", "Volume")
# Plot OHLC data in x11 window
x11()
chart_Series(x=price_s, TA="add_Vo()",
       name="S&P500 ESM9 futures")
# Plot dygraph
library(dygraphs)
dygraphs::dygraph(price_s[, 1:4], main="S&P500 ESM9 futures") %>%
  dyCandlestick()
library(IBrokers)
# Define list of S&P futures and 10yr Treasury contracts
con_tracts <- list(ES=IBrokers::twsFuture(symbol="ES", exch="GLOBEX", expiry="201906"),
ZN=IBrokers::twsFuture(symbol="ZN", exch="ECBOT", expiry="201906"))
# Open the file connection for storing the bar data
# One time-stamped CSV file and one open connection per contract
dir_name <- "C:/Develop/data/ib_data"
file_names <- file.path(dir_name, paste0(c("ES_", "ZN_"), format(Sys.time(), format="%m_%d_%Y_%H_%M"), ".csv"))
file_connects <- lapply(file_names, function(file_name) file(file_name, open="w"))
# Connect to Interactive Brokers TWS
ib_connect <- IBrokers::twsConnect(port=7497)
# Download live data to file
# The eventWrapper is sized to the number of contracts so each bar stream
# is routed to its own file connection
IBrokers::reqRealTimeBars(conn=ib_connect,
Contract=con_tracts,
barSize="1", useRTH=FALSE,
eventWrapper=eWrapper.RealTimeBars.CSV(NROW(con_tracts)),
file=file_connects)
# Close the Interactive Brokers API connection
IBrokers::twsDisconnect(ib_connect)
# Close data files
for (file_connect in file_connects)
close(file_connect)
library(data.table)
# Load ES futures June 2019 contract and coerce it into xts series
price_s <- data.table::fread(file_names[1])
# with=FALSE is required: otherwise data.table evaluates paste0("V", 2:6)
# as a j-expression and returns the character vector of names, not the columns
price_s <- xts::xts(price_s[, paste0("V", 2:6), with=FALSE],
as.POSIXct.numeric(as.numeric(price_s[, V1]), tz="America/New_York", origin="1970-01-01"))
colnames(price_s) <- c("Open", "High", "Low", "Close", "Volume")
# Plot dygraph
library(dygraphs)
dygraphs::dygraph(price_s[, 1:4], main="S&P500 ESM9 futures") %>%
dyCandlestick()
# Load ZN 10yr Treasury futures June 2019 contract
price_s <- data.table::fread(file_names[2])
price_s <- xts::xts(price_s[, paste0("V", 2:6), with=FALSE],
as.POSIXct.numeric(as.numeric(price_s[, V1]), tz="America/New_York", origin="1970-01-01"))
colnames(price_s) <- c("Open", "High", "Low", "Close", "Volume")
# Plot dygraph
dygraphs::dygraph(price_s[, 1:4], main="ZN 10yr Treasury futures") %>%
dyCandlestick()
# Define S&P Emini future June 2019 contract
# NOTE(review): each assignment below overwrites con_tract — only the last
# definition (6JZ8 yen futures) is in effect when the orders are placed
con_tract <- IBrokers::twsFuture(symbol="ES", exch="GLOBEX", expiry="201906")
# Define euro currency contract EUR.USD
con_tract <- IBrokers::twsCurrency("EUR", currency="USD")
# Define euro currency E-mini futures June 2019 contract E7Z8
con_tract <- IBrokers::twsFuture(symbol="E7", exch="GLOBEX", expiry="201906")
# Define Japanese yen currency contract JPY.USD
con_tract <- IBrokers::twsCurrency("JPY", currency="USD")
# Define Japanese yen currency E-mini futures June 2019 contract J7Z8
con_tract <- IBrokers::twsFuture(symbol="J7", exch="GLOBEX", expiry="201906")
# Define Japanese yen currency futures June 2019 contract 6JZ8
con_tract <- IBrokers::twsFuture(symbol="JPY", exch="GLOBEX", expiry="201906")
# Connect to Interactive Brokers TWS
ib_connect <- IBrokers::twsConnect(port=7497)
IBrokers::reqContractDetails(conn=ib_connect, Contract=con_tract)
# Request trade order ID
# Each order needs a fresh ID obtained from TWS via reqIds()
order_id <- IBrokers::reqIds(ib_connect)
# Create buy market order object
ib_order <- IBrokers::twsOrder(order_id,
orderType="MKT", action="BUY", totalQuantity=1)
# Place trade order
IBrokers::placeOrder(ib_connect, con_tract, ib_order)
# Execute sell market order
order_id <- IBrokers::reqIds(ib_connect)
ib_order <- IBrokers::twsOrder(order_id,
orderType="MKT", action="SELL", totalQuantity=1)
IBrokers::placeOrder(ib_connect, con_tract, ib_order)
# Execute buy market order
order_id <- IBrokers::reqIds(ib_connect)
ib_order <- IBrokers::twsOrder(order_id,
orderType="MKT", action="BUY", totalQuantity=1)
IBrokers::placeOrder(ib_connect, con_tract, ib_order)
# Request trade order ID
order_id <- IBrokers::reqIds(ib_connect)
# Create buy limit order object
# NOTE(review): lmtPrice is passed as a string here — presumably accepted by
# twsOrder(), but confirm numeric is not required
ib_order <- IBrokers::twsOrder(order_id, orderType="LMT",
lmtPrice="1.1511", action="BUY", totalQuantity=1)
# Place trade order
IBrokers::placeOrder(ib_connect, con_tract, ib_order)
# Cancel trade order
IBrokers::cancelOrder(ib_connect, order_id)
# Execute sell limit order
order_id <- IBrokers::reqIds(ib_connect)
ib_order <- IBrokers::twsOrder(order_id, orderType="LMT",
lmtPrice="1.1512", action="SELL", totalQuantity=1)
IBrokers::placeOrder(ib_connect, con_tract, ib_order)
# Cancel trade order
IBrokers::cancelOrder(ib_connect, order_id)
# Close the Interactive Brokers API connection
IBrokers::twsDisconnect(ib_connect)
# Construct an IBrokers event wrapper that, on every real-time bar received,
# writes the bar to file, prints it to the console, and replaces the current
# pair of resting limit orders (a buy below the low, a sell above the high).
# n: number of instruments the wrapper should track (one data slot per id).
# NOTE(review): relies on globals defined elsewhere in the script —
# eWrapper_new(), ib_connect, con_tract, and a "count_er" slot in eW's data
# environment — so this function only works inside a live trading session.
eWrapper_realtimebars <- function(n = 1) {
eW <- eWrapper_new(NULL)
# eW <- IBrokers::eWrapper(NULL)
# Pre-allocate one single-row xts per instrument for the 7 bar fields
eW$assign.Data("data", rep(list(structure(.xts(matrix(rep(NA_real_, 7), ncol = 7), 0), .Dimnames = list(NULL, c("Open", "High", "Low", "Close", "Volume", "WAP", "Count")))), n))
# Callback invoked by IBrokers for each incoming real-time bar message
eW$realtimeBars <- function(curMsg, msg, timestamp, file, ...) {
id <- as.numeric(msg[2])
file <- file[[id]]
data <- eW$get.Data("data")
# Stamp the cached xts row with the bar's epoch time (msg[3])
attr(data[[id]], "index") <- as.numeric(msg[3])
nr.data <- NROW(data[[id]])
# Write to file
cat(paste(msg[3], msg[4], msg[5], msg[6], msg[7], msg[8], msg[9], msg[10], sep = ","), "\n", file = file, append = TRUE)
# Write to console
# eW$count_er <- eW$count_er + 1
eW$assign.Data("count_er", eW$get.Data("count_er")+1)
cat(paste0("count_er=", eW$get.Data("count_er"), "\tOpen=", msg[4], "\tHigh=", msg[5], "\tLow=", msg[6], "\tClose=", msg[7], "\tVolume=", msg[8]), "\n")
# cat(paste0("Open=", msg[4], "\tHigh=", msg[5], "\tLow=", msg[6], "\tClose=", msg[7], "\tVolume=", msg[8]), "\n")
#Trade
# Cancel previous trade orders
buy_id <- eW$get.Data("buy_id")
sell_id <- eW$get.Data("sell_id")
if (buy_id>0) IBrokers::cancelOrder(ib_connect, buy_id)
if (sell_id>0) IBrokers::cancelOrder(ib_connect, sell_id)
# Execute buy limit order
# NOTE(review): msg elements are strings; msg[6]-0.25 presumably relies on
# implicit coercion — confirm this does not error in practice
buy_id <- IBrokers::reqIds(ib_connect)
buy_order <- IBrokers::twsOrder(buy_id, orderType="LMT",
lmtPrice=msg[6]-0.25, action="BUY", totalQuantity=1)
IBrokers::placeOrder(ib_connect, con_tract, buy_order)
# Execute sell limit order
sell_id <- IBrokers::reqIds(ib_connect)
sell_order <- IBrokers::twsOrder(sell_id, orderType="LMT",
lmtPrice=msg[5]+0.25, action="SELL", totalQuantity=1)
IBrokers::placeOrder(ib_connect, con_tract, sell_order)
# Copy new trade orders
eW$assign.Data("buy_id", buy_id)
eW$assign.Data("sell_id", sell_id)
#Trade finished
# Store the latest bar values back into the cached data slot
data[[id]][nr.data, 1:7] <- as.numeric(msg[4:10])
eW$assign.Data("data", data)
c(curMsg, msg)
} # end eW$realtimeBars
return(eW)
} # end eWrapper_realtimebars
# Install package data.table
install.packages("data.table")
# Load package data.table
library(data.table)
# Get documentation for package data.table
# Get short description
packageDescription("data.table")
# Load help page
help(package="data.table")
# List all datasets in "data.table"
data(package="data.table")
# List all objects in "data.table"
ls("package:data.table")
# Remove data.table from search path
detach("package:data.table")
# Create a data table
library(data.table)
data_table <- data.table::data.table(
col1=sample(7), col2=sample(7), col3=sample(7))
# Print data_table
class(data_table); data_table
# column referenced without quotes
data_table[, col2]
# row referenced without a following comma
data_table[2]
# Print option "datatable.print.nrows"
# Tables longer than this option print only head and tail rows
getOption("datatable.print.nrows")
options(datatable.print.nrows=10)
getOption("datatable.print.nrows")
# Number of rows in data_table
NROW(data_table)
# Or
data_table[, NROW(col1)]
# Or
data_table[, .N]
# microbenchmark speed of data.table syntax
library(microbenchmark)
summary(microbenchmark(
dt=data_table[, .N],
pure_r=NROW(data_table),
times=10))[, c(1, 4, 5)] # end microbenchmark summary
# Read a data table from CSV file
dir_name <- "C:/Develop/lecture_slides/data/"
file_name <- file.path(dir_name, "weather_delays14.csv")
data_table <- data.table::fread(file_name)
class(data_table); dim(data_table)
data_table
# fread() reads the same data as read.csv()
# setDF() coerces the data.table to a data.frame so all.equal() compares like with like
all.equal(read.csv(file_name, stringsAsFactors=FALSE),
setDF(data.table::fread(file_name)))
# fread() is much faster than read.csv()
summary(microbenchmark(
pure_r=read.csv(file_name),
fread=setDF(data.table::fread(file_name)),
times=10))[, c(1, 4, 5)] # end microbenchmark summary
# Write data table to file in different ways
data.table::fwrite(data_table, file="data_table.csv")
write.csv(data_table, file="data_table2.csv")
# cat() here dumps the raw column values with no header or separators —
# shown only as a (bad) baseline for the benchmark below
cat(unlist(data_table), file="data_table3.csv")
# microbenchmark speed of data.table::fwrite()
library(microbenchmark)
summary(microbenchmark(
fwrite=data.table::fwrite(data_table, file="data_table.csv"),
write_csv=write.csv(data_table, file="data_table2.csv"),
cat=cat(unlist(data_table), file="data_table3.csv"),
times=10))[, c(1, 4, 5)] # end microbenchmark summary
# Select first five rows of data_table
data_table[1:5]
# Select rows with JFK flights
jfk_flights <- data_table[origin=="JFK"]
# Select rows JFK flights in June
jfk_flights <- data_table[origin=="JFK" & month==6]
# Select rows without JFK flights
jfk_flights <- data_table[!(origin=="JFK")]
# Select flights with carrier_delay
data_table[carrier_delay > 0]
# Select column of data_table and return a vector
head(data_table[, origin])
# Select column of data_table and return a data_table, not vector
# .( ) is data.table shorthand for list( )
head(data_table[, list(origin)])
head(data_table[, .(origin)])
# Select two columns of data_table
data_table[, list(origin, month)]
data_table[, .(origin, month)]
# The ..prefix looks up column_s in the calling scope, not as a column name
column_s <- c("origin", "month")
data_table[, ..column_s]
data_table[, month, origin]
# Select two columns and rename them
data_table[, .(orig=origin, mon=month)]
# Select all columns except origin
head(data_table[, !c("origin")])
head(data_table[, -c("origin")])
# Select flights with positive carrier_delay
data_table[carrier_delay > 0]
# Number of flights with carrier_delay
data_table[, sum(carrier_delay > 0)]
# Or standard R commands
sum(data_table[, carrier_delay > 0])
# microbenchmark speed of data.table syntax
summary(microbenchmark(
dt=data_table[, sum(carrier_delay > 0)],
pure_r=sum(data_table[, carrier_delay > 0]),
times=10))[, c(1, 4, 5)] # end microbenchmark summary
# Average carrier_delay
data_table[, mean(carrier_delay)]
# Average carrier_delay and aircraft_delay
data_table[, .(carrier=mean(carrier_delay),
aircraft=mean(aircraft_delay))]
# Average aircraft_delay from JFK
data_table[origin=="JFK", mean(aircraft_delay)]
# Number of flights from JFK
data_table[origin=="JFK", NROW(aircraft_delay)]
# Or
data_table[origin=="JFK", .N]
# Number of flights from each airport
data_table[, .N, by=origin]
# Same, but add names to output
data_table[, .(flights=.N), by=.(airport=origin)]
# Number of AA flights from each airport
data_table[carrier=="AA", .(flights=.N),
by=.(airport=origin)]
# Number of flights from each airport and airline
data_table[, .(flights=.N),
by=.(airport=origin, airline=carrier)]
# Average aircraft_delay
data_table[, mean(aircraft_delay)]
# Average aircraft_delay from JFK
data_table[origin=="JFK", mean(aircraft_delay)]
# Average aircraft_delay from each airport
data_table[, .(delay=mean(aircraft_delay)),
by=.(airport=origin)]
# Average and max delays from each airport and month
data_table[, .(mean_delay=mean(aircraft_delay), max_delay=max(aircraft_delay)),
by=.(airport=origin, month=month)]
# Average and max delays from each airport and month
# keyby also sorts the result by the grouping columns and sets them as the key
data_table[, .(mean_delay=mean(aircraft_delay), max_delay=max(aircraft_delay)),
keyby=.(airport=origin, month=month)]
# Sort ascending by origin, then descending by dest
order_table <- data_table[order(origin, -dest)]
order_table
# Doesn't work outside data_table
# (origin and dest are columns, not workspace variables — this errors by design)
order(origin, -dest)
# Sort data_table by reference
setorder(data_table, origin, -dest)
all.equal(data_table, order_table)
# setorder() is much faster than order()
summary(microbenchmark(
order=data_table[order(origin, -dest)],
setorder=setorder(data_table, origin, -dest),
times=10))[, c(1, 4, 5)] # end microbenchmark summary
# Average aircraft_delay by month
order_table[, .(mean_delay=mean(aircraft_delay)),
by=.(month=month)]
# Chained brackets to sort output by month
order_table[, .(mean_delay=mean(aircraft_delay)),
by=.(month=month)][order(month)]
# Select weather_delay and aircraft_delay in two different ways
data_table[1:7, .SD,
.SDcols=c("weather_delay", "aircraft_delay")]
data_table[1:7, .(weather_delay, aircraft_delay)]
# Calculate mean of weather_delay and aircraft_delay
data_table[, sapply(.SD, mean),
.SDcols=c("weather_delay", "aircraft_delay")]
sapply(data_table[, .SD,
.SDcols=c("weather_delay", "aircraft_delay")], mean)
# Return origin and dest, then all other columns
data_table[1:7, .SD, by=.(origin, dest)]
# Return origin and dest, then weather_delay and aircraft_delay columns
# .SDcols must be a single character vector; the original passed
# "aircraft_delay" as a stray unnamed argument, which is an error
data_table[1:7, .SD,
by=.(origin, dest),
.SDcols=c("weather_delay", "aircraft_delay")]
# Return first two rows from each month
data_table[, head(.SD, 2), by=.(month)]
data_table[, head(.SD, 2), by=.(month),
.SDcols=c("weather_delay", "aircraft_delay")]
# Calculate mean of weather_delay and aircraft_delay, grouped by origin
data_table[, lapply(.SD, mean),
by=.(origin),
.SDcols=c("weather_delay", "aircraft_delay")]
# Or simply
data_table[, .(weather_delay=mean(weather_delay),
aircraft_delay=mean(aircraft_delay)),
by=.(origin)]
# Add tot_delay column
# := adds/modifies columns by reference (no copy of data_table)
data_table[, tot_delay := (carrier_delay + aircraft_delay)]
head(data_table, 4)
# Delete tot_delay column
data_table[, tot_delay := NULL]
# Add max_delay column grouped by origin and dest
data_table[, max_delay := max(aircraft_delay),
by=.(origin, dest)]
data_table[, max_delay := NULL]
# Add date and tot_delay columns
data_table[, c("date", "tot_delay") :=
list(paste(month, day, year, sep="/"),
(carrier_delay + aircraft_delay))]
# Modify select rows of tot_delay column
data_table[month == 12, tot_delay := carrier_delay]
data_table[, c("date", "tot_delay") := NULL]
# Add several columns
data_table[, c("max_carrier", "max_aircraft") :=
lapply(.SD, max),
by=.(origin, dest),
.SDcols=c("carrier_delay", "aircraft_delay")]
data_table[, c("max_carrier", "max_aircraft") := NULL]
# Modifying by reference is much faster than standard R
summary(microbenchmark(
dt=data_table[, tot_delay := (carrier_delay + aircraft_delay)],
pure_r=(data_table[, "tot_delay"] <- data_table[, "carrier_delay"] + data_table[, "aircraft_delay"]),
times=10))[, c(1, 4, 5)] # end microbenchmark summary
# Add a key based on the "origin" column
# setkey() sorts the table by the key column and enables fast binary-search subsetting
setkey(data_table, origin)
haskey(data_table)
key(data_table)
# Select rows with LGA using the key
data_table["LGA"]
all.equal(data_table["LGA"],
data_table[origin == "LGA"])
# Select rows with LGA and JFK using the key
data_table[c("LGA", "JFK")]
# Add a key based on the "origin" and "dest" columns
setkey(data_table, origin, dest)
key(data_table)
# Select rows with origin from JFK and MIA
data_table[c("JFK", "MIA")]
# Select rows with origin from JFK and dest to MIA
# .( ) matches the first value against the first key column, the second against the second
data_table[.("JFK", "MIA")]
all.equal(data_table[.("JFK", "MIA")],
data_table[origin == "JFK" & dest == "MIA"])
# Selecting rows using a key is much faster than standard R
summary(microbenchmark(
with_key=data_table[.("JFK", "MIA")],
standard_r=data_table[origin == "JFK" & dest == "MIA"],
times=10))[, c(1, 4, 5)] # end microbenchmark summary
# Create data frame and coerce it to data table
data_table <- data.frame(
col1=sample(7), col2=sample(7), col3=sample(7))
class(data_table); data_table
# setDT() converts in place, without copying
data.table::setDT(data_table)
class(data_table); data_table
# Coerce data_table into data frame
data.table::setDF(data_table)
class(data_table); data_table
# Or
data_table <- data.table:::as.data.frame.data.table(data_table)
# SetDF() is much faster than as.data.frame()
summary(microbenchmark(
as.data.frame=data.table:::as.data.frame.data.table(data_table),
setDF=data.table::setDF(data_table),
times=10))[, c(1, 4, 5)] # end microbenchmark summary
# Coerce xts to a data frame
price_s <- rutils::etf_env$VTI
class(price_s); head(price_s)
price_s <- as.data.frame(price_s)
class(price_s); head(price_s)
# Coerce data frame to a data table
# keep.rownames=TRUE preserves the date index as a character column
data.table::setDT(price_s, keep.rownames=TRUE)
class(price_s); head(price_s)
# Dates are coerced to strings
sapply(price_s, class)
# Coerce xts directly to a data table
data_table <- as.data.table(rutils::etf_env$VTI,
keep.rownames=TRUE)
class(data_table); head(data_table)
# Dates are not coerced to strings
sapply(data_table, class)
all.equal(price_s, data_table, check.attributes=FALSE)
# Install package fst
install.packages("fst")
# Load package fst
library(fst)
# Get documentation for package fst
# Get short description
packageDescription("fst")
# Load help page
help(package="fst")
# List all datasets in "fst"
data(package="fst")
# List all objects in "fst"
ls("package:fst")
# Remove fst from search path
detach("package:fst")
# Read a data frame from CSV file
dir_name <- "C:/Develop/lecture_slides/data/"
file_name <- file.path(dir_name, "weather_delays14.csv")
# The original used data_frame before creating it — read the CSV first,
# then coerce the data.table to a data frame in place
data_frame <- data.table::fread(file_name)
data.table::setDF(data_frame)
class(data_frame); dim(data_frame)
# Write data frame to .fst file in different ways
fst::write_fst(data_frame, path="data_frame.fst")
write.csv(data_frame, file="data_frame2.csv")
# microbenchmark speed of fst::write_fst()
library(microbenchmark)
summary(microbenchmark(
# Write to the .fst path (the original wrote fst binary data to "data_frame.csv")
fst=fst::write_fst(data_frame, path="data_frame.fst"),
write_csv=write.csv(data_frame, file="data_frame2.csv"),
cat=cat(unlist(data_frame), file="data_frame3.csv"),
times=10))[, c(1, 4, 5)] # end microbenchmark summary
# fst::read_fst() reads the same data as read.csv()
all.equal(read.csv(file_name, stringsAsFactors=FALSE),
fst::read_fst("data_frame.fst"))
# fst::read_fst() is 10 times faster than read.csv()
summary(microbenchmark(
fst=fst::read_fst("data_frame.fst"),
read_csv=read.csv(file_name),
times=10))[, c(1, 4, 5)] # end microbenchmark summary
# Coerce TAQ xts to a data frame
library(HighFreq)
t_aq <- HighFreq::SPY_TAQ
t_aq <- as.data.frame(t_aq)
class(t_aq)
# Coerce data frame to a data table
data.table::setDT(t_aq, keep.rownames=TRUE)
class(t_aq); head(t_aq)
# Get memory size of data table
format(object.size(t_aq), units="MB")
# Save data table to .fst file
fst::write_fst(t_aq, path="C:/Develop/data/taq.fst")
# Create reference to .fst file similar to a data frame
# fst() opens the file lazily — rows are read from disk only when subset
fs_t <- fst::fst("C:/Develop/data/taq.fst")
class(fs_t)
# Memory size of reference to .fst is very small
format(object.size(fs_t), units="MB")
# Get sizes of all objects in workspace
sort(sapply(mget(ls()), object.size))
# reference to .fst can be treated similar to a data table
dim(t_aq); dim(fs_t)
fst:::print.fst_table(fs_t)
# Subset reference to .fst just like a data table
fs_t[1e4:(1e4+5), ]
library(rutils) # Load package rutils
etf_env <- new.env() # new environment for data
# Download data for sym_bols into etf_env from Alpha Vantage
# NOTE(review): sym_bols must already be defined in the workspace, and the
# API key is hard-coded — prefer reading it from an environment variable
getSymbols.av(sym_bols, adjust=TRUE, env=etf_env,
output.size="full", api.key="T7JPW54ES8G75310")
# getSymbols(sym_bols, env=etf_env, adjust=TRUE, from="2005-01-03")
library(rutils) # Load package rutils
ls(etf_env) # List files in etf_env
# Get class of object in etf_env
class(get(x=sym_bols[1], envir=etf_env))
# Another way
class(etf_env$VTI)
colnames(etf_env$VTI)
head(etf_env$VTI, 3)
# Get class of all objects in etf_env
eapply(etf_env, class)
# Get class of all objects in R workspace
lapply(ls(), function(ob_ject) class(get(ob_ject)))
library(rutils) # Load package rutils
# Check of object is an OHLC time series
is.OHLC(etf_env$VTI)
# Adjust single OHLC object using its name
etf_env$VTI <- adjustOHLC(etf_env$VTI,
use.Adjusted=TRUE)
# Adjust OHLC object using string as name
assign(sym_bols[1], adjustOHLC(
get(x=sym_bols[1], envir=etf_env),
use.Adjusted=TRUE),
envir=etf_env)
# Adjust objects in environment using vector of strings
for (sym_bol in ls(etf_env)) {
assign(sym_bol,
adjustOHLC(get(sym_bol, envir=etf_env),
use.Adjusted=TRUE),
envir=etf_env)
} # end for
library(rutils) # Load package rutils
# Extract and cbind all data, subset by symbols
price_s <- rutils::do_call(cbind,
as.list(etf_env)[sym_bols])
# Or
# price_s <- do.call(cbind,
# as.list(etf_env)[sym_bols])
# Extract and cbind adjusted prices, subset by symbols
price_s <- rutils::do_call(cbind,
lapply(as.list(etf_env)[sym_bols], Ad))
# Same, but works only for OHLC series
price_s <- rutils::do_call(cbind,
eapply(etf_env, Ad)[sym_bols])
# Drop ".Adjusted" from colnames
# strsplit on "." yields c(symbol, "Adjusted"); keep only the first row (symbols)
colnames(price_s) <-
sapply(colnames(price_s),
function(col_name)
strsplit(col_name, split="[.]")[[1]],
USE.NAMES=FALSE)[1, ]
head(price_s[, 1:2], 3)
# Which objects in global environment are class xts?
unlist(eapply(globalenv(), is.xts))
# Save xts to csv file
write.zoo(price_s,
file="etf_series.csv", sep=",")
# Copy price_s into etf_env and save to .RData file
assign("price_s", price_s, envir=etf_env)
save(etf_env, file="etf_data.RData")
# Extract VTI prices
vt_i <- etf_env$price_s[ ,"VTI"]
vt_i <- na.omit(vt_i)
# Calculate percentage returns "by hand"
# Lag the price series by one observation (first value repeated)
vti_lag <- as.numeric(vt_i)
vti_lag <- c(vti_lag[1], vti_lag[-NROW(vti_lag)])
vti_lag <- xts(vti_lag, index(vt_i))
vti_returns <- (vt_i-vti_lag)/vti_lag
# Calculate percentage returns using dailyReturn()
daily_returns <- quantmod::dailyReturn(vt_i)
head(cbind(daily_returns, vti_returns))
all.equal(daily_returns, vti_returns, check.attributes=FALSE)
# Calculate returns for all prices in etf_env$price_s
re_turns <- lapply(etf_env$price_s, function(x_ts) {
daily_returns <- quantmod::dailyReturn(na.omit(x_ts))
colnames(daily_returns) <- names(x_ts)
daily_returns
}) # end lapply
# "re_turns" is a list of xts
class(re_turns)
class(re_turns[[1]])
# Flatten list of xts into a single xts
re_turns <- rutils::do_call(cbind, re_turns)
class(re_turns)
dim(re_turns)
# Copy re_turns into etf_env and save to .RData file
assign("re_turns", re_turns, envir=etf_env)
save(etf_env, file="etf_data.RData")
library(quantmod)
start_date <- "2012-05-10"; end_date <- "2013-11-20"
# Select all objects in environment and return as environment
# "[" with "start/end" performs xts date-range subsetting on each series
new_env <- as.environment(eapply(etf_env, "[",
paste(start_date, end_date, sep="/")))
# Select only sym_bols in environment and return as environment
new_env <- as.environment(
lapply(as.list(etf_env)[sym_bols], "[",
paste(start_date, end_date, sep="/")))
# Extract and cbind adjusted prices and return to environment
assign("price_s", rutils::do_call(cbind,
lapply(ls(etf_env), function(sym_bol) {
x_ts <- Ad(get(sym_bol, etf_env))
colnames(x_ts) <- sym_bol
x_ts
})), envir=new_env)
# Get sizes of OHLC xts series in etf_env
sapply(mget(sym_bols, envir=etf_env), object.size)
# Extract and cbind adjusted prices and return to environment
# Helper: strip the field suffix, keeping only the ticker part of a colname
col_name <- function(x_ts)
strsplit(colnames(x_ts), split="[.]")[[1]][1]
assign("price_s", rutils::do_call(cbind,
lapply(mget(etf_env$sym_bols, envir=etf_env),
function(x_ts) {
x_ts <- Ad(x_ts)
colnames(x_ts) <- col_name(x_ts)
x_ts
})), envir=new_env)
# Load data frame of S&P500 constituents from CSV file
sp_500 <- read.csv(file="C:/Develop/lecture_slides/data/sp500_WRDS_08-30-17.csv", stringsAsFactors=FALSE)
# Inspect data frame of S&P500 constituents
dim(sp_500)
colnames(sp_500)
# Extract tickers from the column co_tic
sym_bols <- sp_500$co_tic
# Get duplicate tickers
ta_ble <- table(sym_bols)
dupli_cate <- ta_ble[ta_ble>1]
dupli_cate <- names(dupli_cate)
# Get duplicate records (rows) of sp_500
sp_500[sym_bols %in% dupli_cate, ]
# Get unique tickers
sym_bols <- unique(sym_bols)
# Find index of ticker "BRK.B"
which(sym_bols=="BRK.B")
# Remove "BRK.B" and later download it separately
sym_bols <- sym_bols[-which(sym_bols=="BRK.B")]
# Load package rutils
library(rutils)
# Create new environment for data
env_sp500 <- new.env()
# Boolean vector of symbols already downloaded
down_loaded <- sym_bols %in% ls(env_sp500)
# Download in while loop from Tiingo and copy into environment
# Retries failed downloads up to 5 times, pausing between attempts
# NOTE(review): hard-coded API key — prefer reading from an environment variable
at_tempt <- 0 # number of download attempts
while (((sum(!down_loaded)) > 0) & (at_tempt<5)) {
# Download data and copy it into environment
at_tempt <- at_tempt + 1
cat("Download attempt = ", at_tempt, "\n")
for (sym_bol in sym_bols[!down_loaded]) {
cat("processing: ", sym_bol, "\n")
tryCatch( # With error handler
getSymbols(sym_bol, src="tiingo", adjust=TRUE, auto.assign=TRUE,
from="1990-01-01", env=env_sp500, api.key="d84fc2a9c5bde2d68e33034f65a838092c6b9f10"),
# Error handler captures error condition
error=function(error_cond) {
print(paste("error handler: ", error_cond))
}, # end error handler
finally=print(paste("sym_bol=", sym_bol))
) # end tryCatch
} # end for
# Update vector of symbols already downloaded
down_loaded <- sym_bols %in% ls(env_sp500)
Sys.sleep(10) # Wait 10 seconds until next attempt
} # end while
class(env_sp500$AAPL)
class(index(env_sp500$AAPL))
library(quantmod)
# Rename "LOW" colnames to "LO_WES"
# (avoids the name clash between ticker LOW and OHLC column suffixes)
colnames(env_sp500$LOW) <- paste("LO_WES",
sapply(strsplit(colnames(env_sp500$LOW), split="[.]"),
function(col_name) col_name[2]), sep=".")
env_sp500$LO_WES <- env_sp500$LOW[, unique(colnames(env_sp500$LOW))]
rm(LOW, envir=env_sp500)
chart_Series(x=env_sp500$LO_WES["2017-06/"],
TA="add_Vo()", name="LOWES stock")
# Download "BRK.B" separately with auto.assign=FALSE
BRK_B <- getSymbols("BRK-B", auto.assign=FALSE, src="tiingo", adjust=TRUE, from="1990-01-01", api.key="j84ac2b9c5bde2d68e33034f65d838092c6c9f10")
colnames(BRK_B) <- paste("BRK_B",
sapply(strsplit(colnames(BRK_B), split="[.]"),
function(col_name) col_name[2]), sep=".")
env_sp500$BRK_B <- BRK_B
# Rename "BF-B" colnames to "BF_B"
colnames(env_sp500$"BF-B") <- paste("BF_B",
sapply(strsplit(colnames(env_sp500$"BF-B"), split="[.]"),
function(col_name) col_name[2]), sep=".")
names(colnames(env_sp500$"BF-B")) <- NULL
env_sp500$BF_B <- env_sp500$"BF-B"
rm("BF-B", envir=env_sp500)
class(env_sp500$AAPL)
# The date-time index is class POSIXct not Date
class(index(env_sp500$AAPL))
# Coerce time indices from class POSIXct to class Date
for (sym_bol in ls(env_sp500)) {
x_ts <- get(sym_bol, envir=env_sp500)
index(x_ts) <- as.Date(index(x_ts))
assign(sym_bol, x_ts, envir=env_sp500)
} # end for
class(index(env_sp500$AAPL))
# Save the environment to compressed .RData file
dir_name <- "C:/Develop/lecture_slides/data/"
save(env_sp500, file=paste0(dir_name, "sp500.RData"))
# Save the ETF prices into CSV files
dir_name <- "C:/Develop/lecture_slides/data/SP500/"
for (sym_bol in ls(env_sp500)) {
# env_sp500$sym_bol would look up an object literally named "sym_bol";
# get() fetches the object whose name is the value of the loop variable
zoo::write.zoo(get(sym_bol, envir=env_sp500), file=paste0(dir_name, sym_bol, ".csv"))
} # end for
# Or using lapply()
# Returns the symbol names so the written files can be listed afterwards
file_names <- lapply(ls(env_sp500), function(sym_bol) {
x_ts <- get(sym_bol, envir=env_sp500)
zoo::write.zoo(x_ts, file=paste0(dir_name, sym_bol, ".csv"))
sym_bol
}) # end lapply
unlist(file_names)
# Or using eapply() and data.table::fwrite()
file_names <- eapply(env_sp500 , function(x_ts) {
file_name <- rutils::get_name(colnames(x_ts)[1])
data.table::fwrite(data.table::as.data.table(x_ts), file=paste0(dir_name, file_name, ".csv"))
file_name
}) # end eapply
unlist(file_names)
# Load the environment from compressed .RData file
dir_name <- "C:/Develop/lecture_slides/data/"
load(file=paste0(dir_name, "sp500.RData"))
# Get all the .csv file names in the directory
dir_name <- "C:/Develop/lecture_slides/data/SP500/"
file_names <- Sys.glob(paste0(dir_name, "*.csv"))
# Create new environment for data
env_sp500 <- new.env()
# Read each CSV, recover the ticker from the first colname, and store as xts
for (file_name in file_names) {
x_ts <- xts::as.xts(zoo::read.csv.zoo(file_name))
sym_bol <- strsplit(colnames(x_ts), split="[.]")[[1]][1]
assign(sym_bol, x_ts, envir=env_sp500)
} # end for
# Or using fread()
for (file_name in file_names) {
x_ts <- data.table::fread(file_name)
data.table::setDF(x_ts)
# First column holds the dates; the rest are the OHLC fields
x_ts <- xts::xts(x_ts[, -1], as.Date(x_ts[, 1]))
sym_bol <- strsplit(colnames(x_ts), split="[.]")[[1]][1]
assign(sym_bol, x_ts, envir=env_sp500)
} # end for
# Download in while loop from Alpha Vantage and copy into environment
down_loaded <- sym_bols %in% ls(env_sp500)
at_tempt <- 0
while (((sum(!down_loaded)) > 0) & (at_tempt<10)) {
# Download data and copy it into environment
at_tempt <- at_tempt + 1
for (sym_bol in sym_bols[!down_loaded]) {
cat("processing: ", sym_bol, "\n")
tryCatch( # With error handler
getSymbols(sym_bol, src="av", adjust=TRUE, auto.assign=TRUE, env=env_sp500,
output.size="full", api.key="T7JPW54ES8G75310"),
# error handler captures error condition
error=function(error_cond) {
print(paste("error handler: ", error_cond))
}, # end error handler
finally=print(paste("sym_bol=", sym_bol))
) # end tryCatch
} # end for
# Update vector of symbols already downloaded
down_loaded <- sym_bols %in% ls(env_sp500)
Sys.sleep(10) # Wait 10 seconds until next attempt
} # end while
# Adjust all OHLC prices in environment
for (sym_bol in ls(env_sp500)) {
assign(sym_bol,
adjustOHLC(get(x=sym_bol, envir=env_sp500), use.Adjusted=TRUE),
envir=env_sp500)
} # end for
library(rutils) # Load package rutils
# Assign name SP500 to ^GSPC symbol
setSymbolLookup(
SP500=list(name="^GSPC", src="yahoo"))
getSymbolLookup()
# view and clear options
options("getSymbols.sources")
options(getSymbols.sources=NULL)
# Download S&P500 prices into etf_env
getSymbols("SP500", env=etf_env,
adjust=TRUE, auto.assign=TRUE, from="1990-01-01")
chart_Series(x=etf_env$SP500["2016/"],
TA="add_Vo()",
name="S&P500 index")
library(rutils) # Load package rutils
# Assign name DJIA to ^DJI symbol
setSymbolLookup(
DJIA=list(name="^DJI", src="yahoo"))
getSymbolLookup()
# view and clear options
options("getSymbols.sources")
options(getSymbols.sources=NULL)
# Download DJIA prices into etf_env
getSymbols("DJIA", env=etf_env,
adjust=TRUE, auto.assign=TRUE, from="1990-01-01")
chart_Series(x=etf_env$DJIA["2016/"],
TA="add_Vo()",
name="DJIA index")
library(rutils) # Load package rutils
library(RCurl) # Load package RCurl
library(XML) # Load package XML
# Download text data from URL
sp_500 <- getURL(
"https://en.wikipedia.org/wiki/List_of_S%26P_500_companies")
# Extract tables from the text data
sp_500 <- readHTMLTable(sp_500,
stringsAsFactors=FALSE)
str(sp_500)
# Extract colnames of data frames
lapply(sp_500, colnames)
# Extract S&P500 constituents
sp_500 <- sp_500[[1]]
head(sp_500)
# Create valid R names from symbols containing "-" or "."characters
sp_500$names <- gsub("-", "_", sp_500$Ticker)
sp_500$names <- gsub("[.]", "_", sp_500$names)
# Write data frame of S&P500 constituents to CSV file
write.csv(sp_500,
file="C:/Develop/lecture_slides/data/sp500_Yahoo.csv",
row.names=FALSE)
library(rutils) # Load package rutils
# Load data frame of S&P500 constituents from CSV file
sp_500 <- read.csv(file="C:/Develop/lecture_slides/data/sp500_Yahoo.csv",
stringsAsFactors=FALSE)
# Register symbols corresponding to R names
for (in_dex in 1:NROW(sp_500)) {
cat("processing: ", sp_500$Ticker[in_dex], "\n")
setSymbolLookup(structure(
list(list(name=sp_500$Ticker[in_dex])),
names=sp_500$names[in_dex]))
} # end for
env_sp500 <- new.env() # new environment for data
# Remove all files (if necessary)
rm(list=ls(env_sp500), envir=env_sp500)
# Download data and copy it into environment
rutils::get_symbols(sp_500$names,
env_out=env_sp500, start_date="1990-01-01")
# Or download in loop
for (sym_bol in sp_500$names) {
cat("processing: ", sym_bol, "\n")
rutils::get_symbols(sym_bol,
env_out=env_sp500, start_date="1990-01-01")
} # end for
save(env_sp500, file="C:/Develop/lecture_slides/data/sp500.RData")
chart_Series(x=env_sp500$BRK_B["2016/"], TA="add_Vo()",
name="BRK-B stock")
library(quantmod)
# Download U.S. unemployment rate data
unemp_rate <- getSymbols("UNRATE",
auto.assign=FALSE,
src="FRED")
# Plot U.S. unemployment rate data
chart_Series(unemp_rate["1990/"],
name="U.S. unemployment rate")
library(rutils) # Load package rutils
install.packages("devtools")
library(devtools)
# Install package Quandl from github
install_github("quandl/R-package")
library(Quandl) # Load package Quandl
# Register Quandl API key
Quandl.api_key("pVJi9Nv3V8CD3Js5s7Qx")
# get short description
packageDescription("Quandl")
# Load help page
help(package="Quandl")
# Remove Quandl from search path
detach("package:Quandl")
library(rutils) # Load package rutils
# Download EOD AAPL prices from WIKI free database
price_s <- Quandl(code="WIKI/AAPL",
  type="xts", start_date="1990-01-01")
x11(width=14, height=7)
# Plot the OHLC columns only (columns 1:4)
chart_Series(price_s["2016", 1:4],
  name="AAPL OHLC prices")
# Add trade volume (column 5) in extra panel
add_TA(price_s["2016", 5])
# Download euro currency rates
price_s <- Quandl(code="BNP/USDEUR",
  start_date="2013-01-01",
  end_date="2013-12-01", type="xts")
# Download multiple time series in one call
price_s <- Quandl(code=c("NSE/OIL", "WIKI/AAPL"),
  start_date="2013-01-01", type="xts")
# Download AAPL gross profits (quarterly fundamentals)
prof_it <- Quandl("RAYMOND/AAPL_GROSS_PROFIT_Q",
  type="xts")
chart_Series(prof_it, name="AAPL gross profits")
# Download Hurst time series
price_s <- Quandl(code="PE/AAPL_HURST",
  start_date="2013-01-01", type="xts")
chart_Series(price_s["2016/", 1],
  name="AAPL Hurst")
library(rutils) # Load package rutils
# Load S&P500 stock Quandl codes
sp_500 <- read.csv(
  file="C:/Develop/lecture_slides/data/sp500_quandl.csv",
  stringsAsFactors=FALSE)
# Replace "-" with "_" in symbols (so they are valid R names)
sp_500$free_code <-
  gsub("-", "_", sp_500$free_code)
head(sp_500)
# vector of symbols in sp_500 frame
tick_ers <- gsub("-", "_", sp_500$ticker)
# Or: split codes like "WIKI/AAPL" and keep the part after the slash
tick_ers <- matrix(unlist(
  strsplit(sp_500$free_code, split="/"),
  use.names=FALSE), ncol=2, byrow=TRUE)[, 2]
# Or: same result via rutils::do_call_rbind
tick_ers <- do_call_rbind(
  strsplit(sp_500$free_code, split="/"))[, 2]
library(rutils) # Load package rutils
env_sp500 <- new.env() # new environment for data
# Remove all objects from the environment (if necessary)
rm(list=ls(env_sp500), envir=env_sp500)
# Boolean vector of symbols already downloaded
down_loaded <- tick_ers %in% ls(env_sp500)
# Download data and copy it into environment
for (tick_er in tick_ers[!down_loaded]) {
  cat("processing: ", tick_er, "\n")
  # Drop the first 7 columns, keeping the last 5 which are renamed
  # to OHLCV below (presumably the adjusted prices - TODO confirm
  # against the WIKI column layout)
  da_ta <- Quandl(code=paste0("WIKI/", tick_er),
    start_date="1990-01-01",
    type="xts")[, -(1:7)]
  colnames(da_ta) <- paste(tick_er,
    c("Open", "High", "Low", "Close", "Volume"), sep=".")
  assign(tick_er, da_ta, envir=env_sp500)
} # end for
save(env_sp500, file="C:/Develop/lecture_slides/data/sp500.RData")
chart_Series(x=env_sp500$XOM["2016/"], TA="add_Vo()",
  name="XOM stock")
library(rutils)
library(Quandl)
# Register Quandl API key
# NOTE(review): secret API key hard-coded in source; load it from an
# environment variable instead and revoke this key.
Quandl.api_key("pVJi9Nv3V8CD3Js5s7Qx")
# Download E-mini S&P500 futures prices
price_s <- Quandl(code="CHRIS/CME_ES1",
  type="xts", start_date="1990-01-01")
price_s <- price_s[, c("Open", "High", "Low", "Last", "Volume")]
# Rename "Last" to "Close" so OHLC plotting functions recognize it
colnames(price_s)[4] <- "Close"
# Plot the prices
x11(width=5, height=4) # Open x11 for plotting
chart_Series(x=price_s["2008-06/2009-06"],
  TA="add_Vo()",
  name="S&P500 Futures")
# Plot dygraph candlestick chart (drop the Volume column 5)
dygraphs::dygraph(price_s["2008-06/2009-06", -5],
  main="S&P500 Futures") %>%
  dyCandlestick()
# Read the CBOE futures expiration dates and download the historical
# data file for each expiration, logging warnings and errors to a file.
date_s <- read.csv(file="C:/Develop/lecture_slides/data/futures_expiration_dates_codes.csv",
  stringsAsFactors=FALSE, row.names=1)
dir_name <- "C:/Develop/data/vix_data"
# showWarnings=FALSE avoids a spurious warning when the directory already exists
dir.create(dir_name, showWarnings=FALSE)
sym_bols <- rownames(date_s)
file_names <- file.path(dir_name, paste0(sym_bols, ".csv"))
log_file <- file.path(dir_name, "log_file.txt")
cboe_url <- "https://markets.cboe.com/us/futures/market_statistics/historical_data/products/csv/VX/"
url_s <- paste0(cboe_url, date_s[, 1])
# Download files in loop
for (it in seq_along(url_s)) {
  tryCatch( # Warning and error handler
    download.file(url_s[it],
      destfile=file_names[it], quiet=TRUE),
    # Warning handler captures warning condition
    warning=function(warning_cond) {
      cat(paste("warning handler: ", warning_cond, "\n"), file=log_file, append=TRUE)
    }, # end warning handler
    # Error handler captures error condition
    # BUG FIX: write to the log file - the original omitted file=log_file,
    # so append=TRUE was a no-op and error messages went to the console
    error=function(error_cond) {
      cat(paste("error handler: ", error_cond, "\n"), file=log_file, append=TRUE)
    }, # end error handler
    # BUG FIX: same missing file=log_file in the progress line
    finally=cat(paste("Processing file name =", file_names[it], "\n"), file=log_file, append=TRUE)
  ) # end tryCatch
} # end for
# Create new environment for data
vix_env <- new.env()
# Download VIX futures data for all 12 months of 2018
# (NOTE(review): the original comment said months 6-8, but Months=1:12
# requests every month of the year)
library(qmao)
quantmod::getSymbols("VX", Months=1:12,
  Years=2018, src="cfe", auto.assign=TRUE, env=vix_env)
# Or download only June-August by calling qmao directly
qmao::getSymbols.cfe(Symbols="VX",
  Months=6:8, Years=2018, env=vix_env,
  verbose=FALSE, auto.assign=TRUE)
# Calculate the classes of all the objects
# in the environment vix_env
unlist(eapply(vix_env,
  function(x) {class(x)[1]}))
class(vix_env$VX_M18)
colnames(vix_env$VX_M18)
# Save the data to a binary file called "vix_cboe.RData".
save(vix_env,
  file="C:/Develop/data/vix_data/vix_cboe.RData")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chop.R
\name{chop}
\alias{chop}
\alias{unchop}
\title{Chop and unchop}
\usage{
chop(data, cols)
unchop(data, cols, keep_empty = FALSE, ptype = NULL)
}
\arguments{
\item{data}{A data frame.}
\item{cols}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> Columns to chop or unchop
(automatically quoted).
For \code{unchop()}, each column should be a list-column containing generalised
vectors (e.g. any mix of \code{NULL}s, atomic vectors, S3 vectors, lists,
or data frames).}
\item{keep_empty}{By default, you get one row of output for each element
of the list you're unchopping/unnesting. This means that if there's a
size-0 element (like \code{NULL} or an empty data frame), that entire row
will be dropped from the output. If you want to preserve all rows,
use \code{keep_empty = TRUE} to replace size-0 elements with a single row
of missing values.}
\item{ptype}{Optionally, supply a data frame prototype for the output \code{cols},
overriding the default that will be guessed from the combination of
individual values.}
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#maturing}{\figure{lifecycle-maturing.svg}{options: alt='[Maturing]'}}}{\strong{[Maturing]}}
Chopping and unchopping preserve the width of a data frame, changing its
length. \code{chop()} makes \code{df} shorter by converting rows within each group
into list-columns. \code{unchop()} makes \code{df} longer by expanding list-columns
so that each element of the list-column gets its own row in the output.
\code{chop()} and \code{unchop()} are building blocks for more complicated functions
(like \code{\link[=unnest]{unnest()}}, \code{\link[=unnest_longer]{unnest_longer()}}, and \code{\link[=unnest_wider]{unnest_wider()}}) and are generally
more suitable for programming than interactive data analysis.
}
\details{
Generally, unchopping is more useful than chopping because it simplifies
a complex data structure, and \code{\link[=nest]{nest()}}ing is usually more appropriate
than \code{chop()}ing, since it better preserves the connections between
observations.
\code{chop()} creates list-columns of class \code{\link[vctrs:list_of]{vctrs::list_of()}} to ensure
consistent behaviour when the chopped data frame is emptied. For
instance this helps getting back the original column types after
the roundtrip chop and unchop. Because \verb{<list_of>} keeps track of
the type of its elements, \code{unchop()} is able to reconstitute the
correct vector type even for empty list-columns.
}
\examples{
# Chop ==============================================================
df <- tibble(x = c(1, 1, 1, 2, 2, 3), y = 1:6, z = 6:1)
# Note that we get one row of output for each unique combination of
# non-chopped variables
df \%>\% chop(c(y, z))
# cf nest
df \%>\% nest(data = c(y, z))
# Unchop ============================================================
df <- tibble(x = 1:4, y = list(integer(), 1L, 1:2, 1:3))
df \%>\% unchop(y)
df \%>\% unchop(y, keep_empty = TRUE)
# Incompatible types -------------------------------------------------
# If the list-col contains types that cannot be natively combined,
df <- tibble(x = 1:2, y = list("1", 1:3))
try(df \%>\% unchop(y))
# Unchopping data frames -----------------------------------------------------
# Unchopping a list-col of data frames must generate a df-col because
# unchop leaves the column names unchanged
df <- tibble(x = 1:3, y = list(NULL, tibble(x = 1), tibble(y = 1:2)))
df \%>\% unchop(y)
df \%>\% unchop(y, keep_empty = TRUE)
}
| /man/chop.Rd | permissive | averiperny/tidyr | R | false | true | 3,573 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chop.R
\name{chop}
\alias{chop}
\alias{unchop}
\title{Chop and unchop}
\usage{
chop(data, cols)
unchop(data, cols, keep_empty = FALSE, ptype = NULL)
}
\arguments{
\item{data}{A data frame.}
\item{cols}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> Columns to chop or unchop
(automatically quoted).
For \code{unchop()}, each column should be a list-column containing generalised
vectors (e.g. any mix of \code{NULL}s, atomic vectors, S3 vectors, lists,
or data frames).}
\item{keep_empty}{By default, you get one row of output for each element
of the list you're unchopping/unnesting. This means that if there's a
size-0 element (like \code{NULL} or an empty data frame), that entire row
will be dropped from the output. If you want to preserve all rows,
use \code{keep_empty = TRUE} to replace size-0 elements with a single row
of missing values.}
\item{ptype}{Optionally, supply a data frame prototype for the output \code{cols},
overriding the default that will be guessed from the combination of
individual values.}
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#maturing}{\figure{lifecycle-maturing.svg}{options: alt='[Maturing]'}}}{\strong{[Maturing]}}
Chopping and unchopping preserve the width of a data frame, changing its
length. \code{chop()} makes \code{df} shorter by converting rows within each group
into list-columns. \code{unchop()} makes \code{df} longer by expanding list-columns
so that each element of the list-column gets its own row in the output.
\code{chop()} and \code{unchop()} are building blocks for more complicated functions
(like \code{\link[=unnest]{unnest()}}, \code{\link[=unnest_longer]{unnest_longer()}}, and \code{\link[=unnest_wider]{unnest_wider()}}) and are generally
more suitable for programming than interactive data analysis.
}
\details{
Generally, unchopping is more useful than chopping because it simplifies
a complex data structure, and \code{\link[=nest]{nest()}}ing is usually more appropriate
than \code{chop()}ing, since it better preserves the connections between
observations.
\code{chop()} creates list-columns of class \code{\link[vctrs:list_of]{vctrs::list_of()}} to ensure
consistent behaviour when the chopped data frame is emptied. For
instance this helps getting back the original column types after
the roundtrip chop and unchop. Because \verb{<list_of>} keeps track of
the type of its elements, \code{unchop()} is able to reconstitute the
correct vector type even for empty list-columns.
}
\examples{
# Chop ==============================================================
df <- tibble(x = c(1, 1, 1, 2, 2, 3), y = 1:6, z = 6:1)
# Note that we get one row of output for each unique combination of
# non-chopped variables
df \%>\% chop(c(y, z))
# cf nest
df \%>\% nest(data = c(y, z))
# Unchop ============================================================
df <- tibble(x = 1:4, y = list(integer(), 1L, 1:2, 1:3))
df \%>\% unchop(y)
df \%>\% unchop(y, keep_empty = TRUE)
# Incompatible types -------------------------------------------------
# If the list-col contains types that cannot be natively combined,
df <- tibble(x = 1:2, y = list("1", 1:3))
try(df \%>\% unchop(y))
# Unchopping data frames -----------------------------------------------------
# Unchopping a list-col of data frames must generate a df-col because
# unchop leaves the column names unchanged
df <- tibble(x = 1:3, y = list(NULL, tibble(x = 1), tibble(y = 1:2)))
df \%>\% unchop(y)
df \%>\% unchop(y, keep_empty = TRUE)
}
|
##----------------------------------------------------------------------------##
## HTTP GET ##
##----------------------------------------------------------------------------##
#' TWIT
#'
#' @description Base function responsible for formulating GET and
#' POST requests to Twitter API's.
#'
#' @param get Logical with the default, \code{get = TRUE},
#' indicating whether the provided url should be passed along via
#' a GET or POST request.
#' @param url Character vector designed to operate like
#' parse_url and build_url functions in the httr package.
#' The easiest way to do this is to work through
#' the call-specific functions as they are designed to simplify
#' the process. However, if one were interested in reverse-
#' engingeering such a thing, I would recommend checking out
#' \code{make_url}.
#' @param \dots Further named parameters, such as config, token,
#' etc, passed on to modify_url in the httr package.
#' @note Occasionally Twitter does recommend using POST requests
#' for data retrieval calls. This is usually the case when requests
#' can involve long strings (containing up to 100 user_ids). For
#' the most part, or at least for any function-specific requests
#' (e.g., \code{get_friends}, take reflect these changes.
#' @return json response object
#' @importFrom httr GET POST timeout write_disk progress
#' @keywords internal
#' @noRd
TWIT <- function(get = TRUE, url, ...) {
  # Select the httr verb and forward the url plus any extra
  # httr options (config, token, timeout, ...).
  verb <- if (get) GET else POST
  verb(url, ...)
}
#' make_url
#'
#' @param restapi logical Default \code{restapi = TRUE}
#' indicates the provided URL components should be
#' specify Twitter's REST API. Set this to FALSE if you wish
#' to make a request URL designed for Twitter's streaming api.
#' @param query Twitter's subsetting/topic identifiers.
#' Although the httr package refers to this as "path",
#' query is used here to maintain consistency with
#' Twitter API's excellent documentation.
#' @param param Additional parameters (arguments) passed
#' along. If none, NULL (default).
#' @return URL used in httr call.
#' @keywords internal
#' @noRd
make_url <- function(restapi = TRUE, query, param = NULL) {
  # Build an httr-style "url" object pointing at either the REST or
  # the streaming Twitter API host.
  host <- if (restapi) "api.twitter.com" else "stream.twitter.com"
  url <- list(
    scheme = "https",
    hostname = host,
    port = NULL,
    path = paste0("1.1/", query, ".json"),
    query = param,
    params = NULL,
    fragment = NULL,
    username = NULL,
    password = NULL
  )
  class(url) <- "url"
  url
}
##----------------------------------------------------------------------------##
## scroll ##
##----------------------------------------------------------------------------##
# Paginate through a sequence of Twitter API responses.
# Repeatedly GETs `url`, parses each JSON page, and advances the
# pagination token (`cursor` or `max_id` query parameter) until either
# roughly `n` records have been collected, `n.times` requests have been
# made, or a break condition is hit (empty page, API error object, or
# a stalled max_id - see break_check()).
# Returns a list of parsed response pages (empty slots dropped).
scroller <- function(url, n, n.times, type = NULL, ...) {
  ## check args
  stopifnot(is_n(n), is_url(url))
  ## if n.times is missing, make a single request
  if (missing(n.times)) n.times <- 1L
  ## initialize result list and record counter
  x <- vector("list", n.times)
  counter <- 0L
  for (i in seq_along(x)) {
    ## send GET request
    x[[i]] <- httr::GET(url, ...)
    warn_for_twitter_status(x[[i]])
    ## if NULL (error) break
    if (is.null(x[[i]])) break
    ## convert from json to R list
    x[[i]] <- from_js(x[[i]])
    ## if the page is empty (or its "statuses" element is empty), break
    if (any(length(x[[i]]) == 0L,
      all("statuses" %in% names(x[[i]]),
        length(x[[i]][['statuses']]) == 0L))) {
      break
    }
    ## an "errors" element means the API refused the request:
    ## warn, replace the page with an empty placeholder, and stop
    if (has_name_(x[[i]], "errors")) {
      warning(x[[i]]$errors[["message"]], call. = FALSE)
      x[[i]] <- list(data.frame())
      break
    }
    ## accumulate the (estimated) number of records fetched so far
    counter <- counter +
      as.numeric(unique_id_count(x[[i]], type = type))
    if (counter > n) break
    ## check other possible fails (NULL/zero/stalled max_id)
    if (break_check(x[[i]], url)) break
    ## advance pagination: cursor-style endpoints update `cursor`,
    ## timeline-style endpoints update `max_id`
    if ("cursor" %in% names(url$query)) {
      url$query$cursor <- get_max_id(x[[i]])
    } else {
      url$query$max_id <- get_max_id(x[[i]])
    }
  }
  ## drop unused slots (loop broke before filling all n.times pages)
  if (is.null(names(x))) {
    x <- x[lengths(x) > 0]
  }
  x
}
unique_id_count <- function(x, type = NULL) {
  # Known endpoint types have fixed page sizes, so skip counting.
  if (!is.null(type)) {
    page_size <- switch(type, search = 100, timeline = 200, followers = 5000)
    if (!is.null(page_size)) return(page_size)
  }
  # Otherwise count the unique record IDs found in the parsed page(s).
  ids <- if (isTRUE(length(x) > 1L) && !is.null(names(x[[2]]))) {
    unlist(lapply(x, unique_id), use.names = FALSE)
  } else {
    unique_id(x)
  }
  if (is.null(ids) || identical(length(ids), 0L)) {
    return(0)
  }
  length(unique(ids))
}
unique_id <- function(x) {
  # Search responses wrap the tweets in a "statuses" element.
  if ("statuses" %in% tolower(names(x))) {
    x <- x[["statuses"]]
  }
  # Try the common ID fields in order of preference.
  for (field in c("id_str", "ids", "id")) {
    if (field %in% names(x)) {
      return(x[[field]])
    }
  }
  if ("ids" %in% names(x[[1]])) {
    return(x[[1]][["ids"]])
  }
  for (field in c("status_id", "user_id")) {
    if (field %in% names(x)) {
      return(x[[field]])
    }
  }
  NULL
}
break_check <- function(r, url, count = NULL) {
  # A non-positive remaining count means pagination is done.
  if (!is.null(count) && as.numeric(count) <= 0) {
    return(TRUE)
  }
  # A NULL response means the request failed.
  if (is.null(r)) {
    return(TRUE)
  }
  max_id <- get_max_id(r)
  # No pagination ID, or a zero ID, means there is nothing left to fetch.
  if (is.null(max_id) || identical(max_id, 0) || identical(max_id, "0")) {
    return(TRUE)
  }
  # If max_id did not advance since the previous request, stop looping.
  if ("max_id" %in% names(url$query) && !is.null(url$query$max_id) &&
      identical(as.character(max_id), as.character(url$query$max_id))) {
    return(TRUE)
  }
  FALSE
}
##----------------------------------------------------------------------------##
## format_date ##
##----------------------------------------------------------------------------#
# Parse Twitter "created_at" timestamps into POSIXct.
# The format strings target dates like "Wed Aug 27 13:08:45 +0000 2008",
# but which format/locale combination actually parses varies across
# platforms, so a sequence of attempts is tried and the first one that
# yields non-NA values wins. If every attempt fails, the raw input is
# returned unchanged.
format_date <- function(x, tz = "UTC") {
  o <- tryCatch(
    as.POSIXct(
      x,
      format = "%a %b %d %T %z %Y",
      tz = tz
    ),
    error = function(e) return(NULL)
  )
  # Fallback: spell out %T as %H:%M:%S (not all platforms support %T).
  if (any(is.null(o), all(is.na.quiet(o)))) {
    o <- tryCatch(as.POSIXct(
      x,
      format = "%a %b %d %H:%M:%S %z %Y",
      tz = tz,
      origin = "1970-01-01"),
      error = function(e) return(NULL))
  }
  # Fallback: same format without forcing the timezone.
  if (any(is.null(o), all(is.na.quiet(o)))) {
    o <- tryCatch(as.POSIXct(
      x,
      format = "%a %b %d %H:%M:%S %z %Y"),
      error = function(e) return(NULL))
  }
  # Fallback: day/month-name parsing depends on LC_TIME, so retry under
  # the "C" locale, restoring the user's locale on exit.
  if (any(is.null(o), all(is.na.quiet(o)))) {
    curLocale <- Sys.getlocale("LC_TIME")
    on.exit(
      Sys.setlocale("LC_TIME", curLocale)
      # NOTE(review): add = TRUE is commented out; fine while this is
      # the only on.exit() here, but fragile if another one is added.
      ##add = TRUE
    )
    Sys.setlocale("LC_TIME", "C")
    o <- tryCatch(as.POSIXct(
      x,
      tz = tz,
      format = "%a, %d %b %Y %H:%M:%S +0000"),
      error = function(e) return(NULL)
    )
  }
  # Fallback: literal "+0000" offset placed before the year.
  if (any(is.null(o), all(is.na.quiet(o)))) {
    o <- tryCatch(as.POSIXct(
      x, tz = tz,
      format = "%a %b %d %H:%M:%S +0000 %Y"),
      error = function(e) return(NULL))
  }
  # Last resort: numeric offset in the local timezone.
  if (any(is.null(o), all(is.na.quiet(o)))) {
    o <- tryCatch(as.POSIXct(
      x, format = "%a %b %d %H:%M:%S %z %Y"),
      error = function(e) return(NULL))
  }
  # Give up: return the raw input so callers still get something.
  if (any(is.null(o), all(is.na.quiet(o)))) {
    o <- x
  }
  o
}
##----------------------------------------------------------------------------##
## fetch/return features ##
##----------------------------------------------------------------------------##
# Walk down a nested list `x` following the sequence of names in `...`.
# At each step, either descend into x[[name]] directly, or - when x is
# a list of records each containing `name` - keep only those records
# and pluck `name` from every one. `success` records whether the final
# name in the path was reached. If `expect_n` is supplied, the result
# is unlisted and padded with NA up to length `expect_n`; otherwise a
# recursive result is returned as-is and an unreached path yields NULL.
go_get_var <- function(x, ..., expect_n = NULL) {
  vars <- c(...)
  success <- FALSE
  for (i in vars) {
    # Cannot descend into an atomic object - stop early.
    if (!is.recursive(x)) break
    if (has_name_(x, i)) {
      x <- x[[i]]
      if (i == vars[length(vars)]) {
        success <- TRUE
      }
    } else if (any_recursive(x) && any(sapply(x, has_name_, i))) {
      # x is a list of records: keep those containing `i`, pluck `i`.
      kp <- sapply(x, has_name_, i)
      x <- x[kp]
      x <- lapply(x, "[[", i)
      if (i == vars[length(vars)]) {
        success <- TRUE
      }
    }
  }
  if (!success && is.null(expect_n)) {
    return(NULL)
  }
  if (any_recursive(x) && is.null(expect_n)) {
    return(x)
  }
  x <- unlist(x)
  # Pad with NA so callers can rely on a fixed-length result.
  if (!is.null(expect_n) && length(x) < expect_n) {
    x <- c(x, rep(NA, expect_n - length(x)))
  }
  x
}
all_uq_names <- function(x) {
  # Every element name that appears anywhere in the list of lists.
  nm <- lapply(x, names)
  unique(unlist(nm))
}
return_last <- function(x, n = 1) {
  # Last n elements of x, latest first.
  x[seq.int(length(x), by = -1L, length.out = n)]
}
##----------------------------------------------------------------------------##
## check data ##
##----------------------------------------------------------------------------##
has_name_ <- function(x, ...) {
  # TRUE iff x is a list-like object containing every requested name.
  wanted <- c(...)
  stopifnot(is.character(wanted))
  is.recursive(x) && all(wanted %in% names(x))
}
any_recursive <- function(x) {
  # Does any element of x itself have list-like structure?
  if (!is.recursive(x)) {
    return(FALSE)
  }
  nested <- vapply(x, is.recursive, logical(1))
  any(nested)
}
is_response <- function(x) {
  # An httr response object carries the "response" S3 class.
  inherits(x, what = "response")
}
is_json <- function(x) {
  # Inspect the response's content-type header for a JSON payload.
  ctype <- x$headers[["content-type"]]
  grepl("application/json", ctype)
}
# is.na() with any warnings silenced; used by format_date() so that
# failed parse attempts do not spam the console.
is.na.quiet <- function(x) {
  suppressWarnings(is.na(x))
}
is_n <- function(n) {
  # A valid request size: a single positive number, with
  # numeric-looking strings accepted too ("100" -> 100).
  if (is.character(n)) {
    n <- suppressWarnings(as.numeric(n))
  }
  is.numeric(n) && length(n) == 1L && !is.na(n) && n > 0L
}
maybe_n <- function(x) {
  # Like is_n() but zero and negative values are also accepted.
  if (is.character(x)) {
    x <- suppressWarnings(as.numeric(x))
  }
  is.numeric(x) && length(x) == 1L && !is.na(x)
}
is_url <- function(url) {
  # A parsed url is a list carrying at least the core
  # httr::parse_url-style fields.
  required <- c("scheme", "hostname", "port", "path", "query")
  if (is.list(url) && length(url) > 1 && all(required %in% names(url))) {
    TRUE
  } else {
    FALSE
  }
}
##----------------------------------------------------------------------------##
## wranglers ##
##----------------------------------------------------------------------------##
#' @importFrom jsonlite fromJSON
from_js <- function(rsp) {
stopifnot(is_response(rsp))
if (!is_json(rsp)) {
stop("API did not return json", call. = FALSE)
}
rsp <- httr::content(rsp, as = "text", encoding = "UTF-8")
rsp <- jsonlite::fromJSON(rsp)
if ("statuses" %in% names(rsp) && "full_text" %in% names(rsp$statuses)) {
names(rsp[["statuses"]])[names(rsp[["statuses"]]) == "text"] <- "texttrunc"
names(rsp[["statuses"]])[names(rsp[["statuses"]]) == "full_text"] <- "text"
} else if ("status" %in% names(rsp) && "full_text" %in% names(rsp$status)) {
names(rsp[["status"]])[names(rsp[["status"]]) == "text"] <- "texttrunc"
names(rsp[["status"]])[names(rsp[["status"]]) == "full_text"] <- "text"
} else if ("full_text" %in% names(rsp)) {
names(rsp)[names(rsp) == "text"] <- "texttrunc"
names(rsp)[names(rsp) == "full_text"] <- "text"
}
rsp
}
na_omit <- function(x) {
  # Drop NA elements from a vector, or all-NA elements from a list.
  if (is.atomic(x)) {
    return(x[!is.na(x)])
  }
  drop <- vapply(x, function(el) isTRUE(is.na(el)), FUN.VALUE = logical(1))
  x[!drop]
}
##----------------------------------------------------------------------------##
## user type classifers ##
##----------------------------------------------------------------------------##
.ids_type <- function(x) {
  # Flatten list input, classify the values, and require a single type.
  if (is.list(x)) {
    x <- unlist(x, use.names = FALSE)
  }
  types <- .id_type(x)
  if (length(unique(types)) > 1) {
    stop("users must be user_ids OR screen_names, not both.")
  }
  unique(types)
}
.id_type <- function(x) {
  # Prefer explicit classifications from rtweet's predicate helpers.
  if (is_screen_name(x)) {
    return("screen_name")
  }
  if (is_user_id(x)) {
    return("user_id")
  }
  # Heuristic fallback: values that can't be coerced to numbers are
  # treated as screen names; all-numeric values as user IDs.
  non_numeric <- suppressWarnings(is.na(as.numeric(x)))
  if (length(unique(non_numeric)) > 1 || unique(non_numeric)) {
    "screen_name"
  } else {
    "user_id"
  }
}
##----------------------------------------------------------------------------##
## flatten data ##
##----------------------------------------------------------------------------##
flatten_rtweet <- function(x) {
  # Convert every list-column into a delimited character column.
  list_cols <- vapply(x, is.list, logical(1))
  x[list_cols] <- lapply(x[list_cols], vobs2string)
  x
}
vobs2string <- function(x, sep = " ") {
  # Empty elements become NA, multi-element ones are collapsed into a
  # single delimited string, and everything is returned as character.
  x[lengths(x) == 0L] <- NA_character_
  multi <- lengths(x) > 1L
  x[multi] <- vapply(x[multi], obs2string, sep = sep,
    FUN.VALUE = character(1))
  as.character(x)
}
obs2string <- function(x, sep) {
  # Collapse an atomic vector to one string; all-NA input stays NA.
  stopifnot(is.atomic(x))
  if (all(is.na(x))) {
    return(NA_character_)
  }
  cleaned <- replace(x, is.na(x), "")
  paste(cleaned, collapse = sep)
}
# Enables loading packages when necessary vs import
try_require <- function(pkg, f) {
if (requireNamespace(pkg, quietly = TRUE)) {
library(pkg, character.only = TRUE)
return(invisible())
}
stop("Package `", pkg, "` required for `", f , "`.\n",
"Please install and try again.", call. = FALSE)
}
is.valid.username <- function(username) {
  # A username is considered valid when it contains no spaces.
  !grepl(" ", username, fixed = TRUE)
}
| /R/utils.R | no_license | saso008/rtweet | R | false | false | 12,832 | r |
##----------------------------------------------------------------------------##
## HTTP GET ##
##----------------------------------------------------------------------------##
#' TWIT
#'
#' @description Base function responsible for formulating GET and
#' POST requests to Twitter API's.
#'
#' @param get Logical with the default, \code{get = TRUE},
#' indicating whether the provided url should be passed along via
#' a GET or POST request.
#' @param url Character vector designed to operate like
#' parse_url and build_url functions in the httr package.
#' The easiest way to do this is to work through
#' the call-specific functions as they are designed to simplify
#' the process. However, if one were interested in reverse-
#' engingeering such a thing, I would recommend checking out
#' \code{make_url}.
#' @param \dots Further named parameters, such as config, token,
#' etc, passed on to modify_url in the httr package.
#' @note Occasionally Twitter does recommend using POST requests
#' for data retrieval calls. This is usually the case when requests
#' can involve long strings (containing up to 100 user_ids). For
#' the most part, or at least for any function-specific requests
#' (e.g., \code{get_friends}, take reflect these changes.
#' @return json response object
#' @importFrom httr GET POST timeout write_disk progress
#' @keywords internal
#' @noRd
TWIT <- function(get = TRUE, url, ...) {
  # Select the httr verb and forward the url plus any extra
  # httr options (config, token, timeout, ...).
  verb <- if (get) GET else POST
  verb(url, ...)
}
#' make_url
#'
#' @param restapi logical Default \code{restapi = TRUE}
#' indicates the provided URL components should be
#' specify Twitter's REST API. Set this to FALSE if you wish
#' to make a request URL designed for Twitter's streaming api.
#' @param query Twitter's subsetting/topic identifiers.
#' Although the httr package refers to this as "path",
#' query is used here to maintain consistency with
#' Twitter API's excellent documentation.
#' @param param Additional parameters (arguments) passed
#' along. If none, NULL (default).
#' @return URL used in httr call.
#' @keywords internal
#' @noRd
make_url <- function(restapi = TRUE, query, param = NULL) {
  # Build an httr-style "url" object pointing at either the REST or
  # the streaming Twitter API host.
  host <- if (restapi) "api.twitter.com" else "stream.twitter.com"
  url <- list(
    scheme = "https",
    hostname = host,
    port = NULL,
    path = paste0("1.1/", query, ".json"),
    query = param,
    params = NULL,
    fragment = NULL,
    username = NULL,
    password = NULL
  )
  class(url) <- "url"
  url
}
##----------------------------------------------------------------------------##
## scroll ##
##----------------------------------------------------------------------------##
# Paginate through a sequence of Twitter API responses.
# Repeatedly GETs `url`, parses each JSON page, and advances the
# pagination token (`cursor` or `max_id` query parameter) until either
# roughly `n` records have been collected, `n.times` requests have been
# made, or a break condition is hit (empty page, API error object, or
# a stalled max_id - see break_check()).
# Returns a list of parsed response pages (empty slots dropped).
scroller <- function(url, n, n.times, type = NULL, ...) {
  ## check args
  stopifnot(is_n(n), is_url(url))
  ## if n.times is missing, make a single request
  if (missing(n.times)) n.times <- 1L
  ## initialize result list and record counter
  x <- vector("list", n.times)
  counter <- 0L
  for (i in seq_along(x)) {
    ## send GET request
    x[[i]] <- httr::GET(url, ...)
    warn_for_twitter_status(x[[i]])
    ## if NULL (error) break
    if (is.null(x[[i]])) break
    ## convert from json to R list
    x[[i]] <- from_js(x[[i]])
    ## if the page is empty (or its "statuses" element is empty), break
    if (any(length(x[[i]]) == 0L,
      all("statuses" %in% names(x[[i]]),
        length(x[[i]][['statuses']]) == 0L))) {
      break
    }
    ## an "errors" element means the API refused the request:
    ## warn, replace the page with an empty placeholder, and stop
    if (has_name_(x[[i]], "errors")) {
      warning(x[[i]]$errors[["message"]], call. = FALSE)
      x[[i]] <- list(data.frame())
      break
    }
    ## accumulate the (estimated) number of records fetched so far
    counter <- counter +
      as.numeric(unique_id_count(x[[i]], type = type))
    if (counter > n) break
    ## check other possible fails (NULL/zero/stalled max_id)
    if (break_check(x[[i]], url)) break
    ## advance pagination: cursor-style endpoints update `cursor`,
    ## timeline-style endpoints update `max_id`
    if ("cursor" %in% names(url$query)) {
      url$query$cursor <- get_max_id(x[[i]])
    } else {
      url$query$max_id <- get_max_id(x[[i]])
    }
  }
  ## drop unused slots (loop broke before filling all n.times pages)
  if (is.null(names(x))) {
    x <- x[lengths(x) > 0]
  }
  x
}
unique_id_count <- function(x, type = NULL) {
  # Known endpoint types have fixed page sizes, so skip counting.
  if (!is.null(type)) {
    page_size <- switch(type, search = 100, timeline = 200, followers = 5000)
    if (!is.null(page_size)) return(page_size)
  }
  # Otherwise count the unique record IDs found in the parsed page(s).
  ids <- if (isTRUE(length(x) > 1L) && !is.null(names(x[[2]]))) {
    unlist(lapply(x, unique_id), use.names = FALSE)
  } else {
    unique_id(x)
  }
  if (is.null(ids) || identical(length(ids), 0L)) {
    return(0)
  }
  length(unique(ids))
}
unique_id <- function(x) {
  # Search responses wrap the tweets in a "statuses" element.
  if ("statuses" %in% tolower(names(x))) {
    x <- x[["statuses"]]
  }
  # Try the common ID fields in order of preference.
  for (field in c("id_str", "ids", "id")) {
    if (field %in% names(x)) {
      return(x[[field]])
    }
  }
  if ("ids" %in% names(x[[1]])) {
    return(x[[1]][["ids"]])
  }
  for (field in c("status_id", "user_id")) {
    if (field %in% names(x)) {
      return(x[[field]])
    }
  }
  NULL
}
break_check <- function(r, url, count = NULL) {
  # A non-positive remaining count means pagination is done.
  if (!is.null(count) && as.numeric(count) <= 0) {
    return(TRUE)
  }
  # A NULL response means the request failed.
  if (is.null(r)) {
    return(TRUE)
  }
  max_id <- get_max_id(r)
  # No pagination ID, or a zero ID, means there is nothing left to fetch.
  if (is.null(max_id) || identical(max_id, 0) || identical(max_id, "0")) {
    return(TRUE)
  }
  # If max_id did not advance since the previous request, stop looping.
  if ("max_id" %in% names(url$query) && !is.null(url$query$max_id) &&
      identical(as.character(max_id), as.character(url$query$max_id))) {
    return(TRUE)
  }
  FALSE
}
##----------------------------------------------------------------------------##
## format_date ##
##----------------------------------------------------------------------------#
# Parse Twitter "created_at" timestamps into POSIXct.
# The format strings target dates like "Wed Aug 27 13:08:45 +0000 2008",
# but which format/locale combination actually parses varies across
# platforms, so a sequence of attempts is tried and the first one that
# yields non-NA values wins. If every attempt fails, the raw input is
# returned unchanged.
format_date <- function(x, tz = "UTC") {
  o <- tryCatch(
    as.POSIXct(
      x,
      format = "%a %b %d %T %z %Y",
      tz = tz
    ),
    error = function(e) return(NULL)
  )
  # Fallback: spell out %T as %H:%M:%S (not all platforms support %T).
  if (any(is.null(o), all(is.na.quiet(o)))) {
    o <- tryCatch(as.POSIXct(
      x,
      format = "%a %b %d %H:%M:%S %z %Y",
      tz = tz,
      origin = "1970-01-01"),
      error = function(e) return(NULL))
  }
  # Fallback: same format without forcing the timezone.
  if (any(is.null(o), all(is.na.quiet(o)))) {
    o <- tryCatch(as.POSIXct(
      x,
      format = "%a %b %d %H:%M:%S %z %Y"),
      error = function(e) return(NULL))
  }
  # Fallback: day/month-name parsing depends on LC_TIME, so retry under
  # the "C" locale, restoring the user's locale on exit.
  if (any(is.null(o), all(is.na.quiet(o)))) {
    curLocale <- Sys.getlocale("LC_TIME")
    on.exit(
      Sys.setlocale("LC_TIME", curLocale)
      # NOTE(review): add = TRUE is commented out; fine while this is
      # the only on.exit() here, but fragile if another one is added.
      ##add = TRUE
    )
    Sys.setlocale("LC_TIME", "C")
    o <- tryCatch(as.POSIXct(
      x,
      tz = tz,
      format = "%a, %d %b %Y %H:%M:%S +0000"),
      error = function(e) return(NULL)
    )
  }
  # Fallback: literal "+0000" offset placed before the year.
  if (any(is.null(o), all(is.na.quiet(o)))) {
    o <- tryCatch(as.POSIXct(
      x, tz = tz,
      format = "%a %b %d %H:%M:%S +0000 %Y"),
      error = function(e) return(NULL))
  }
  # Last resort: numeric offset in the local timezone.
  if (any(is.null(o), all(is.na.quiet(o)))) {
    o <- tryCatch(as.POSIXct(
      x, format = "%a %b %d %H:%M:%S %z %Y"),
      error = function(e) return(NULL))
  }
  # Give up: return the raw input so callers still get something.
  if (any(is.null(o), all(is.na.quiet(o)))) {
    o <- x
  }
  o
}
##----------------------------------------------------------------------------##
## fetch/return features ##
##----------------------------------------------------------------------------##
# Walk down a nested list `x` following the sequence of names in `...`.
# At each step, either descend into x[[name]] directly, or - when x is
# a list of records each containing `name` - keep only those records
# and pluck `name` from every one. `success` records whether the final
# name in the path was reached. If `expect_n` is supplied, the result
# is unlisted and padded with NA up to length `expect_n`; otherwise a
# recursive result is returned as-is and an unreached path yields NULL.
go_get_var <- function(x, ..., expect_n = NULL) {
  vars <- c(...)
  success <- FALSE
  for (i in vars) {
    # Cannot descend into an atomic object - stop early.
    if (!is.recursive(x)) break
    if (has_name_(x, i)) {
      x <- x[[i]]
      if (i == vars[length(vars)]) {
        success <- TRUE
      }
    } else if (any_recursive(x) && any(sapply(x, has_name_, i))) {
      # x is a list of records: keep those containing `i`, pluck `i`.
      kp <- sapply(x, has_name_, i)
      x <- x[kp]
      x <- lapply(x, "[[", i)
      if (i == vars[length(vars)]) {
        success <- TRUE
      }
    }
  }
  if (!success && is.null(expect_n)) {
    return(NULL)
  }
  if (any_recursive(x) && is.null(expect_n)) {
    return(x)
  }
  x <- unlist(x)
  # Pad with NA so callers can rely on a fixed-length result.
  if (!is.null(expect_n) && length(x) < expect_n) {
    x <- c(x, rep(NA, expect_n - length(x)))
  }
  x
}
all_uq_names <- function(x) {
  # Every element name that appears anywhere in the list of lists.
  nm <- lapply(x, names)
  unique(unlist(nm))
}
return_last <- function(x, n = 1) {
  # Last n elements of x, latest first.
  x[seq.int(length(x), by = -1L, length.out = n)]
}
##----------------------------------------------------------------------------##
## check data ##
##----------------------------------------------------------------------------##
has_name_ <- function(x, ...) {
  # TRUE iff x is a list-like object containing every requested name.
  wanted <- c(...)
  stopifnot(is.character(wanted))
  is.recursive(x) && all(wanted %in% names(x))
}
any_recursive <- function(x) {
  # Does any element of x itself have list-like structure?
  if (!is.recursive(x)) {
    return(FALSE)
  }
  nested <- vapply(x, is.recursive, logical(1))
  any(nested)
}
is_response <- function(x) {
  # An httr response object carries the "response" S3 class.
  inherits(x, what = "response")
}
is_json <- function(x) {
  # Inspect the response's content-type header for a JSON payload.
  ctype <- x$headers[["content-type"]]
  grepl("application/json", ctype)
}
# is.na() with any warnings silenced; used by format_date() so that
# failed parse attempts do not spam the console.
is.na.quiet <- function(x) {
  suppressWarnings(is.na(x))
}
is_n <- function(n) {
  # A valid request size: a single positive number, with
  # numeric-looking strings accepted too ("100" -> 100).
  if (is.character(n)) {
    n <- suppressWarnings(as.numeric(n))
  }
  is.numeric(n) && length(n) == 1L && !is.na(n) && n > 0L
}
maybe_n <- function(x) {
  # Like is_n() but zero and negative values are also accepted.
  if (is.character(x)) {
    x <- suppressWarnings(as.numeric(x))
  }
  is.numeric(x) && length(x) == 1L && !is.na(x)
}
is_url <- function(url) {
  # A parsed url is a list carrying at least the core
  # httr::parse_url-style fields.
  required <- c("scheme", "hostname", "port", "path", "query")
  if (is.list(url) && length(url) > 1 && all(required %in% names(url))) {
    TRUE
  } else {
    FALSE
  }
}
##----------------------------------------------------------------------------##
## wranglers ##
##----------------------------------------------------------------------------##
#' @importFrom jsonlite fromJSON
from_js <- function(rsp) {
stopifnot(is_response(rsp))
if (!is_json(rsp)) {
stop("API did not return json", call. = FALSE)
}
rsp <- httr::content(rsp, as = "text", encoding = "UTF-8")
rsp <- jsonlite::fromJSON(rsp)
if ("statuses" %in% names(rsp) && "full_text" %in% names(rsp$statuses)) {
names(rsp[["statuses"]])[names(rsp[["statuses"]]) == "text"] <- "texttrunc"
names(rsp[["statuses"]])[names(rsp[["statuses"]]) == "full_text"] <- "text"
} else if ("status" %in% names(rsp) && "full_text" %in% names(rsp$status)) {
names(rsp[["status"]])[names(rsp[["status"]]) == "text"] <- "texttrunc"
names(rsp[["status"]])[names(rsp[["status"]]) == "full_text"] <- "text"
} else if ("full_text" %in% names(rsp)) {
names(rsp)[names(rsp) == "text"] <- "texttrunc"
names(rsp)[names(rsp) == "full_text"] <- "text"
}
rsp
}
na_omit <- function(x) {
  # Drop NA elements from a vector, or all-NA elements from a list.
  if (is.atomic(x)) {
    return(x[!is.na(x)])
  }
  drop <- vapply(x, function(el) isTRUE(is.na(el)), FUN.VALUE = logical(1))
  x[!drop]
}
##----------------------------------------------------------------------------##
## user type classifers ##
##----------------------------------------------------------------------------##
# Determine whether the supplied users are user IDs or screen names,
# erroring when the input mixes both kinds.
.ids_type <- function(x) {
  if (is.list(x)) {
    x <- unlist(x, use.names = FALSE)
  }
  types <- unique(.id_type(x))
  if (length(types) > 1) {
    stop("users must be user_ids OR screen_names, not both.")
  }
  types
}
# Classify `x` as "user_id" or "screen_name". When neither explicit test
# matches, fall back to a numeric-coercion heuristic: values that all parse
# as numbers are treated as user IDs, anything else as screen names.
.id_type <- function(x) {
  if (is_screen_name(x)) {
    return("screen_name")
  }
  if (is_user_id(x)) {
    return("user_id")
  }
  not_num <- unique(suppressWarnings(is.na(as.numeric(x))))
  if (length(not_num) > 1 || not_num) {
    return("screen_name")
  }
  "user_id"
}
##----------------------------------------------------------------------------##
## flatten data ##
##----------------------------------------------------------------------------##
# Collapse every list-column of `x` into a character column (via
# vobs2string) so the data can be written to flat formats such as CSV.
flatten_rtweet <- function(x) {
  list_cols <- which(vapply(x, is.list, logical(1)))
  for (j in list_cols) {
    x[[j]] <- vobs2string(x[[j]])
  }
  x
}
# Convert a list column to a character vector: zero-length elements become
# NA, multi-valued elements are collapsed into one string with `sep`, and
# the whole result is coerced to character.
vobs2string <- function(x, sep = " ") {
  n_vals <- lengths(x)
  x[n_vals == 0L] <- NA_character_
  multi <- n_vals > 1L
  x[multi] <- vapply(x[multi], obs2string, character(1), sep = sep)
  as.character(x)
}
# Collapse one atomic vector into a single string. All-NA input maps to NA;
# individual NAs are rendered as empty strings before collapsing with `sep`.
obs2string <- function(x, sep) {
  stopifnot(is.atomic(x))
  if (!all(is.na(x))) {
    x[is.na(x)] <- ""
    return(paste(x, collapse = sep))
  }
  NA_character_
}
# Load package `pkg` on demand (instead of importing it up front), erroring
# with a pointer to the calling feature `f` when the package is missing.
try_require <- function(pkg, f) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    stop("Package `", pkg, "` required for `", f, "`.\n",
         "Please install and try again.", call. = FALSE)
  }
  library(pkg, character.only = TRUE)
  invisible()
}
# A username is valid when it contains no space characters. Vectorised:
# returns one logical per input element.
# NOTE(review): an NA input currently yields TRUE (grepl returns FALSE for
# NA) — confirm this is the intended behaviour for missing usernames.
#
# Fixes: dropped the stray trailing semicolon; pass `fixed = TRUE` since the
# pattern is a literal space, not a regex (same behaviour, clearer intent).
is.valid.username <- function(username) {
  !grepl(" ", username, fixed = TRUE)
}
|
#' change the document units of a corpus
#'
#' For a corpus, recast the documents down or up a level of aggregation. "Down"
#' would mean going from documents to sentences, for instance. "Up" means from
#' sentences back to documents. This makes it easy to reshape a corpus from a
#' collection of documents into a collection of sentences, for instance.
#' (Because the corpus object records its current "units" status, there is no
#' \code{from} option, only \code{to}.)
#'
#' Note: Only recasting down currently works, but upward recasting is planned.
#' @param x corpus whose document units will be reshaped
#' @param to new documents units for the corpus to be recast in
#' @param ... not used
#' @return A corpus object with the documents defined as the new units,
#' including document-level meta-data identifying the original documents.
#' @examples
#' # simple example
#' mycorpus <- corpus(c(textone = "This is a sentence. Another sentence. Yet another.",
#' textwo = "Premiere phrase. Deuxieme phrase."),
#' docvars = data.frame(country=c("UK", "USA"), year=c(1990, 2000)),
#' metacorpus = list(notes = "Example showing how corpus_reshape() works."))
#' summary(mycorpus)
#' summary(corpus_reshape(mycorpus, to = "sentences"), showmeta=TRUE)
#'
#' # example with inaugural corpus speeches
#' (mycorpus2 <- corpus_subset(data_corpus_inaugural, Year>2004))
#' paragCorpus <- corpus_reshape(mycorpus2, to="paragraphs")
#' paragCorpus
#' summary(paragCorpus, 100, showmeta=TRUE)
#' ## Note that Bush 2005 is recorded as a single paragraph because that text used a single
#' ## \n to mark the end of a paragraph.
#' @export
#' @keywords corpus
corpus_reshape <- function(x, to = c("sentences", "paragraphs", "documents"), ...) {
    # S3 generic: dispatches on the class of `x` (see corpus_reshape.corpus)
    UseMethod("corpus_reshape")
}
#' @noRd
#' @rdname corpus_reshape
#' @importFrom data.table data.table setnames
#' @export
corpus_reshape.corpus <- function(x, to = c("sentences", "paragraphs", "documents"), ...) {
    # bind `document` locally so data.table's non-standard evaluation below
    # does not trigger R CMD check "no visible binding" notes
    document <- NULL
    # emit the deprecation message when reached via the old changeunits() alias
    if (as.character(match.call()[[1]]) == "changeunits")
        .Deprecated("corpus_reshape")
    to <- match.arg(to)
    # warn about (and ignore) any unused ... arguments
    if (length(addedArgs <- names(list(...))))
        warning("Argument", ifelse(length(addedArgs)>1, "s ", " "), names(addedArgs), " not used.", sep = "")
    if (to == "documents") {
        # recombination is only defined from sentence units back to documents
        if (settings(x, "unitsoriginal") != "documents" & !(settings(x, "units") %in% c("sentences")))
            stop("reshape to documents only goes from sentences to documents")
        # paragraphs are glued back with blank lines, sentences with spaces
        if (settings(x, "units") == "paragraphs") {
            spacer <- "\n\n"
        } else {
            spacer <- " "
        }
        # reshape into original documents, replace the original text
        docs <- data.table(x$documents)
        setnames(docs, "_document", "document")
        # take just first value of every (repeated) docvar
        docs <- docs[, lapply(.SD, function(x) x[1]), by = document]
        # concatenate texts
        docs[, texts := texts(x, groups = metadoc(x, "document"), spacer = spacer)]
        # make the text "empty" if it contains only spaces
        docs[stringi::stri_detect_regex(texts, "^\\s+$"), texts := ""]
        # remove reshape fields
        docs[, "_serialno" := NULL]
        newcorpus <- x
        newcorpus$documents <- as.data.frame(docs[, -which(names(docs) == "document"), with = FALSE])
        rownames(newcorpus$documents) <- docs$document
    } else {
        # make the new corpus by segmenting each text into sentences/paragraphs
        segmentedTexts <- lapply(texts(x), char_segment, what = to)
        lengthSegments <- sapply(segmentedTexts, length)
        newcorpus <- corpus(unlist(segmentedTexts))
        # repeat the docvars and existing document metadata once per segment
        docvars(newcorpus, names(docvars(x))) <- as.data.frame(lapply(docvars(x), rep, lengthSegments))
        docvars(newcorpus, names(metadoc(x))) <- as.data.frame(lapply(metadoc(x), rep, lengthSegments))
        # add original document name as metadata
        metadoc(newcorpus, "document") <- rep(names(segmentedTexts), lengthSegments)
        # give a serial number (within document) to each sentence
        sentenceid <- lapply(lengthSegments, function(n) seq(from=1, to=n))
        metadoc(newcorpus, "serialno") <- unlist(sentenceid, use.names=FALSE)
    }
    # copy settings and corpus metadata
    newcorpus$settings <- x$settings
    newcorpus$metadata <- x$metadata
    # modify settings flag for corpus_reshape info
    settings(newcorpus, "unitsoriginal") <- settings(newcorpus, "units")
    settings(newcorpus, "units") <- to
    newcorpus
}
#' deprecated name for corpus_reshape
#'
#' The deprecated function name for what is now \code{\link{corpus_reshape}}.
#' Please use that instead.
#' @param ... all arguments
#' @keywords internal deprecated
#' @export
changeunits <- function(...) {
    # warn callers that this alias is deprecated, then dispatch to the
    # corpus_reshape S3 methods (UseMethod dispatches on the first
    # argument contained in `...`)
    .Deprecated("corpus_reshape")
    UseMethod("corpus_reshape")
}
# helper function: rep method for a repeat a data.frame
# Example:
# rep(data.frame(one = 1:2, two = c("a", "b")), 2)
# ## $one
# ## [1] 1 2
# ##
# ## $two
# ## [1] a b
# ## Levels: a b
# ##
# ## $one
# ##
# ## [1] 1 2
# ## $two
# ## [1] a b
# ## Levels: a b
# S3 rep() method for data.frames: repeats every column with rep(...) and
# reassembles the repeated columns into a data.frame.
rep.data.frame <- function(x, ...) {
  repeated_cols <- lapply(x, rep, ...)
  as.data.frame(repeated_cols)
}
| /R/corpus_reshape.R | no_license | plablo09/quanteda | R | false | false | 5,352 | r | #' change the document units of a corpus
#'
#' For a corpus, recast the documents down or up a level of aggregation. "Down"
#' would mean going from documents to sentences, for instance. "Up" means from
#' sentences back to documents. This makes it easy to reshape a corpus from a
#' collection of documents into a collection of sentences, for instance.
#' (Because the corpus object records its current "units" status, there is no
#' \code{from} option, only \code{to}.)
#'
#' Note: Only recasting down currently works, but upward recasting is planned.
#' @param x corpus whose document units will be reshaped
#' @param to new documents units for the corpus to be recast in
#' @param ... not used
#' @return A corpus object with the documents defined as the new units,
#' including document-level meta-data identifying the original documents.
#' @examples
#' # simple example
#' mycorpus <- corpus(c(textone = "This is a sentence. Another sentence. Yet another.",
#' textwo = "Premiere phrase. Deuxieme phrase."),
#' docvars = data.frame(country=c("UK", "USA"), year=c(1990, 2000)),
#' metacorpus = list(notes = "Example showing how corpus_reshape() works."))
#' summary(mycorpus)
#' summary(corpus_reshape(mycorpus, to = "sentences"), showmeta=TRUE)
#'
#' # example with inaugural corpus speeches
#' (mycorpus2 <- corpus_subset(data_corpus_inaugural, Year>2004))
#' paragCorpus <- corpus_reshape(mycorpus2, to="paragraphs")
#' paragCorpus
#' summary(paragCorpus, 100, showmeta=TRUE)
#' ## Note that Bush 2005 is recorded as a single paragraph because that text used a single
#' ## \n to mark the end of a paragraph.
#' @export
#' @keywords corpus
corpus_reshape <- function(x, to = c("sentences", "paragraphs", "documents"), ...) {
    # S3 generic: dispatches on the class of `x` (see corpus_reshape.corpus)
    UseMethod("corpus_reshape")
}
#' @noRd
#' @rdname corpus_reshape
#' @importFrom data.table data.table setnames
#' @export
corpus_reshape.corpus <- function(x, to = c("sentences", "paragraphs", "documents"), ...) {
    # bind `document` locally so data.table's non-standard evaluation below
    # does not trigger R CMD check "no visible binding" notes
    document <- NULL
    # emit the deprecation message when reached via the old changeunits() alias
    if (as.character(match.call()[[1]]) == "changeunits")
        .Deprecated("corpus_reshape")
    to <- match.arg(to)
    # warn about (and ignore) any unused ... arguments
    if (length(addedArgs <- names(list(...))))
        warning("Argument", ifelse(length(addedArgs)>1, "s ", " "), names(addedArgs), " not used.", sep = "")
    if (to == "documents") {
        # recombination is only defined from sentence units back to documents
        if (settings(x, "unitsoriginal") != "documents" & !(settings(x, "units") %in% c("sentences")))
            stop("reshape to documents only goes from sentences to documents")
        # paragraphs are glued back with blank lines, sentences with spaces
        if (settings(x, "units") == "paragraphs") {
            spacer <- "\n\n"
        } else {
            spacer <- " "
        }
        # reshape into original documents, replace the original text
        docs <- data.table(x$documents)
        setnames(docs, "_document", "document")
        # take just first value of every (repeated) docvar
        docs <- docs[, lapply(.SD, function(x) x[1]), by = document]
        # concatenate texts
        docs[, texts := texts(x, groups = metadoc(x, "document"), spacer = spacer)]
        # make the text "empty" if it contains only spaces
        docs[stringi::stri_detect_regex(texts, "^\\s+$"), texts := ""]
        # remove reshape fields
        docs[, "_serialno" := NULL]
        newcorpus <- x
        newcorpus$documents <- as.data.frame(docs[, -which(names(docs) == "document"), with = FALSE])
        rownames(newcorpus$documents) <- docs$document
    } else {
        # make the new corpus by segmenting each text into sentences/paragraphs
        segmentedTexts <- lapply(texts(x), char_segment, what = to)
        lengthSegments <- sapply(segmentedTexts, length)
        newcorpus <- corpus(unlist(segmentedTexts))
        # repeat the docvars and existing document metadata once per segment
        docvars(newcorpus, names(docvars(x))) <- as.data.frame(lapply(docvars(x), rep, lengthSegments))
        docvars(newcorpus, names(metadoc(x))) <- as.data.frame(lapply(metadoc(x), rep, lengthSegments))
        # add original document name as metadata
        metadoc(newcorpus, "document") <- rep(names(segmentedTexts), lengthSegments)
        # give a serial number (within document) to each sentence
        sentenceid <- lapply(lengthSegments, function(n) seq(from=1, to=n))
        metadoc(newcorpus, "serialno") <- unlist(sentenceid, use.names=FALSE)
    }
    # copy settings and corpus metadata
    newcorpus$settings <- x$settings
    newcorpus$metadata <- x$metadata
    # modify settings flag for corpus_reshape info
    settings(newcorpus, "unitsoriginal") <- settings(newcorpus, "units")
    settings(newcorpus, "units") <- to
    newcorpus
}
#' deprecated name for corpus_reshape
#'
#' The deprecated function name for what is now \code{\link{corpus_reshape}}.
#' Please use that instead.
#' @param ... all arguments
#' @keywords internal deprecated
#' @export
changeunits <- function(...) {
    # warn callers that this alias is deprecated, then dispatch to the
    # corpus_reshape S3 methods (UseMethod dispatches on the first
    # argument contained in `...`)
    .Deprecated("corpus_reshape")
    UseMethod("corpus_reshape")
}
# helper function: rep method for a repeat a data.frame
# Example:
# rep(data.frame(one = 1:2, two = c("a", "b")), 2)
# ## $one
# ## [1] 1 2
# ##
# ## $two
# ## [1] a b
# ## Levels: a b
# ##
# ## $one
# ##
# ## [1] 1 2
# ## $two
# ## [1] a b
# ## Levels: a b
# S3 rep() method for data.frames: repeats every column with rep(...) and
# reassembles the repeated columns into a data.frame.
rep.data.frame <- function(x, ...) {
  repeated_cols <- lapply(x, rep, ...)
  as.data.frame(repeated_cols)
}
|
# Example script exercising TSclust's CORT dissimilarity between time series.
library(TSclust)
### Name: diss.CORT
### Title: Dissimilarity Index Combining Temporal Correlation and Raw
###   Values Behaviors
### Aliases: diss.CORT
### Keywords: ~kwd1 ~kwd2
### ** Examples
## Create three sample time series: two random walks (x, y) and a smooth
## sinusoid (z), so x/y should be more similar to each other than to z
x <- cumsum(rnorm(100))
y <- cumsum(rnorm(100))
z <- sin(seq(0, pi, length.out=100))
## Compute the pairwise CORT distances (k = 2) and check for coherent results
diss.CORT(x, y, 2)
diss.CORT(x, z, 2)
diss.CORT(y, z, 2)
# create a dist object for use with clustering functions like pam or hclust
## Not run:
##D diss( rbind(x,y,z), "CORT", k=3, deltamethod="DTW")
## End(Not run)
| /data/genthat_extracted_code/TSclust/examples/diss.CORT.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 598 | r | library(TSclust)
### Name: diss.CORT
### Title: Dissimilarity Index Combining Temporal Correlation and Raw
### Values Behaviors
### Aliases: diss.CORT
### Keywords: ~kwd1 ~kwd2
### ** Examples
## Create three sample time series
x <- cumsum(rnorm(100))
y <- cumsum(rnorm(100))
z <- sin(seq(0, pi, length.out=100))
## Compute the distance and check for coherent results
diss.CORT(x, y, 2)
diss.CORT(x, z, 2)
diss.CORT(y, z, 2)
#create a dist object for its use with clustering functions like pam or hclust
## Not run:
##D diss( rbind(x,y,z), "CORT", k=3, deltamethod="DTW")
## End(Not run)
|
# Environment setup: pick the Python interpreter / conda environment and the
# batchtools configuration file depending on whether the script is running
# on the local workstation (user "sunxd") or elsewhere (assumed cluster).
#
# Fixes: use `<-` instead of `=` for top-level assignment, and index the
# named vector from Sys.info() directly instead of via as.list()$user.
sys <- Sys.info()
flag_local <- sys[["user"]] == "sunxd"
mconf.file <- NULL
if (flag_local) {
  # local machine: plain Anaconda python, NA signals the default config
  reticulate::use_python("~/anaconda3/bin/python")
  mconf.file <- NA
} else {
  # cluster: dedicated conda env plus the LRZ batchtools configuration file
  reticulate::use_condaenv("w_env")
  mconf.file <- "lrz.batchtools.conf.R"
}
| /benchmark/system.R | no_license | smilesun/reinbo | R | false | false | 252 | r | sys = Sys.info()
flag_local = as.list(sys)$user == "sunxd"
mconf.file = NULL
if (flag_local) {
reticulate::use_python("~/anaconda3/bin/python")
mconf.file = NA
} else {
reticulate::use_condaenv("w_env")
mconf.file = "lrz.batchtools.conf.R"
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is.function.R
\name{is.function}
\alias{is.function}
\title{fun_name}
\usage{
is.function(params)
}
\arguments{
\item{params}{Object passed to \code{is.function}.}
}
\description{
Another substituted function (auto-generated placeholder documentation).
}
| /man/is.function.Rd | no_license | granatb/RapeR | R | false | true | 259 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is.function.R
\name{is.function}
\alias{is.function}
\title{fun_name}
\usage{
is.function(params)
}
\arguments{
\item{param}{fun_name}
}
\description{
kolejna funkcja podmieniona
}
|
# Count the complete cases (rows with no missing values) in the monitor CSV
# files found under `directory`.
#
# Args:
#   directory: path to the folder containing the monitor .csv files
#   id: integer vector of monitor ids, used as indexes into the
#       alphabetically sorted file list
#
# Returns a data.frame with columns `id` and `totalvec` (complete-case
# count per monitor), one row per requested id, in the order given.
complete <- function(directory, id = 1:332){
  # full.names = TRUE keeps the directory prefix on each file name; the
  # previous read.csv(filelist[i]) used bare names and therefore only
  # worked when the working directory happened to equal `directory`
  filelist <- list.files(path = directory, pattern = ".csv", full.names = TRUE)
  # preallocate instead of growing the vector with c() inside the loop
  totalvec <- numeric(length(id))
  for (k in seq_along(id)) {
    data <- read.csv(filelist[id[k]])
    totalvec[k] <- sum(complete.cases(data))
  }
  data.frame(id, totalvec)
}
| /specdata/complete.R | no_license | ra753/R-files | R | false | false | 665 | r | complete <- function(directory, id=1:332){
#filelist stores the list of all csv files present in specdata folder
filelist <- list.files(path=directory, pattern=".csv")
#totalvec stores the total number of complete cases in all csv files
totalvec <- numeric()
#run a for-loop to iterate through all 332 csv files in specdata and store in numeric() vector
for(i in id) {
#store csv file temporarly in data var
data <- read.csv(filelist[i])
#totalvec contains completecases of all csv files
totalvec <- c(totalvec, sum(complete.cases(data)))
}
#create a dataframe from list totalvec
data.frame(id, totalvec)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/oauth2_objects.R
\name{Tokeninfo}
\alias{Tokeninfo}
\title{Tokeninfo Object}
\usage{
Tokeninfo(access_type = NULL, audience = NULL, email = NULL,
email_verified = NULL, expires_in = NULL, issued_at = NULL,
issued_to = NULL, issuer = NULL, nonce = NULL, scope = NULL,
user_id = NULL, verified_email = NULL)
}
\arguments{
\item{access_type}{The access type granted with this token}
\item{audience}{Who is the intended audience for this token}
\item{email}{The email address of the user}
\item{email_verified}{Boolean flag which is true if the email address is verified}
\item{expires_in}{The expiry time of the token, as number of seconds left until expiry}
\item{issued_at}{The issue time of the token, as number of seconds}
\item{issued_to}{To whom was the token issued to}
\item{issuer}{Who issued the token}
\item{nonce}{Nonce of the id token}
\item{scope}{The space separated list of scopes granted to this token}
\item{user_id}{The obfuscated user id}
\item{verified_email}{Boolean flag which is true if the email address is verified}
}
\value{
Tokeninfo object
}
\description{
Tokeninfo Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
| /googleoauth2v1.auto/man/Tokeninfo.Rd | permissive | GVersteeg/autoGoogleAPI | R | false | true | 1,291 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/oauth2_objects.R
\name{Tokeninfo}
\alias{Tokeninfo}
\title{Tokeninfo Object}
\usage{
Tokeninfo(access_type = NULL, audience = NULL, email = NULL,
email_verified = NULL, expires_in = NULL, issued_at = NULL,
issued_to = NULL, issuer = NULL, nonce = NULL, scope = NULL,
user_id = NULL, verified_email = NULL)
}
\arguments{
\item{access_type}{The access type granted with this token}
\item{audience}{Who is the intended audience for this token}
\item{email}{The email address of the user}
\item{email_verified}{Boolean flag which is true if the email address is verified}
\item{expires_in}{The expiry time of the token, as number of seconds left until expiry}
\item{issued_at}{The issue time of the token, as number of seconds}
\item{issued_to}{To whom was the token issued to}
\item{issuer}{Who issued the token}
\item{nonce}{Nonce of the id token}
\item{scope}{The space separated list of scopes granted to this token}
\item{user_id}{The obfuscated user id}
\item{verified_email}{Boolean flag which is true if the email address is verified}
}
\value{
Tokeninfo object
}
\description{
Tokeninfo Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
|
# Download the "existing TB cases per 100K" spreadsheet (rows = countries,
# columns = years 1990-2007) and explore its structure with PCA.
library(RCurl)
existing_cases_file <- getURL("https://docs.google.com/spreadsheets/d/1X5Jp7Q8pTs3KLJ5JBWKhncVACGsg5v4xu6badNs4C7I/pub?gid=0&output=csv")
# NOTE(review): `stringsAsFactor` relies on partial matching of
# `stringsAsFactors`, and `F` is the reassignable shorthand for FALSE —
# works, but worth tightening up
existing_df <- read.csv(text = existing_cases_file, row.names=1, stringsAsFactor=F)
# the listed columns arrive as strings with thousands separators ("1,234");
# strip the commas and convert them to integers
existing_df[c(1,2,3,4,5,6,15,16,17,18)] <-
  lapply( existing_df[c(1,2,3,4,5,6,15,16,17,18)],
          function(x) { as.integer(gsub(',', '', x) )})
# principal component analysis on the scaled year columns
pca_existing <- prcomp(existing_df, scale. = TRUE)
plot(pca_existing)
scores_existing_df <- as.data.frame(pca_existing$x)
# Show first two PCs for head countries
head(scores_existing_df[1:2])
# scatter the countries on the first two principal components
plot(PC1~PC2, data=scores_existing_df,
     main= "Existing TB cases per 100K distribution",
     cex = .1, lty = "solid")
text(PC1~PC2, data=scores_existing_df,
     labels=rownames(existing_df),
     cex=.8)
library(scales)
# colour countries by mean prevalence across years (yellow = low, blue = high)
ramp <- colorRamp(c("yellow", "blue"))
colours_by_mean <- rgb(
  ramp( as.vector(rescale(rowMeans(existing_df),c(0,1)))),
  max = 255 )
plot(PC1~PC2, data=scores_existing_df,
     main= "Existing TB cases per 100K distribution",
     cex = .1, lty = "solid", col=colours_by_mean)
text(PC1~PC2, data=scores_existing_df,
     labels=rownames(existing_df),
     cex=.8, col=colours_by_mean)
# colour countries by total prevalence summed over all years
ramp <- colorRamp(c("yellow", "blue"))
colours_by_sum <- rgb(
  ramp( as.vector(rescale(rowSums(existing_df),c(0,1)))),
  max = 255 )
plot(PC1~PC2, data=scores_existing_df,
     main= "Existing TB cases per 100K distribution",
     cex = .1, lty = "solid", col=colours_by_sum)
text(PC1~PC2, data=scores_existing_df,
     labels=rownames(existing_df),
     cex=.8, col=colours_by_sum)
# colour countries by the change between the first (1990) and last (2007) year
existing_df_change <- existing_df$X2007 - existing_df$X1990
ramp <- colorRamp(c("yellow", "blue"))
colours_by_change <- rgb(
  ramp( as.vector(rescale(existing_df_change,c(0,1)))),
  max = 255 )
plot(PC1~PC2, data=scores_existing_df,
     main= "Existing TB cases per 100K distribution",
     cex = .1, lty = "solid", col=colours_by_change)
text(PC1~PC2, data=scores_existing_df,
     labels=rownames(existing_df),
     cex=.8, col=colours_by_change)
# K clustering
set.seed(1234)
existing_clustering <- kmeans(existing_df, centers = 3)
existing_cluster_groups <- existing_clustering$cluster
plot(PC1~PC2, data=scores_existing_df,
main= "Existing TB cases per 100K distribution",
cex = .1, lty = "solid", col=existing_cluster_groups)
text(PC1~PC2, data=scores_existing_df,
labels=rownames(existing_df),
cex=.8, col=existing_cluster_groups)
set.seed(1234)
existing_clustering <- kmeans(existing_df, centers = 4)
existing_cluster_groups <- existing_clustering$cluster
plot(PC1~PC2, data=scores_existing_df,
main= "Existing TB cases per 100K distribution",
cex = .1, lty = "solid", col=existing_cluster_groups)
text(PC1~PC2, data=scores_existing_df,
labels=rownames(existing_df),
cex=.8, col=existing_cluster_groups)
set.seed(1234)
existing_clustering <- kmeans(existing_df, centers = 5)
existing_cluster_groups <- existing_clustering$cluster
plot(PC1~PC2, data=scores_existing_df,
main= "Existing TB cases per 100K distribution",
cex = .1, lty = "solid", col=existing_cluster_groups)
text(PC1~PC2, data=scores_existing_df,
labels=rownames(existing_df),
cex=.8, col=existing_cluster_groups)
set.seed(1234)
existing_clustering <- kmeans(existing_df, centers = 6)
existing_cluster_groups <- existing_clustering$cluster
plot(PC1~PC2, data=scores_existing_df,
main= "Existing TB cases per 100K distribution",
cex = .1, lty = "solid", col=existing_cluster_groups)
text(PC1~PC2, data=scores_existing_df,
labels=rownames(existing_df),
cex=.8, col=existing_cluster_groups)
set.seed(1234)
existing_clustering <- kmeans(existing_df, centers = 5)
existing_cluster_groups <- existing_clustering$cluster
plot(PC1~PC2, data=scores_existing_df,
main= "Existing TB cases per 100K distribution",
cex = .1, lty = "solid", col=existing_cluster_groups)
text(PC1~PC2, data=scores_existing_df,
labels=rownames(existing_df),
cex=.8, col=existing_cluster_groups)
#Cluster Interpretation
existing_df$cluster <- existing_clustering$cluster
table(existing_df$cluster)
xrange <- 1990:2007
plot(xrange, existing_clustering$centers[1,],
type='l', xlab="Year",
ylab="New cases per 100K",
col = 1,
ylim=c(0,1000))
for (i in 2:nrow(existing_clustering$centers)) {
lines(xrange, existing_clustering$centers[i,],
col = i)
}
legend(x=1990, y=1000,
lty=1, cex = 0.5,
ncol = 5,
col=1:(nrow(existing_clustering$centers)+1),
legend=paste("Cluster",1:nrow(existing_clustering$centers)))
# Cluster 1
# Cluster 1 contains just 16 countries. These are:
rownames(subset(existing_df, cluster==1))
# The centroid that represents them is:
existing_clustering$centers[1,]
# Cluster 2
# Cluster 2 contains 30 countries. These are:
rownames(subset(existing_df, cluster==2))
# The centroid that represents them is:
existing_clustering$centers[2,]
# Cluster 3
# This is an important one. Cluster 3 contains just 20 countries. These are:
rownames(subset(existing_df, cluster==3))
# The centroid that represents them is:
existing_clustering$centers[3,]
# Cluster 4
# The fourth cluster contains 51 countries.
rownames(subset(existing_df, cluster==4))
# The centroid that represents them is:
existing_clustering$centers[4,]
# Cluster 5
# The last and bigger cluster contains 90 countries.
rownames(subset(existing_df, cluster==5))
# The centroid that represents them is:
existing_clustering$centers[5,]
# A Second Level of Clustering: split the largest group (Cluster 5) in two
# subset the original dataset to the Cluster 5 countries
cluster5_df <- subset(existing_df, cluster==5)
# do the clustering (column 19 is the `cluster` label added above, so it is
# excluded from the features)
set.seed(1234)
cluster5_clustering <- kmeans(cluster5_df[,-19], centers = 2)
# assign sub-cluster number to the data set for Cluster 5
cluster5_df$cluster <- cluster5_clustering$cluster
# plot the two sub-cluster centroids over time
xrange <- 1990:2007
plot(xrange, cluster5_clustering$centers[1,],
     type='l', xlab="Year",
     ylab="Existing cases per 100K",
     col = 1,
     ylim=c(0,200))
for (i in 2:nrow(cluster5_clustering$centers)) {
  lines(xrange, cluster5_clustering$centers[i,],
        col = i)
}
legend(x=1990, y=200,
       lty=1, cex = 0.5,
       ncol = 5,
       col=1:(nrow(cluster5_clustering$centers)+1),
       legend=paste0("Cluster 5.",1:nrow(cluster5_clustering$centers)))
# list the countries falling into each sub-cluster
rownames(subset(cluster5_df, cluster5_df$cluster==2))
rownames(subset(cluster5_df, cluster5_df$cluster==1))
| /Homeworks/Homework9/PURVACHAR_HW9_R.R | permissive | RashmiPurvachar/big-data-python-class | R | false | false | 6,445 | r | library(RCurl)
existing_cases_file <- getURL("https://docs.google.com/spreadsheets/d/1X5Jp7Q8pTs3KLJ5JBWKhncVACGsg5v4xu6badNs4C7I/pub?gid=0&output=csv")
existing_df <- read.csv(text = existing_cases_file, row.names=1, stringsAsFactor=F)
existing_df[c(1,2,3,4,5,6,15,16,17,18)] <-
lapply( existing_df[c(1,2,3,4,5,6,15,16,17,18)],
function(x) { as.integer(gsub(',', '', x) )})
pca_existing <- prcomp(existing_df, scale. = TRUE)
plot(pca_existing)
scores_existing_df <- as.data.frame(pca_existing$x)
# Show first two PCs for head countries
head(scores_existing_df[1:2])
plot(PC1~PC2, data=scores_existing_df,
main= "Existing TB cases per 100K distribution",
cex = .1, lty = "solid")
text(PC1~PC2, data=scores_existing_df,
labels=rownames(existing_df),
cex=.8)
library(scales)
ramp <- colorRamp(c("yellow", "blue"))
colours_by_mean <- rgb(
ramp( as.vector(rescale(rowMeans(existing_df),c(0,1)))),
max = 255 )
plot(PC1~PC2, data=scores_existing_df,
main= "Existing TB cases per 100K distribution",
cex = .1, lty = "solid", col=colours_by_mean)
text(PC1~PC2, data=scores_existing_df,
labels=rownames(existing_df),
cex=.8, col=colours_by_mean)
ramp <- colorRamp(c("yellow", "blue"))
colours_by_sum <- rgb(
ramp( as.vector(rescale(rowSums(existing_df),c(0,1)))),
max = 255 )
plot(PC1~PC2, data=scores_existing_df,
main= "Existing TB cases per 100K distribution",
cex = .1, lty = "solid", col=colours_by_sum)
text(PC1~PC2, data=scores_existing_df,
labels=rownames(existing_df),
cex=.8, col=colours_by_sum)
existing_df_change <- existing_df$X2007 - existing_df$X1990
ramp <- colorRamp(c("yellow", "blue"))
colours_by_change <- rgb(
ramp( as.vector(rescale(existing_df_change,c(0,1)))),
max = 255 )
plot(PC1~PC2, data=scores_existing_df,
main= "Existing TB cases per 100K distribution",
cex = .1, lty = "solid", col=colours_by_change)
text(PC1~PC2, data=scores_existing_df,
labels=rownames(existing_df),
cex=.8, col=colours_by_change)
# K clustering
# The original script repeated the same seed/kmeans/plot/text block once per
# choice of k (3, 4, 5, 6, then 5 again). The repetition is factored into a
# helper so the settings cannot drift between runs.
# Runs k-means on the per-country rate profiles (seeded for reproducibility),
# draws the PCA scatter coloured by cluster membership, and returns the fit.
run_and_plot_kmeans <- function(k) {
  set.seed(1234)  # reseed before every fit so each k is independently reproducible
  clustering <- kmeans(existing_df, centers = k)
  groups <- clustering$cluster
  plot(PC1~PC2, data=scores_existing_df,
       main= "Existing TB cases per 100K distribution",
       cex = .1, lty = "solid", col=groups)
  text(PC1~PC2, data=scores_existing_df,
       labels=rownames(existing_df),
       cex=.8, col=groups)
  clustering
}
# Explore k = 3..6, then re-run k = 5 so the retained fit (used below)
# is the 5-cluster solution, exactly as the original sequence of blocks did.
for (k in 3:6) existing_clustering <- run_and_plot_kmeans(k)
existing_clustering <- run_and_plot_kmeans(5)
existing_cluster_groups <- existing_clustering$cluster
#Cluster Interpretation
existing_df$cluster <- existing_clustering$cluster
table(existing_df$cluster)
# Plot each cluster centroid as a 1990-2007 time series of rates.
xrange <- 1990:2007
plot(xrange, existing_clustering$centers[1,],
     type='l', xlab="Year",
     ylab="New cases per 100K",  # NOTE(review): data are *existing* cases -- confirm the label is intended
     col = 1,
     ylim=c(0,1000))
for (i in 2:nrow(existing_clustering$centers)) {
  lines(xrange, existing_clustering$centers[i,],
        col = i)
}
legend(x=1990, y=1000,
       lty=1, cex = 0.5,
       ncol = 5,
       col=1:nrow(existing_clustering$centers),  # was nrow()+1; the extra colour was silently dropped by legend()
       legend=paste("Cluster",1:nrow(existing_clustering$centers)))
# NOTE(review): the country counts in the comments below depend on the data
# snapshot -- verify before citing.
# Cluster 1
# Cluster 1 contains just 16 countries. These are:
rownames(subset(existing_df, cluster==1))
# The centroid that represents them is:
existing_clustering$centers[1,]
# Cluster 2
# Cluster 2 contains 30 countries. These are:
rownames(subset(existing_df, cluster==2))
# The centroid that represents them is:
existing_clustering$centers[2,]
# Cluster 3
# This is an important one. Cluster 3 contains just 20 countries. These are:
rownames(subset(existing_df, cluster==3))
# The centroid that represents them is:
existing_clustering$centers[3,]
# Cluster 4
# The fourth cluster contains 51 countries.
rownames(subset(existing_df, cluster==4))
# The centroid that represents them is:
existing_clustering$centers[4,]
# Cluster 5
# The last and bigger cluster contains 90 countries.
rownames(subset(existing_df, cluster==5))
# The centroid that represents them is:
existing_clustering$centers[5,]
# A Second Level of Clustering
# subset the original dataset
cluster5_df <- subset(existing_df, cluster==5)
# do the clustering
set.seed(1234)
# Drop the cluster label by name instead of by position: the original used
# [,-19], which silently breaks if the number of year columns changes.
cluster5_clustering <- kmeans(cluster5_df[, names(cluster5_df) != "cluster"], centers = 2)
# assign sub-cluster number to the data set for Cluster 5
cluster5_df$cluster <- cluster5_clustering$cluster
xrange <- 1990:2007
plot(xrange, cluster5_clustering$centers[1,],
     type='l', xlab="Year",
     ylab="Existing cases per 100K",
     col = 1,
     ylim=c(0,200))
for (i in 2:nrow(cluster5_clustering$centers)) {
  lines(xrange, cluster5_clustering$centers[i,],
        col = i)
}
legend(x=1990, y=200,
       lty=1, cex = 0.5,
       ncol = 5,
       col=1:nrow(cluster5_clustering$centers),
       legend=paste0("Cluster 5.",1:nrow(cluster5_clustering$centers)))
# Members of the two sub-clusters of cluster 5
rownames(subset(cluster5_df, cluster5_df$cluster==2))
rownames(subset(cluster5_df, cluster5_df$cluster==1))
|
\name{icajade}
\alias{icajade}
\title{
ICA via JADE Algorithm
}
\description{
Computes ICA decomposition using Cardoso and Souloumiac's (1993, 1996) Joint Approximate Diagonalization of Eigenmatrices (JADE) approach.
}
\usage{
icajade(X, nc, center = TRUE, maxit = 100, tol = 1e-6, Rmat = diag(nc))
}
\arguments{
\item{X}{
Data matrix with \code{n} rows (samples) and \code{p} columns (variables).
}
\item{nc}{
Number of components to extract.
}
\item{center}{
If \code{TRUE}, columns of \code{X} are mean-centered before ICA decomposition.
}
\item{maxit}{
Maximum number of algorithm iterations to allow.
}
\item{tol}{
Convergence tolerance.
}
\item{Rmat}{
Initial estimate of the \code{nc}-by-\code{nc} orthogonal rotation matrix.
}
}
\value{
\item{S}{Matrix of source signal estimates (\code{S = Y \%*\% R}).}
\item{M}{Estimated mixing matrix.}
\item{W}{Estimated unmixing matrix (\code{W = crossprod(R, Q)}).}
\item{Y}{Whitened data matrix.}
\item{Q}{Whitening matrix.}
\item{R}{Orthogonal rotation matrix.}
\item{vafs}{Variance-accounted-for by each component.}
\item{iter}{Number of algorithm iterations.}
\item{converged}{Logical indicating if algorithm converged.}
}
\references{
Cardoso, J.F., & Souloumiac, A. (1993). Blind beamforming for non-Gaussian signals. \emph{IEE Proceedings-F, 140}(6), 362-370. \doi{10.1049/ip-f-2.1993.0054}
Cardoso, J.F., & Souloumiac, A. (1996). Jacobi angles for simultaneous diagonalization. \emph{SIAM Journal on Matrix Analysis and Applications, 17}(1), 161-164. \doi{10.1137/S0895479893259546}
Helwig, N.E. & Hong, S. (2013). A critique of Tensor Probabilistic Independent Component Analysis: Implications and recommendations for multi-subject fMRI data analysis. \emph{Journal of Neuroscience Methods, 213}(2), 263-273. \doi{10.1016/j.jneumeth.2012.12.009}
}
\author{
Nathaniel E. Helwig <helwig@umn.edu>
}
\details{
\bold{ICA Model}
The ICA model can be written as \code{X = tcrossprod(S, M) + E}, where \code{S} contains the source signals, \code{M} is the mixing matrix, and \code{E} contains the noise signals. Columns of \code{X} are assumed to have zero mean. The goal is to find the unmixing matrix \code{W} such that columns of \code{S = tcrossprod(X, W)} are independent as possible.
\bold{Whitening}
Without loss of generality, we can write \code{M = P \%*\% R} where \code{P} is a tall matrix and \code{R} is an orthogonal rotation matrix. Letting \code{Q} denote the pseudoinverse of \code{P}, we can whiten the data using \code{Y = tcrossprod(X, Q)}. The goal is to find the orthogonal rotation matrix \code{R} such that the source signal estimates \code{S = Y \%*\% R} are as independent as possible. Note that \code{W = crossprod(R, Q)}.
\bold{JADE}
The JADE approach finds the orthogonal rotation matrix \code{R} that (approximately) diagonalizes the cumulant array of the source signals. See Cardoso and Souloumiac (1993,1996) and Helwig and Hong (2013) for specifics of the JADE algorithm.
}
\seealso{
\code{\link{icafast}} for FastICA
\code{\link{icaimax}} for ICA via Infomax
}
\examples{
########## EXAMPLE 1 ##########
# generate noiseless data (p == r)
set.seed(123)
nobs <- 1000
Amat <- cbind(icasamp("a", "rnd", nobs), icasamp("b", "rnd", nobs))
Bmat <- matrix(2 * runif(4), nrow = 2, ncol = 2)
Xmat <- tcrossprod(Amat, Bmat)
# ICA via JADE with 2 components
imod <- icajade(Xmat, nc = 2)
acy(Bmat, imod$M)
cor(Amat, imod$S)
########## EXAMPLE 2 ##########
# generate noiseless data (p != r)
set.seed(123)
nobs <- 1000
Amat <- cbind(icasamp("a", "rnd", nobs), icasamp("b", "rnd", nobs))
Bmat <- matrix(2 * runif(200), nrow = 100, ncol = 2)
Xmat <- tcrossprod(Amat, Bmat)
# ICA via JADE with 2 components
imod <- icajade(Xmat, nc = 2)
cor(Amat, imod$S)
########## EXAMPLE 3 ##########
# generate noisy data (p != r)
set.seed(123)
nobs <- 1000
Amat <- cbind(icasamp("a", "rnd", nobs), icasamp("b", "rnd", nobs))
Bmat <- matrix(2 * runif(200), 100, 2)
Emat <- matrix(rnorm(10^5), nrow = 1000, ncol = 100)
Xmat <- tcrossprod(Amat,Bmat) + Emat
# ICA via JADE with 2 components
imod <- icajade(Xmat, nc = 2)
cor(Amat, imod$S)
}
| /man/icajade.Rd | no_license | cran/ica | R | false | false | 4,170 | rd | \name{icajade}
\alias{icajade}
\title{
ICA via JADE Algorithm
}
\description{
Computes ICA decomposition using Cardoso and Souloumiac's (1993, 1996) Joint Approximate Diagonalization of Eigenmatrices (JADE) approach.
}
\usage{
icajade(X, nc, center = TRUE, maxit = 100, tol = 1e-6, Rmat = diag(nc))
}
\arguments{
\item{X}{
Data matrix with \code{n} rows (samples) and \code{p} columns (variables).
}
\item{nc}{
Number of components to extract.
}
\item{center}{
If \code{TRUE}, columns of \code{X} are mean-centered before ICA decomposition.
}
\item{maxit}{
Maximum number of algorithm iterations to allow.
}
\item{tol}{
Convergence tolerance.
}
\item{Rmat}{
Initial estimate of the \code{nc}-by-\code{nc} orthogonal rotation matrix.
}
}
\value{
\item{S}{Matrix of source signal estimates (\code{S = Y \%*\% R}).}
\item{M}{Estimated mixing matrix.}
\item{W}{Estimated unmixing matrix (\code{W = crossprod(R, Q)}).}
\item{Y}{Whitened data matrix.}
\item{Q}{Whitening matrix.}
\item{R}{Orthogonal rotation matrix.}
\item{vafs}{Variance-accounted-for by each component.}
\item{iter}{Number of algorithm iterations.}
\item{converged}{Logical indicating if algorithm converged.}
}
\references{
Cardoso, J.F., & Souloumiac, A. (1993). Blind beamforming for non-Gaussian signals. \emph{IEE Proceedings-F, 140}(6), 362-370. \doi{10.1049/ip-f-2.1993.0054}
Cardoso, J.F., & Souloumiac, A. (1996). Jacobi angles for simultaneous diagonalization. \emph{SIAM Journal on Matrix Analysis and Applications, 17}(1), 161-164. \doi{10.1137/S0895479893259546}
Helwig, N.E. & Hong, S. (2013). A critique of Tensor Probabilistic Independent Component Analysis: Implications and recommendations for multi-subject fMRI data analysis. \emph{Journal of Neuroscience Methods, 213}(2), 263-273. \doi{10.1016/j.jneumeth.2012.12.009}
}
\author{
Nathaniel E. Helwig <helwig@umn.edu>
}
\details{
\bold{ICA Model}
The ICA model can be written as \code{X = tcrossprod(S, M) + E}, where \code{S} contains the source signals, \code{M} is the mixing matrix, and \code{E} contains the noise signals. Columns of \code{X} are assumed to have zero mean. The goal is to find the unmixing matrix \code{W} such that columns of \code{S = tcrossprod(X, W)} are independent as possible.
\bold{Whitening}
Without loss of generality, we can write \code{M = P \%*\% R} where \code{P} is a tall matrix and \code{R} is an orthogonal rotation matrix. Letting \code{Q} denote the pseudoinverse of \code{P}, we can whiten the data using \code{Y = tcrossprod(X, Q)}. The goal is to find the orthogonal rotation matrix \code{R} such that the source signal estimates \code{S = Y \%*\% R} are as independent as possible. Note that \code{W = crossprod(R, Q)}.
\bold{JADE}
The JADE approach finds the orthogonal rotation matrix \code{R} that (approximately) diagonalizes the cumulant array of the source signals. See Cardoso and Souloumiac (1993,1996) and Helwig and Hong (2013) for specifics of the JADE algorithm.
}
\seealso{
\code{\link{icafast}} for FastICA
\code{\link{icaimax}} for ICA via Infomax
}
\examples{
########## EXAMPLE 1 ##########
# generate noiseless data (p == r)
set.seed(123)
nobs <- 1000
Amat <- cbind(icasamp("a", "rnd", nobs), icasamp("b", "rnd", nobs))
Bmat <- matrix(2 * runif(4), nrow = 2, ncol = 2)
Xmat <- tcrossprod(Amat, Bmat)
# ICA via JADE with 2 components
imod <- icajade(Xmat, nc = 2)
acy(Bmat, imod$M)
cor(Amat, imod$S)
########## EXAMPLE 2 ##########
# generate noiseless data (p != r)
set.seed(123)
nobs <- 1000
Amat <- cbind(icasamp("a", "rnd", nobs), icasamp("b", "rnd", nobs))
Bmat <- matrix(2 * runif(200), nrow = 100, ncol = 2)
Xmat <- tcrossprod(Amat, Bmat)
# ICA via JADE with 2 components
imod <- icajade(Xmat, nc = 2)
cor(Amat, imod$S)
########## EXAMPLE 3 ##########
# generate noisy data (p != r)
set.seed(123)
nobs <- 1000
Amat <- cbind(icasamp("a", "rnd", nobs), icasamp("b", "rnd", nobs))
Bmat <- matrix(2 * runif(200), 100, 2)
Emat <- matrix(rnorm(10^5), nrow = 1000, ncol = 100)
Xmat <- tcrossprod(Amat,Bmat) + Emat
# ICA via JADE with 2 components
imod <- icajade(Xmat, nc = 2)
cor(Amat, imod$S)
}
|
# Wraps a matrix in an object that can cache its inverse.
# Returns a list of four closures: set/get replace and read the stored
# matrix, setsolve/getsolve store and retrieve the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  setsolve <- function(solve) {
    cached_inverse <<- solve
  }
  getsolve <- function() cached_inverse
  list(set = set, get = get, setsolve = setsolve, getsolve = getsolve)
}
# Computes the inverse of the special "matrix" object created by
# makeCacheMatrix. If the inverse was previously calculated (and the matrix
# has not changed), the cached inverse is returned instead of recomputing.
cacheSolve <- function(x, ...) {
  s <- x$getsolve()
  if (!is.null(s)) {
    message("getting cached data")
    return(s)
  }
  data <- x$get()
  # Bug fix: the original computed solve(data) %*% data, which is the
  # identity matrix, not the inverse. solve(data) alone is the inverse;
  # `...` is forwarded so callers can pass extra arguments to solve().
  s <- solve(data, ...)
  x$setsolve(s)
  s
}
| /cachematrix.R | no_license | Sindoth/ProgrammingAssignment2 | R | false | false | 868 | r | # Function that Caches a matrix argument.
# Builds a cache-aware matrix wrapper: a list of closures sharing the
# matrix `x` and its (initially empty) cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv_cache <- NULL
  list(
    set = function(y) {
      x <<- y
      inv_cache <<- NULL  # new matrix, so drop the stale cached inverse
    },
    get = function() x,
    setsolve = function(solve) inv_cache <<- solve,
    getsolve = function() inv_cache
  )
}
# Returns the inverse of the special "matrix" created by makeCacheMatrix.
# A cached inverse is reused when available; otherwise the inverse is
# computed with solve(), stored in the cache, and returned.
cacheSolve <- function(x, ...) {
  s <- x$getsolve()
  if (!is.null(s)) {
    message("getting cached data")
    return(s)
  }
  data <- x$get()
  # Fixed: the original `solve(data) %*% data` yields the identity matrix,
  # not the inverse. `...` is now forwarded to solve().
  s <- solve(data, ...)
  x$setsolve(s)
  s
}
|
## Collapse eXpress transcript level values to gene level
library(rGithubClient)
library(synapseClient)
library(data.table)
library(dplyr)
synapseLogin()
# Collapse an eXpress transcript-level matrix (stored in Synapse) to
# gene-level values by summing transcript rows per gene symbol.
# dataID: Synapse ID of the transcript-level matrix.
# mappingData: transcript-to-gene-symbol mapping table (left-joined on the
#   columns the two tables share).
# Returns one summed row per hg19.kgXref.geneSymbol.
collapse_eXpress <- function(dataID, mappingData) {
  express_file <- synGet(dataID)
  express_values <- fread(getFileLocation(express_file), data.table=FALSE)
  annotated <- left_join(express_values, mappingData)
  annotated <- dplyr::select(annotated, -1)  # drop the first (id) column
  per_gene <- group_by(annotated, hg19.kgXref.geneSymbol)
  summarise_each(per_gene, funs(sum))
}
## Get the UCSC id to gene symbol mapping file
# syn3444900 holds the UCSC knownGene-to-geneSymbol lookup table
mappingFile <- synGet("syn3444900")
mappingData <- tbl_df(fread(getFileLocation(mappingFile), data.table=FALSE))
# Rename the transcript id column so the join in collapse_eXpress can match
# it against the eXpress matrix (presumably keyed by target_id -- verify)
mappingData <- mappingData %>% rename(target_id=hg19.knownGene.name)
## Set up provenance
# Record the exact script version (GitHub permalink) for Synapse provenance
repo <- getRepo("Sage-Bionetworks/pcbc_c4_analysis")
thisFile <- getPermlink(repo, "code/R/eXpress_transcripts_to_gene.R")
## Set up annotations
annots <- list(dataType="mRNA", fileType="genomicMatrix", expressionLevel="gene")
# syn5006129: transcript-level effective counts matrix to be collapsed
effCountID <- "syn5006129"
effCountData <- collapse_eXpress(effCountID, mappingData)
write.csv(effCountData, "eXpress_eff_count_geneSymbol.csv", row.names=FALSE)
# Upload the gene-level matrix back to Synapse (parent folder syn5008578)
# with annotations and provenance attached
f <- File("eXpress_eff_count_geneSymbol.csv", parentId="syn5008578")
synSetAnnotations(f) <- annots
generatedBy(f) <- Activity(used=c(effCountID, mappingFile),
executed=thisFile,
name="Sum", description="Sum over gene symbol")
f <- synStore(f)
| /code/R/eXpress_transcripts_to_gene.R | permissive | kdaily/pcbc_c4_analysis | R | false | false | 1,405 | r | ## Collapse eXpress transcript level values to gene level
library(rGithubClient)
library(synapseClient)
library(data.table)
library(dplyr)
synapseLogin()
# Collapse transcript-level eXpress values (fetched from Synapse by ID) to
# gene level: annotate each transcript with its gene symbol, drop the id
# column, and sum all remaining columns within each gene symbol.
collapse_eXpress <- function(dataID, mappingData) {
  raw <- fread(getFileLocation(synGet(dataID)), data.table=FALSE)
  joined <- raw %>%
    left_join(mappingData) %>%
    dplyr::select(-1)
  joined %>%
    group_by(hg19.kgXref.geneSymbol) %>%
    summarise_each(funs(sum))
}
## Get the UCSC id to gene symbol mapping file
# syn3444900 holds the UCSC knownGene-to-geneSymbol lookup table
mappingFile <- synGet("syn3444900")
mappingData <- tbl_df(fread(getFileLocation(mappingFile), data.table=FALSE))
# Rename the transcript id column so the join in collapse_eXpress can match
# it against the eXpress matrix (presumably keyed by target_id -- verify)
mappingData <- mappingData %>% rename(target_id=hg19.knownGene.name)
## Set up provenance
# Record the exact script version (GitHub permalink) for Synapse provenance
repo <- getRepo("Sage-Bionetworks/pcbc_c4_analysis")
thisFile <- getPermlink(repo, "code/R/eXpress_transcripts_to_gene.R")
## Set up annotations
annots <- list(dataType="mRNA", fileType="genomicMatrix", expressionLevel="gene")
# syn5006129: transcript-level effective counts matrix to be collapsed
effCountID <- "syn5006129"
effCountData <- collapse_eXpress(effCountID, mappingData)
write.csv(effCountData, "eXpress_eff_count_geneSymbol.csv", row.names=FALSE)
# Upload the gene-level matrix back to Synapse (parent folder syn5008578)
# with annotations and provenance attached
f <- File("eXpress_eff_count_geneSymbol.csv", parentId="syn5008578")
synSetAnnotations(f) <- annots
generatedBy(f) <- Activity(used=c(effCountID, mappingFile),
executed=thisFile,
name="Sum", description="Sum over gene symbol")
f <- synStore(f)
|
# Run experimental data with fecundity model
# Fits a pairwise TRHI (Trifolium hirtum) fecundity model with rstan, once
# per competitor background, for the dry fall treatment.
setwd('./UO Hallett/Projects/usda-climvar')
# source data
source("./Competition/Data-analysis/coexistence-model_formatting.R")
library(rstan)
options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)
library(here)
setwd(here("Competition", "Model-fit"))
# Drop plots flagged as disturbed before fitting
disturbed <- which(seedsin.seedsout$disturbed == 1)
data <- seedsin.seedsout[-disturbed,]
# Identical starting values for all four MCMC chains
initials <- list(lambda=250, alpha_trhi=1, alpha_inter=1)
initials1<- list(initials, initials, initials, initials)
# dat: dry fall treatment (fitted below); dat2: wet treatment (not used
# in this script -- NOTE(review): confirm whether it belongs elsewhere)
dat <- data[data$species == "TRHI" & data$falltreatment == "dry",]
dat2 <- data[data$species == "TRHI" & data$falltreatment == "wet",]
# Each block below: subset to TRHI + one competitor background, drop rows
# with missing fecundity, build the Stan data objects, and fit.
# stan() is given the *names* of the data objects; their values are looked
# up in the calling environment.
# AVFA
dat_avfa <- dat[dat$background == "Trifolium hirtum" | dat$background == "Avena fatua",]
dat_avfa <- dat_avfa[!is.na(dat_avfa$seedsOut),]
Fecundity <- as.integer(round(dat_avfa$seedsOut))
trhi <- as.integer(dat_avfa$TRHI_seedsIn)
inter <- as.integer(dat_avfa$AVFA_seedsIn)
N <- as.integer(length(Fecundity))
trhi_avfa <- stan(file = "TRHI_pairwise.stan", data = c("N", "Fecundity", "trhi", "inter"),
iter = 40000, chains = 4, thin = 3, control = list(adapt_delta = 0.99, max_treedepth = 20),
init = initials1)
# BRHO
# NOTE(review): iter varies per species (500000 here, 1000000 for VUMY) --
# presumably to reach convergence for slower-mixing pairings; confirm.
dat_brho <- dat[dat$background == "Trifolium hirtum" | dat$background == "Bromus hordeaceus",]
dat_brho <- dat_brho[!is.na(dat_brho$seedsOut),]
Fecundity <- as.integer(round(dat_brho$seedsOut))
trhi <- as.integer(dat_brho$TRHI_seedsIn)
inter <- as.integer(dat_brho$BRHO_seedsIn)
N <- as.integer(length(Fecundity))
trhi_brho <- stan(file = "TRHI_pairwise.stan", data = c("N", "Fecundity", "trhi", "inter"),
iter = 500000, chains = 4, thin = 3, control = list(adapt_delta = 0.99, max_treedepth = 20),
init = initials1)
# VUMY
dat_vumy <- dat[dat$background == "Trifolium hirtum" | dat$background == "Vulpia myuros",]
dat_vumy <- dat_vumy[!is.na(dat_vumy$seedsOut),]
Fecundity <- as.integer(round(dat_vumy$seedsOut))
trhi <- as.integer(dat_vumy$TRHI_seedsIn)
inter <- as.integer(dat_vumy$VUMY_seedsIn)
N <- as.integer(length(Fecundity))
trhi_vumy <- stan(file = "TRHI_pairwise.stan", data = c("N", "Fecundity", "trhi", "inter"),
iter = 1000000, chains = 4, thin = 3, control = list(adapt_delta = 0.99, max_treedepth = 20),
init = initials1)
# LACA
dat_laca <- dat[dat$background == "Trifolium hirtum" | dat$background == "Lasthenia californica",]
dat_laca <- dat_laca[!is.na(dat_laca$seedsOut),]
Fecundity <- as.integer(round(dat_laca$seedsOut))
trhi <- as.integer(dat_laca$TRHI_seedsIn)
inter <- as.integer(dat_laca$LACA_seedsIn)
N <- as.integer(length(Fecundity))
trhi_laca <- stan(file = "TRHI_pairwise.stan", data = c("N", "Fecundity", "trhi", "inter"),
iter = 40000, chains = 4, thin = 3, control = list(adapt_delta = 0.99, max_treedepth = 20),
init = initials1)
# ESCA
# Data prep for the ESCA fit; the stan() call itself follows this span.
dat_esca <- dat[dat$background == "Trifolium hirtum" | dat$background == "Eschscholzia californica",]
dat_esca <- dat_esca[!is.na(dat_esca$seedsOut),]
Fecundity <- as.integer(round(dat_esca$seedsOut))
trhi <- as.integer(dat_esca$TRHI_seedsIn)
inter <- as.integer(dat_esca$ESCA_seedsIn)
N <- as.integer(length(Fecundity))
trhi_esca <- stan(file = "TRHI_pairwise.stan", data = c("N", "Fecundity", "trhi", "inter"),
iter = 40000, chains = 4, thin = 3, control = list(adapt_delta = 0.99, max_treedepth = 20),
init = initials1) | /Competition/Model-fit/TRHI_pairwise_fits.R | no_license | HallettLab/usda-climvar | R | false | false | 3,517 | r | # Run experimental data with fecundity model
setwd('./UO Hallett/Projects/usda-climvar')  # NOTE(review): machine-specific relative path -- confirm
# source data
source("./Competition/Data-analysis/coexistence-model_formatting.R")
library(rstan)
options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)
library(here)
setwd(here("Competition", "Model-fit"))
# Drop plots flagged as disturbed, then split by fall-rain treatment
disturbed <- which(seedsin.seedsout$disturbed == 1)
data <- seedsin.seedsout[-disturbed, ]
# Identical starting values for all four MCMC chains
initials <- list(lambda = 250, alpha_trhi = 1, alpha_inter = 1)
initials1 <- list(initials, initials, initials, initials)
dat <- data[data$species == "TRHI" & data$falltreatment == "dry", ]
dat2 <- data[data$species == "TRHI" & data$falltreatment == "wet", ]  # wet treatment, unused below

# Fit the pairwise TRHI fecundity model against one competitor background.
# The original script repeated this subset/prepare/fit block five times;
# factoring it out keeps the settings identical across species.
# focal_dat: treatment-specific seed data; competitor: background species
# name; inter_col: column holding the competitor's seeds-in counts;
# iter: MCMC iterations (species-specific; larger where mixing is slower).
fit_trhi_pairwise <- function(focal_dat, competitor, inter_col, iter) {
  sub <- focal_dat[focal_dat$background == "Trifolium hirtum" |
                     focal_dat$background == competitor, ]
  sub <- sub[!is.na(sub$seedsOut), ]
  Fecundity <- as.integer(round(sub$seedsOut))
  trhi <- as.integer(sub$TRHI_seedsIn)
  inter <- as.integer(sub[[inter_col]])
  N <- as.integer(length(Fecundity))
  # stan() takes the *names* of the data objects and resolves them in the
  # calling environment, so the locals above are what Stan receives.
  stan(file = "TRHI_pairwise.stan", data = c("N", "Fecundity", "trhi", "inter"),
       iter = iter, chains = 4, thin = 3,
       control = list(adapt_delta = 0.99, max_treedepth = 20),
       init = initials1)
}

trhi_avfa <- fit_trhi_pairwise(dat, "Avena fatua", "AVFA_seedsIn", 40000)
trhi_brho <- fit_trhi_pairwise(dat, "Bromus hordeaceus", "BRHO_seedsIn", 500000)
trhi_vumy <- fit_trhi_pairwise(dat, "Vulpia myuros", "VUMY_seedsIn", 1000000)
trhi_laca <- fit_trhi_pairwise(dat, "Lasthenia californica", "LACA_seedsIn", 40000)
trhi_esca <- fit_trhi_pairwise(dat, "Eschscholzia californica", "ESCA_seedsIn", 40000)
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Info --------------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# This script uses the output of Summarise_MSEA_results.R to generate graphic representations of
# the Crossomics results
# R version: 3.6.0 (2019-04-26)
# platform: x86_64-apple-darwin15.6.0 (64-bit)
# OS: macOS Mojave 10.14.6
#
# libraries:
# rstudioapi 0.10
# data.table 1.12.2
# ggplot2 3.1.1
# RColorBrewer 1.1-2
# scales 1.0.0
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Libraries ---------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
library(rstudioapi)
library(data.table)
library(RColorBrewer) # Colour schemes
library(ggplot2)
library(scales)
# library(ggforce) # zoom in specific parts of plot
library(grid) # manual grob adjustment
library(gtable) # manual grob adjustment
library(gridExtra)
library(stringr)
library(svglite)
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Adjustable settings -----------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
##### Create general plot aesthetics --------------------------------------
Thresh_labs <- c("Min.-1; Max.1.5","Min.-1.5; Max.2","Min.-3; Max.3","Min.-5; Max.5")
names(Thresh_labs) <- c("-1, 1.5", "-1.5, 2", "-3, 3", "-5, 5")
Rxn_labs <- c("<= 8","<= 10","<= 12","<= 15","<= 17","<= 19")
names(Rxn_labs) <- c(8, 10, 12, 15, 17, 19)
# Colour scheme
my_greens <- rev(brewer.pal(5, "Greens"))[c(2:5)]
my_blues <- rev(brewer.pal(5, "Blues"))[c(1:5)]
my_greens <- my_blues
my_reds <- rev(brewer.pal(5, "Reds"))[c(2:5)]
my_sig_palette <- rev(brewer.pal(6, "RdYlGn"))[2:6]
my_sig_palette <- colorRamps::green2red(5)
# Image resolution
high_res <- 600
low_res <- 100
resolution <- low_res
digit_significance <- 3
##### Other ---------------------------------------------------------------
# Exclude patients / diseases
subset_patients <- FALSE
# trainingset <- "combined" # possible: TRUE, FALSE, NULL (for all data), "combined" (for all, but separated)
# Date of data
date <- "2019-12-10"
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Pre-processing ----------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
##### Read data -----------------------------------------------------------
code_dir <- paste0(dirname(rstudioapi::getActiveDocumentContext()$path),"/../Results/")
DT <- data.table::as.data.table(readRDS(paste0(code_dir,date,"/MSEA_DT_compiled_all.RDS")))
# DT_per_parameter_val <- data.table::as.data.table(readRDS(paste0(code_dir,date,"/MSEA_DT_validation_per_parameter.RDS")))
# DT_per_patient_val <- data.table::as.data.table(readRDS(paste0(code_dir,date,"/MSEA_DT_validation_per_patient.RDS")))
# DT_per_parameter_tra <- data.table::as.data.table(readRDS(paste0(code_dir,date,"/MSEA_DT_training_per_parameter.RDS")))
# DT_per_patient_tra <- data.table::as.data.table(readRDS(paste0(code_dir,date,"/MSEA_DT_training_per_patient.RDS")))
DT_per_parameter <- readRDS(paste0(code_dir,date,"/MSEA_DT_per_parameter.RDS"))
DT_per_parameter_tra_val_sum <- readRDS(paste0(code_dir,date,"/MSEA_DT_per_parameter_tra_val_summary.RDS"))
# DT_validation_per_parameter <- readRDS(paste0(code_dir,date,"/MSEA_DT_by_validation_per_parameter.RDS"))
# test <- readRDS(paste0(code_dir,date,"/Metabolite_set_sizes.RDS"))
# DT_met_sets <- readRDS(paste0(code_dir,date,"/Metabolite_set_sizes.RDS"))
# DT_per_parameter_tra[, rank_10 := frank(-Prior.frac10)]
# #####
# # See how many disease genes have a metabolite set
# genes_w_mets <- list.files(paste0(code_dir, "../Data/2019-08-12/maxrxn19/mss_5_HMDBtranslated"))
# genes_w_mets <- str_remove(genes_w_mets, ".RDS")
#
# sum(!unique(DT[Include == TRUE, Gene]) %in% genes_w_mets)
# if(trainingset == "combined"){
# train_val <- "trainingset"
# DT1 <- data.table::as.data.table(readRDS(paste0(code_dir,date,"/MSEA_DT_compiled_",train_val,".RDS")))
# DT1[,Set := "training"]
# train_val <- "validationset"
# DT2 <- data.table::as.data.table(readRDS(paste0(code_dir,date,"/MSEA_DT_compiled_",train_val,".RDS")))
# DT2[,Set := "validation"]
# DT <- rbind(DT1, DT2)
# rm(DT1, DT2)
# } else {
# if(is.null(trainingset)) {
# train_val <- "all"
# DT <- data.table::as.data.table(readRDS(paste0(code_dir,date,"/MSEA_DT_compiled_",train_val,".RDS")))
# DT[,Set := "all"]
# } else {
# train_val <- ifelse(trainingset, "trainingset", "validationset")
# DT <- data.table::as.data.table(readRDS(paste0(code_dir,date,"/MSEA_DT_compiled_",train_val,".RDS")))
# ifelse(trainingset, DT[,Set := "training"], DT[,Set := "validation"])
# }
# }
# DT <- data.table::as.data.table(readRDS(paste0(code_dir,date,"/MSEA_DT_compiled_",train_val,".RDS")))
##### Determine parameters ------------------------------------------------
# temp$size_f = factor(temp$size, levels=c('50%','100%','150%','200%'))
# Z_thresholds <- levels(DT$Z_threshold)
# max_rxns <- levels(DT$Max_rxn)
# steps <- levels(DT$Step)
# seeds <- unique(DT$Seed)
##### Exclude predetermined patients and determine output names -----------
if(subset_patients){
patients_excluded <- c("P56","P57","P58","P59","P68")
DT <- DT[!Patient %in% patients_excluded]
sub_name <- "Sub_P"
} else {
sub_name <- "All_P"
}
outdir_name <- paste0(dirname(rstudioapi::getActiveDocumentContext()$path),"/../Plots/",date)
if (!file.exists(outdir_name))dir.create(outdir_name, recursive = TRUE)
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Functions ---------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
##### Manual standard grob addition ---------------------------------------
# Decorate a faceted ggplot with two grey "super-strip" labels: one across
# the top ("Biochemical stringency", spanning the Z_threshold facet columns)
# and one down the right ("Extension stringency", spanning the Max_rxn facet
# rows). Relies on the globals Thresh_labs and Rxn_labs to size the strips.
# p: the faceted ggplot; theme: "light" or "dark" flips strip fill and text
#   colour; secondary_y_axis: shifts the right strip one gtable column left
#   when FALSE (logical is coerced to 0/1 in the arithmetic below).
# Returns a gtable, not a ggplot -- pass it to ggsave(plot = ...) or
# grid::grid.draw().
pretty_plot <- function(p, theme = "light", secondary_y_axis = TRUE){
z <- ggplotGrob(p)
# Gtable grid positions derived from the number of facet levels; the
# 2*length(...) terms presumably account for one panel plus one spacing
# column/row per facet level -- verify if the facet layout changes.
right_lab_loc <- 5 + 2*length(Thresh_labs) - secondary_y_axis
right_lab_bottom <- 6 + 2*length(Rxn_labs)
top_lab_width <- 5 + 2*length(Thresh_labs) - 2
# label right strip
z <- gtable_add_cols(z, unit(1, 'cm'), right_lab_loc)
# Each strip is a grey rectangle plus rotated text; paste(runif(2)) gives
# the grob a unique name so repeated calls do not collide.
z <- gtable_add_grob(z,
list(rectGrob(gp = gpar(col = NA, fill = ifelse(theme=="dark", "lightgray", gray(0.5)))),
textGrob("Extension stringency", rot = -90, gp = gpar(col = ifelse(theme=="dark", "black", "white")))),
# 8, 15, 18, 15, name = paste(runif(2)))
8, right_lab_loc+1, right_lab_bottom, right_lab_loc+1, name = paste(runif(2)))
# label top strip
z <- gtable_add_rows(z, unit(1, 'cm'), 6)
z <- gtable_add_grob(z,
list(rectGrob(gp = gpar(col = NA, fill = ifelse(theme=="dark", "lightgray", gray(0.5)))),
textGrob("Biochemical stringency", gp = gpar(col = ifelse(theme=="dark", "black", "white")))),
# 4, 5, 4, 13, name = paste(runif(2)))
7, 5, 7, top_lab_width, name = paste(runif(2)))
# margins
# z <- gtable_add_cols(z, unit(1/8, "line"), 7)
# z <- gtable_add_rows(z, unit(1/8, "line"), 3)
return(z)
}
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Simple plots, meant to use as explaning for the bigger, later plots -----
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Empty facet plot
p <- ggplot(DT_per_parameter, aes(x = Step)) +
facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs)) +
theme_dark() +
ylab("Disease genes / Total dis. genes") +
xlab("Max distance to primary reaction") +
ggtitle("Correct disease gene prioritisation") +
scale_color_manual(name = "Prioritised Genes",
labels = c("In Top 15", "Missed"),
values=c(my_greens[1],'RED'))
z <- pretty_plot(p, theme = "dark")
ggsave(paste0(outdir_name,"/Empty_Facet_",sub_name,".png"), plot = z,
width = 250, height = 200, dpi=resolution, units = "mm")
# Simple plot, just top 10, all parameter combinations
p <- ggplot(DT_per_parameter, aes(x = Step)) +
geom_line(aes(y = Prior.frac10, colour = "line10", group = 1)) +
geom_point(aes(y = Prior.frac10, colour = "line10"), size=0.5) +
geom_line(aes(y = Missed.frac, colour = "Missed", group = 1)) +
geom_point(aes(y = Missed.frac, colour = "Missed"), size=0.5) +
facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs)) +
theme_dark() +
ylab("Disease genes / Total dis. genes") +
xlab("Max distance to primary reaction") +
ggtitle("Correct disease gene prioritisation") +
scale_color_manual(name = "Prioritised Genes",
labels = c("In Top 10", "Missed"),
values=c(my_greens[4],'RED'))
z <- pretty_plot(p, theme = "dark")
ggsave(paste0(outdir_name,"/",train_val,"_Ranks_And_Missed_Top15_",sub_name,".png"), plot = z,
width = 300, height = 200, dpi=resolution, units = "mm")
# Single panel from big facet plot (top 2, 5, 10 and 15)
# Single panel taken out of the big facet plot: fraction of disease genes
# correctly prioritised in the top 2/5/10/15 plus the fraction missed, for
# the fixed parameter combination Z_threshold = "-3, 3" and Max_rxn = 15.
# FIX: the Prior.frac15 line was previously added twice (once with the
# default linewidth, once with size = 1.3); the redundant thin layer that
# was drawn underneath the thick one has been removed.
p <- ggplot(DT_per_parameter[Z_threshold == "-3, 3" & Max_rxn == 15], aes(x = Step)) +
  # Ribbons showing the expected fraction under random prioritisation
  # (kept for reference, currently disabled):
  # geom_ribbon(aes(x= as.numeric(Step), ymin=1*15/Max_Tot.Genes, ymax=1*15/Min_Tot.Genes, fill = "band15")) +
  # geom_ribbon(aes(x= as.numeric(Step), ymin=1*10/Max_Tot.Genes, ymax=1*10/Min_Tot.Genes, fill = "band10")) +
  # geom_ribbon(aes(x= as.numeric(Step), ymin=1*5/Max_Tot.Genes, ymax=1*5/Min_Tot.Genes, fill = "band05")) +
  # geom_ribbon(aes(x= as.numeric(Step), ymin=1*2/Max_Tot.Genes, ymax=1*2/Min_Tot.Genes, fill = "band02")) +
  geom_line(aes(y = Prior.frac15, colour = "line15", group = 1), size = 1.3) +
  geom_point(aes(y = Prior.frac15, colour = "line15"), size = 1) +
  geom_line(aes(y = Prior.frac10, colour = "line10", group = 1), size = 1.3) +
  geom_point(aes(y = Prior.frac10, colour = "line10"), size = 1) +
  geom_line(aes(y = Prior.frac05, colour = "line05", group = 1), size = 1.3) +
  geom_point(aes(y = Prior.frac05, colour = "line05"), size = 1) +
  geom_line(aes(y = Prior.frac02, colour = "line02", group = 1), size = 1.3) +
  geom_point(aes(y = Prior.frac02, colour = "line02"), size = 1) +
  geom_line(aes(y = Missed.frac, colour = "missed", group = 1), size = 1.3) +
  geom_point(aes(y = Missed.frac, colour = "missed"), size = 1)
# Styling. The colour keys "line02" < "line05" < "line10" < "line15" <
# "missed" sort alphabetically, which is the order ggplot2 uses for a
# discrete scale, so the labels/values below line up with c(my_greens, 'RED').
p <- p + theme_dark() +
  ylab("Disease genes / Total dis. genes") +
  xlab("Max distance to primary reaction") +
  ylim(c(0, 1)) +
  ggtitle("Correct disease gene prioritisation") +
  scale_color_manual(name = "Prioritised Genes",
                     labels = c("In Top 2", "In Top 5", "In Top 10", "In Top 15", "Missed"),
                     values = c(my_greens, 'RED')) +
  # The fill scale only takes effect when the random-prioritisation ribbons
  # above are re-enabled; it is inert otherwise.
  scale_fill_manual(name = "Prioritised if random Gene",
                    labels = c("In Top 2", "In Top 5", "In Top 10", "In Top 15", "Missed"),
                    values = c(my_greens)) +
  guides(colour = guide_legend(order = 1, reverse = TRUE),
         fill = guide_legend(order = 2, reverse = TRUE))
# colour = adjustcolor(my_greens[4],alpha.f=0.5), fill = my_greens[4], alpha="0.5"
ggsave(paste0(outdir_name, "/", train_val, "_Ranks_And_Missed_Single_Par_Comb_", sub_name, ".png"), plot = p,
       width = 300, height = 200, dpi = resolution, units = "mm")
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Combination plot of correctly prioritised and missed genes --------------
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#####
# Disease gene prioritization per parameter, separated by training/validation runs/patients
# Disease gene prioritisation per parameter combination, separated into
# training and validation runs/patients. Lines + error bars show the mean
# +/- sd of the fraction of disease genes prioritised in the top 10; the
# "Missed" line shows the overall fraction of missed genes per parameter
# combination.
p <- ggplot(DT_per_parameter_tra_val_sum, aes(x = Step)) +
  geom_line(data = DT_per_parameter_tra_val_sum[Validation == TRUE, ], aes(y = Av.prior.frac10, colour = "Validation", group = "Validation")) +
  geom_errorbar(data = DT_per_parameter_tra_val_sum[Validation == TRUE, ],
                aes(ymax = Av.prior.frac10 + Sd.prior.frac10,
                    ymin = Av.prior.frac10 - Sd.prior.frac10,
                    colour = "Validation"),
                width = 0.4) +
  geom_line(data = DT_per_parameter_tra_val_sum[Validation == FALSE, ], aes(y = Av.prior.frac10, colour = "Training", group = "Training")) +
  geom_errorbar(data = DT_per_parameter_tra_val_sum[Validation == FALSE, ],
                aes(ymax = Av.prior.frac10 + Sd.prior.frac10,
                    ymin = Av.prior.frac10 - Sd.prior.frac10,
                    colour = "Training"),
                width = 0.4) +
  geom_line(data = DT_per_parameter, aes(y = Missed.frac, colour = "Missed", group = 1)) +
  geom_point(data = DT_per_parameter, aes(y = Missed.frac, colour = "Missed"), size = 0.5) +
  facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs)) +
  theme_light() +
  ylab("Disease genes / Total dis. genes") +
  xlab("Max distance to primary reaction") +
  ggtitle("Correct disease gene prioritisation") +
  ylim(0, 1)
# Annotate the 5 best-scoring parameter combinations on the training set,
# ranked by best_order_top10 (1 = best).
# FIX: the original filter combined the full-length condition
# `Validation == FALSE` with a logical vector computed on the
# Validation == FALSE subset (`as.vector(DT[Validation == FALSE, "col"] == i)`),
# which relied on silent vector recycling inside data.table's `i`. Filtering
# on the column directly expresses the intended row selection.
for (i in seq_len(5)) {
  best_i <- DT_per_parameter_tra_val_sum[Validation == FALSE & best_order_top10 == i, ]
  p <- p + geom_point(data = best_i,
                      aes(x = Step, y = Av.prior.frac10, shape = "Best"),
                      shape = "*",
                      size = 8)
  p <- p + geom_text(data = best_i,
                     aes(x = Step, y = Av.prior.frac10, label = signif(Av.prior.frac10, digits = digit_significance), group = best_order_top10),
                     size = 3,
                     colour = my_sig_palette[i],
                     position = position_dodge(width = 2),
                     vjust = -0.8)
}
z <- pretty_plot(p, theme = "light")
ggsave(paste0(outdir_name, "/CorrectPrior_top10_tra_val_", sub_name, ".svg"), plot = z,
       width = 300, height = 200, units = "mm")
#####
# Disease gene prioritization per parameter, all runs/patients averaged to one value
# Disease gene prioritisation per parameter combination, with all
# runs/patients averaged into a single value: top-10 fraction (green) vs.
# missed fraction (red), faceted by Max_rxn and Z_threshold.
p <- ggplot(DT_per_parameter, aes(x = Step)) +
  geom_line(aes(y = Missed.frac, colour = "Missed", group = 1)) +
  geom_point(aes(y = Missed.frac, colour = "Missed"), size = 0.5) +
  geom_line(aes(y = Prior.frac10, colour = "line10", group = 1)) +
  geom_point(aes(y = Prior.frac10, colour = "line10"), size = 0.5) +
  facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs)) +
  theme_light() +
  ylab("Disease genes / Total dis. genes") +
  xlab("Max distance to primary reaction") +
  ggtitle("Correct disease gene prioritisation") +
  scale_color_manual(name = "Prioritised Genes",
                     labels = c("In Top 10", "Missed"),
                     values = c(my_greens[1], 'RED')) +
  ylim(0, 1)
# Annotate the 5 best-scoring parameter combinations (best_order_top10,
# 1 = best). Native data.table filtering replaces the original
# `as.vector(DT_per_parameter[, "best_order_top10"] == i)` construction,
# which built a logical index through a one-column subset.
for (i in seq_len(5)) {
  best_i <- DT_per_parameter[best_order_top10 == i, ]
  p <- p + geom_point(data = best_i,
                      aes(x = Step, y = Prior.frac10, shape = "Best"),
                      shape = "*",
                      size = 8)
  p <- p + geom_text(data = best_i,
                     aes(x = Step, y = Prior.frac10, label = signif(Prior.frac10, digits = digit_significance), group = best_order_top10),
                     size = 3,
                     colour = my_sig_palette[i],
                     position = position_dodge(width = 2),
                     vjust = -0.8)
}
z <- pretty_plot(p, theme = "light")
ggsave(paste0(outdir_name, "/CorrectPrior_top10_all_", sub_name, ".svg"), plot = z,
       width = 300, height = 200, units = "mm")
# p <- ggplot(DT_per_parameter, aes(x = Step)) +
# geom_line(aes(y = Prior.frac05, colour = "line05", group = 1)) +
# geom_point(aes(y = Prior.frac05, colour = "line05"), size=0.5) +
# geom_line(aes(y = Prior.frac10, colour = "line10", group = 1)) +
# geom_point(aes(y = Prior.frac10, colour = "line10"), size=0.5) +
# geom_line(aes(y = Missed.frac, group = 1, linetype="Training"), colour = my_reds[1]) +
# geom_point(aes(y = Missed.frac), size=0.5, colour = my_reds[1])
# # geom_hline(data = DT_per_parameter[Set == "training"], aes(yintercept = max(Prior.frac05), size = "Training", colour="line05t"), alpha = 0.8, linetype = "dashed") +
# # geom_hline(data = DT_per_parameter[Set == "validation"], aes(yintercept = max(Prior.frac05), size = "Validation", colour="line05v"), alpha = 0.8, linetype = "dashed")
# p <- p + facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs))
# p <- p + theme_light() +
# ylab("Ratio [rank disease gene] / [total genes]") +
# # ylab(expression(paste("Ratio of: ", frac(`disease genes`,`total disease genes`)))) +
# xlab("Maximum distance to primary reaction") +
# ylim(0, 1) +
# # ggtitle("Performance of disease gene prioritization") +
# scale_color_manual(name = NULL,
# labels = c("Correctly prioritized genes (top 5)","Correctly prioritized genes (top 10)"),
# values= c(my_greens[3], my_greens[2]),
# guide = guide_legend(override.aes = list(linetype = "solid",
# shape = 16),
# order = 1)
# ) +
# scale_linetype_manual(name = NULL,
# labels = "Missed genes",
# values = "solid",
# guide = guide_legend(override.aes = list(colour = my_reds[3]),
# order = 2)
# )
# pp <- pretty_plot(p, theme = "light")
# ggsave(paste0(outdir_name,"/",train_val,"_Ranks_And_Missed_",sub_name,".png"), plot = pp,
# width = 300, height = 200, dpi=resolution, units = "mm")
# Some example plot for making multiple legends out of 1 aesthetic
# ggplot(data=dfr, mapping=aes(x=id, y=value)) +
# geom_line(mapping=aes(colour=group), show_guide=TRUE) +
# geom_hline(
# mapping=aes(yintercept=c(-1,1)*qnorm(0.95), fill="95% CI"),
# color="orange"
# ) +
# geom_hline(
# mapping=aes(yintercept=c(-1,1)*qnorm(0.99), fill="99% CI"),
# color="darkred"
# ) +
# scale_color_hue("Group", guide=guide_legend(order=1)) +
# scale_fill_manual("CI horizontal line", values=rep(1,4),
# guide=guide_legend(
# override.aes = list(colour=c("orange", "darkred")),
# order=2
# ),
# labels=c("CI of 95%", "CI of 99%")
# )
# ##### Average rank non-missed genes + total missed genes ------------------
# p <- ggplot(DT_per_parameter, aes(label=Av_non_missed)) +
# geom_line(aes(x = Step, y = Av_non_missed, colour = "Average", group = 1), size = 1.3) +
# geom_point(aes(x = Step, y = Av_non_missed, colour = "Average"), size=0.5) +
# # geom_line(aes(x = Step, y = Missed.frac*15, colour = "Frac.Missed", group = 1), size = 1.3) +
# # geom_point(aes(x = Step, y = Missed.frac*15, colour = "Frac.Missed"), size=0.5) +
# # scale_y_continuous(sec.axis = sec_axis(~./15, name = "Frac. genes Missed", breaks = c(0, 0.5, 1))) +
# theme_dark() +
# scale_color_manual(name = "",
# labels = c("Av. rank of non-missed genes", "Frac. Missed"),
# values=c('Black', my_greens[3]))
# p <- p + facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs))
# # geom_point(data = DT_per_parameter[best_av50==TRUE, ],aes(x=Step, y=Av_top50),shape = "*", size=8, show.legend = FALSE, colour = "black")+
# p <- p + geom_point(data = DT_per_parameter[as.vector(DT_per_parameter[,"best_order_NM"]==1),],aes(x=Step, y=Av_non_missed, shape = "Best"), size=8, ) +
# scale_shape_manual(name = "",
# labels = "Best param.\nranks (missed \nfrac. <0.5)",
# values = 42)
# # Annotate the 5 best scoring parameter combinations (determined ~l.280)
# for(i in c(1:5)){
# p <- p + geom_point(data = DT_per_parameter[as.vector(DT_per_parameter[,"best_order_NM"]==i),],aes(x=Step, y=Av_non_missed, shape = "Best"), shape = "*", size=8, colour = my_sig_palette[i])
# p <- p + geom_text(data = DT_per_parameter[as.vector(DT_per_parameter[,"best_order_NM"]==i),],
# aes(x=Step, y=Av_non_missed, label = signif(Av_non_missed, digits = digit_significance), group = best_order_NM),
# size=3,
# # show.legend = FALSE,
# colour = my_sig_palette[i],
# position = position_dodge(width = 2),
# vjust = -0.5)
# }
# y_val <- ggplot_build(p)$layout$panel_scales_y[[1]]$range$range[2]
# p <- p +
# ylab("Average non-missed disease gene rank") +
# xlab("Max. distance to primary reaction") +
# ggtitle("Av. gene rank for non-missed genes") +
# # geom_hline(yintercept=y_val/2, linetype="dashed", color = "salmon") +
# guides(shape = guide_legend(order = 1),
# colour = guide_legend(order = 2))
# pp <- pretty_plot(p, theme = "dark")
# ggsave(paste0(outdir_name,"/Best_Ranks_With_>0.5_Non_Missed_",sub_name,".png"), plot = pp,
# width = 300, height = 200, dpi=resolution, units = "mm")
##### all ranks, average rank and average standard deviation --------------
# DT_no_missed <- tmpDT[P.value < 1,]
# DT_missed <- tmpDT[P.value == 1,]
# p <- ggplot(DT_no_missed, aes(x = Step)) +
# # geom_point(aes(y = Position), position=position_dodge(width = 5.5)) +
# geom_jitter(aes(y = Position, colour = "Per-patient rank"), size = 0.005) +
# geom_jitter(data =DT_missed, aes(x = Step, y = Position, fill = "Missed genes"), colour = "blue", size = 0.005, alpha = 0.2) +
# geom_line(data = DT_per_parameter, aes(x = Step, y = Av_non_missed, group = 1, colour = "Average rank")) +
# geom_errorbar(data = DT_per_parameter, aes(x = Step,
# ymax = Av_non_missed + Av_Sd_excl_miss,
# ymin = Av_non_missed - Av_Sd_excl_miss,
# colour = "Average rank"),
# # colour = "cornflowerblue",
# width = 0.5) +
# ylim(c(0,40)) +
# geom_hline(aes(yintercept = 5), colour = "darksalmon", linetype="dashed") +
# facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs)) +
# theme_dark() +
# ylab("Disease gene rank") +
# xlab("Max distance to primary reaction") +
# ggtitle("Stability of method") +
# geom_point(data = DT_per_parameter[DT_per_parameter$best_order_top05 ==1, ], aes(x=Step, y=35, shape = "Best"), size=8) +
# scale_shape_manual(name = "",
# labels = "Ratio dis.genes\nin top 5",
# values = 42)
# for(i in c(1:5)){
# p <- p + geom_point(data = DT_per_parameter[as.vector(DT_per_parameter[,"best_order_top05"]==i),],aes(x=Step, y=35, shape = "Best"), shape = "*", size=8)
# p <- p + geom_text(data = DT_per_parameter[as.vector(DT_per_parameter[,"best_order_top05"]==i),],
# aes(x=Step, y=35, label = signif(Prior.frac05, digits = digit_significance), group = best_order_top05),
# size=3,
# # show.legend = FALSE,
# colour = my_sig_palette[6-i],
# position = position_dodge(width = 2),
# vjust = -0.5)
# }
# p <- p + scale_color_manual(name = "Non-missed genes",
# labels = c("Average rank", "Per-patient rank"),
# values=c("red","black")) +
# scale_fill_manual(name = "Missed genes",
# labels = c("Per-patient rank"),
# values=c("blue"))
# p <- pretty_plot(p, secondary_y_axis = FALSE, theme = "dark")
# ggsave(paste0(outdir_name,"/",train_val,"_Average_Patient_And_Ranks_And_Missed_",sub_name,".png"), plot = p,
# width = 300, height = 200, dpi=resolution, units = "mm")
# ##### all ranks, average rank and average standard deviation --------------
# # p <- ggplot(tmpDT[Missed == FALSE], aes(x = Step)) +
# p <- ggplot(DT_per_parameter, aes(x = Step)) +
# # geom_point(aes(y = Position), position=position_dodge(width = 5.5)) +
# # geom_jitter(data = tmpDT, aes(x = Step, y = Position, colour = Missed), size = 0.005, alpha = 0.2) +
# # geom_jitter(data = DT, aes(y = Position, colour = Missed), size = 0.005, alpha = 0.2) +
# # geom_jitter(aes(y = Position, colour = "Per-patient rank"), size = 0.005) +
# # geom_jitter(data =DT_test[Missed == TRUE], aes(x = Step, y = Position, fill = "Missed genes"), colour = "blue", size = 0.005, alpha = 0.2) +
# geom_line(aes(x = Step, y = Av_non_missed, group = 1, colour = "Average rank")) +
# geom_errorbar(aes(x = Step,
# ymax = Av_non_missed + Sd_non_missed,
# ymin = Av_non_missed - Sd_non_missed,
# colour = "Average rank"),
# # colour = "cornflowerblue",
# width = 0.5) +
# ylim(c(-5,35)) +
# geom_hline(aes(yintercept = 5), colour = "darksalmon", linetype="dashed") +
# facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs)) +
# theme_dark() +
# ylab("Disease gene rank") +
# xlab("Max distance to primary reaction") +
# ggtitle("Gene prioritization performance") +
# geom_point(data = DT_per_parameter[DT_per_parameter$best_order_top05 ==1, ], aes(x=Step, y=35, shape = "Best"), size=8) +
# scale_shape_manual(name = "",
# labels = "Best ratios dis.genes\nin top 5",
# values = 42)
# for(i in c(1:5)){
# # p <- p + geom_point(data = DT_per_parameter[as.vector(DT_per_parameter[,"best_order_top05_training"]==i),],aes(x=Step, y=35, shape = "Best"), shape = "*", size=8)
# # p <- p + geom_text(data = DT_per_parameter[as.vector(DT_per_parameter[,"best_order_top05_training"]==i),],
# # aes(x=Step, y=35, label = signif(Prior.frac05_training, digits = digit_significance), group = best_order_top05_training),
# # size=3,
# # # show.legend = FALSE,
# # colour = "black",
# # position = position_dodge(width = 2),
# # vjust = -0.5)
# # p <- p + geom_point(data = DT_per_parameter[as.vector(DT_per_parameter[,"best_order_top05_validation"]==i),],aes(x=Step, y=35, shape = "Best"), shape = "*", size=8)
# # p <- p + geom_text(data = DT_per_parameter[as.vector(DT_per_parameter[,"best_order_top05_validation"]==i),],
# # aes(x=Step, y=35, label = signif(Prior.frac05_validation, digits = digit_significance), group = best_order_top05_validation),
# # size=3,
# # # show.legend = FALSE,
# # colour = "black",
# # position = position_dodge(width = 2),
# # vjust = -0.5)
# p <- p + geom_point(data = DT_per_parameter[as.vector(DT_per_parameter[,"best_order_top05"]==i),],aes(x=Step, y=25, shape = "Best"), shape = "*", size=8)
# p <- p + geom_text(data = DT_per_parameter[as.vector(DT_per_parameter[,"best_order_top05"]==i),],
# aes(x=Step, y=25, label = signif(Prior.frac05, digits = digit_significance), group = best_order_top05),
# size=3,
# # show.legend = FALSE,
# colour = my_sig_palette[6-i],
# position = position_dodge(width = 2),
# vjust = -0.5)
# }
# p <- p + scale_color_manual(name = "Gene ranks",
# labels = c("Average non-missed", "Per patient", "Missed per patient"),
# values=c("red","black","blue"))
# p <- p + guides(shape = guide_legend(override.aes = list(size = 5)),
# color = guide_legend(override.aes = list(linetype=c(1,NA,NA),
# shape=c(NA,16,16),
# size = c(0.5,2,2),
# alpha = c(NA,1,1))))
# pp <- pretty_plot(p, secondary_y_axis = FALSE, theme = "dark")
# # ggsave(paste0(outdir_name,"/",train_val,"_Average_Patient_And_Ranks_And_Missed_",sub_name,".png"), plot = pp,
# # width = 300, height = 200, dpi=resolution, units = "mm")
# ggsave(paste0(outdir_name,"/test_",sub_name,".png"), plot = p,
# width = 300, height = 200, dpi=resolution, units = "mm")
##### Boxplot of a single panel ---------------------------------------
# nm <- 0
# DT[,Patient_follow_number := 0]
# DT[, c("Min_Rank_Patient","Max_Rank_Patient") := list(min(Position),max(Position)), by = .(Step, Z_threshold, Max_rxn, PatientID)]
# for(i in unique(DT$PatientID)){
# nm <- nm + 1
# DT[PatientID == i, Patient_follow_number := nm]
# }
# # Relative ranks
# # p <- ggplot(DT[Z_threshold == "-3, 3" & Max_rxn == 10 & P.value != 1 & Step == 5], aes(x = Patient_follow_number, y = Rank.frac)) +
# p <- ggplot(DT[Z_threshold == "-3, 3" & Max_rxn == 10 & P.value != 1 & Step == 5], aes(x = PatientID, y = Position/201)) +
# geom_boxplot(aes(fill = Patient_follow_number, group = PatientID)) + theme(legend.position = "none") +
# geom_hline(yintercept = 11/201, linetype = "dashed")
# # geom_ribbon(aes(x = Patient_follow_number, ymin=11/201,ymax=11/Min_Rank_Patient),alpha=0.2, fill = "yellow") +
# # geom_ribbon(aes(x = Patient_follow_number, ymin=11/201,ymax=0.6),alpha=0.2, fill = "red") +
# # geom_ribbon(aes(x = Patient_follow_number, ymin = 0, ymax=6/201),alpha=0.2, fill = "green") +
# # theme(axis.text.x = element_text(size = 6, angle = 60, hjust = 1)) +
# # ylim(0,1) +
# # scale_x_continuous(labels = unique(DT[Z_threshold == "-3, 3" & Max_rxn == 10 & P.value != 1 & Step == 5, PatientID]),
# # breaks = unique(DT[Z_threshold == "-3, 3" & Max_rxn == 10 & P.value != 1 & Step == 5,Patient_follow_number]),
# # name = "Patient ID")
# ggsave(paste0(outdir_name,"/",train_val,"Relative_Rank_",sub_name,".png"), plot = p,
# width = 400, height = 260, dpi=resolution, units = "mm")
# Absolute ranks
# Absolute ranks: one boxplot of the disease-gene rank per gene/patient pair
# for a single fixed parameter combination (Z_threshold = "-3, 3",
# Max_rxn = 15, Step = 4), excluding missed genes (P.value == 1) and
# excluded patients (Include == FALSE).
tmpDT <- DT[Z_threshold == "-3, 3" & Max_rxn == 15 & P.value != 1 & Step == 4 & Include == TRUE]
tmpDT[, P.value := as.double(P.value)]
tmpDT <- tmpDT[order(Gene)]
# Prefix the patient ID with the gene so each gene/patient pair gets its own
# box on the x axis (IDs become "GENE^patient").
tmpDT[, PatientID := paste(Gene, PatientID, sep = "^")]
# Build the x-axis labels: the gene belonging to each unique gene^patient ID,
# in axis order. FIX: the original grew `disease_genes` with c() inside a
# loop; vapply preallocates and guarantees one character per ID. The cat()
# diagnostics (gene + ID) are preserved; unname() keeps the labels
# positional so scale_x_discrete does not match them by name.
disease_genes <- unname(vapply(unique(tmpDT[, PatientID]), function(id) {
  gene <- unique(tmpDT[PatientID == id, Gene])
  cat(gene, id, "\n")
  gene
}, character(1)))
p <- ggplot(tmpDT, aes(x = PatientID, y = Position)) +
  geom_boxplot(aes(group = PatientID)) +
  # Dashed reference line at the top-10 cut-off.
  geom_hline(yintercept = 10, linetype = "dashed") +
  theme(axis.text.x = element_text(size = 6, angle = 60, hjust = 1)) +
  ylab("Disease gene rank") +
  scale_x_discrete(labels = disease_genes,
                   name = "Disease genes of individual patients")
ggsave(paste0(outdir_name, "/1Par_Absolute_Rank_", sub_name, ".svg"), plot = p,
       width = 400, height = 150, units = "mm")
# # visualise Sd_rank & Av_rank ~ DBS per patient
# DT_per_patient[, DBS := factor(DBS)]
# p <- ggplot(DT_per_patient, aes(DBS, Sd_rank_excl_miss)) +
# geom_jitter(colour = "darkgreen", alpha = 0.8) +
# geom_boxplot(alpha = 0.8)
# ggsave(paste0(outdir_name,"/Sd_Abs_Rank_(NoMiss)_vs_DBS.png"), plot = p,
# width = 400, height = 260, dpi=resolution, units = "mm")
#
# p <- ggplot(DT_per_patient, aes(DBS, Av_rank_excl_miss)) +
# geom_jitter(colour = "darkred", alpha = 0.8) +
# geom_boxplot(alpha = 0.8)
# ggsave(paste0(outdir_name,"/Av_Abs_Rank_(NoMiss)_vs_DBS.png"), plot = p,
# width = 400, height = 260, dpi=resolution, units = "mm")
#
# # visualise Sd_rank & Av_rank ~ Dataset/Run
# DT_per_patient[, Dataset := factor(Dataset)]
# ggplot(DT_per_patient, aes(Dataset, Sd_rank_excl_miss)) +
# geom_jitter(colour = "darkgreen", alpha = 0.8) +
# geom_boxplot(alpha = 0.8) +
# theme(axis.text.x = element_text(size = 6, angle = 45, hjust = 1))
# ggplot(DT_per_patient, aes(Dataset, Av_rank_excl_miss)) +
# geom_jitter(colour = "darkred", alpha = 0.8) +
# geom_boxplot(aes(fill = Set), alpha = 0.8) +
# theme(axis.text.x = element_text(size = 6, angle = 45, hjust = 1))
#
# # visualise Sd_rank & Av_rank ~ metabolite_set_size per patient (unbinned)
# ggplot(DT_per_patient,aes(Mets_in_set, Sd_rank_excl_miss)) +
# geom_point()
# ggplot(DT_per_patient,aes(Mets_in_set, Av_rank_excl_miss)) +
# geom_point()
#
# # visualise Sd_rank & Av_rank ~ metabolite_set_size per patient (binned)
# DT_per_patient[, Mets_in_set := as.numeric(Mets_in_set)]
# test <- copy(DT_per_patient)
# test[,Bin := "0"]
# test[Mets_in_set > 0, Bin := "0 < x ≤ 5"]
# test[Mets_in_set > 5, Bin := "5 < x ≤ 10"]
# test[Mets_in_set > 10, Bin := "10 < x ≤ 20"]
# test[Mets_in_set > 20, Bin := "20 < x ≤ 40"]
# test[Mets_in_set > 40, Bin := "40 < x ≤ 80"]
# test[Mets_in_set > 80, Bin := "80 < x ≤ 140"]
# test[Mets_in_set > 140, Bin := "140 < x ≤ 200"]
# test[Mets_in_set > 200, Bin := "200 < x ≤ 300"]
# test[Mets_in_set > 300, Bin := "300 < x"]
# test[,Bin := factor(Bin)]
# levels(test$Bin) <- c("0", "0 < x ≤ 5", "5 < x ≤ 10", "10 < x ≤ 20", "20 < x ≤ 40", "40 < x ≤ 80", "80 < x ≤ 140",
# "140 < x ≤ 200", "200 < x ≤ 300", "300 < x")
#
# p <- ggplot(test, aes(x = Bin, y = Sd_rank_excl_miss)) +
# geom_jitter(colour = "darkgreen", alpha = 0.8) +
# geom_boxplot(alpha = 0.8)
# ggsave(paste0(outdir_name,"/Sd_Abs_Rank_(NoMiss)_vs_Met_Set_Size.png"), plot = p,
# width = 400, height = 260, dpi=resolution, units = "mm")
# p <- ggplot(test, aes(x = Bin, y = Av_rank_excl_miss)) +
# geom_jitter(colour = "darkred", alpha = 0.8) +
# geom_boxplot(alpha = 0.8)
# ggsave(paste0(outdir_name,"/Av_Abs_Rank_(NoMiss)_vs_Met_Set_Size.png"), plot = p,
# width = 400, height = 260, dpi=resolution, units = "mm")
#####
# difference of #DBS on the rankings
# Determine which genes are present 1. in multiple patients and 2. with different number of DBS
# geneDBS <- unique(DT[, paste(Gene, DBS, sep = ";")])
# geneDBS <- names(which(table(unlist(lapply(geneDBS, function(x) strsplit(x, split = ";")[[1]][1]))) > 1))
#
#
# p <-ggplot(DT_per_patient[Gene %in% geneDBS & Missed == 0, ], aes(x = as.factor(DBS), y = Av_rank)) +
# geom_jitter(aes(colour = Dataset), alpha = 0.8) +
# geom_boxplot(aes(colour = Dataset), alpha = 0.8) +
# # theme(legend.position="bottom") +
# facet_wrap(. ~ Gene, scales='free_x')
# ggsave(paste0(outdir_name,"/Genes_With_Multiple_DBS.png"), plot = p,
# width = 400, height = 260, dpi=resolution, units = "mm")
#
#
# p2 <- ggplot(DT_per_patient[Gene %in% geneDBS, ], aes(x = as.factor(DBS), y = Sd_rank)) +
# geom_jitter(aes(colour = PatientID), alpha = 0.8) +
# geom_boxplot(aes(colour = PatientID), alpha = 0.8) +
# facet_wrap(. ~ Gene, scales='free_x')
#
#
# p1 <-ggplot(DT_per_patient[Gene=="CPT1A",], aes(x = as.factor(DBS), y = Av_rank)) +
# geom_jitter(aes(colour = PatientID), alpha = 0.8) +
# geom_boxplot(aes(colour = PatientID), alpha = 0.8) +
# theme(legend.position="bottom")
# p2 <- ggplot(DT_per_patient[Gene=="CPT1A",], aes(x = as.factor(DBS), y = Sd_rank)) +
# geom_jitter(aes(colour = PatientID), alpha = 0.8) +
# geom_boxplot(aes(colour = PatientID), alpha = 0.8)
#
# g_legend<-function(a.gplot){
# tmp <- ggplot_gtable(ggplot_build(a.gplot))
# leg <- which(sapply(tmp$grobs, function(x) x$name) == "guide-box")
# legend <- tmp$grobs[[leg]]
# return(legend)}
# mylegend<-g_legend(p1)
# p3 <- gridExtra::grid.arrange(arrangeGrob(p1 + theme(legend.position="none"),
# p2 + theme(legend.position="none"),
# nrow=1),
# mylegend, nrow=2,heights=c(10, 1))
# # gridExtra::grid.arrange(p1, p2, ncol=2)
# ggsave(paste0(outdir_name,"/CPT1A_Av_Abs_Rank_And_Sd_vs_DBS.png"), plot = p3,
# width = 400, height = 260, dpi=resolution, units = "mm")
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Per patient plots -------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# ##### Average rank per patient --------------------------------------------
# p <- ggplot(data = DT_per_patient, aes(x = Step, y = Av_rank)) +
# geom_bar(position = "dodge", stat="identity", aes(fill = PatientID)) +
# geom_line(data = DT_per_parameter, aes(x = Step, y = Missed/0.25, group = 1, colour = "Genes Missed")) +
# scale_y_continuous(sec.axis = sec_axis(~.*0.25, name = "Tot.Genes Missed")) +
# ylab("Average Rank") +
# xlab("Max. distance to primary reaction") +
# ggtitle("Disease gene rank per patient & parameter combination") +
# labs(colour = "", fill = "Patients") +
# guides(colour = guide_legend(order = 1),
# fill = guide_legend(order = 2)) +
# facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs))
# p <- pretty_plot(p)
# ggsave(paste0(outdir_name,"/_Av_Rank_Per_Patient_",sub_name,".png"), plot = p,
# width = 1000, height = 200, dpi=resolution, units = "mm")
#
#
# ##### Standard deviation of average rank per patient ----------------------
# p <- ggplot(data = DT_per_patient, aes(x = Step, y = Sd_rank)) +
# geom_bar(position = "dodge", stat="identity", aes(fill = PatientID)) +
# # geom_line(data = DT_per_parameter, aes(x = Step, y = Missed/4, group = 1, colour = "Genes Missed")) +
# # scale_y_continuous(sec.axis = sec_axis(~.*4, name = "Tot.Genes Missed")) +
# # scale_y_continuous(sec.axis = sec_axis(~.*4, name = "Tot.Genes Missed", breaks = seq(0, 50, 10))) +
# ylab("Sd disease gene rank") +
# xlab("Max. distance to primary reaction") +
# ggtitle("Disease gene St.dev. per patient & parameter combination") +
# labs(colour = "", fill = "Patients") +
# guides(colour = guide_legend(order = 1),
# fill = guide_legend(order = 2)) +
# facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs))
# p <- pretty_plot(p)
# ggsave(paste0(outdir_name,"/",train_val,"_Sd_Rank_Per_Patient_",sub_name,".png"), plot = p,
# width = 300, height = 200, dpi=resolution, units = "mm")
#
#
# ##### Average relative rank per patient -----------------------------------
# p <- ggplot(data = DT_per_patient, aes(x = Step, y = Av_Rel_rank)) +
# geom_bar(position = "dodge", stat="identity", aes(fill = PatientID)) +
# # geom_line(data = DT_per_parameter, aes(x = Step, y = Missed/50, group = 1, colour = "Genes Missed")) +
# # scale_y_continuous(sec.axis = sec_axis(~.*50, name = "Tot.Genes Missed")) +
# ylab("Average Relative Rank (rank/Tot. genes") +
# xlab("Max. distance to primary reaction") +
# ggtitle("Disease gene rel.rank per patient & parameter combination") +
# labs(colour = "", fill = "Patients") +
# guides(colour = guide_legend(order = 1),
# fill = guide_legend(order = 2)) +
# facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs))
# p <- pretty_plot(p)
# ggsave(paste0(outdir_name,"/",train_val,"_Av_Rel_Rank_Per_Patient_",sub_name,".png"), plot = p,
# width = 300, height = 200, dpi=resolution, units = "mm")
#
#
# ##### Standard deviation of average relative rank per patient -------------
# p <- ggplot(data = DT_per_patient, aes(x = Step, y = Sd_Rel_rank)) +
# geom_bar(position = "dodge", stat="identity", aes(fill = PatientID)) +
# # geom_line(data = DT_per_parameter, aes(x = Step, y = Missed/800, group = 1, colour = "Genes Missed")) +
# # scale_y_continuous(sec.axis = sec_axis(~.*800, name = "Tot.Genes Missed")) +
# ylab("Sd disease gene relative Rank") +
# xlab("Max. distance to primary reaction") +
# ggtitle("Disease gene St.dev. per patient & parameter combination") +
# labs(colour = "", fill = "Patients") +
# guides(colour = guide_legend(order = 1),
# fill = guide_legend(order = 2)) +
# facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs))
# p <- pretty_plot(p)
# ggsave(paste0(outdir_name,"/",train_val,"_Sd_Rel_Rank_Per_Patient_",sub_name,".png"), plot = p,
# width = 300, height = 200, dpi=resolution, units = "mm")
#
#
# ##### Average relative rank per patient, reversed (1 = rank 1) ------------
# p <- ggplot(data = DT_per_patient, aes(x = Step, y = Av_Rev_Rel_rank)) +
# geom_bar(position = "dodge", stat="identity", aes(fill = PatientID)) +
# # geom_line(data = DT_per_parameter, aes(x = Step, y = Missed/50, group = 1, colour = "Genes Missed")) +
# # scale_y_continuous(sec.axis = sec_axis(~.*50, name = "Tot.Genes Missed")) +
# ylab("Average reverse Relative Rank (1-((rank-1)/(Tot. genes in set-1))") +
# xlab("Max. distance to primary reaction") +
# ggtitle("Disease gene rel.rank per patient & parameter combination") +
# labs(colour = "", fill = "Patients") +
# guides(colour = guide_legend(order = 1),
# fill = guide_legend(order = 2)) +
# facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs))
# p <- pretty_plot(p)
# ggsave(paste0(outdir_name,"/",train_val,"_Av_Rev_Rel_Rank_Per_Patient_",sub_name,".png"), plot = p,
# width = 300, height = 200, dpi=resolution, units = "mm")
#
#
# ##### St. dev. of average relative (reversed) rank per patient ------------
# p <- ggplot(data = DT_per_patient, aes(x = Step, y = Sd_Rev_Rel_rank)) +
# geom_bar(position = "dodge", stat="identity", aes(fill = PatientID)) +
# # geom_line(data = DT_per_parameter, aes(x = Step, y = Missed/800, group = 1, colour = "Genes Missed")) +
# # scale_y_continuous(sec.axis = sec_axis(~.*800, name = "Tot.Genes Missed")) +
# ylab("Sd disease gene reverse relative Rank") +
# xlab("Max. distance to primary reaction") +
# ggtitle("Disease gene reverse St.dev. per patient & parameter combination") +
# labs(colour = "", fill = "Patients") +
# guides(colour = guide_legend(order = 1),
# fill = guide_legend(order = 2)) +
# facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs))
# p <- pretty_plot(p)
# ggsave(paste0(outdir_name,"/",train_val,"_Sd_Rev_Rel_Rank_Per_Patient_",sub_name,".png"), plot = p,
# width = 300, height = 200, dpi=resolution, units = "mm")
| /src_After_Analysis/Prioritised_gene_analysis.R | permissive | UMCUGenetics/Crossomics | R | false | false | 43,685 | r | #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Info --------------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# This script uses the output of Summarise_MSEA_results.R to generate graphic representations of
# the Crossomics results
# R version: 3.6.0 (2019-04-26)
# platform: x86_64-apple-darwin15.6.0 (64-bit)
# OS: macOS Mojave 10.14.6
#
# libraries:
# rstudioapi 0.10
# data.table 1.12.2
# ggplot2 3.1.1
# RColorBrewer 1.1-2
# scales 1.0.0
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Libraries ---------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
library(rstudioapi)
library(data.table)
library(RColorBrewer) # Colour schemes
library(ggplot2)
library(scales)
# library(ggforce) # zoom in specific parts of plot
library(grid) # manual grob adjustment
library(gtable) # manual grob adjustment
library(gridExtra)
library(stringr)
library(svglite)
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Adjustable settings -----------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
##### Create general plot aesthetics --------------------------------------
# Facet strip labels: named so labeller() can map the raw Z_threshold /
# Max_rxn values (the names) onto the human-readable strings.
Thresh_labs <- c("Min.-1; Max.1.5","Min.-1.5; Max.2","Min.-3; Max.3","Min.-5; Max.5")
names(Thresh_labs) <- c("-1, 1.5", "-1.5, 2", "-3, 3", "-5, 5")
Rxn_labs <- c("<= 8","<= 10","<= 12","<= 15","<= 17","<= 19")
names(Rxn_labs) <- c(8, 10, 12, 15, 17, 19)
# Colour scheme
my_greens <- rev(brewer.pal(5, "Greens"))[c(2:5)]
my_blues <- rev(brewer.pal(5, "Blues"))[c(1:5)]
# NOTE(review): my_greens is immediately overwritten with the blues palette,
# so every "greens" reference below actually plots blues — presumably a
# deliberate palette switch; confirm before "fixing".
my_greens <- my_blues
my_reds <- rev(brewer.pal(5, "Reds"))[c(2:5)]
my_sig_palette <- rev(brewer.pal(6, "RdYlGn"))[2:6]
# NOTE(review): the RdYlGn palette above is dead code — my_sig_palette is
# reassigned here to colorRamps::green2red(5) (requires the colorRamps
# package, accessed via ::, not loaded with library()).
my_sig_palette <- colorRamps::green2red(5)
# Image resolution
high_res <- 600
low_res <- 100
# Active dpi used by all raster ggsave() calls; switch to high_res for
# publication-quality output.
resolution <- low_res
# Number of significant digits for the geom_text annotations of the best
# parameter combinations.
digit_significance <- 3
##### Other ---------------------------------------------------------------
# Exclude patients / diseases
subset_patients <- FALSE
# trainingset <- "combined" # possible: TRUE, FALSE, NULL (for all data), "combined" (for all, but separated)
# Date of data
date <- "2019-12-10"
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Pre-processing ----------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
##### Read data -----------------------------------------------------------
# Resolve the Results directory relative to this script's location.
# NOTE(review): rstudioapi::getActiveDocumentContext() only works when the
# script is open in RStudio — this will fail under Rscript/knitr.
code_dir <- paste0(dirname(rstudioapi::getActiveDocumentContext()$path),"/../Results/")
# Full per-gene/per-patient compiled MSEA results (one row per gene, patient
# and parameter combination), produced by Summarise_MSEA_results.R (see the
# script header).
DT <- data.table::as.data.table(readRDS(paste0(code_dir,date,"/MSEA_DT_compiled_all.RDS")))
# DT_per_parameter_val <- data.table::as.data.table(readRDS(paste0(code_dir,date,"/MSEA_DT_validation_per_parameter.RDS")))
# DT_per_patient_val <- data.table::as.data.table(readRDS(paste0(code_dir,date,"/MSEA_DT_validation_per_patient.RDS")))
# DT_per_parameter_tra <- data.table::as.data.table(readRDS(paste0(code_dir,date,"/MSEA_DT_training_per_parameter.RDS")))
# DT_per_patient_tra <- data.table::as.data.table(readRDS(paste0(code_dir,date,"/MSEA_DT_training_per_patient.RDS")))
# Precomputed summaries: one row per parameter combination (all patients
# pooled), and one per parameter combination split by training/validation.
DT_per_parameter <- readRDS(paste0(code_dir,date,"/MSEA_DT_per_parameter.RDS"))
DT_per_parameter_tra_val_sum <- readRDS(paste0(code_dir,date,"/MSEA_DT_per_parameter_tra_val_summary.RDS"))
# DT_validation_per_parameter <- readRDS(paste0(code_dir,date,"/MSEA_DT_by_validation_per_parameter.RDS"))
# test <- readRDS(paste0(code_dir,date,"/Metabolite_set_sizes.RDS"))
# DT_met_sets <- readRDS(paste0(code_dir,date,"/Metabolite_set_sizes.RDS"))
# DT_per_parameter_tra[, rank_10 := frank(-Prior.frac10)]
# #####
# # See how many disease genes have a metabolite set
# genes_w_mets <- list.files(paste0(code_dir, "../Data/2019-08-12/maxrxn19/mss_5_HMDBtranslated"))
# genes_w_mets <- str_remove(genes_w_mets, ".RDS")
#
# sum(!unique(DT[Include == TRUE, Gene]) %in% genes_w_mets)
# if(trainingset == "combined"){
# train_val <- "trainingset"
# DT1 <- data.table::as.data.table(readRDS(paste0(code_dir,date,"/MSEA_DT_compiled_",train_val,".RDS")))
# DT1[,Set := "training"]
# train_val <- "validationset"
# DT2 <- data.table::as.data.table(readRDS(paste0(code_dir,date,"/MSEA_DT_compiled_",train_val,".RDS")))
# DT2[,Set := "validation"]
# DT <- rbind(DT1, DT2)
# rm(DT1, DT2)
# } else {
# if(is.null(trainingset)) {
# train_val <- "all"
# DT <- data.table::as.data.table(readRDS(paste0(code_dir,date,"/MSEA_DT_compiled_",train_val,".RDS")))
# DT[,Set := "all"]
# } else {
# train_val <- ifelse(trainingset, "trainingset", "validationset")
# DT <- data.table::as.data.table(readRDS(paste0(code_dir,date,"/MSEA_DT_compiled_",train_val,".RDS")))
# ifelse(trainingset, DT[,Set := "training"], DT[,Set := "validation"])
# }
# }
# DT <- data.table::as.data.table(readRDS(paste0(code_dir,date,"/MSEA_DT_compiled_",train_val,".RDS")))
##### Determine parameters ------------------------------------------------
# temp$size_f = factor(temp$size, levels=c('50%','100%','150%','200%'))
# Z_thresholds <- levels(DT$Z_threshold)
# max_rxns <- levels(DT$Max_rxn)
# steps <- levels(DT$Step)
# seeds <- unique(DT$Seed)
##### Exclude predetermined patients and determine output names -----------
# Optionally drop a fixed set of patients from the compiled results; the
# sub_name suffix records in every output filename which variant was plotted.
if (subset_patients) {
  excluded <- c("P56","P57","P58","P59","P68")
  DT <- DT[!Patient %in% excluded]
  sub_name <- "Sub_P"
} else {
  sub_name <- "All_P"
}
# Plots go into Plots/<date>/ next to the Results directory; create it
# (including parents) on first run.
outdir_name <- paste0(dirname(rstudioapi::getActiveDocumentContext()$path),
                      "/../Plots/", date)
if (!file.exists(outdir_name)) {
  dir.create(outdir_name, recursive = TRUE)
}
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Functions ---------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
##### Manual standard grob addition ---------------------------------------
# Decorate a Max_rxn ~ Z_threshold faceted ggplot with two grey "meta" label
# strips: one down the right edge ("Extension stringency") and one across the
# top ("Biochemical stringency").
#
# p                - ggplot object built with facet_grid(Max_rxn ~ Z_threshold)
# theme            - "light" or "dark"; flips the strip fill/text colours
# secondary_y_axis - logical; TRUE shifts the right strip one gtable column
#                    left (the logical is coerced to 0/1 in the arithmetic)
#
# Returns a gtable, not a ggplot — draw it with grid.draw() or pass it as
# `plot` to ggsave(). The gtable row/column indices below are derived from the
# number of facet rows (Rxn_labs) and columns (Thresh_labs).
pretty_plot <- function(p, theme = "light", secondary_y_axis = TRUE) {
  gt <- ggplotGrob(p)
  col_right   <- 5 + 2 * length(Thresh_labs) - secondary_y_axis
  row_bottom  <- 6 + 2 * length(Rxn_labs)
  col_top_end <- 5 + 2 * length(Thresh_labs) - 2
  # Strip colours depend on the plot theme: dark panels get a light strip
  # with black text, light panels a grey strip with white text.
  strip_fill <- if (theme == "dark") "lightgray" else gray(0.5)
  strip_text <- if (theme == "dark") "black" else "white"
  # Right-hand strip. paste(runif(2)) fabricates two unique grob names so
  # repeated calls never collide within the gtable.
  gt <- gtable_add_cols(gt, unit(1, 'cm'), col_right)
  gt <- gtable_add_grob(
    gt,
    list(rectGrob(gp = gpar(col = NA, fill = strip_fill)),
         textGrob("Extension stringency", rot = -90, gp = gpar(col = strip_text))),
    8, col_right + 1, row_bottom, col_right + 1, name = paste(runif(2)))
  # Top strip.
  gt <- gtable_add_rows(gt, unit(1, 'cm'), 6)
  gt <- gtable_add_grob(
    gt,
    list(rectGrob(gp = gpar(col = NA, fill = strip_fill)),
         textGrob("Biochemical stringency", gp = gpar(col = strip_text))),
    7, 5, 7, col_top_end, name = paste(runif(2)))
  gt
}
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Simple plots, meant to be used as explanation for the bigger, later plots -----
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Empty facet plot
# Empty facet grid (no geoms): shows only the parameter-combination layout,
# used as a visual legend for the denser plots further down.
# NOTE(review): scale_color_manual declares "In Top 15"/"Missed" labels, but
# no geom maps the colour aesthetic here, so no legend is drawn — presumably
# copied from a later plot; harmless.
p <- ggplot(DT_per_parameter, aes(x = Step)) +
facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs)) +
theme_dark() +
ylab("Disease genes / Total dis. genes") +
xlab("Max distance to primary reaction") +
ggtitle("Correct disease gene prioritisation") +
scale_color_manual(name = "Prioritised Genes",
labels = c("In Top 15", "Missed"),
values=c(my_greens[1],'RED'))
# Add the grey meta-strips and write the result as a gtable.
z <- pretty_plot(p, theme = "dark")
ggsave(paste0(outdir_name,"/Empty_Facet_",sub_name,".png"), plot = z,
width = 250, height = 200, dpi=resolution, units = "mm")
# Simple plot, just top 10, all parameter combinations: fraction of disease
# genes prioritised within the top 10 vs. fraction missed, per facet.
p <- ggplot(DT_per_parameter, aes(x = Step)) +
geom_line(aes(y = Prior.frac10, colour = "line10", group = 1)) +
geom_point(aes(y = Prior.frac10, colour = "line10"), size=0.5) +
geom_line(aes(y = Missed.frac, colour = "Missed", group = 1)) +
geom_point(aes(y = Missed.frac, colour = "Missed"), size=0.5) +
facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs)) +
theme_dark() +
ylab("Disease genes / Total dis. genes") +
xlab("Max distance to primary reaction") +
ggtitle("Correct disease gene prioritisation") +
scale_color_manual(name = "Prioritised Genes",
labels = c("In Top 10", "Missed"),
values=c(my_greens[4],'RED'))
z <- pretty_plot(p, theme = "dark")
# NOTE(review): two issues with this ggsave call —
# 1) `train_val` is only assigned inside the commented-out data-loading block
#    above, so this errors ("object 'train_val' not found") unless it is left
#    over in the session; define it (e.g. train_val <- "all") before running.
# 2) The filename says "Top15" although the plotted column is Prior.frac10
#    (top 10) — presumably a leftover name; confirm before renaming.
ggsave(paste0(outdir_name,"/",train_val,"_Ranks_And_Missed_Top15_",sub_name,".png"), plot = z,
width = 300, height = 200, dpi=resolution, units = "mm")
# Single panel from big facet plot (top 2, 5, 10 and 15): one parameter
# combination (Z in [-3, 3], max 15 reactions), with one line per top-N
# cut-off plus the missed fraction.
p <- ggplot(DT_per_parameter[Z_threshold == "-3, 3" & Max_rxn == 15], aes(x = Step)) +
geom_line(aes(y = Prior.frac15, colour = "line15", group = 1)) +
# geom_ribbon(aes(x= as.numeric(Step), ymin=1*15/Max_Tot.Genes, ymax=1*15/Min_Tot.Genes, fill = "band15")) +
# geom_ribbon(aes(x= as.numeric(Step), ymin=1*10/Max_Tot.Genes, ymax=1*10/Min_Tot.Genes, fill = "band10")) +
# geom_ribbon(aes(x= as.numeric(Step), ymin=1*5/Max_Tot.Genes, ymax=1*5/Min_Tot.Genes, fill = "band05")) +
# geom_ribbon(aes(x= as.numeric(Step), ymin=1*2/Max_Tot.Genes, ymax=1*2/Min_Tot.Genes, fill = "band02")) +
geom_line(aes(y = Prior.frac15, colour = "line15", group = 1), size = 1.3) +
geom_point(aes(y = Prior.frac15, colour = "line15"), size=1) +
geom_line(aes(y = Prior.frac10, colour = "line10", group = 1), size = 1.3) +
geom_point(aes(y = Prior.frac10, colour = "line10"), size=1) +
geom_line(aes(y = Prior.frac05, colour = "line05", group = 1), size = 1.3) +
geom_point(aes(y = Prior.frac05, colour = "line05"), size=1) +
geom_line(aes(y = Prior.frac02, colour = "line02", group = 1), size = 1.3) +
geom_point(aes(y = Prior.frac02, colour = "line02"), size=1) +
geom_line(aes(y = Missed.frac, colour = "missed", group = 1), size = 1.3) +
geom_point(aes(y = Missed.frac, colour = "missed"), size=1)
p <- p + theme_dark() +
ylab("Disease genes / Total dis. genes") +
xlab("Max distance to primary reaction") +
ylim(c(0,1)) +
ggtitle("Correct disease gene prioritisation") +
# NOTE(review): 5 labels but c(my_greens,'RED') supplies 6 values, because
# my_greens was overridden earlier to the 5-colour blues palette — confirm
# the intended palette length (4 greens + red, or trim my_greens).
scale_color_manual(name = "Prioritised Genes",
labels = c("In Top 2", "In Top 5", "In Top 10", "In Top 15", "Missed"),
values=c(my_greens,'RED')) +
# The fill scale belongs to the geom_ribbon layers that are commented out
# above; with them disabled it has no effect on the plot.
scale_fill_manual(name = "Prioritised if random Gene",
labels = c("In Top 2", "In Top 5", "In Top 10", "In Top 15", "Missed"),
values=c(my_greens)) +
guides(colour = guide_legend(order = 1, reverse = TRUE),
fill = guide_legend(order = 2, reverse = TRUE))
# colour = adjustcolor(my_greens[4],alpha.f=0.5), fill = my_greens[4], alpha="0.5"
# NOTE(review): `train_val` is only defined in the commented-out loading
# block above; this ggsave fails unless it exists in the session.
ggsave(paste0(outdir_name,"/",train_val,"_Ranks_And_Missed_Single_Par_Comb_",sub_name,".png"), plot = p,
width = 300, height = 200, dpi=resolution, units = "mm")
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Combination plot of correctly prioritised and missed genes --------------
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#####
# Disease gene prioritization per parameter, separated by training/validation runs/patients
# Disease gene prioritisation per parameter, separated into training and
# validation runs: mean +/- sd of the top-10 fraction per group, plus the
# overall missed fraction, faceted by parameter combination.
p <- ggplot(DT_per_parameter_tra_val_sum, aes(x = Step)) +
geom_line(data = DT_per_parameter_tra_val_sum[Validation == TRUE, ], aes(y = Av.prior.frac10, colour = "Validation", group = "Validation")) +
geom_errorbar(data = DT_per_parameter_tra_val_sum[Validation == TRUE, ],
aes(ymax = Av.prior.frac10 + Sd.prior.frac10,
ymin = Av.prior.frac10 - Sd.prior.frac10,
colour = "Validation"),
width = 0.4) +
geom_line(data = DT_per_parameter_tra_val_sum[Validation == FALSE, ], aes(y = Av.prior.frac10, colour = "Training", group = "Training")) +
geom_errorbar(data = DT_per_parameter_tra_val_sum[Validation == FALSE, ],
aes(ymax = Av.prior.frac10 + Sd.prior.frac10,
ymin = Av.prior.frac10 - Sd.prior.frac10,
colour = "Training"),
width = 0.4) +
geom_line(data = DT_per_parameter, aes(y = Missed.frac, colour = "Missed", group = 1)) +
geom_point(data = DT_per_parameter,aes(y = Missed.frac, colour = "Missed"), size=0.5) +
facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs)) +
theme_light() +
ylab("Disease genes / Total dis. genes") +
xlab("Max distance to primary reaction") +
ggtitle("Correct disease gene prioritisation") +
ylim(0,1)
# Mark the 5 best-ranked parameter combinations (training side only, ranked
# by best_order_top10 == 1..5) with an asterisk and an annotated value.
# NOTE(review): the literal shape = "*" overrides the mapped aes(shape =
# "Best"), so the marker is always an asterisk; presumably the aes mapping
# exists only to get a legend entry — verify that is still wanted.
for(i in c(1:5)){
p <- p + geom_point(data = DT_per_parameter_tra_val_sum[Validation == FALSE & as.vector(DT_per_parameter_tra_val_sum[Validation == FALSE, "best_order_top10"]==i),],
aes(x=Step, y=Av.prior.frac10, shape = "Best"),
shape = "*",
size=8)
# Text colour encodes the rank: my_sig_palette[1] for the best combination
# down to my_sig_palette[5] for the fifth.
p <- p + geom_text(data = DT_per_parameter_tra_val_sum[Validation == FALSE & as.vector(DT_per_parameter_tra_val_sum[Validation == FALSE, "best_order_top10"]==i),],
aes(x=Step, y=Av.prior.frac10, label = signif(Av.prior.frac10, digits = digit_significance), group = best_order_top10),
size=3,
colour = my_sig_palette[i],
position = position_dodge(width = 2),
vjust = -0.8)
}
z <- pretty_plot(p, theme = "light")
ggsave(paste0(outdir_name,"/CorrectPrior_top10_tra_val_",sub_name,".svg"), plot = z,
width = 300, height = 200, units = "mm")
#####
# Disease gene prioritization per parameter, all runs/patients averaged to one value
# Disease gene prioritisation per parameter, all runs/patients averaged to
# one value: top-10 fraction vs. missed fraction per facet, with the 5 best
# parameter combinations starred and annotated.
p <- ggplot(DT_per_parameter, aes(x = Step)) +
geom_line(aes(y = Missed.frac, colour = "Missed", group = 1)) +
geom_point(aes(y = Missed.frac, colour = "Missed"), size=0.5) +
geom_line(aes(y = Prior.frac10, colour = "line10", group = 1)) +
geom_point(aes(y = Prior.frac10, colour = "line10"), size=0.5) +
facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs)) +
theme_light() +
ylab("Disease genes / Total dis. genes") +
xlab("Max distance to primary reaction") +
ggtitle("Correct disease gene prioritisation") +
scale_color_manual(name = "Prioritised Genes",
labels = c("In Top 10", "Missed"),
values=c(my_greens[1],'RED')) +
ylim(0,1)
# Star + annotate the 5 best parameter combinations (best_order_top10 1..5);
# the literal shape = "*" overrides the mapped aes(shape), the text colour
# encodes the rank via my_sig_palette.
for(i in c(1:5)){
p <- p + geom_point(data = DT_per_parameter[as.vector(DT_per_parameter[, "best_order_top10"]==i),],
aes(x=Step, y=Prior.frac10, shape = "Best"),
shape = "*",
size=8)
p <- p + geom_text(data = DT_per_parameter[as.vector(DT_per_parameter[, "best_order_top10"]==i),],
aes(x=Step, y=Prior.frac10, label = signif(Prior.frac10, digits = digit_significance), group = best_order_top10),
size=3,
colour = my_sig_palette[i],
position = position_dodge(width = 2),
vjust = -0.8)
}
z <- pretty_plot(p, theme = "light")
ggsave(paste0(outdir_name,"/CorrectPrior_top10_all_",sub_name,".svg"), plot = z,
width = 300, height = 200, units = "mm")
# p <- ggplot(DT_per_parameter, aes(x = Step)) +
# geom_line(aes(y = Prior.frac05, colour = "line05", group = 1)) +
# geom_point(aes(y = Prior.frac05, colour = "line05"), size=0.5) +
# geom_line(aes(y = Prior.frac10, colour = "line10", group = 1)) +
# geom_point(aes(y = Prior.frac10, colour = "line10"), size=0.5) +
# geom_line(aes(y = Missed.frac, group = 1, linetype="Training"), colour = my_reds[1]) +
# geom_point(aes(y = Missed.frac), size=0.5, colour = my_reds[1])
# # geom_hline(data = DT_per_parameter[Set == "training"], aes(yintercept = max(Prior.frac05), size = "Training", colour="line05t"), alpha = 0.8, linetype = "dashed") +
# # geom_hline(data = DT_per_parameter[Set == "validation"], aes(yintercept = max(Prior.frac05), size = "Validation", colour="line05v"), alpha = 0.8, linetype = "dashed")
# p <- p + facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs))
# p <- p + theme_light() +
# ylab("Ratio [rank disease gene] / [total genes]") +
# # ylab(expression(paste("Ratio of: ", frac(`disease genes`,`total disease genes`)))) +
# xlab("Maximum distance to primary reaction") +
# ylim(0, 1) +
# # ggtitle("Performance of disease gene prioritization") +
# scale_color_manual(name = NULL,
# labels = c("Correctly prioritized genes (top 5)","Correctly prioritized genes (top 10)"),
# values= c(my_greens[3], my_greens[2]),
# guide = guide_legend(override.aes = list(linetype = "solid",
# shape = 16),
# order = 1)
# ) +
# scale_linetype_manual(name = NULL,
# labels = "Missed genes",
# values = "solid",
# guide = guide_legend(override.aes = list(colour = my_reds[3]),
# order = 2)
# )
# pp <- pretty_plot(p, theme = "light")
# ggsave(paste0(outdir_name,"/",train_val,"_Ranks_And_Missed_",sub_name,".png"), plot = pp,
# width = 300, height = 200, dpi=resolution, units = "mm")
# Some example plot for making multiple legends out of 1 aesthetic
# ggplot(data=dfr, mapping=aes(x=id, y=value)) +
# geom_line(mapping=aes(colour=group), show_guide=TRUE) +
# geom_hline(
# mapping=aes(yintercept=c(-1,1)*qnorm(0.95), fill="95% CI"),
# color="orange"
# ) +
# geom_hline(
# mapping=aes(yintercept=c(-1,1)*qnorm(0.99), fill="99% CI"),
# color="darkred"
# ) +
# scale_color_hue("Group", guide=guide_legend(order=1)) +
# scale_fill_manual("CI horizontal line", values=rep(1,4),
# guide=guide_legend(
# override.aes = list(colour=c("orange", "darkred")),
# order=2
# ),
# labels=c("CI of 95%", "CI of 99%")
# )
# ##### Average rank non-missed genes + total missed genes ------------------
# p <- ggplot(DT_per_parameter, aes(label=Av_non_missed)) +
# geom_line(aes(x = Step, y = Av_non_missed, colour = "Average", group = 1), size = 1.3) +
# geom_point(aes(x = Step, y = Av_non_missed, colour = "Average"), size=0.5) +
# # geom_line(aes(x = Step, y = Missed.frac*15, colour = "Frac.Missed", group = 1), size = 1.3) +
# # geom_point(aes(x = Step, y = Missed.frac*15, colour = "Frac.Missed"), size=0.5) +
# # scale_y_continuous(sec.axis = sec_axis(~./15, name = "Frac. genes Missed", breaks = c(0, 0.5, 1))) +
# theme_dark() +
# scale_color_manual(name = "",
# labels = c("Av. rank of non-missed genes", "Frac. Missed"),
# values=c('Black', my_greens[3]))
# p <- p + facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs))
# # geom_point(data = DT_per_parameter[best_av50==TRUE, ],aes(x=Step, y=Av_top50),shape = "*", size=8, show.legend = FALSE, colour = "black")+
# p <- p + geom_point(data = DT_per_parameter[as.vector(DT_per_parameter[,"best_order_NM"]==1),],aes(x=Step, y=Av_non_missed, shape = "Best"), size=8, ) +
# scale_shape_manual(name = "",
# labels = "Best param.\nranks (missed \nfrac. <0.5)",
# values = 42)
# # Annotate the 5 best scoring parameter combinations (determined ~l.280)
# for(i in c(1:5)){
# p <- p + geom_point(data = DT_per_parameter[as.vector(DT_per_parameter[,"best_order_NM"]==i),],aes(x=Step, y=Av_non_missed, shape = "Best"), shape = "*", size=8, colour = my_sig_palette[i])
# p <- p + geom_text(data = DT_per_parameter[as.vector(DT_per_parameter[,"best_order_NM"]==i),],
# aes(x=Step, y=Av_non_missed, label = signif(Av_non_missed, digits = digit_significance), group = best_order_NM),
# size=3,
# # show.legend = FALSE,
# colour = my_sig_palette[i],
# position = position_dodge(width = 2),
# vjust = -0.5)
# }
# y_val <- ggplot_build(p)$layout$panel_scales_y[[1]]$range$range[2]
# p <- p +
# ylab("Average non-missed disease gene rank") +
# xlab("Max. distance to primary reaction") +
# ggtitle("Av. gene rank for non-missed genes") +
# # geom_hline(yintercept=y_val/2, linetype="dashed", color = "salmon") +
# guides(shape = guide_legend(order = 1),
# colour = guide_legend(order = 2))
# pp <- pretty_plot(p, theme = "dark")
# ggsave(paste0(outdir_name,"/Best_Ranks_With_>0.5_Non_Missed_",sub_name,".png"), plot = pp,
# width = 300, height = 200, dpi=resolution, units = "mm")
##### all ranks, average rank and average standard deviation --------------
# DT_no_missed <- tmpDT[P.value < 1,]
# DT_missed <- tmpDT[P.value == 1,]
# p <- ggplot(DT_no_missed, aes(x = Step)) +
# # geom_point(aes(y = Position), position=position_dodge(width = 5.5)) +
# geom_jitter(aes(y = Position, colour = "Per-patient rank"), size = 0.005) +
# geom_jitter(data =DT_missed, aes(x = Step, y = Position, fill = "Missed genes"), colour = "blue", size = 0.005, alpha = 0.2) +
# geom_line(data = DT_per_parameter, aes(x = Step, y = Av_non_missed, group = 1, colour = "Average rank")) +
# geom_errorbar(data = DT_per_parameter, aes(x = Step,
# ymax = Av_non_missed + Av_Sd_excl_miss,
# ymin = Av_non_missed - Av_Sd_excl_miss,
# colour = "Average rank"),
# # colour = "cornflowerblue",
# width = 0.5) +
# ylim(c(0,40)) +
# geom_hline(aes(yintercept = 5), colour = "darksalmon", linetype="dashed") +
# facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs)) +
# theme_dark() +
# ylab("Disease gene rank") +
# xlab("Max distance to primary reaction") +
# ggtitle("Stability of method") +
# geom_point(data = DT_per_parameter[DT_per_parameter$best_order_top05 ==1, ], aes(x=Step, y=35, shape = "Best"), size=8) +
# scale_shape_manual(name = "",
# labels = "Ratio dis.genes\nin top 5",
# values = 42)
# for(i in c(1:5)){
# p <- p + geom_point(data = DT_per_parameter[as.vector(DT_per_parameter[,"best_order_top05"]==i),],aes(x=Step, y=35, shape = "Best"), shape = "*", size=8)
# p <- p + geom_text(data = DT_per_parameter[as.vector(DT_per_parameter[,"best_order_top05"]==i),],
# aes(x=Step, y=35, label = signif(Prior.frac05, digits = digit_significance), group = best_order_top05),
# size=3,
# # show.legend = FALSE,
# colour = my_sig_palette[6-i],
# position = position_dodge(width = 2),
# vjust = -0.5)
# }
# p <- p + scale_color_manual(name = "Non-missed genes",
# labels = c("Average rank", "Per-patient rank"),
# values=c("red","black")) +
# scale_fill_manual(name = "Missed genes",
# labels = c("Per-patient rank"),
# values=c("blue"))
# p <- pretty_plot(p, secondary_y_axis = FALSE, theme = "dark")
# ggsave(paste0(outdir_name,"/",train_val,"_Average_Patient_And_Ranks_And_Missed_",sub_name,".png"), plot = p,
# width = 300, height = 200, dpi=resolution, units = "mm")
# ##### all ranks, average rank and average standard deviation --------------
# # p <- ggplot(tmpDT[Missed == FALSE], aes(x = Step)) +
# p <- ggplot(DT_per_parameter, aes(x = Step)) +
# # geom_point(aes(y = Position), position=position_dodge(width = 5.5)) +
# # geom_jitter(data = tmpDT, aes(x = Step, y = Position, colour = Missed), size = 0.005, alpha = 0.2) +
# # geom_jitter(data = DT, aes(y = Position, colour = Missed), size = 0.005, alpha = 0.2) +
# # geom_jitter(aes(y = Position, colour = "Per-patient rank"), size = 0.005) +
# # geom_jitter(data =DT_test[Missed == TRUE], aes(x = Step, y = Position, fill = "Missed genes"), colour = "blue", size = 0.005, alpha = 0.2) +
# geom_line(aes(x = Step, y = Av_non_missed, group = 1, colour = "Average rank")) +
# geom_errorbar(aes(x = Step,
# ymax = Av_non_missed + Sd_non_missed,
# ymin = Av_non_missed - Sd_non_missed,
# colour = "Average rank"),
# # colour = "cornflowerblue",
# width = 0.5) +
# ylim(c(-5,35)) +
# geom_hline(aes(yintercept = 5), colour = "darksalmon", linetype="dashed") +
# facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs)) +
# theme_dark() +
# ylab("Disease gene rank") +
# xlab("Max distance to primary reaction") +
# ggtitle("Gene prioritization performance") +
# geom_point(data = DT_per_parameter[DT_per_parameter$best_order_top05 ==1, ], aes(x=Step, y=35, shape = "Best"), size=8) +
# scale_shape_manual(name = "",
# labels = "Best ratios dis.genes\nin top 5",
# values = 42)
# for(i in c(1:5)){
# # p <- p + geom_point(data = DT_per_parameter[as.vector(DT_per_parameter[,"best_order_top05_training"]==i),],aes(x=Step, y=35, shape = "Best"), shape = "*", size=8)
# # p <- p + geom_text(data = DT_per_parameter[as.vector(DT_per_parameter[,"best_order_top05_training"]==i),],
# # aes(x=Step, y=35, label = signif(Prior.frac05_training, digits = digit_significance), group = best_order_top05_training),
# # size=3,
# # # show.legend = FALSE,
# # colour = "black",
# # position = position_dodge(width = 2),
# # vjust = -0.5)
# # p <- p + geom_point(data = DT_per_parameter[as.vector(DT_per_parameter[,"best_order_top05_validation"]==i),],aes(x=Step, y=35, shape = "Best"), shape = "*", size=8)
# # p <- p + geom_text(data = DT_per_parameter[as.vector(DT_per_parameter[,"best_order_top05_validation"]==i),],
# # aes(x=Step, y=35, label = signif(Prior.frac05_validation, digits = digit_significance), group = best_order_top05_validation),
# # size=3,
# # # show.legend = FALSE,
# # colour = "black",
# # position = position_dodge(width = 2),
# # vjust = -0.5)
# p <- p + geom_point(data = DT_per_parameter[as.vector(DT_per_parameter[,"best_order_top05"]==i),],aes(x=Step, y=25, shape = "Best"), shape = "*", size=8)
# p <- p + geom_text(data = DT_per_parameter[as.vector(DT_per_parameter[,"best_order_top05"]==i),],
# aes(x=Step, y=25, label = signif(Prior.frac05, digits = digit_significance), group = best_order_top05),
# size=3,
# # show.legend = FALSE,
# colour = my_sig_palette[6-i],
# position = position_dodge(width = 2),
# vjust = -0.5)
# }
# p <- p + scale_color_manual(name = "Gene ranks",
# labels = c("Average non-missed", "Per patient", "Missed per patient"),
# values=c("red","black","blue"))
# p <- p + guides(shape = guide_legend(override.aes = list(size = 5)),
# color = guide_legend(override.aes = list(linetype=c(1,NA,NA),
# shape=c(NA,16,16),
# size = c(0.5,2,2),
# alpha = c(NA,1,1))))
# pp <- pretty_plot(p, secondary_y_axis = FALSE, theme = "dark")
# # ggsave(paste0(outdir_name,"/",train_val,"_Average_Patient_And_Ranks_And_Missed_",sub_name,".png"), plot = pp,
# # width = 300, height = 200, dpi=resolution, units = "mm")
# ggsave(paste0(outdir_name,"/test_",sub_name,".png"), plot = p,
# width = 300, height = 200, dpi=resolution, units = "mm")
##### Boxplot of a single panel ---------------------------------------
# nm <- 0
# DT[,Patient_follow_number := 0]
# DT[, c("Min_Rank_Patient","Max_Rank_Patient") := list(min(Position),max(Position)), by = .(Step, Z_threshold, Max_rxn, PatientID)]
# for(i in unique(DT$PatientID)){
# nm <- nm + 1
# DT[PatientID == i, Patient_follow_number := nm]
# }
# # Relative ranks
# # p <- ggplot(DT[Z_threshold == "-3, 3" & Max_rxn == 10 & P.value != 1 & Step == 5], aes(x = Patient_follow_number, y = Rank.frac)) +
# p <- ggplot(DT[Z_threshold == "-3, 3" & Max_rxn == 10 & P.value != 1 & Step == 5], aes(x = PatientID, y = Position/201)) +
# geom_boxplot(aes(fill = Patient_follow_number, group = PatientID)) + theme(legend.position = "none") +
# geom_hline(yintercept = 11/201, linetype = "dashed")
# # geom_ribbon(aes(x = Patient_follow_number, ymin=11/201,ymax=11/Min_Rank_Patient),alpha=0.2, fill = "yellow") +
# # geom_ribbon(aes(x = Patient_follow_number, ymin=11/201,ymax=0.6),alpha=0.2, fill = "red") +
# # geom_ribbon(aes(x = Patient_follow_number, ymin = 0, ymax=6/201),alpha=0.2, fill = "green") +
# # theme(axis.text.x = element_text(size = 6, angle = 60, hjust = 1)) +
# # ylim(0,1) +
# # scale_x_continuous(labels = unique(DT[Z_threshold == "-3, 3" & Max_rxn == 10 & P.value != 1 & Step == 5, PatientID]),
# # breaks = unique(DT[Z_threshold == "-3, 3" & Max_rxn == 10 & P.value != 1 & Step == 5,Patient_follow_number]),
# # name = "Patient ID")
# ggsave(paste0(outdir_name,"/",train_val,"Relative_Rank_",sub_name,".png"), plot = p,
# width = 400, height = 260, dpi=resolution, units = "mm")
# Absolute ranks
# Boxplot of the disease gene's absolute rank per patient for one parameter
# combination (Z in [-3, 3], max 15 reactions, step 4), excluding missed
# genes (P.value == 1) and non-included rows.
tmpDT <- DT[Z_threshold == "-3, 3" & Max_rxn == 15 & P.value != 1 & Step == 4 & Include == TRUE]
tmpDT[ , P.value := as.double(P.value)]
# Order rows by gene so patients carrying the same gene appear together.
tmpDT <- tmpDT[order(Gene)]
# Make the x-axis category unique per patient while keeping the gene name in
# front ("GENE^PATIENT"), so the discrete axis sorts by gene first.
tmpDT[, PatientID := paste(Gene, PatientID, sep = "^")]
# One gene label per (now unique) PatientID, in first-appearance order.
# vapply(character(1)) also errors loudly if an ID ever maps to >1 gene,
# instead of silently misaligning the axis labels.
ids <- unique(tmpDT[, PatientID])
disease_genes <- vapply(ids,
                        function(id) unique(tmpDT[PatientID == id, Gene]),
                        character(1), USE.NAMES = FALSE)
# Diagnostic echo of the gene/ID pairs (same output as the original
# per-iteration cat() calls).
cat(paste(disease_genes, ids, "\n"), sep = "")
# NOTE(review): scale_x_discrete() orders categories by sort order of
# PatientID while `labels` is supplied in data order; because each ID starts
# with its gene name and rows were sorted by gene, these agree for this data,
# but verify if ties within a gene can sort differently.
p <- ggplot(tmpDT, aes(x = PatientID, y = Position)) +
  geom_boxplot(aes(group = PatientID)) +
  # Rank 10 is the prioritisation cut-off used in the other plots.
  geom_hline(yintercept = 10, linetype = "dashed") +
  theme(axis.text.x = element_text(size = 6, angle = 60, hjust = 1)) +
  ylab("Disease gene rank") +
  scale_x_discrete(labels = disease_genes,
                   name = "Disease genes of individual patients")
ggsave(paste0(outdir_name,"/1Par_Absolute_Rank_",sub_name,".svg"), plot = p,
       width = 400, height = 150, units = "mm")
# # visualise Sd_rank & Av_rank ~ DBS per patient
# DT_per_patient[, DBS := factor(DBS)]
# p <- ggplot(DT_per_patient, aes(DBS, Sd_rank_excl_miss)) +
# geom_jitter(colour = "darkgreen", alpha = 0.8) +
# geom_boxplot(alpha = 0.8)
# ggsave(paste0(outdir_name,"/Sd_Abs_Rank_(NoMiss)_vs_DBS.png"), plot = p,
# width = 400, height = 260, dpi=resolution, units = "mm")
#
# p <- ggplot(DT_per_patient, aes(DBS, Av_rank_excl_miss)) +
# geom_jitter(colour = "darkred", alpha = 0.8) +
# geom_boxplot(alpha = 0.8)
# ggsave(paste0(outdir_name,"/Av_Abs_Rank_(NoMiss)_vs_DBS.png"), plot = p,
# width = 400, height = 260, dpi=resolution, units = "mm")
#
# # visualise Sd_rank & Av_rank ~ Dataset/Run
# DT_per_patient[, Dataset := factor(Dataset)]
# ggplot(DT_per_patient, aes(Dataset, Sd_rank_excl_miss)) +
# geom_jitter(colour = "darkgreen", alpha = 0.8) +
# geom_boxplot(alpha = 0.8) +
# theme(axis.text.x = element_text(size = 6, angle = 45, hjust = 1))
# ggplot(DT_per_patient, aes(Dataset, Av_rank_excl_miss)) +
# geom_jitter(colour = "darkred", alpha = 0.8) +
# geom_boxplot(aes(fill = Set), alpha = 0.8) +
# theme(axis.text.x = element_text(size = 6, angle = 45, hjust = 1))
#
# # visualise Sd_rank & Av_rank ~ metabolite_set_size per patient (unbinned)
# ggplot(DT_per_patient,aes(Mets_in_set, Sd_rank_excl_miss)) +
# geom_point()
# ggplot(DT_per_patient,aes(Mets_in_set, Av_rank_excl_miss)) +
# geom_point()
#
# # visualise Sd_rank & Av_rank ~ metabolite_set_size per patient (binned)
# DT_per_patient[, Mets_in_set := as.numeric(Mets_in_set)]
# test <- copy(DT_per_patient)
# test[,Bin := "0"]
# test[Mets_in_set > 0, Bin := "0 < x ≤ 5"]
# test[Mets_in_set > 5, Bin := "5 < x ≤ 10"]
# test[Mets_in_set > 10, Bin := "10 < x ≤ 20"]
# test[Mets_in_set > 20, Bin := "20 < x ≤ 40"]
# test[Mets_in_set > 40, Bin := "40 < x ≤ 80"]
# test[Mets_in_set > 80, Bin := "80 < x ≤ 140"]
# test[Mets_in_set > 140, Bin := "140 < x ≤ 200"]
# test[Mets_in_set > 200, Bin := "200 < x ≤ 300"]
# test[Mets_in_set > 300, Bin := "300 < x"]
# test[,Bin := factor(Bin)]
# levels(test$Bin) <- c("0", "0 < x ≤ 5", "5 < x ≤ 10", "10 < x ≤ 20", "20 < x ≤ 40", "40 < x ≤ 80", "80 < x ≤ 140",
# "140 < x ≤ 200", "200 < x ≤ 300", "300 < x")
#
# p <- ggplot(test, aes(x = Bin, y = Sd_rank_excl_miss)) +
# geom_jitter(colour = "darkgreen", alpha = 0.8) +
# geom_boxplot(alpha = 0.8)
# ggsave(paste0(outdir_name,"/Sd_Abs_Rank_(NoMiss)_vs_Met_Set_Size.png"), plot = p,
# width = 400, height = 260, dpi=resolution, units = "mm")
# p <- ggplot(test, aes(x = Bin, y = Av_rank_excl_miss)) +
# geom_jitter(colour = "darkred", alpha = 0.8) +
# geom_boxplot(alpha = 0.8)
# ggsave(paste0(outdir_name,"/Av_Abs_Rank_(NoMiss)_vs_Met_Set_Size.png"), plot = p,
# width = 400, height = 260, dpi=resolution, units = "mm")
#####
# difference of #DBS on the rankings
# Determine which genes are present 1. in multiple patients and 2. with different number of DBS
# geneDBS <- unique(DT[, paste(Gene, DBS, sep = ";")])
# geneDBS <- names(which(table(unlist(lapply(geneDBS, function(x) strsplit(x, split = ";")[[1]][1]))) > 1))
#
#
# p <-ggplot(DT_per_patient[Gene %in% geneDBS & Missed == 0, ], aes(x = as.factor(DBS), y = Av_rank)) +
# geom_jitter(aes(colour = Dataset), alpha = 0.8) +
# geom_boxplot(aes(colour = Dataset), alpha = 0.8) +
# # theme(legend.position="bottom") +
# facet_wrap(. ~ Gene, scales='free_x')
# ggsave(paste0(outdir_name,"/Genes_With_Multiple_DBS.png"), plot = p,
# width = 400, height = 260, dpi=resolution, units = "mm")
#
#
# p2 <- ggplot(DT_per_patient[Gene %in% geneDBS, ], aes(x = as.factor(DBS), y = Sd_rank)) +
# geom_jitter(aes(colour = PatientID), alpha = 0.8) +
# geom_boxplot(aes(colour = PatientID), alpha = 0.8) +
# facet_wrap(. ~ Gene, scales='free_x')
#
#
# p1 <-ggplot(DT_per_patient[Gene=="CPT1A",], aes(x = as.factor(DBS), y = Av_rank)) +
# geom_jitter(aes(colour = PatientID), alpha = 0.8) +
# geom_boxplot(aes(colour = PatientID), alpha = 0.8) +
# theme(legend.position="bottom")
# p2 <- ggplot(DT_per_patient[Gene=="CPT1A",], aes(x = as.factor(DBS), y = Sd_rank)) +
# geom_jitter(aes(colour = PatientID), alpha = 0.8) +
# geom_boxplot(aes(colour = PatientID), alpha = 0.8)
#
# g_legend<-function(a.gplot){
# tmp <- ggplot_gtable(ggplot_build(a.gplot))
# leg <- which(sapply(tmp$grobs, function(x) x$name) == "guide-box")
# legend <- tmp$grobs[[leg]]
# return(legend)}
# mylegend<-g_legend(p1)
# p3 <- gridExtra::grid.arrange(arrangeGrob(p1 + theme(legend.position="none"),
# p2 + theme(legend.position="none"),
# nrow=1),
# mylegend, nrow=2,heights=c(10, 1))
# # gridExtra::grid.arrange(p1, p2, ncol=2)
# ggsave(paste0(outdir_name,"/CPT1A_Av_Abs_Rank_And_Sd_vs_DBS.png"), plot = p3,
# width = 400, height = 260, dpi=resolution, units = "mm")
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Per patient plots -------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# ##### Average rank per patient --------------------------------------------
# p <- ggplot(data = DT_per_patient, aes(x = Step, y = Av_rank)) +
# geom_bar(position = "dodge", stat="identity", aes(fill = PatientID)) +
# geom_line(data = DT_per_parameter, aes(x = Step, y = Missed/0.25, group = 1, colour = "Genes Missed")) +
# scale_y_continuous(sec.axis = sec_axis(~.*0.25, name = "Tot.Genes Missed")) +
# ylab("Average Rank") +
# xlab("Max. distance to primary reaction") +
# ggtitle("Disease gene rank per patient & parameter combination") +
# labs(colour = "", fill = "Patients") +
# guides(colour = guide_legend(order = 1),
# fill = guide_legend(order = 2)) +
# facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs))
# p <- pretty_plot(p)
# ggsave(paste0(outdir_name,"/_Av_Rank_Per_Patient_",sub_name,".png"), plot = p,
# width = 1000, height = 200, dpi=resolution, units = "mm")
#
#
# ##### Standard deviation of average rank per patient ----------------------
# p <- ggplot(data = DT_per_patient, aes(x = Step, y = Sd_rank)) +
# geom_bar(position = "dodge", stat="identity", aes(fill = PatientID)) +
# # geom_line(data = DT_per_parameter, aes(x = Step, y = Missed/4, group = 1, colour = "Genes Missed")) +
# # scale_y_continuous(sec.axis = sec_axis(~.*4, name = "Tot.Genes Missed")) +
# # scale_y_continuous(sec.axis = sec_axis(~.*4, name = "Tot.Genes Missed", breaks = seq(0, 50, 10))) +
# ylab("Sd disease gene rank") +
# xlab("Max. distance to primary reaction") +
# ggtitle("Disease gene St.dev. per patient & parameter combination") +
# labs(colour = "", fill = "Patients") +
# guides(colour = guide_legend(order = 1),
# fill = guide_legend(order = 2)) +
# facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs))
# p <- pretty_plot(p)
# ggsave(paste0(outdir_name,"/",train_val,"_Sd_Rank_Per_Patient_",sub_name,".png"), plot = p,
# width = 300, height = 200, dpi=resolution, units = "mm")
#
#
# ##### Average relative rank per patient -----------------------------------
# p <- ggplot(data = DT_per_patient, aes(x = Step, y = Av_Rel_rank)) +
# geom_bar(position = "dodge", stat="identity", aes(fill = PatientID)) +
# # geom_line(data = DT_per_parameter, aes(x = Step, y = Missed/50, group = 1, colour = "Genes Missed")) +
# # scale_y_continuous(sec.axis = sec_axis(~.*50, name = "Tot.Genes Missed")) +
# ylab("Average Relative Rank (rank/Tot. genes") +
# xlab("Max. distance to primary reaction") +
# ggtitle("Disease gene rel.rank per patient & parameter combination") +
# labs(colour = "", fill = "Patients") +
# guides(colour = guide_legend(order = 1),
# fill = guide_legend(order = 2)) +
# facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs))
# p <- pretty_plot(p)
# ggsave(paste0(outdir_name,"/",train_val,"_Av_Rel_Rank_Per_Patient_",sub_name,".png"), plot = p,
# width = 300, height = 200, dpi=resolution, units = "mm")
#
#
# ##### Standard deviation of average relative rank per patient -------------
# p <- ggplot(data = DT_per_patient, aes(x = Step, y = Sd_Rel_rank)) +
# geom_bar(position = "dodge", stat="identity", aes(fill = PatientID)) +
# # geom_line(data = DT_per_parameter, aes(x = Step, y = Missed/800, group = 1, colour = "Genes Missed")) +
# # scale_y_continuous(sec.axis = sec_axis(~.*800, name = "Tot.Genes Missed")) +
# ylab("Sd disease gene relative Rank") +
# xlab("Max. distance to primary reaction") +
# ggtitle("Disease gene St.dev. per patient & parameter combination") +
# labs(colour = "", fill = "Patients") +
# guides(colour = guide_legend(order = 1),
# fill = guide_legend(order = 2)) +
# facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs))
# p <- pretty_plot(p)
# ggsave(paste0(outdir_name,"/",train_val,"_Sd_Rel_Rank_Per_Patient_",sub_name,".png"), plot = p,
# width = 300, height = 200, dpi=resolution, units = "mm")
#
#
# ##### Average relative rank per patient, reversed (1 = rank 1) ------------
# p <- ggplot(data = DT_per_patient, aes(x = Step, y = Av_Rev_Rel_rank)) +
# geom_bar(position = "dodge", stat="identity", aes(fill = PatientID)) +
# # geom_line(data = DT_per_parameter, aes(x = Step, y = Missed/50, group = 1, colour = "Genes Missed")) +
# # scale_y_continuous(sec.axis = sec_axis(~.*50, name = "Tot.Genes Missed")) +
# ylab("Average reverse Relative Rank (1-((rank-1)/(Tot. genes in set-1))") +
# xlab("Max. distance to primary reaction") +
# ggtitle("Disease gene rel.rank per patient & parameter combination") +
# labs(colour = "", fill = "Patients") +
# guides(colour = guide_legend(order = 1),
# fill = guide_legend(order = 2)) +
# facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs))
# p <- pretty_plot(p)
# ggsave(paste0(outdir_name,"/",train_val,"_Av_Rev_Rel_Rank_Per_Patient_",sub_name,".png"), plot = p,
# width = 300, height = 200, dpi=resolution, units = "mm")
#
#
# ##### St. dev. of average relative (reversed) rank per patient ------------
# p <- ggplot(data = DT_per_patient, aes(x = Step, y = Sd_Rev_Rel_rank)) +
# geom_bar(position = "dodge", stat="identity", aes(fill = PatientID)) +
# # geom_line(data = DT_per_parameter, aes(x = Step, y = Missed/800, group = 1, colour = "Genes Missed")) +
# # scale_y_continuous(sec.axis = sec_axis(~.*800, name = "Tot.Genes Missed")) +
# ylab("Sd disease gene reverse relative Rank") +
# xlab("Max. distance to primary reaction") +
# ggtitle("Disease gene reverse St.dev. per patient & parameter combination") +
# labs(colour = "", fill = "Patients") +
# guides(colour = guide_legend(order = 1),
# fill = guide_legend(order = 2)) +
# facet_grid(Max_rxn ~ Z_threshold, labeller = labeller(Max_rxn = Rxn_labs, Z_threshold = Thresh_labs))
# p <- pretty_plot(p)
# ggsave(paste0(outdir_name,"/",train_val,"_Sd_Rev_Rel_Rank_Per_Patient_",sub_name,".png"), plot = p,
# width = 300, height = 200, dpi=resolution, units = "mm")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/on_base_percentage_formula.R
\name{obp_formula}
\alias{obp_formula}
\title{On-base Percentage formula}
\usage{
obp_formula(data)
}
\arguments{
\item{data}{KBO batter data}
}
\value{
OBP from data
}
\description{
Calculating OBP from data
}
\examples{
obp_formula(hanhwa_batter_2018)
}
| /man/obp_formula.Rd | permissive | choosunsick/kbodatamining | R | false | true | 363 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/on_base_percentage_formula.R
\name{obp_formula}
\alias{obp_formula}
\title{On-base Percentage formula}
\usage{
obp_formula(data)
}
\arguments{
\item{data}{KBO batter data}
}
\value{
OBP from data
}
\description{
Calculating OBP from data
}
\examples{
obp_formula(hanhwa_batter_2018)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-animals.R
\docType{data}
\name{animal_7}
\alias{animal_7}
\title{Zonotrichia Atricapilla}
\format{
list of igraph objects
}
\source{
https://bansallab.github.io/asnr/
}
\usage{
animal_7
}
\description{
Species: \emph{Zonotrichia atricapilla}
Taxonomic class: Aves
Population type: free-ranging
Geographical location: California, USA
Data collection technique: survey scan
Interaction type: group membership
Definition of interaction: A flock was defined as a group of birds within an approximately 5-metre radius. Social networks of flock comembership was constructed where nodes represent individual birds and edges represent the simple ratio association index
Edge weight type: simple_ratio_index
Total duration of data collection: 3 months
Time resolution of data collection (within a day):
Time span of data collection (within a day): focal follow/ad libitum
Note: Two networks collected over two consecutive years
}
\references{
Arnberg, Nina N., et al. "Social network structure in wintering golden‐crowned sparrows is not correlated with kinship." Molecular ecology 24.19 (2015): 5034-5044.
}
\keyword{datasets}
| /man/animal_7.Rd | permissive | schochastics/networkdata | R | false | true | 1,219 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-animals.R
\docType{data}
\name{animal_7}
\alias{animal_7}
\title{Zonotrichia Atricapilla}
\format{
list of igraph objects
}
\source{
https://bansallab.github.io/asnr/
}
\usage{
animal_7
}
\description{
Species: \emph{Zonotrichia atricapilla}
Taxonomic class: Aves
Population type: free-ranging
Geographical location: California, USA
Data collection technique: survey scan
Interaction type: group membership
Definition of interaction: A flock was defined as a group of birds within an approximately 5-metre radius. Social networks of flock comembership was constructed where nodes represent individual birds and edges represent the simple ratio association index
Edge weight type: simple_ratio_index
Total duration of data collection: 3 months
Time resolution of data collection (within a day):
Time span of data collection (within a day): focal follow/ad libitum
Note: Two networks collected over two consecutive years
}
\references{
Arnberg, Nina N., et al. "Social network structure in wintering golden‐crowned sparrows is not correlated with kinship." Molecular ecology 24.19 (2015): 5034-5044.
}
\keyword{datasets}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 545
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 6
c
c Performing E1-Autarky iteration.
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query24_query50_1344.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 413
c no.of clauses 545
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 0
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query24_query50_1344.qdimacs 413 545 E1 [3 5 7 8 9 10 11 12 13 14 15 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 52 53 54 55 56 57 58 59 61 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 1 6 195 196 197 198] 0 0 0 0 SAT
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query24_query50_1344/query24_query50_1344.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 2,254 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 545
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 6
c
c Performing E1-Autarky iteration.
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query24_query50_1344.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 413
c no.of clauses 545
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 0
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query24_query50_1344.qdimacs 413 545 E1 [3 5 7 8 9 10 11 12 13 14 15 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 52 53 54 55 56 57 58 59 61 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 1 6 195 196 197 198] 0 0 0 0 SAT
|
#Q1-1
seasons<-matrix(c("봄","여름","가을","겨울"),nrow=2)
seasons
#Q1-2
seasons<-matrix(c("봄","여름","가을","겨울"),nrow=2,byrow=T)
seasons
#Q2
seasons[,2]
#Q3
seasons_2<-rbind(seasons,c("초봄","초가을"))
seasons_2
#Q4
seasons_3<-cbind(seasons_2,c("초여름","초겨울","한겨울"))
seasons_3
#Q1
m<-matrix(c(1,5,2,6,3,7,4,8),nrow=4)
m
m[,1]
m[2,]
colnames(m)<-c('A','B')
m
mm<-cbind(m,c(9,10,11,12))
mm
n<-matrix(c(1,5,2,6,3,7,4,8),nrow=2)
n
n[1,]
n[,4]
nn<-rbind(n,c(9,10,11,12))
nn
colnames(n)<-c('A','B','C','D')
oh<-matrix(c('J','A','V','A','C','A','F','E'),nrow=2)
oh
| /r/2_12제출/3_4/matrix/matrix/practice.R | no_license | mgh3326/big_data_web | R | false | false | 600 | r | #Q1-1
seasons<-matrix(c("봄","여름","가을","겨울"),nrow=2)
seasons
#Q1-2
seasons<-matrix(c("봄","여름","가을","겨울"),nrow=2,byrow=T)
seasons
#Q2
seasons[,2]
#Q3
seasons_2<-rbind(seasons,c("초봄","초가을"))
seasons_2
#Q4
seasons_3<-cbind(seasons_2,c("초여름","초겨울","한겨울"))
seasons_3
#Q1
m<-matrix(c(1,5,2,6,3,7,4,8),nrow=4)
m
m[,1]
m[2,]
colnames(m)<-c('A','B')
m
mm<-cbind(m,c(9,10,11,12))
mm
n<-matrix(c(1,5,2,6,3,7,4,8),nrow=2)
n
n[1,]
n[,4]
nn<-rbind(n,c(9,10,11,12))
nn
colnames(n)<-c('A','B','C','D')
oh<-matrix(c('J','A','V','A','C','A','F','E'),nrow=2)
oh
|
library(glmnet)
mydata = read.table("./TrainingSet/ReliefF/soft_tissue.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.9,family="gaussian",standardize=TRUE)
sink('./Model/EN/ReliefF/soft_tissue/soft_tissue_091.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/ReliefF/soft_tissue/soft_tissue_091.R | no_license | leon1003/QSMART | R | false | false | 366 | r | library(glmnet)
mydata = read.table("./TrainingSet/ReliefF/soft_tissue.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.9,family="gaussian",standardize=TRUE)
sink('./Model/EN/ReliefF/soft_tissue/soft_tissue_091.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
#rasterize ground truth in rasterize_shapes.r
#chop labels and files up in prepare_data_for_segmentation.r
#make train and test with train_test_split.r
#train network in keras.r , network defined in Unet.r
#evaluate in evaluate.r
#use visualise.r to make color images from prediction and labels feathers | /app.r | no_license | daanvandermaas/schelde | R | false | false | 308 | r | #rasterize ground truth in rasterize_shapes.r
#chop labels and files up in prepare_data_for_segmentation.r
#make train and test with train_test_split.r
#train network in keras.r , network defined in Unet.r
#evaluate in evaluate.r
#use visualise.r to make color images from prediction and labels feathers |
#Perform clustering with 2nd data set IRIS
data(iris)
str(iris)
# Installing Packages
install.packages("ClusterR")
install.packages("cluster")
# Loading package
library(ClusterR)
library(cluster)
# Removing initial label of
# Species from original dataset
iris_1 <- iris[, -5]
# Fitting K-Means clustering Model
# to training dataset
set.seed(240) # Setting seed
kmeans.re <- kmeans(iris_1, centers = 3, nstart = 20)
kmeans.re
# Cluster identification for # each observation
kmeans.re$cluster
# Confusion Matrix
cm <- table(iris$Species, kmeans.re$cluster)
cm
# Model Evaluation and visualization
plot(iris_1[c("Sepal.Length", "Sepal.Width")])
plot(iris_1[c("Sepal.Length", "Sepal.Width")],
col = kmeans.re$cluster)
plot(iris_1[c("Sepal.Length", "Sepal.Width")],
col = kmeans.re$cluster,
main = "K-means with 3 clusters")
## Plotiing cluster centers
kmeans.re$centers
kmeans.re$centers[, c("Sepal.Length", "Sepal.Width")]
# cex is font size, pch is symbol
points(kmeans.re$centers[, c("Sepal.Length", "Sepal.Width")],
col = 1:3, pch = 8, cex = 3)
## Visualizing clusters
y_kmeans <- kmeans.re$cluster
clusplot(iris_1[, c("Sepal.Length", "Sepal.Width")],
y_kmeans,
lines = 0,
shade = TRUE,
color = TRUE,
labels = 2,
plotchar = FALSE,
span = TRUE,
main = paste("Cluster iris"),
xlab = 'Sepal.Length',
ylab = 'Sepal.Width')
##Manhattan
set.seed(240) # Setting seed
kmeans.re <- kmeans(iris_1, centers = 3, nstart = 20,method="manhattan")
kmeans.re
# Cluster identification for
# each observation
kmeans.re$cluster
# Confusion Matrix
cm <- table(iris$Species, kmeans.re$cluster)
cm
# Model Evaluation and visualization
plot(iris_1[c("Sepal.Length", "Sepal.Width")])
plot(iris_1[c("Sepal.Length", "Sepal.Width")],
col = kmeans.re$cluster)
plot(iris_1[c("Sepal.Length", "Sepal.Width")],
col = kmeans.re$cluster,
main = "K-means with 3 clusters")
## Plotiing cluster centers
kmeans.re$centers
kmeans.re$centers[, c("Sepal.Length", "Sepal.Width")]
# cex is font size, pch is symbol
points(kmeans.re$centers[, c("Sepal.Length", "Sepal.Width")],
col = 1:3, pch = 8, cex = 3)
## Visualizing clusters
y_kmeans <- kmeans.re$cluster
clusplot(iris_1[, c("Sepal.Length", "Sepal.Width")],
y_kmeans,
lines = 0,
shade = TRUE,
color = TRUE,
labels = 2,
plotchar = FALSE,
span = TRUE,
main = paste("Cluster iris"),
xlab = 'Sepal.Length',
ylab = 'Sepal.Width') | /Lab6/Assignment6_12.R | no_license | Divya-varma/Data_Analytics_Lab | R | false | false | 2,692 | r | #Perform clustering with 2nd data set IRIS
data(iris)
str(iris)
# Installing Packages
install.packages("ClusterR")
install.packages("cluster")
# Loading package
library(ClusterR)
library(cluster)
# Removing initial label of
# Species from original dataset
iris_1 <- iris[, -5]
# Fitting K-Means clustering Model
# to training dataset
set.seed(240) # Setting seed
kmeans.re <- kmeans(iris_1, centers = 3, nstart = 20)
kmeans.re
# Cluster identification for # each observation
kmeans.re$cluster
# Confusion Matrix
cm <- table(iris$Species, kmeans.re$cluster)
cm
# Model Evaluation and visualization
plot(iris_1[c("Sepal.Length", "Sepal.Width")])
plot(iris_1[c("Sepal.Length", "Sepal.Width")],
col = kmeans.re$cluster)
plot(iris_1[c("Sepal.Length", "Sepal.Width")],
col = kmeans.re$cluster,
main = "K-means with 3 clusters")
## Plotiing cluster centers
kmeans.re$centers
kmeans.re$centers[, c("Sepal.Length", "Sepal.Width")]
# cex is font size, pch is symbol
points(kmeans.re$centers[, c("Sepal.Length", "Sepal.Width")],
col = 1:3, pch = 8, cex = 3)
## Visualizing clusters
y_kmeans <- kmeans.re$cluster
clusplot(iris_1[, c("Sepal.Length", "Sepal.Width")],
y_kmeans,
lines = 0,
shade = TRUE,
color = TRUE,
labels = 2,
plotchar = FALSE,
span = TRUE,
main = paste("Cluster iris"),
xlab = 'Sepal.Length',
ylab = 'Sepal.Width')
##Manhattan
set.seed(240) # Setting seed
kmeans.re <- kmeans(iris_1, centers = 3, nstart = 20,method="manhattan")
kmeans.re
# Cluster identification for
# each observation
kmeans.re$cluster
# Confusion Matrix
cm <- table(iris$Species, kmeans.re$cluster)
cm
# Model Evaluation and visualization
plot(iris_1[c("Sepal.Length", "Sepal.Width")])
plot(iris_1[c("Sepal.Length", "Sepal.Width")],
col = kmeans.re$cluster)
plot(iris_1[c("Sepal.Length", "Sepal.Width")],
col = kmeans.re$cluster,
main = "K-means with 3 clusters")
## Plotiing cluster centers
kmeans.re$centers
kmeans.re$centers[, c("Sepal.Length", "Sepal.Width")]
# cex is font size, pch is symbol
points(kmeans.re$centers[, c("Sepal.Length", "Sepal.Width")],
col = 1:3, pch = 8, cex = 3)
## Visualizing clusters
y_kmeans <- kmeans.re$cluster
clusplot(iris_1[, c("Sepal.Length", "Sepal.Width")],
y_kmeans,
lines = 0,
shade = TRUE,
color = TRUE,
labels = 2,
plotchar = FALSE,
span = TRUE,
main = paste("Cluster iris"),
xlab = 'Sepal.Length',
ylab = 'Sepal.Width') |
# qbias.R
isotope_library <-
tibble::tribble(
~metabolite, ~formula, ~polarity,
"2HG", "C5H8O5", "negative",
"2OG", "C5H6O5", "negative",
"alanine", "C3H7NO2", "negative",
"aspartate", "C4H7NO4", "negative",
"citrate", "C6H8O7", "negative",
"glutamate", "C5H9NO4", "negative",
"glutamine", "C5H10N2O3", "negative",
"lactate", "C3H6O3", "negative",
"malate", "C4H6O5", "negative",
"pyruvate", "C3H4O3", "negative",
"serine", "C3H7NO3", "negative",
"succinate", "C4H6O4", "negative",
"3PG", "C3H7O7P", "negative",
"aconitate", "C6H6O6", "negative",
"FBP", "C6H14O12P2", "negative",
"G3P", "C3H9O6P", "negative",
"palmitate", "C16H32O2", "negative",
"PEP", "C3H5O6P", "negative",
"sedoheptulose", "C7H14O7", "negative",
"DHAP", "C3H7O6P", "negative",
"GAP", "C3H7O6P", "negative",
"G1P", "C6H13O9P", "negative",
"G6P", "C6H13O9P", "negative",
"R5P", "C5H11O8P", "negative"
)
calculate_ratios <- function(path) {
readr::read_csv(
path,
show_col_types = FALSE
) |>
dplyr::filter(!is.na(.data$area)) |>
tidyr::separate(
.data$filename,
into = c(NA, "window", "replicate"),
sep = c(1, 2),
convert = TRUE
) |>
tidyr::separate(
.data$ion,
into = c("metabolite", "isotope"),
sep = " ",
convert = TRUE
) |>
dplyr::mutate(carbons = dplyr::case_when(
.data$metabolite %in% c("citrate") ~ 6,
.data$metabolite %in% c("2HG", "2OG", "glutamate", "glutamine") ~ 5,
.data$metabolite %in% c("aspartate", "malate", "succinate") ~ 4,
.data$metabolite %in% c("lactate", "pyruvate", "alanine", "serine") ~ 3
)) |>
dplyr::filter(.data$window <= .data$carbons) |>
tidyr::pivot_wider(names_from = "isotope", values_from = "area") |>
dplyr::mutate(ratio = .data$M1 / .data$M0) |>
dplyr::group_by(.data$metabolite)
}
read_qbias <- function(file_list) {
file_list |>
{\(x) rlang::set_names(
x,
stringr::str_extract(basename(x), pattern = "(?<=_)\\w(?=\\.csv)")
)}() |>
purrr::map_dfr(calculate_ratios, .id = "batch") |>
dplyr::group_by(.data$batch, .data$metabolite) |>
dplyr::arrange(.data$metabolite)
}
predicted_ratios <-
isotope_library |>
dplyr::mutate(table = purrr::map2(
.data$formula,
.data$polarity,
\(x, y) mzrtools::mz_iso_quant(molecule = x, polarity = y)[["prob_matrix"]]
)) |>
dplyr::mutate(pred_ratio = purrr::map_dbl(table, \(x) x[[2, 1]] / x[[1, 1]])) |>
dplyr::select("metabolite", "pred_ratio")
calculate_correction_factors <- function(qbias_ratios, pred_ratios) {
qbias_ratios |>
tidyr::nest() |>
dplyr::mutate(
model = purrr::map(.data$data, \(x) MASS::rlm(ratio ~ stats::poly(window, 3), data = x, maxit = 1000)),
predict = purrr::map2(.data$model, .data$data, stats::predict)
) |>
tidyr::unnest(c("data", "predict")) |>
dplyr::select("batch", "metabolite", "window", "carbons", "predict") |>
dplyr::distinct() |>
dplyr::left_join(pred_ratios, by = "metabolite") |>
dplyr::mutate(cf = .data$predict / .data$pred_ratio) |>
dplyr::select("metabolite", M = "window", "carbons", "cf") |>
dplyr::filter(.data$M < .data$carbons) |>
dplyr::ungroup() |>
dplyr::mutate(M = .data$M + 1) |>
tidyr::pivot_wider(names_from = "M", values_from = "cf") |>
dplyr:: mutate(
M0 = 1,
M1 = 1 / .data$`1` * .data$M0,
M2 = 1 / .data$`2` * .data$M1,
M3 = 1 / .data$`3` * .data$M2,
M4 = 1 / .data$`4` * .data$M3,
M5 = 1 / .data$`5` * .data$M4,
M6 = 1 / .data$`6` * .data$M5
) |>
dplyr::select("batch", "metabolite", tidyselect::matches("M[0-9]+")) |>
tidyr::pivot_longer(
cols = tidyselect::matches("M[0-9]+"),
names_to = "M",
values_to = "cf",
values_drop_na = TRUE
) |>
dplyr::arrange("batch", "metabolite")
}
| /R/qbias.R | permissive | oldhamlab/Copeland.2023.hypoxia.flux | R | false | false | 3,929 | r | # qbias.R
isotope_library <-
tibble::tribble(
~metabolite, ~formula, ~polarity,
"2HG", "C5H8O5", "negative",
"2OG", "C5H6O5", "negative",
"alanine", "C3H7NO2", "negative",
"aspartate", "C4H7NO4", "negative",
"citrate", "C6H8O7", "negative",
"glutamate", "C5H9NO4", "negative",
"glutamine", "C5H10N2O3", "negative",
"lactate", "C3H6O3", "negative",
"malate", "C4H6O5", "negative",
"pyruvate", "C3H4O3", "negative",
"serine", "C3H7NO3", "negative",
"succinate", "C4H6O4", "negative",
"3PG", "C3H7O7P", "negative",
"aconitate", "C6H6O6", "negative",
"FBP", "C6H14O12P2", "negative",
"G3P", "C3H9O6P", "negative",
"palmitate", "C16H32O2", "negative",
"PEP", "C3H5O6P", "negative",
"sedoheptulose", "C7H14O7", "negative",
"DHAP", "C3H7O6P", "negative",
"GAP", "C3H7O6P", "negative",
"G1P", "C6H13O9P", "negative",
"G6P", "C6H13O9P", "negative",
"R5P", "C5H11O8P", "negative"
)
calculate_ratios <- function(path) {
readr::read_csv(
path,
show_col_types = FALSE
) |>
dplyr::filter(!is.na(.data$area)) |>
tidyr::separate(
.data$filename,
into = c(NA, "window", "replicate"),
sep = c(1, 2),
convert = TRUE
) |>
tidyr::separate(
.data$ion,
into = c("metabolite", "isotope"),
sep = " ",
convert = TRUE
) |>
dplyr::mutate(carbons = dplyr::case_when(
.data$metabolite %in% c("citrate") ~ 6,
.data$metabolite %in% c("2HG", "2OG", "glutamate", "glutamine") ~ 5,
.data$metabolite %in% c("aspartate", "malate", "succinate") ~ 4,
.data$metabolite %in% c("lactate", "pyruvate", "alanine", "serine") ~ 3
)) |>
dplyr::filter(.data$window <= .data$carbons) |>
tidyr::pivot_wider(names_from = "isotope", values_from = "area") |>
dplyr::mutate(ratio = .data$M1 / .data$M0) |>
dplyr::group_by(.data$metabolite)
}
read_qbias <- function(file_list) {
file_list |>
{\(x) rlang::set_names(
x,
stringr::str_extract(basename(x), pattern = "(?<=_)\\w(?=\\.csv)")
)}() |>
purrr::map_dfr(calculate_ratios, .id = "batch") |>
dplyr::group_by(.data$batch, .data$metabolite) |>
dplyr::arrange(.data$metabolite)
}
predicted_ratios <-
isotope_library |>
dplyr::mutate(table = purrr::map2(
.data$formula,
.data$polarity,
\(x, y) mzrtools::mz_iso_quant(molecule = x, polarity = y)[["prob_matrix"]]
)) |>
dplyr::mutate(pred_ratio = purrr::map_dbl(table, \(x) x[[2, 1]] / x[[1, 1]])) |>
dplyr::select("metabolite", "pred_ratio")
calculate_correction_factors <- function(qbias_ratios, pred_ratios) {
qbias_ratios |>
tidyr::nest() |>
dplyr::mutate(
model = purrr::map(.data$data, \(x) MASS::rlm(ratio ~ stats::poly(window, 3), data = x, maxit = 1000)),
predict = purrr::map2(.data$model, .data$data, stats::predict)
) |>
tidyr::unnest(c("data", "predict")) |>
dplyr::select("batch", "metabolite", "window", "carbons", "predict") |>
dplyr::distinct() |>
dplyr::left_join(pred_ratios, by = "metabolite") |>
dplyr::mutate(cf = .data$predict / .data$pred_ratio) |>
dplyr::select("metabolite", M = "window", "carbons", "cf") |>
dplyr::filter(.data$M < .data$carbons) |>
dplyr::ungroup() |>
dplyr::mutate(M = .data$M + 1) |>
tidyr::pivot_wider(names_from = "M", values_from = "cf") |>
dplyr:: mutate(
M0 = 1,
M1 = 1 / .data$`1` * .data$M0,
M2 = 1 / .data$`2` * .data$M1,
M3 = 1 / .data$`3` * .data$M2,
M4 = 1 / .data$`4` * .data$M3,
M5 = 1 / .data$`5` * .data$M4,
M6 = 1 / .data$`6` * .data$M5
) |>
dplyr::select("batch", "metabolite", tidyselect::matches("M[0-9]+")) |>
tidyr::pivot_longer(
cols = tidyselect::matches("M[0-9]+"),
names_to = "M",
values_to = "cf",
values_drop_na = TRUE
) |>
dplyr::arrange("batch", "metabolite")
}
|
library(ape)
testtree <- read.tree("7417_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="7417_0_unrooted.txt") | /codeml_files/newick_trees_processed/7417_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
testtree <- read.tree("7417_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="7417_0_unrooted.txt") |
## This function is aided to speed up a (hypothetical) process that involves using several times the inverse of a matrix
## Calculating the inverse of a matrix is, in general, a constly computation
## This function allows to "cache" the inverse of a matriz creating a sort of matrix
makeCacheMatrix <- function(x = numeric()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setInverse <- function(solve) m <<- solve
getInverse <- function() m
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## The following function computes the inverse of the object (this sort ofmatrix) returned by the funtcion makeCacheMatrix created above.
## If the matrix has not changed and its inverse has already been calculated, then the cachesolve detects this fact and shaves
## computer resources by retrieving the inverse from the cache
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getInverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setInverse(m)
m
}
| /cachematrix.R | no_license | marcosmarva/ProgrammingAssignment2 | R | false | false | 1,239 | r | ## This function is aided to speed up a (hypothetical) process that involves using several times the inverse of a matrix
## Calculating the inverse of a matrix is, in general, a constly computation
## This function allows to "cache" the inverse of a matriz creating a sort of matrix
makeCacheMatrix <- function(x = numeric()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setInverse <- function(solve) m <<- solve
getInverse <- function() m
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## The following function computes the inverse of the object (this sort ofmatrix) returned by the funtcion makeCacheMatrix created above.
## If the matrix has not changed and its inverse has already been calculated, then the cachesolve detects this fact and shaves
## computer resources by retrieving the inverse from the cache
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getInverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setInverse(m)
m
}
|
# CONFIG
PARAM_needed_libraries=c("XML","dplyr","ggplot2","plotly","shiny","shinydashboard","reshape2","data.table")
PARAM_data_filename="oridiploxml.xml"
# BUILD PARAMETERS
PARAM_fun_folder=paste0(PARAM_main_folder,"fun/")
PARAM_data_folder=paste0(PARAM_main_folder,"data/")
PARAM_pathfile=paste0(PARAM_data_folder,PARAM_data_filename)
PARAM_fun_files = list.files(path =PARAM_fun_folder ,all.files =FALSE ,full.names = TRUE)
| /config.R | no_license | lucnakache/ambassadeur | R | false | false | 429 | r | # CONFIG
PARAM_needed_libraries=c("XML","dplyr","ggplot2","plotly","shiny","shinydashboard","reshape2","data.table")
PARAM_data_filename="oridiploxml.xml"
# BUILD PARAMETERS
PARAM_fun_folder=paste0(PARAM_main_folder,"fun/")
PARAM_data_folder=paste0(PARAM_main_folder,"data/")
PARAM_pathfile=paste0(PARAM_data_folder,PARAM_data_filename)
PARAM_fun_files = list.files(path =PARAM_fun_folder ,all.files =FALSE ,full.names = TRUE)
|
#Section 5: Function for the plot of the graphical layout of resultant optimal designs (graphoptrcd.mae)
# Draws the resultant optimal (or near-optimal) row-column design as a directed
# graph: shows it on an interactive Tk canvas and also writes a .pdf copy into
# a criterion-specific folder under the current working directory.
#
# Arguments:
#   trt.N   - number of treatments (v)
#   col.N   - number of columns (b)
#   theta   - the theta parameter of the design
#   OptdesF - the resultant optimal design; coerced via as.factor() into the
#             edge sequence passed to make_graph()
#   Optcrit - name of the optimality criterion (used in titles and file names)
#   cbVal2  - checkbox value selecting the exchange algorithm:
#             0 = treatment exchange, 1 = array exchange, anything else stop()s
#
# NOTE(review): depends on igraph (make_graph, tkplot, tk_canvas, norm_coords,
# tk_set_coords, layout.kamada.kawai) and tcltk being available -- confirm the
# package attaches them before this function is called.
graphoptrcd.mae<-function(trt.N,col.N,theta,OptdesF,Optcrit,cbVal2) {
  #cbValue2.I<-tclVar("0")
  #cbVal2=as.numeric(tclvalue(cbValue2.I))
  # "(v, b, theta)" label reused in titles and in the output file name.
  trtblkthetano<-paste("(",paste(trt.N, col.N, theta,sep=", "),")",sep="")
  if(cbVal2==0) {
    Alg="trtE";optcN="treatment"} else if(cbVal2==1) {
    Alg="arrayE";optcN="array"} else {stop("The algorithm is not specified")
  } #End if (identify the treatment and array exchange algorithm using checkbox value)
  NOptcrtr<-paste(Optcrit,"-optimal",sep="")#name of optimality criteria
  NOptcrtrG<-paste("Graph_layout_",Optcrit,"optrcd_",Alg,sep="")#name of folder where the graphical layout will be saved
  NOptcrtrG2<-paste("_Gout",Optcrit,"optrcd",Alg,".pdf",sep="")
  NgoutT=paste(NOptcrtr, "row-column", "design", "for", paste("(v, b, theta) =",trtblkthetano,sep=" "))
  NgoutST=paste("using",optcN,"exchange","algorithim",sep=" ")
  # Build the design graph and open it in an interactive Tk window.
  graph.des <- make_graph(as.numeric(as.factor(OptdesF)), directed = TRUE)
  graph.desid <- tkplot(graph.des, canvas.width=515, canvas.height=500,layout=layout.kamada.kawai,vertex.color="cyan",edge.color="black")
  canvas <- tk_canvas(graph.desid)
  # Shrink the layout so the title text drawn below does not overlap vertices.
  padding <- 100
  coords <- norm_coords(layout=layout.kamada.kawai(graph.des), 0+padding, 450-padding,
                        50+padding, 500-padding)
  tk_set_coords(graph.desid, coords)
  width <- as.numeric(tkcget(canvas, "-width"))
  height <- as.numeric(tkcget(canvas, "-height"))
  # Title and subtitle drawn directly on the Tk canvas.
  tkcreate(canvas, "text", width/2, 25, text=NgoutT,
           justify="center", font=tcltk::tkfont.create(family="helvetica",size=13,weight="bold"))
  tkcreate(canvas, "text", width/2, 45, text=NgoutST,
           justify="center", font=tcltk::tkfont.create(family="helvetica",size=13,weight="bold"))
  # Create the output folder (if needed) and save the same plot as a .pdf.
  graph.OutlayoptBlk<-paste(getwd(), NOptcrtrG,sep="/")
  if(!file.exists(graph.OutlayoptBlk)) dir.create(graph.OutlayoptBlk)
  obtdes.goutloptBlk<-paste(graph.OutlayoptBlk,paste(trtblkthetano,NOptcrtrG2,sep=""),sep="/")
  pdf(file=obtdes.goutloptBlk)
  plot(graph.des,edge.arrow.size=1, vertex.size=15, margin=0.5,
       layout=layout.kamada.kawai,vertex.color="cyan",edge.color="black")
  title(paste("Graphical layout of ", Optcrit,"-optimal or near-optimal row-column design",sep=""),
        sub = NULL,cex.main = 1, font.main= 1, col.main= "black")
  mtext(paste(NgoutST," for:",sep=""), line = 0.5, col = "black", font = 1)
  mtext(paste("(v, b, theta) =", " (",paste(trt.N, col.N, theta,sep=", "),")",sep=""), line = -0.50, col = "blue", font = 1)
  dev.off()
  # Tell the user where the .pdf was written.
  file_loc<-obtdes.goutloptBlk
  file_loc2<-paste("Graphical layout of obtained", NOptcrtr, "or near-optimal row-column design is also saved in .pdf at:",sep=" ")
  cat(file_loc2,"\n",file_loc,"\n\n")
}#End Section 5 (plot of the graphical layout of resultant optimal design, graphoptrcd.mae)
#Section 5: Function for the plot of the graphical layout of resultant optimal designs (graphoptrcd.mae)
# Draws the resultant optimal (or near-optimal) row-column design as a directed
# graph: shows it on an interactive Tk canvas and also writes a .pdf copy into
# a criterion-specific folder under the current working directory.
#
# Arguments:
#   trt.N   - number of treatments (v)
#   col.N   - number of columns (b)
#   theta   - the theta parameter of the design
#   OptdesF - the resultant optimal design; coerced via as.factor() into the
#             edge sequence passed to make_graph()
#   Optcrit - name of the optimality criterion (used in titles and file names)
#   cbVal2  - checkbox value selecting the exchange algorithm:
#             0 = treatment exchange, 1 = array exchange, anything else stop()s
#
# NOTE(review): depends on igraph (make_graph, tkplot, tk_canvas, norm_coords,
# tk_set_coords, layout.kamada.kawai) and tcltk being available -- confirm the
# package attaches them before this function is called.
graphoptrcd.mae<-function(trt.N,col.N,theta,OptdesF,Optcrit,cbVal2) {
  #cbValue2.I<-tclVar("0")
  #cbVal2=as.numeric(tclvalue(cbValue2.I))
  # "(v, b, theta)" label reused in titles and in the output file name.
  trtblkthetano<-paste("(",paste(trt.N, col.N, theta,sep=", "),")",sep="")
  if(cbVal2==0) {
    Alg="trtE";optcN="treatment"} else if(cbVal2==1) {
    Alg="arrayE";optcN="array"} else {stop("The algorithm is not specified")
  } #End if (identify the treatment and array exchange algorithm using checkbox value)
  NOptcrtr<-paste(Optcrit,"-optimal",sep="")#name of optimality criteria
  NOptcrtrG<-paste("Graph_layout_",Optcrit,"optrcd_",Alg,sep="")#name of folder where the graphical layout will be saved
  NOptcrtrG2<-paste("_Gout",Optcrit,"optrcd",Alg,".pdf",sep="")
  NgoutT=paste(NOptcrtr, "row-column", "design", "for", paste("(v, b, theta) =",trtblkthetano,sep=" "))
  NgoutST=paste("using",optcN,"exchange","algorithim",sep=" ")
  # Build the design graph and open it in an interactive Tk window.
  graph.des <- make_graph(as.numeric(as.factor(OptdesF)), directed = TRUE)
  graph.desid <- tkplot(graph.des, canvas.width=515, canvas.height=500,layout=layout.kamada.kawai,vertex.color="cyan",edge.color="black")
  canvas <- tk_canvas(graph.desid)
  # Shrink the layout so the title text drawn below does not overlap vertices.
  padding <- 100
  coords <- norm_coords(layout=layout.kamada.kawai(graph.des), 0+padding, 450-padding,
                        50+padding, 500-padding)
  tk_set_coords(graph.desid, coords)
  width <- as.numeric(tkcget(canvas, "-width"))
  height <- as.numeric(tkcget(canvas, "-height"))
  # Title and subtitle drawn directly on the Tk canvas.
  tkcreate(canvas, "text", width/2, 25, text=NgoutT,
           justify="center", font=tcltk::tkfont.create(family="helvetica",size=13,weight="bold"))
  tkcreate(canvas, "text", width/2, 45, text=NgoutST,
           justify="center", font=tcltk::tkfont.create(family="helvetica",size=13,weight="bold"))
  # Create the output folder (if needed) and save the same plot as a .pdf.
  graph.OutlayoptBlk<-paste(getwd(), NOptcrtrG,sep="/")
  if(!file.exists(graph.OutlayoptBlk)) dir.create(graph.OutlayoptBlk)
  obtdes.goutloptBlk<-paste(graph.OutlayoptBlk,paste(trtblkthetano,NOptcrtrG2,sep=""),sep="/")
  pdf(file=obtdes.goutloptBlk)
  plot(graph.des,edge.arrow.size=1, vertex.size=15, margin=0.5,
       layout=layout.kamada.kawai,vertex.color="cyan",edge.color="black")
  title(paste("Graphical layout of ", Optcrit,"-optimal or near-optimal row-column design",sep=""),
        sub = NULL,cex.main = 1, font.main= 1, col.main= "black")
  mtext(paste(NgoutST," for:",sep=""), line = 0.5, col = "black", font = 1)
  mtext(paste("(v, b, theta) =", " (",paste(trt.N, col.N, theta,sep=", "),")",sep=""), line = -0.50, col = "blue", font = 1)
  dev.off()
  # Tell the user where the .pdf was written.
  file_loc<-obtdes.goutloptBlk
  file_loc2<-paste("Graphical layout of obtained", NOptcrtr, "or near-optimal row-column design is also saved in .pdf at:",sep=" ")
  cat(file_loc2,"\n",file_loc,"\n\n")
}#End Section 5 (plot of the graphical layout of resultant optimal design, graphoptrcd.mae)
|
# It uses genlight objects, so you can load the raw plink files.
# Computes pairwise Fst between populations for four SNP datasets with StAMPP.
# NOTE(review): choose.files() opens an interactive file picker (Windows-only),
# so this script must be run interactively; confirm the selected files are the
# PLINK exports expected by read.PLINK().
library(StAMPP)
library(adegenet)
# One genlight object per dataset (single-core import).
macro<-read.PLINK(choose.files(),n.cores=1)
meso99<-read.PLINK(choose.files(),n.cores=1)
meso20<-read.PLINK(choose.files(),n.cores=1)
all_s<-read.PLINK(choose.files(),n.cores=1)
# Pairwise Fst with 10000 bootstrap replicates per dataset.
Mfst<-stamppFst(macro,nboots=10000)
m99fst<-stamppFst(meso99,nboots=10000)
m20fst<-stamppFst(meso20,nboots=10000)
All<-stamppFst(all_s,nboots=10000)
# It uses genlight objects, so you can load the raw plink files.
# Computes pairwise Fst between populations for four SNP datasets with StAMPP.
# NOTE(review): choose.files() opens an interactive file picker (Windows-only),
# so this script must be run interactively; confirm the selected files are the
# PLINK exports expected by read.PLINK().
library(StAMPP)
library(adegenet)
# One genlight object per dataset (single-core import).
macro<-read.PLINK(choose.files(),n.cores=1)
meso99<-read.PLINK(choose.files(),n.cores=1)
meso20<-read.PLINK(choose.files(),n.cores=1)
all_s<-read.PLINK(choose.files(),n.cores=1)
# Pairwise Fst with 10000 bootstrap replicates per dataset.
Mfst<-stamppFst(macro,nboots=10000)
m99fst<-stamppFst(meso99,nboots=10000)
m20fst<-stamppFst(meso20,nboots=10000)
All<-stamppFst(all_s,nboots=10000)
|
# installing packages for text mining
# 1. Install the following packages before you proceed: tm, SnowballC, ggplot2,
# wordcloud, igraph, topicmodels
pkgs <- c("tm", "SnowballC", "ggplot2", "wordcloud", "igraph", "topicmodels")
# Only install the packages that are not already present, so re-running the
# script does not re-download everything.
new_pkgs <- setdiff(pkgs, rownames(installed.packages()))
if (length(new_pkgs) > 0) {
  install.packages(new_pkgs)
}
# check installed packages
# installing packages for text mining
# 1. Install the following packages before you proceed: tm, SnowballC, ggplot2,
# wordcloud, igraph, topicmodels
pkgs <- c("tm", "SnowballC", "ggplot2", "wordcloud", "igraph", "topicmodels")
# Only install the packages that are not already present, so re-running the
# script does not re-download everything.
new_pkgs <- setdiff(pkgs, rownames(installed.packages()))
if (length(new_pkgs) > 0) {
  install.packages(new_pkgs)
}
# check installed packages
|
# Simulates an ESP (electric submersible pump) sensor dataset for a teaching
# exercise: augments the raw sensor file with synthetic columns, fits a few
# reference models, and writes "ESP1_data.csv" and "Wells_ESP2.csv".
# NOTE(review): no set.seed(), so the sample()/runif() columns differ on every
# run; the hard-coded 1023 is assumed to equal nrow(df) for ESP_sensor.csv --
# confirm before reusing with other data.
setwd("~/RStats/analysis-class/data/")
df <- read.csv("ESP_sensor.csv")
# Drop the identifier column; it plays no role in the models below.
df$ESP_ID <- NULL
head(df)
# Baseline regression of vibration on leakage current and pressure ROC.
linreg <- lm(data=df, Motor_Vibration ~ Current_Leakage_mA+PressureROC)
summary(linreg)
# Synthetic well names, e.g. "Well_1234A" (the 4-element suffix vector recycles).
df$Well <- paste0("Well_", sample(5000, 1023), sample(c("A", "AG", "N", "G")))
# Binary indicator that is 1 roughly 1 time in 10.
rare_1_in_10 <- c(rep(0,9),1)
df$rareB <- sample(rare_1_in_10,1023, replace = T)
# Surface temperature built from the other sensors; the rareB term adds an
# occasional vibration-driven spike.
df$Surface_Temperature <- 3.57 + 0.9 *df$Intake_Temperature_F - 1.2*(df$Motor_Oil_Temperature^0.7) +
  df$rareB*df$Motor_Vibration
df$Surface_Temperature <- round(df$Surface_Temperature)
# OD Casing sizes
od <- c(4.5, 5.5, 7.0)
df$ESP_OD_Casing_inches <- sample(od, 1023, replace = T, prob = c(0.6, 0.3,0.1))
fit <- lm(data=df, Surface_Temperature ~ Motor_Oil_Temperature +
            Motor_Vibration + Current_Leakage_mA+PressureROC +
            Intake_Temperature_F)
summary(fit)
names(df)
# First teaching dataset: selected columns only.
write.csv(df[, c("Well", "ESP_OD_Casing_inches",
                 "Motor_Oil_Temperature",
                 "Intake_Temperature_F",
                 "Intake_Pressure_Mpa",
                 "Motor_Vibration",
                 "Current_Leakage_mA",
                 "PressureROC",
                 "Surface_Temperature")], file="ESP1_data.csv",
          row.names = F)
df2 <- read.csv("ESP1_data.csv", stringsAsFactors = F)
df2$ESP_OD_Casing_inches <- as.factor(df2$ESP_OD_Casing_inches)
# Change only for OD = 7
# Logical masks per casing size; TDH gets a size-dependent random offset so an
# ANOVA by casing size has a detectable effect.
rows_7in <- df2$ESP_OD_Casing_inches == 7
rows_45in <- df2$ESP_OD_Casing_inches == 4.5
rows_55in <- df2$ESP_OD_Casing_inches == 5.5
rand7 <- runif(1023, 15, max=25)
rand4 <- runif(1023, 1, max=8)
rand5 <- runif(1023, 3, max=14)
df2$TDH <- runif(1023, 4000, 5500)
df2$TDH
df2$TDH[rows_7in] <- df2$TDH[rows_7in] + rand7[rows_7in]
df2$TDH[rows_45in] <- df2$TDH[rows_45in] + rand4[rows_45in]
df2$TDH[rows_55in] <- df2$TDH[rows_55in] + rand5[rows_55in]
fit2 <- lm(data=df2, Surface_Temperature ~ Motor_Oil_Temperature +
             Motor_Vibration + Current_Leakage_mA+PressureROC +
             Intake_Temperature_F)
summary(fit2)
# One-way ANOVA of TDH by casing size.
ESP_anova <- aov(TDH ~ ESP_OD_Casing_inches,
                 data=df2)
summary(ESP_anova)
model.tables(ESP_anova, type="means")
model.tables(ESP_anova, type="effects")
names(df2)
# Second teaching dataset: ESP1 columns plus TDH.
write.csv(df2, "Wells_ESP2.csv", row.names=F)
| /R/ESP_data.R | no_license | Ram-N/analysis-class | R | false | false | 2,216 | r |
# Simulates an ESP (electric submersible pump) sensor dataset for a teaching
# exercise: augments the raw sensor file with synthetic columns, fits a few
# reference models, and writes "ESP1_data.csv" and "Wells_ESP2.csv".
# NOTE(review): no set.seed(), so the sample()/runif() columns differ on every
# run; the hard-coded 1023 is assumed to equal nrow(df) for ESP_sensor.csv --
# confirm before reusing with other data.
setwd("~/RStats/analysis-class/data/")
df <- read.csv("ESP_sensor.csv")
# Drop the identifier column; it plays no role in the models below.
df$ESP_ID <- NULL
head(df)
# Baseline regression of vibration on leakage current and pressure ROC.
linreg <- lm(data=df, Motor_Vibration ~ Current_Leakage_mA+PressureROC)
summary(linreg)
# Synthetic well names, e.g. "Well_1234A" (the 4-element suffix vector recycles).
df$Well <- paste0("Well_", sample(5000, 1023), sample(c("A", "AG", "N", "G")))
# Binary indicator that is 1 roughly 1 time in 10.
rare_1_in_10 <- c(rep(0,9),1)
df$rareB <- sample(rare_1_in_10,1023, replace = T)
# Surface temperature built from the other sensors; the rareB term adds an
# occasional vibration-driven spike.
df$Surface_Temperature <- 3.57 + 0.9 *df$Intake_Temperature_F - 1.2*(df$Motor_Oil_Temperature^0.7) +
  df$rareB*df$Motor_Vibration
df$Surface_Temperature <- round(df$Surface_Temperature)
# OD Casing sizes
od <- c(4.5, 5.5, 7.0)
df$ESP_OD_Casing_inches <- sample(od, 1023, replace = T, prob = c(0.6, 0.3,0.1))
fit <- lm(data=df, Surface_Temperature ~ Motor_Oil_Temperature +
            Motor_Vibration + Current_Leakage_mA+PressureROC +
            Intake_Temperature_F)
summary(fit)
names(df)
# First teaching dataset: selected columns only.
write.csv(df[, c("Well", "ESP_OD_Casing_inches",
                 "Motor_Oil_Temperature",
                 "Intake_Temperature_F",
                 "Intake_Pressure_Mpa",
                 "Motor_Vibration",
                 "Current_Leakage_mA",
                 "PressureROC",
                 "Surface_Temperature")], file="ESP1_data.csv",
          row.names = F)
df2 <- read.csv("ESP1_data.csv", stringsAsFactors = F)
df2$ESP_OD_Casing_inches <- as.factor(df2$ESP_OD_Casing_inches)
# Change only for OD = 7
# Logical masks per casing size; TDH gets a size-dependent random offset so an
# ANOVA by casing size has a detectable effect.
rows_7in <- df2$ESP_OD_Casing_inches == 7
rows_45in <- df2$ESP_OD_Casing_inches == 4.5
rows_55in <- df2$ESP_OD_Casing_inches == 5.5
rand7 <- runif(1023, 15, max=25)
rand4 <- runif(1023, 1, max=8)
rand5 <- runif(1023, 3, max=14)
df2$TDH <- runif(1023, 4000, 5500)
df2$TDH
df2$TDH[rows_7in] <- df2$TDH[rows_7in] + rand7[rows_7in]
df2$TDH[rows_45in] <- df2$TDH[rows_45in] + rand4[rows_45in]
df2$TDH[rows_55in] <- df2$TDH[rows_55in] + rand5[rows_55in]
fit2 <- lm(data=df2, Surface_Temperature ~ Motor_Oil_Temperature +
             Motor_Vibration + Current_Leakage_mA+PressureROC +
             Intake_Temperature_F)
summary(fit2)
# One-way ANOVA of TDH by casing size.
ESP_anova <- aov(TDH ~ ESP_OD_Casing_inches,
                 data=df2)
summary(ESP_anova)
model.tables(ESP_anova, type="means")
model.tables(ESP_anova, type="effects")
names(df2)
# Second teaching dataset: ESP1 columns plus TDH.
write.csv(df2, "Wells_ESP2.csv", row.names=F)
|
##Dates are represented by the 'Date' class and times are represented by the 'POSIXct' and
##'POSIXlt' classes. Internally, dates are stored as the number of days since 1970-01-01 and
##times are stored as either the number of seconds since 1970-01-01 (for 'POSIXct') or a
##list of seconds, minutes, hours, etc. (for 'POSIXlt').
## getting the current date and store it in the variable
d1 <- Sys.Date()
##Use the class() function to confirm d1 is a Date object.
class(d1)
##[1] "Date"
##We can use the unclass() function to see what d1 looks like internally.
unclass(d1)
##[1] 17025 That's the exact number of days since 1970-01-01!
## if you print d1 to the console, you'll get today's date -- YEAR-MONTH-DAY.
d1
##[1] "2016-08-12"
## You can access the current date and time
t1 <- Sys.time()
t1
##[1] "2016-08-12 12:47:52 EEST"
class(t1)
##[1] "POSIXct" "POSIXt"
##As mentioned earlier, POSIXct is just one of two ways that R represents time information.
##(You can ignore the second value above, POSIXt, which just functions as a common language
##between POSIXct and POSIXlt.) Use unclass() to see what t1 looks like internally -- the
##(large) number of seconds since the beginning of 1970.
unclass(t1)
##[1] 1470995273
##By default, Sys.time() returns an object of class POSIXct, but we can coerce the result to
##POSIXlt with as.POSIXlt(Sys.time()). Give it a try and store the result in t2.
t2 <- as.POSIXlt(Sys.time())
class(t2)
##[1] "POSIXlt" "POSIXt"
t2
##[1] "2016-08-12 12:49:28 EEST"
unclass(t2)
## $sec
##[1] 28.12285
##
##$min
##[1] 49
##
##$hour
##[1] 12 and so on
##t2, like all POSIXlt objects, is just a list of values that make up the date and time. Use
##str(unclass(t2)) to have a more compact view.
str(unclass(t2))
##List of 11
##$ sec : num 28.1
##$ min : int 49
##$ hour : int 12
## and so on
t2$min
##[1] 49
## Now that we have explored all three types of date and time objects, let's look at a few
## functions that extract useful information from any of these objects -- weekdays(),
## months(), and quarters().
##The weekdays() function will return the day of week from any date or time object. Try it
##out on d1, which is the Date object that contains today's date.
weekdays(d1)
##[1] "п'ятниця"
## (i.e. "Friday" -- day names are printed in the current locale, here Ukrainian)
## The months() function also works on any date or time object. Try it on t1, which is the
## POSIXct object that contains the current time (well, it was the current time when you
## created it).
months(t1)
##[1] "серпень"
## (i.e. "August" -- month names are also locale-dependent)
## The quarters() function returns the quarter of the year (Q1-Q4) from any date or time
## object. Try it on t2, which is the POSIXlt object that contains the time at which you
## created it.
quarters(t2)
##[1] "Q3"
## Often, the dates and times in a dataset will be in a format that R does not recognize. The
## strptime() function can be helpful in this situation.
##strptime() converts character vectors to POSIXlt. In that sense, it is similar to
##as.POSIXlt(), except that the input doesn't have to be in a particular format (YYYY-MM-DD).
t3 <- "October 17, 1986 08:24"
t4 <- strptime(t3, "%B %d, %Y %H:%M")
##[1] NA
## NOTE: "%B" matches month names in the *current* locale, so the English month
## name above parses to NA under a non-English locale (as recorded here).
t4
##[1] NA
class(t4)
##[1] "POSIXlt" "POSIXt"
Sys.time() > t1
##[1] TRUE
Sys.time() - t1
##Time difference of 7.655197 mins
##to control over the units when finding the above difference in times, you can use
##difftime(), which allows you to specify a 'units' parameter.
difftime(Sys.time(), t1, units = 'days')
##Time difference of 0.005844198 days
##Dates are represented by the 'Date' class and times are represented by the 'POSIXct' and
##'POSIXlt' classes. Internally, dates are stored as the number of days since 1970-01-01 and
##times are stored as either the number of seconds since 1970-01-01 (for 'POSIXct') or a
##list of seconds, minutes, hours, etc. (for 'POSIXlt').
## getting the current date and store it in the variable
d1 <- Sys.Date()
##Use the class() function to confirm d1 is a Date object.
class(d1)
##[1] "Date"
##We can use the unclass() function to see what d1 looks like internally.
unclass(d1)
##[1] 17025 That's the exact number of days since 1970-01-01!
## if you print d1 to the console, you'll get today's date -- YEAR-MONTH-DAY.
d1
##[1] "2016-08-12"
## You can access the current date and time
t1 <- Sys.time()
t1
##[1] "2016-08-12 12:47:52 EEST"
class(t1)
##[1] "POSIXct" "POSIXt"
##As mentioned earlier, POSIXct is just one of two ways that R represents time information.
##(You can ignore the second value above, POSIXt, which just functions as a common language
##between POSIXct and POSIXlt.) Use unclass() to see what t1 looks like internally -- the
##(large) number of seconds since the beginning of 1970.
unclass(t1)
##[1] 1470995273
##By default, Sys.time() returns an object of class POSIXct, but we can coerce the result to
##POSIXlt with as.POSIXlt(Sys.time()). Give it a try and store the result in t2.
t2 <- as.POSIXlt(Sys.time())
class(t2)
##[1] "POSIXlt" "POSIXt"
t2
##[1] "2016-08-12 12:49:28 EEST"
unclass(t2)
## $sec
##[1] 28.12285
##
##$min
##[1] 49
##
##$hour
##[1] 12 and so on
##t2, like all POSIXlt objects, is just a list of values that make up the date and time. Use
##str(unclass(t2)) to have a more compact view.
str(unclass(t2))
##List of 11
##$ sec : num 28.1
##$ min : int 49
##$ hour : int 12
## and so on
t2$min
##[1] 49
## Now that we have explored all three types of date and time objects, let's look at a few
## functions that extract useful information from any of these objects -- weekdays(),
## months(), and quarters().
##The weekdays() function will return the day of week from any date or time object. Try it
##out on d1, which is the Date object that contains today's date.
weekdays(d1)
##[1] "п'ятниця"
## (i.e. "Friday" -- day names are printed in the current locale, here Ukrainian)
## The months() function also works on any date or time object. Try it on t1, which is the
## POSIXct object that contains the current time (well, it was the current time when you
## created it).
months(t1)
##[1] "серпень"
## (i.e. "August" -- month names are also locale-dependent)
## The quarters() function returns the quarter of the year (Q1-Q4) from any date or time
## object. Try it on t2, which is the POSIXlt object that contains the time at which you
## created it.
quarters(t2)
##[1] "Q3"
## Often, the dates and times in a dataset will be in a format that R does not recognize. The
## strptime() function can be helpful in this situation.
##strptime() converts character vectors to POSIXlt. In that sense, it is similar to
##as.POSIXlt(), except that the input doesn't have to be in a particular format (YYYY-MM-DD).
t3 <- "October 17, 1986 08:24"
t4 <- strptime(t3, "%B %d, %Y %H:%M")
##[1] NA
## NOTE: "%B" matches month names in the *current* locale, so the English month
## name above parses to NA under a non-English locale (as recorded here).
t4
##[1] NA
class(t4)
##[1] "POSIXlt" "POSIXt"
Sys.time() > t1
##[1] TRUE
Sys.time() - t1
##Time difference of 7.655197 mins
##to control over the units when finding the above difference in times, you can use
##difftime(), which allows you to specify a 'units' parameter.
difftime(Sys.time(), t1, units = 'days')
##Time difference of 0.005844198 days
#' Convert ggplot2 to plotly
#'
#' This function converts a [ggplot2::ggplot()] object to a
#' plotly object.
#'
#' @details Conversion of relative sizes depends on the size of the current
#' graphics device (if no device is open, width/height of a new (off-screen)
#' device defaults to 640/480). In other words, `height` and
#' `width` must be specified at runtime to ensure sizing is correct.
#'
#' @param p a ggplot object.
#' @param width Width of the plot in pixels (optional, defaults to automatic sizing).
#' @param height Height of the plot in pixels (optional, defaults to automatic sizing).
#' @param tooltip a character vector specifying which aesthetic mappings to show
#' in the tooltip. The default, "all", means show all the aesthetic mappings
#' (including the unofficial "text" aesthetic). The order of variables here will
#' also control the order they appear. For example, use
#' `tooltip = c("y", "x", "colour")` if you want y first, x second, and
#' colour last.
#' @param dynamicTicks should plotly.js dynamically generate axis tick labels?
#' Dynamic ticks are useful for updating ticks in response to zoom/pan
#' interactions; however, they can not always reproduce labels as they
#' would appear in the static ggplot2 image.
#' @param layerData data from which layer should be returned?
#' @param originalData should the "original" or "scaled" data be returned?
#' @param source a character string of length 1. Match the value of this string
#' with the source argument in [event_data()] to retrieve the
#' event data corresponding to a specific plot (shiny apps can have multiple plots).
#' @param ... arguments passed onto methods.
#' @export
#' @author Carson Sievert
#' @references \url{https://plot.ly/ggplot2}
#' @seealso [plot_ly()]
#' @examples \dontrun{
#' # simple example
#' ggiris <- qplot(Petal.Width, Sepal.Length, data = iris, color = Species)
#' ggplotly(ggiris)
#'
#' data(canada.cities, package = "maps")
#' viz <- ggplot(canada.cities, aes(long, lat)) +
#' borders(regions = "canada") +
#' coord_equal() +
#' geom_point(aes(text = name, size = pop), colour = "red", alpha = 1/2)
#' ggplotly(viz, tooltip = c("text", "size"))
#'
#' # linked scatterplot brushing
#' library(crosstalk)
#' d <- SharedData$new(mtcars)
#' subplot(
#' qplot(data = d, x = mpg, y = wt),
#' qplot(data = d, x = mpg, y = vs)
#' )
#'
#' # more brushing (i.e. highlighting) examples
#' demo("crosstalk-highlight-ggplotly", package = "plotly")
#'
#' # client-side linked brushing in a scatterplot matrix
#' SharedData$new(iris) %>%
#' GGally::ggpairs(aes(colour = Species), columns = 1:4) %>%
#' ggplotly(tooltip = c("x", "y", "colour"))
#' }
#'
ggplotly <- function(p = ggplot2::last_plot(), width = NULL, height = NULL,
                     tooltip = "all", dynamicTicks = FALSE,
                     layerData = 1, originalData = TRUE, source = "A", ...) {
  # S3 generic -- dispatches to ggplotly.plotly, ggplotly.ggmatrix, or
  # ggplotly.ggplot based on class(p).
  UseMethod("ggplotly", p)
}
#' @export
ggplotly.plotly <- function(p = ggplot2::last_plot(), width = NULL, height = NULL,
                            tooltip = "all", dynamicTicks = FALSE,
                            layerData = 1, originalData = TRUE, source = "A", ...) {
  # Already a plotly object: return it unchanged (the other arguments are ignored).
  p
}
#' @export
ggplotly.ggmatrix <- function(p = ggplot2::last_plot(), width = NULL,
                              height = NULL, tooltip = "all", dynamicTicks = FALSE,
                              layerData = 1, originalData = TRUE, source = "A", ...) {
  dots <- list(...)
  # provide a sensible crosstalk if none is already provided (makes ggnostic() work at least)
  if (!crosstalk_key() %in% names(p$data)) {
    p$data[[crosstalk_key()]] <- p$data[[".rownames"]] %||% seq_len(nrow(p$data))
    attr(p$data, "set") <- dots[["set"]] %||% new_id()
  }
  # Convert every panel of the ggmatrix to plotly, column by column.
  subplotList <- list()
  for (i in seq_len(p$ncol)) {
    columnList <- list()
    for (j in seq_len(p$nrow)) {
      thisPlot <- p[j, i]
      if (i == 1) {
        # should the first column contain axis labels?
        if (p$showYAxisPlotLabels %||% TRUE) thisPlot <- thisPlot + ylab(p$yAxisLabels[j])
      } else {
        # y-axes are never drawn on the interior, and diagonal plots are densities,
        # so it doesn't make sense to synch zoom actions on y
        thisPlot <- thisPlot + ylab(NULL) +
          theme(
            axis.ticks.y = element_blank(),
            axis.text.y = element_blank()
          )
      }
      # Convert this panel with the same options the user passed in.
      columnList <- c(
        columnList, list(ggplotly(
          thisPlot, tooltip = tooltip, dynamicTicks = dynamicTicks,
          layerData = layerData, originalData = originalData, source = source,
          width = width, height = height
        ))
      )
    }
    # conditioned on a column in a ggmatrix, the x-axis should be on the
    # same scale.
    s <- subplot(columnList, nrows = p$nrow, margin = 0.01, shareX = TRUE,
                 titleY = TRUE, titleX = TRUE)
    subplotList <- c(subplotList, list(s))
  }
  # Stitch the per-column subplots together; the legend is redundant for a
  # matrix, and rectangular selection is the natural brushing mode here.
  s <- subplot(subplotList, nrows = 1, margin = 0.01,
               titleY = TRUE, titleX = TRUE) %>%
    hide_legend() %>%
    layout(dragmode = "select")
  if (nchar(p$title %||% "") > 0) {
    s <- layout(s, title = p$title)
  }
  # Transfer the ggmatrix column labels onto the plotly x-axis titles.
  for (i in seq_along(p$xAxisLabels)) {
    s$x$layout[[sub("^xaxis1$", "xaxis", paste0("xaxis", i))]]$title <- p$xAxisLabels[[i]]
  }
  # Make room on the left for y-axis labels, if any exist.
  if (length(p$yAxisLabels)) {
    s$x$layout$margin$l <- s$x$layout$margin$l + 50
  }
  config(s)
}
#' @export
ggplotly.ggplot <- function(p = ggplot2::last_plot(), width = NULL,
                            height = NULL, tooltip = "all", dynamicTicks = FALSE,
                            layerData = 1, originalData = TRUE, source = "A", ...) {
  # Build the plotly spec from the ggplot object, wrap it as an htmlwidget,
  # and apply the default plotly configuration.
  built_spec <- gg2list(
    p,
    width = width,
    height = height,
    tooltip = tooltip,
    dynamicTicks = dynamicTicks,
    layerData = layerData,
    originalData = originalData,
    source = source,
    ...
  )
  config(as_widget(built_spec))
}
#' Convert a ggplot to a list.
#' @param p ggplot2 plot.
#' @param width Width of the plot in pixels (optional, defaults to automatic sizing).
#' @param height Height of the plot in pixels (optional, defaults to automatic sizing).
#' @param tooltip a character vector specifying which aesthetic mappings to show in the
#' tooltip. The default, "all", means show all the aesthetic mappings
#' (including the unofficial "text" aesthetic).
#' @param dynamicTicks accepts the following values: `FALSE`, `TRUE`, `"x"`, or `"y"`.
#' Dynamic ticks are useful for updating ticks in response to zoom/pan/filter
#' interactions; however, there is no guarantee they reproduce axis tick text
#' as they would appear in the static ggplot2 image.
#' @param layerData data from which layer should be returned?
#' @param originalData should the "original" or "scaled" data be returned?
#' @param source a character string of length 1. Match the value of this string
#' with the source argument in [event_data()] to retrieve the
#' event data corresponding to a specific plot (shiny apps can have multiple plots).
#' @param ... currently not used
#' @return a 'built' plotly object (list with names "data" and "layout").
#' @export
gg2list <- function(p, width = NULL, height = NULL,
tooltip = "all", dynamicTicks = FALSE,
layerData = 1, originalData = TRUE, source = "A", ...) {
# To convert relative sizes correctly, we use grid::convertHeight(),
# which may open a new *screen* device, if none is currently open.
# To avoid undesirable side effects, we may need to open a
# non-interactive device and close it on exit...
# https://github.com/att/rcloud.htmlwidgets/issues/2
# Note that we never have to open a non-interactive device
# in RStudio since it ships with one. Plus, calling dev.size()
# adds it to dev.list() & should ensure grid can query the correct device size
rStudioDevSize <- if (is_rstudio()) grDevices::dev.size("px")
if (is.null(grDevices::dev.list())) {
dev_fun <- if (system.file(package = "Cairo") != "") {
Cairo::Cairo
} else if (capabilities("png")) {
grDevices::png
} else if (capabilities("jpeg")) {
grDevices::jpeg
} else {
stop(
"No graphics device is currently open and no cairo or bitmap device is available.\n",
"A graphics device is required to convert sizes correctly. You have three options:",
" (1) Open a graphics device (with the desired size) using ggplotly()",
" (2) install.packages('Cairo')",
" (3) compile R to use a bitmap device (png or jpeg)",
call. = FALSE
)
}
dev_fun(file = tempfile(), width = width %||% 640, height = height %||% 480)
on.exit(grDevices::dev.off(), add = TRUE)
}
# check the value of dynamicTicks
dynamicValues <- c(FALSE, TRUE, "x", "y")
if (length(setdiff(dynamicTicks, dynamicValues))) {
stop(
sprintf(
"`dynamicValues` accepts the following values: '%s'",
paste(dynamicValues, collapse = "', '")
), call. = FALSE
)
}
# we currently support ggplot2 >= 2.2.1 (see DESCRIPTION)
# there are too many naming changes in 2.2.1.9000 to realistically
if (!is_dev_ggplot2()) {
message(
"We recommend that you use the dev version of ggplot2 with `ggplotly()`\n",
"Install it with: `devtools::install_github('tidyverse/ggplot2')`"
)
if (!identical(dynamicTicks, FALSE)) {
warning(
"You need the dev version of ggplot2 to use `dynamicTicks`", call. = FALSE
)
}
return(
gg2list_legacy(
p, width = width, height = height, tooltip = tooltip,
layerData = layerData, originalData = originalData, source = source, ...
)
)
}
# ------------------------------------------------------------------------
# Our internal version of ggplot2::ggplot_build(). Modified from
# https://github.com/hadley/ggplot2/blob/0cd0ba/R/plot-build.r#L18-L92
# ------------------------------------------------------------------------
plot <- ggfun("plot_clone")(p)
if (length(plot$layers) == 0) {
plot <- plot + geom_blank()
}
layers <- plot$layers
layer_data <- lapply(layers, function(y) y$layer_data(plot$data))
# save crosstalk sets before this attribute gets squashed
sets <- lapply(layer_data, function(y) attr(y, "set"))
scales <- plot$scales
# Apply function to layer and matching data
by_layer <- function(f) {
out <- vector("list", length(data))
for (i in seq_along(data)) {
out[[i]] <- f(l = layers[[i]], d = data[[i]])
}
out
}
# Initialise panels, add extra data for margins & missing facetting
# variables, and add on a PANEL variable to data
layout <- ggfun("create_layout")(plot$facet, plot$coordinates)
data <- layout$setup(layer_data, plot$data, plot$plot_env)
# save the domain of the group for display in tooltips
groupDomains <- Map(function(x, y) {
tryCatch(
eval(y$mapping[["group"]] %||% plot$mapping[["group"]], x),
error = function(e) NULL
)
}, data, layers)
# for simple (StatIdentity) geoms, add crosstalk key to aes mapping
# (effectively adding it as a group)
# later on, for more complicated geoms (w/ non-trivial summary statistics),
# we construct a nested key mapping (within group)
layers <- Map(function(x, y) {
if (crosstalk_key() %in% names(y) && !"key" %in% names(x[["mapping"]]) &&
inherits(x[["stat"]], "StatIdentity")) {
x[["mapping"]] <- c(x[["mapping"]], key = as.name(crosstalk_key()))
}
x
}, layers, layer_data)
# Compute aesthetics to produce data with generalised variable names
data <- by_layer(function(l, d) l$compute_aesthetics(d, plot))
# add frame to group if it exists
data <- lapply(data, function(d) {
if (!"frame" %in% names(d)) return(d)
d$group <- with(d, paste(group, frame, sep = "-"))
d
})
# The computed aesthetic codes the groups as integers
# Here we build a map each of the integer values to the group label
group_maps <- Map(function(x, y) {
tryCatch({
x_group <- x[["group"]]
names(x_group) <- y
x_group <- x_group[!duplicated(x_group)]
x_group
}, error = function(e) NULL
)
}, data, groupDomains)
# Before mapping x/y position, save the domain (for discrete scales)
# to display in tooltip.
data <- lapply(data, function(d) {
d[["x_plotlyDomain"]] <- d[["x"]]
d[["y_plotlyDomain"]] <- d[["y"]]
d
})
# Transform all scales
data <- lapply(data, ggfun("scales_transform_df"), scales = scales)
# Map and train positions so that statistics have access to ranges
# and all positions are numeric
scale_x <- function() scales$get_scales("x")
scale_y <- function() scales$get_scales("y")
layout$train_position(data, scale_x(), scale_y())
data <- layout$map_position(data)
# build a mapping between group and key
# if there are multiple keys within a group, the key is a list-column
reComputeGroup <- function(x, layer = NULL) {
# 1-to-1 link between data & visual marks -- group == key
if (inherits(layer$geom, "GeomDotplot")) {
x <- split(x, x[["PANEL"]])
x <- lapply(x, function(d) {
d[["group"]] <- do.call("order", d[c("x", "group")])
d
})
x <- dplyr::bind_rows(x)
}
if (inherits(layer$geom, "GeomSf")) {
x <- split(x, x[["PANEL"]])
x <- lapply(x, function(d) {
d[["group"]] <- seq_len(nrow(d))
d
})
# I think this is safe?
x <- suppressWarnings(dplyr::bind_rows(x))
}
x
}
nestedKeys <- Map(function(x, y, z) {
key <- y[[crosstalk_key()]]
if (is.null(key) || inherits(z[["stat"]], "StatIdentity")) return(NULL)
x <- reComputeGroup(x, z)
tib <- tibble::as_tibble(x[c("PANEL", "group")])
tib[["key"]] <- key
nested <- tidyr::nest(tib, key, .key = key)
# reduce the dimensions of list column elements from 2 to 1
nested$key <- lapply(nested$key, function(x) x[[1]])
nested
}, data, layer_data, layers)
# for some geoms (e.g. boxplots) plotly.js needs the "pre-statistics" data
# we also now provide the option to return one of these two
prestats_data <- data
data <- by_layer(function(l, d) l$compute_statistic(d, layout))
data <- by_layer(function(l, d) l$map_statistic(d, plot))
# Make sure missing (but required) aesthetics are added
ggfun("scales_add_missing")(plot, c("x", "y"), plot$plot_env)
# Reparameterise geoms from (e.g.) y and width to ymin and ymax
data <- by_layer(function(l, d) l$compute_geom_1(d))
# compute_geom_1 can reorder the rows from `data`, making groupDomains
# invalid. We rebuild groupDomains based on the current `data` and the
# group map we built before.
groupDomains <- Map(function(x, y) {
tryCatch({
names(y)[match(x$group, y)]
}, error = function(e) NULL
)
}, data, group_maps)
# there are some geoms (e.g. geom_dotplot()) where attaching the key
# before applying the statistic can cause problems, but there is still a
# 1-to-1 corresponding between graphical marks and
# Apply position adjustments
data <- by_layer(function(l, d) l$compute_position(d, layout))
# Reset position scales, then re-train and map. This ensures that facets
# have control over the range of a plot: is it generated from what's
# displayed, or does it include the range of underlying data
layout$reset_scales()
layout$train_position(data, scale_x(), scale_y())
layout$setup_panel_params()
data <- layout$map_position(data)
# Train and map non-position scales
npscales <- scales$non_position_scales()
if (npscales$n() > 0) {
lapply(data, ggfun("scales_train_df"), scales = npscales)
# this for loop is unique to plotly -- it saves the "domain"
# of each non-positional scale for display in tooltips
for (sc in npscales$scales) {
data <- lapply(data, function(d) {
# scale may not be relevant for every layer data
if (any(names(d) %in% sc$aesthetics)) {
d[paste0(sc$aesthetics, "_plotlyDomain")] <- d[sc$aesthetics]
}
d
})
}
data <- lapply(data, ggfun("scales_map_df"), scales = npscales)
}
# Fill in defaults etc.
data <- by_layer(function(l, d) l$compute_geom_2(d))
# Let layer stat have a final say before rendering
data <- by_layer(function(l, d) l$finish_statistics(d))
# Let Layout modify data before rendering
data <- layout$finish_data(data)
# ------------------------------------------------------------------------
# end of ggplot_build()
# ------------------------------------------------------------------------
# if necessary, attach key
data <- Map(function(x, y, z) {
if (!length(y)) return(x)
x <- reComputeGroup(x, z)
# dplyr issue??? https://github.com/tidyverse/dplyr/issues/2701
attr(y$group, "n") <- NULL
suppressMessages(dplyr::left_join(x, y))
}, data, nestedKeys, layers)
# initiate plotly.js layout with some plot-wide theming stuff
theme <- ggfun("plot_theme")(plot)
elements <- names(which(sapply(theme, inherits, "element")))
for (i in elements) {
theme[[i]] <- ggplot2::calc_element(i, theme)
}
# Translate plot wide theme elements to plotly.js layout
pm <- unitConvert(theme$plot.margin, "pixels")
gglayout <- list(
margin = list(t = pm[[1]], r = pm[[2]], b = pm[[3]], l = pm[[4]]),
plot_bgcolor = toRGB(theme$panel.background$fill),
paper_bgcolor = toRGB(theme$plot.background$fill),
font = text2font(theme$text)
)
# main plot title
if (nchar(plot$labels$title %||% "") > 0) {
gglayout$title <- faced(plot$labels$title, theme$plot.title$face)
gglayout$titlefont <- text2font(theme$plot.title)
gglayout$margin$t <- gglayout$margin$t + gglayout$titlefont$size
}
# ensure there's enough space for the modebar (this is based on a height of 1em)
# https://github.com/plotly/plotly.js/blob/dd1547/src/components/modebar/index.js#L171
gglayout$margin$t <- gglayout$margin$t + 16
# important stuff like layout$panel_params is already flipped, but
# plot$scales/plot$labels/data aren't. We flip x/y trace data at the very end
# and scales in the axis loop below.
if (inherits(plot$coordinates, "CoordFlip")) {
plot$labels[c("x", "y")] <- plot$labels[c("y", "x")]
}
# important panel summary stats
nPanels <- nrow(layout$layout)
nRows <- max(layout$layout$ROW)
nCols <- max(layout$layout$COL)
# panel -> plotly.js axis/anchor info
# (assume a grid layout by default)
layout$layout$xaxis <- layout$layout$COL
layout$layout$yaxis <- layout$layout$ROW
layout$layout$xanchor <- nRows
layout$layout$yanchor <- 1
if (inherits(plot$facet, "FacetWrap")) {
if (plot$facet$params$free$x) {
layout$layout$xaxis <- layout$layout$PANEL
layout$layout$xanchor <- layout$layout$ROW
}
if (plot$facet$params$free$y) {
layout$layout$yaxis <- layout$layout$PANEL
layout$layout$yanchor <- layout$layout$COL
layout$layout$xanchor <- nPanels
}
if (plot$facet$params$free$x && plot$facet$params$free$y) {
layout$layout$xaxis <- layout$layout$PANEL
layout$layout$yaxis <- layout$layout$PANEL
layout$layout$xanchor <- layout$layout$PANEL
layout$layout$yanchor <- layout$layout$PANEL
}
}
# format the axis/anchor to a format plotly.js respects
layout$layout$xaxis <- paste0("xaxis", sub("^1$", "", layout$layout$xaxis))
layout$layout$yaxis <- paste0("yaxis", sub("^1$", "", layout$layout$yaxis))
layout$layout$xanchor <- paste0("y", sub("^1$", "", layout$layout$xanchor))
layout$layout$yanchor <- paste0("x", sub("^1$", "", layout$layout$yanchor))
# for some layers2traces computations, we need the range of each panel
layout$layout$x_min <- sapply(layout$panel_params, function(z) min(z$x.range %||% z$x_range))
layout$layout$x_max <- sapply(layout$panel_params, function(z) max(z$x.range %||% z$x_range))
layout$layout$y_min <- sapply(layout$panel_params, function(z) min(z$y.range %||% z$y_range))
layout$layout$y_max <- sapply(layout$panel_params, function(z) max(z$y.range %||% z$y_range))
# layers -> plotly.js traces
plot$tooltip <- tooltip
data <- Map(function(x, y) {
tryCatch({ x$group_plotlyDomain <- y; x }, error = function(e) x)
}, data, groupDomains)
# reattach crosstalk key-set attribute
data <- Map(function(x, y) structure(x, set = y), data, sets)
traces <- layers2traces(data, prestats_data, layout, plot)
gglayout <- layers2layout(gglayout, layers, layout$layout)
# default to just the text in hover info, mainly because of this
# https://github.com/plotly/plotly.js/issues/320
traces <- lapply(traces, function(tr) {
tr$hoverinfo <- tr$hoverinfo %||%"text"
tr
})
# show only one legend entry per legendgroup
grps <- sapply(traces, "[[", "legendgroup")
traces <- Map(function(x, y) {
if (!is.null(x[["frame"]])) return(x)
x$showlegend <- isTRUE(x$showlegend) && y
x
}, traces, !duplicated(grps))
# ------------------------------------------------------------------------
# axis/facet/margin conversion
# ------------------------------------------------------------------------
# panel margins must be computed before panel/axis loops
# (in order to use get_domains())
panelMarginX <- unitConvert(
theme[["panel.spacing.x"]] %||% theme[["panel.spacing"]],
"npc", "width"
)
panelMarginY <- unitConvert(
theme[["panel.spacing.y"]] %||% theme[["panel.spacing"]],
"npc", "height"
)
# space for _interior_ facet strips
if (inherits(plot$facet, "FacetWrap")) {
stripSize <- unitConvert(
theme[["strip.text.x"]] %||% theme[["strip.text"]],
"npc", "height"
)
panelMarginY <- panelMarginY + stripSize
# space for ticks/text in free scales
if (plot$facet$params$free$x) {
axisTicksX <- unitConvert(
theme[["axis.ticks.x"]] %||% theme[["axis.ticks"]],
"npc", "height"
)
# allocate enough space for the _longest_ text label
axisTextX <- theme[["axis.text.x"]] %||% theme[["axis.text"]]
labz <- unlist(lapply(layout$panel_params, "[[", "x.labels"))
lab <- labz[which.max(nchar(labz))]
panelMarginY <- panelMarginY + axisTicksX +
bbox(lab, axisTextX$angle, unitConvert(axisTextX, "npc", "height"))[["height"]]
}
if (plot$facet$params$free$y) {
axisTicksY <- unitConvert(
theme[["axis.ticks.y"]] %||% theme[["axis.ticks"]],
"npc", "width"
)
# allocate enough space for the _longest_ text label
axisTextY <- theme[["axis.text.y"]] %||% theme[["axis.text"]]
labz <- unlist(lapply(layout$panel_params, "[[", "y.labels"))
lab <- labz[which.max(nchar(labz))]
panelMarginX <- panelMarginX + axisTicksY +
bbox(lab, axisTextY$angle, unitConvert(axisTextY, "npc", "width"))[["width"]]
}
}
margins <- c(
rep(panelMarginX, 2),
rep(panelMarginY, 2)
)
doms <- get_domains(nPanels, nRows, margins)
for (i in seq_len(nPanels)) {
lay <- layout$layout[i, ]
for (xy in c("x", "y")) {
# find axis specific theme elements that inherit from their parent
theme_el <- function(el) {
theme[[paste0(el, ".", xy)]] %||% theme[[el]]
}
axisTicks <- theme_el("axis.ticks")
axisText <- theme_el("axis.text")
axisTitle <- theme_el("axis.title")
axisLine <- theme_el("axis.line")
panelGrid <- theme_el("panel.grid.major")
stripText <- theme_el("strip.text")
axisName <- lay[, paste0(xy, "axis")]
anchor <- lay[, paste0(xy, "anchor")]
rng <- layout$panel_params[[i]]
# panel_params is quite different for "CoordSf"
if ("CoordSf" %in% class(p$coordinates)) {
# see CoordSf$render_axis_v
direction <- if (xy == "x") "E" else "N"
idx <- rng$graticule$type == direction & !is.na(rng$graticule$degree_label)
tickData <- rng$graticule[idx, ]
# TODO: how to convert a language object to unicode character string?
rng[[paste0(xy, ".labels")]] <- as.character(tickData[["degree_label"]])
rng[[paste0(xy, ".major")]] <- tickData[[paste0(xy, "_start")]]
# If it doesn't already exist (for this panel),
# generate graticule (as done in, CoordSf$render_bg)
isGrill <- vapply(traces, function(tr) {
identical(tr$xaxis, lay$xaxis) &&
identical(tr$yaxis, lay$yaxis) &&
isTRUE(tr$`_isGraticule`)
}, logical(1))
if (sum(isGrill) == 0) {
# TODO: reduce the number of points (via coord_munch?)
d <- expand(rng$graticule)
d$x <- scales::rescale(d$x, rng$x_range, from = c(0, 1))
d$y <- scales::rescale(d$y, rng$y_range, from = c(0, 1))
params <- list(
colour = theme$panel.grid.major$colour,
size = theme$panel.grid.major$size,
linetype = theme$panel.grid.major$linetype
)
grill <- geom2trace.GeomPath(d, params)
grill$hoverinfo <- "none"
grill$showlegend <- FALSE
grill$`_isGraticule` <- TRUE
grill$xaxis <- lay$xaxis
grill$yaxis <- lay$yaxis
traces <- c(list(grill), traces)
}
# if labels are empty, don't show axis ticks
tickExists <- with(rng$graticule, sapply(degree_label, is.language))
if (sum(tickExists) == 0) {
theme$axis.ticks.length <- 0
} else{
# convert the special *degree expression in plotmath to HTML entity
# TODO: can this be done more generally for all ?
rng[[paste0(xy, ".labels")]] <- sub(
"\\*\\s+degree[ ]?[\\*]?", "°", rng[[paste0(xy, ".labels")]]
)
}
}
# stuff like layout$panel_params is already flipped, but scales aren't
sc <- if (inherits(plot$coordinates, "CoordFlip")) {
scales$get_scales(setdiff(c("x", "y"), xy))
} else {
scales$get_scales(xy)
}
# type of unit conversion
type <- if (xy == "x") "height" else "width"
# get axis title
axisTitleText <- sc$name %||% plot$labels[[xy]] %||% ""
if (is_blank(axisTitle)) axisTitleText <- ""
# is this axis dynamic?
isDynamic <- isTRUE(dynamicTicks) || identical(dynamicTicks, xy)
if (isDynamic && !p$coordinates$is_linear()) {
warning(
"`dynamicTicks` is only supported for linear (i.e., cartesian) coordinates",
call. = FALSE
)
}
# determine axis types (note: scale_name may go away someday)
# https://github.com/hadley/ggplot2/issues/1312
isDate <- isTRUE(sc$scale_name %in% c("date", "datetime"))
isDateType <- isDynamic && isDate
isDiscrete <- identical(sc$scale_name, "position_d")
isDiscreteType <- isDynamic && isDiscrete
axisObj <- list(
# TODO: log type?
type = if (isDateType) "date" else if (isDiscreteType) "category" else "linear",
autorange = isDynamic,
range = rng[[paste0(xy, ".range")]] %||% rng[[paste0(xy, "_range")]],
tickmode = if (isDynamic) "auto" else "array",
ticktext = rng[[paste0(xy, ".labels")]],
tickvals = rng[[paste0(xy, ".major")]],
categoryorder = "array",
categoryarray = rng[[paste0(xy, ".labels")]],
nticks = nrow(rng),
ticks = if (is_blank(axisTicks)) "" else "outside",
tickcolor = toRGB(axisTicks$colour),
ticklen = unitConvert(theme$axis.ticks.length, "pixels", type),
tickwidth = unitConvert(axisTicks, "pixels", type),
showticklabels = !is_blank(axisText),
tickfont = text2font(axisText, type),
tickangle = - (axisText$angle %||% 0),
showline = !is_blank(axisLine),
linecolor = toRGB(axisLine$colour),
linewidth = unitConvert(axisLine, "pixels", type),
# TODO: always `showgrid=FALSE` and implement our own using traces
showgrid = !is_blank(panelGrid) && !"CoordSf" %in% class(p$coordinates),
domain = sort(as.numeric(doms[i, paste0(xy, c("start", "end"))])),
gridcolor = toRGB(panelGrid$colour),
gridwidth = unitConvert(panelGrid, "pixels", type),
zeroline = FALSE,
anchor = anchor,
title = faced(axisTitleText, axisTitle$face),
titlefont = text2font(axisTitle)
)
# set scaleanchor/scaleratio if these are fixed coordinates
fixed_coords <- c("CoordSf", "CoordFixed", "CoordMap", "CoordQuickmap")
if (inherits(p$coordinates, fixed_coords)) {
axisObj$scaleanchor <- anchor
ratio <- p$coordinates$ratio %||% 1
# a la CoordSf$aspect
if (isTRUE(sf::st_is_longlat(rng$crs))) {
ratio <- cos(mean(rng$y_range) * pi/180)
}
axisObj$scaleratio <- if (xy == "y") ratio else 1 / ratio
}
# TODO: should we implement aspect ratios?
if (!is.null(theme$aspect.ratio)) {
warning(
"Aspect ratios aren't yet implemented, but you can manually set",
" a suitable height/width", call. = FALSE
)
}
# tickvals are currently on 0-1 scale, but we want them on data scale
axisObj$tickvals <- scales::rescale(
axisObj$tickvals, to = axisObj$range, from = c(0, 1)
)
# inverse transform date data based on tickvals/ticktext
invert_date <- function(x, scale) {
if (inherits(scale, "ScaleContinuousDatetime")) {
as.POSIXct(x, origin = "1970-01-01", tz = scale$timezone)
} else {
as.Date(x, origin = "1970-01-01", tz = scale$timezone)
}
}
if (isDateType) {
axisObj$range <- invert_date(axisObj$range, sc)
traces <- lapply(traces, function(tr) {
tr[[xy]] <- invert_date(tr[[xy]], sc)
# TODO: are there other similar cases we need to handle?
if (identical("bar", tr$type)) {
tr[["width"]] <- invert_date(tr[["width"]], sc)
}
tr
})
}
# inverse transform categorical data based on tickvals/ticktext
if (isDiscreteType) {
traces <- lapply(traces, function(tr) {
# map x/y trace data back to the 'closest' ticktext label
# http://r.789695.n4.nabble.com/check-for-nearest-value-in-a-vector-td4369339.html
tr[[xy]]<- vapply(tr[[xy]], function(val) {
with(axisObj, ticktext[[which.min(abs(tickvals - val))]])
}, character(1))
tr
})
if ("dodge" %in% sapply(layers, ggtype, "position")) gglayout$barmode <- "dodge"
}
# attach axis object to the layout
gglayout[[axisName]] <- axisObj
# do some stuff that should be done once for the entire plot
if (i == 1) {
axisTickText <- axisObj$ticktext[which.max(nchar(axisObj$ticktext))]
side <- if (xy == "x") "b" else "l"
# account for axis ticks, ticks text, and titles in plot margins
# (apparently ggplot2 doesn't support axis.title/axis.text margins)
gglayout$margin[[side]] <- gglayout$margin[[side]] + axisObj$ticklen +
bbox(axisTickText, axisObj$tickangle, axisObj$tickfont$size)[[type]] +
bbox(axisTitleText, axisTitle$angle, unitConvert(axisTitle, "pixels", type))[[type]]
if (nchar(axisTitleText) > 0) {
axisTextSize <- unitConvert(axisText, "npc", type)
axisTitleSize <- unitConvert(axisTitle, "npc", type)
offset <-
(0 -
bbox(axisTickText, axisText$angle, axisTextSize)[[type]] -
bbox(axisTitleText, axisTitle$angle, axisTitleSize)[[type]] / 2 -
unitConvert(theme$axis.ticks.length, "npc", type))
}
# add space for exterior facet strips in `layout.margin`
if (has_facet(plot)) {
stripSize <- unitConvert(stripText, "pixels", type)
if (xy == "x") {
gglayout$margin$t <- gglayout$margin$t + stripSize
}
if (xy == "y" && inherits(plot$facet, "FacetGrid")) {
gglayout$margin$r <- gglayout$margin$r + stripSize
}
# facets have multiple axis objects, but only one title for the plot,
# so we empty the titles and try to draw the title as an annotation
if (nchar(axisTitleText) > 0) {
# npc is on a 0-1 scale of the _entire_ device,
# but these units _should_ be wrt to the plotting region
# multiplying the offset by 2 seems to work, but this is a terrible hack
x <- if (xy == "x") 0.5 else offset
y <- if (xy == "x") offset else 0.5
gglayout$annotations <- c(
gglayout$annotations,
make_label(
faced(axisTitleText, axisTitle$face), x, y, el = axisTitle,
xanchor = if (xy == "x") "center" else "right",
yanchor = if (xy == "x") "top" else "center",
annotationType = "axis"
)
)
}
}
}
if (has_facet(plot)) gglayout[[axisName]]$title <- ""
} # end of axis loop
# theme(panel.border = ) -> plotly rect shape
xdom <- gglayout[[lay[, "xaxis"]]]$domain
ydom <- gglayout[[lay[, "yaxis"]]]$domain
border <- make_panel_border(xdom, ydom, theme)
gglayout$shapes <- c(gglayout$shapes, border)
# facet strips -> plotly annotations
if (has_facet(plot)) {
col_vars <- ifelse(inherits(plot$facet, "FacetWrap"), "facets", "cols")
col_txt <- paste(
plot$facet$params$labeller(
lay[names(plot$facet$params[[col_vars]])]
), collapse = br()
)
if (is_blank(theme[["strip.text.x"]])) col_txt <- ""
if (inherits(plot$facet, "FacetGrid") && lay$ROW != 1) col_txt <- ""
if (nchar(col_txt) > 0) {
col_lab <- make_label(
col_txt, x = mean(xdom), y = max(ydom),
el = theme[["strip.text.x"]] %||% theme[["strip.text"]],
xanchor = "center", yanchor = "bottom"
)
gglayout$annotations <- c(gglayout$annotations, col_lab)
strip <- make_strip_rect(xdom, ydom, theme, "top")
gglayout$shapes <- c(gglayout$shapes, strip)
}
row_txt <- paste(
plot$facet$params$labeller(
lay[names(plot$facet$params$rows)]
), collapse = br()
)
if (is_blank(theme[["strip.text.y"]])) row_txt <- ""
if (inherits(plot$facet, "FacetGrid") && lay$COL != nCols) row_txt <- ""
if (nchar(row_txt) > 0) {
row_lab <- make_label(
row_txt, x = max(xdom), y = mean(ydom),
el = theme[["strip.text.y"]] %||% theme[["strip.text"]],
xanchor = "left", yanchor = "middle"
)
gglayout$annotations <- c(gglayout$annotations, row_lab)
strip <- make_strip_rect(xdom, ydom, theme, "right")
gglayout$shapes <- c(gglayout$shapes, strip)
}
}
} # end of panel loop
# ------------------------------------------------------------------------
# guide conversion
# Strategy: Obtain and translate the output of ggplot2:::guides_train().
# To do so, we borrow some of the body of ggplot2:::guides_build().
# ------------------------------------------------------------------------
# will there be a legend?
gglayout$showlegend <- sum(unlist(lapply(traces, "[[", "showlegend"))) >= 1
# legend styling
gglayout$legend <- list(
bgcolor = toRGB(theme$legend.background$fill),
bordercolor = toRGB(theme$legend.background$colour),
borderwidth = unitConvert(theme$legend.background$size, "pixels", "width"),
font = text2font(theme$legend.text)
)
# if theme(legend.position = "none") is used, don't show a legend _or_ guide
if (npscales$n() == 0 || identical(theme$legend.position, "none")) {
gglayout$showlegend <- FALSE
} else {
# by default, guide boxes are vertically aligned
theme$legend.box <- theme$legend.box %||% "vertical"
# size of key (also used for bar in colorbar guide)
theme$legend.key.width <- theme$legend.key.width %||% theme$legend.key.size
theme$legend.key.height <- theme$legend.key.height %||% theme$legend.key.size
# legend direction must be vertical
theme$legend.direction <- theme$legend.direction %||% "vertical"
if (!identical(theme$legend.direction, "vertical")) {
warning(
"plotly.js does not (yet) support horizontal legend items \n",
"You can track progress here: \n",
"https://github.com/plotly/plotly.js/issues/53 \n",
call. = FALSE
)
theme$legend.direction <- "vertical"
}
# justification of legend boxes
theme$legend.box.just <- theme$legend.box.just %||% c("center", "center")
# scales -> data for guides
gdefs <- ggfun("guides_train")(scales, theme, plot$guides, plot$labels)
if (length(gdefs) > 0) {
gdefs <- ggfun("guides_merge")(gdefs)
gdefs <- ggfun("guides_geom")(gdefs, layers, plot$mapping)
}
# colourbar -> plotly.js colorbar
colorbar <- compact(lapply(gdefs, gdef2trace, theme, gglayout))
nguides <- length(colorbar) + gglayout$showlegend
# If we have 2 or more guides, set x/y positions accordingly
if (nguides >= 2) {
# place legend at the bottom
gglayout$legend$y <- 1 / nguides
gglayout$legend$yanchor <- "top"
# adjust colorbar position(s)
for (i in seq_along(colorbar)) {
colorbar[[i]]$marker$colorbar$yanchor <- "top"
colorbar[[i]]$marker$colorbar$len <- 1 / nguides
colorbar[[i]]$marker$colorbar$y <- 1 - (i - 1) * (1 / nguides)
}
}
traces <- c(traces, colorbar)
# legend title annotation - https://github.com/plotly/plotly.js/issues/276
if (isTRUE(gglayout$showlegend)) {
legendTitles <- compact(lapply(gdefs, function(g) if (inherits(g, "legend")) g$title else NULL))
legendTitle <- paste(legendTitles, collapse = br())
titleAnnotation <- make_label(
legendTitle,
x = gglayout$legend$x %||% 1.02,
y = gglayout$legend$y %||% 1,
theme$legend.title,
xanchor = "left",
yanchor = "bottom",
# just so the R client knows this is a title
legendTitle = TRUE
)
gglayout$annotations <- c(gglayout$annotations, titleAnnotation)
# adjust the height of the legend to accommodate the title
# this assumes the legend always appears below colorbars
gglayout$legend$y <- (gglayout$legend$y %||% 1) -
length(legendTitles) * unitConvert(theme$legend.title$size, "npc", "height")
}
}
# flip x/y in traces for flipped coordinates
# (we've already done appropriate flipping for axis objects)
if (inherits(plot$coordinates, "CoordFlip")) {
for (i in seq_along(traces)) {
tr <- traces[[i]]
# flipping logic for bar positioning is in geom2trace.GeomBar
if (tr$type != "bar") traces[[i]][c("x", "y")] <- tr[c("y", "x")]
if (tr$type %in% "box") {
traces[[i]]$orientation <- "h"
traces[[i]]$hoverinfo <- "x"
}
names(traces[[i]])[grepl("^error_y$", names(tr))] <- "error_x"
names(traces[[i]])[grepl("^error_x$", names(tr))] <- "error_y"
}
}
# Error bar widths in ggplot2 are on the range of the x/y scale,
# but plotly wants them in pixels:
for (xy in c("x", "y")) {
type <- if (xy == "x") "width" else "height"
err <- if (xy == "x") "error_y" else "error_x"
for (i in seq_along(traces)) {
e <- traces[[i]][[err]]
if (!is.null(e)) {
# TODO: again, "npc" is on device scale...we really want plot scale
w <- grid::unit(e$width %||% 0, "npc")
traces[[i]][[err]]$width <- unitConvert(w, "pixels", type)
}
}
}
# try to merge marker/line traces that have the same values for these props
props <- c("x", "y", "text", "type", "xaxis", "yaxis", "name")
hashes <- vapply(traces, function(x) digest::digest(x[names(x) %in% props]), character(1))
modes <- vapply(traces, function(x) x$mode %||% "", character(1))
nhashes <- length(unique(hashes))
if (nhashes < length(traces)) {
mergedTraces <- vector("list", nhashes)
for (i in unique(hashes)) {
idx <- which(hashes %in% i)
mergedTraces[[i]] <- Reduce(modify_list, traces[idx])
mergedTraces[[i]]$mode <- paste(
unique(unlist(lapply(traces[idx], "[[", "mode"))),
collapse = "+"
)
# show one, show all
show <- vapply(traces[idx], function(tr) tr$showlegend %||% TRUE, logical(1))
if (any(show)) {
mergedTraces[[i]]$showlegend <- TRUE
}
}
traces <- mergedTraces
}
# better layout defaults (TODO: provide a mechanism for templating defaults)
gglayout$hovermode <- "closest"
ax <- grep("^[x-y]axis", names(gglayout))
for (i in ax) {
gglayout[[i]]$hoverformat <- ".2f"
}
# If a trace isn't named, it shouldn't have additional hoverinfo
traces <- lapply(compact(traces), function(x) { x$name <- x$name %||% ""; x })
gglayout$width <- width
gglayout$height <- height
gglayout$barmode <- gglayout$barmode %||% "relative"
l <- list(
data = setNames(traces, NULL),
layout = compact(gglayout),
# prevent autosize on doubleClick which clears ggplot2 margins
config = list(doubleClick = "reset"),
source = source
)
# strip any existing 'AsIs' list elements of their 'AsIs' status.
# this is necessary since ggplot_build(qplot(1:10, fill = I("red")))
# returns list element with their 'AsIs' class,
# which conflicts with our JSON unboxing strategy.
l <- rm_asis(l)
# start build a plotly object with meta information about the ggplot
# first, translate layer mappings -> plotly attrs
mappingFormulas <- lapply(layers, function(x) {
mappings <- c(x$mapping, if (isTRUE(x$inherit.aes)) plot$mapping)
if (originalData) {
lapply(mappings, lazyeval::f_new)
} else {
nms <- names(mappings)
setNames(lapply(nms, function(x) lazyeval::f_new(as.name(x))), nms)
}
})
return_dat <- if (originalData) layer_data else data
# translate group aesthetics to data attributes
return_dat <- Map(function(x, y) {
if (is.null(y[["group"]])) return(x)
dplyr::group_by_(x, y[["group"]])
}, return_dat, mappingFormulas)
# don't need to add group as an attribute anymore
mappingFormulas <- lapply(mappingFormulas, function(x) x[!grepl("^group$", names(x))])
ids <- lapply(seq_along(data), function(x) new_id())
l$attrs <- setNames(mappingFormulas, ids)
l$attrs <- lapply(l$attrs, function(x) structure(x, class = "plotly_eval"))
# the build step removes the first attrs if no type exists
l$attrs[[1]][["type"]] <- l$data[[1]][["type"]] %||% "scatter"
l$cur_data <- ids[[layerData]]
l$visdat <- setNames(lapply(return_dat, function(x) function(y) x), ids)
l
}
#-----------------------------------------------------------------------------
# ggplotly 'utility' functions
#-----------------------------------------------------------------------------
# convert ggplot2 sizes and grid unit(s) to pixels or normalized point coordinates
# Convert ggplot2 sizes and grid unit(s) to pixels or normalized point
# coordinates ("npc", a 0-1 scale of the current graphics device).
#
# @param u a grid unit, a ggplot2 theme element, a plot margin, or a bare
#   number (coerced to a proper unit by verifyUnit()).
# @param to target unit system; only to[1] is used ("npc" or "pixels").
# @param type which grid conversion to apply: "x"/"y" for locations,
#   "width"/"height" for dimensions. Only type[1] is used.
# @return a numeric vector (length 4 for margins: top, right, bottom, left).
unitConvert <- function(u, to = c("npc", "pixels"), type = c("x", "y", "height", "width")) {
  u <- verifyUnit(u)
  convert <- switch(
    type[1],
    x = grid::convertX,
    y = grid::convertY,
    width = grid::convertWidth,
    height = grid::convertHeight
  )
  # convert everything to npc first
  if (inherits(u, "margin")) {
    # margins consist of 4 parts: top, right, bottom, and left;
    # heights and widths must be converted with different grid functions
    uh <- grid::convertHeight(u, "npc")
    uw <- grid::convertWidth(u, "npc")
    u <- grid::unit(c(uh[1], uw[2], uh[3], uw[4]), "npc")
  } else {
    u <- convert(u, "npc")
  }
  if (to[1] == "pixels") {
    # NOTE(review): grid::unit() above returns a plain "unit" object, so a
    # margin input no longer inherits "margin" here and this branch looks
    # unreachable; margins then fall through to the single-convert() else
    # branch below — confirm whether that is intended.
    if (inherits(u, "margin")) {
      uh <- mm2pixels(grid::convertHeight(uh, "mm"))
      uw <- mm2pixels(grid::convertWidth(uw, "mm"))
      u <- c(uh[1], uw[2], uh[3], uw[4])
    } else {
      u <- mm2pixels(convert(u, "mm"))
    }
  }
  as.numeric(u)
}
# ggplot2 size is in millimeters. plotly is in pixels. To do this correctly,
# we need to know PPI/DPI of the display. I'm not sure of a decent way to do that
# from R, but it seems 96 is a reasonable assumption.
#
# @param u a grid unit (or anything verifyUnit() can coerce), already in mm.
# @return a numeric vector of pixel sizes (96 px/inch, 25.4 mm/inch).
mm2pixels <- function(u) {
  u <- verifyUnit(u)
  # NOTE(review): grid unit objects in R >= 4.0 may not carry a "unit"
  # attribute in this form — confirm against the targeted grid version.
  if (attr(u, "unit") != "mm") {
    stop("Unit must be in millimeters")
  }
  (as.numeric(u) * 96) / 25.4
}
# Coerce size specifications into a proper grid unit object.
# Inputs that already carry a "unit" attribute pass through untouched;
# otherwise the default ggplot2 unit is millimeters, except for
# element_text(), whose size is specified in points.
verifyUnit <- function(u) {
  if (!is.null(attr(u, "unit"))) {
    return(u)
  }
  if (inherits(u, "element")) {
    grid::unit(u$size %||% 0, "points")
  } else {
    grid::unit(u %||% 0, "mm")
  }
}
# detect a blank theme element, i.e. an object produced by
# ggplot2::element_blank() (which carries both classes checked below)
is_blank <- function(x) {
  inherits(x, "element") && inherits(x, "element_blank")
}
# given text, and x/y coordinates on a 0-1 "paper" scale,
# convert ggplot2::element_text() to a plotly annotation
#
# @param txt character string to display.
# @param x,y position on a 0-1 paper scale.
# @param el a ggplot2::element_text() controlling font and angle.
# @param ... additional plotly annotation attributes (e.g., xanchor).
# @return a list holding one plotly annotation, or NULL when there is
#   nothing to draw (blank element, or NULL/empty text).
make_label <- function(txt = "", x, y, el = ggplot2::element_text(), ...) {
  # FIX: check length() before nchar() so zero-length input (character(0))
  # short-circuits instead of producing a zero-length logical inside `||`
  # (an error in modern R); all() keeps the nchar() condition scalar.
  if (is_blank(el) || is.null(txt) || length(txt) == 0 || all(nchar(txt) == 0)) {
    return(NULL)
  }
  angle <- el$angle %||% 0
  list(list(
    text = txt,
    x = x,
    y = y,
    showarrow = FALSE,
    # TODO: hjust/vjust?
    ax = 0,
    ay = 0,
    font = text2font(el),
    xref = "paper",
    yref = "paper",
    # plotly.js rotates clockwise for positive angles; ggplot2 is the reverse
    textangle = -angle,
    ...
  ))
}
# does this ggplot object use faceting (either grid or wrap)?
has_facet <- function(x) {
  any(inherits(x$facet, c("FacetGrid", "FacetWrap"), which = TRUE) > 0)
}
#' Estimate bounding box of a rotated string
#'
#' @param txt a character string of length 1
#' @param angle sets the angle of the tick labels with respect to the
#' horizontal (e.g., `tickangle` of -90 draws the tick labels vertically)
#' @param size vertical size of a character
#' @return a list with `height` and `width` (in the same units as `size`)
#' @references
#' https://www.dropbox.com/s/nc6968prgw8ne4w/bbox.pdf?dl=0
bbox <- function(txt = "foo", angle = 0, size = 12) {
  n <- nchar(txt)
  if (sum(n) == 0) return(list(height = 0, width = 0))
  # assuming the horizontal size of a character is roughly half of the vertical
  w <- size * (nchar(txt) / 2)
  # treat a missing/zero-length angle as horizontal text
  angle <- if (length(angle) > 0) abs(angle) else 0
  # do the sensible thing in the majority of cases
  if (angle == 0) return(list(height = size, width = w))
  if (angle == 90) return(list(height = w, width = size))
  # first, compute the hypotenuse (the diagonal of the unrotated box)
  hyp <- sqrt(size ^ 2 + w ^ 2)
  # FIX: cos()/sin() expect radians, but `angle` is in degrees —
  # convert before projecting the hypotenuse onto each axis
  rad <- (90 - angle) * pi / 180
  list(
    height = max(hyp * cos(rad), size),
    width = max(hyp * sin(rad), w)
  )
}
# create a plotly font object from ggplot2::element_text()
#
# @param x a ggplot2::element_text() object.
# @param type forwarded to unitConvert() for the size conversion.
# @return a list with color/family/size entries for plotly.js.
text2font <- function(x = ggplot2::element_text(), type = "height") {
  # element_text() sizes are specified in points
  size_pts <- grid::unit(x$size %||% 0, "points")
  list(
    color = toRGB(x$colour),
    family = x$family,
    # TODO: what about the size of vertical text?
    size = unitConvert(size_pts, "pixels", type)
  )
}
# wrap text in bold/italics according to the text "face"
#
# @param txt a character string.
# @param face one of "plain", "bold", "italic", "bold.italic";
#   anything else (including NULL) leaves the text unchanged.
faced <- function(txt, face = "plain") {
  face <- if (is.null(face)) "plain" else face
  styled <- switch(
    face,
    plain = txt,
    bold = bold(txt),
    italic = italic(txt),
    bold.italic = bold(italic(txt))
  )
  # if, for some reason, a face we don't support is used, return the text
  if (is.null(styled)) txt else styled
}

# wrap text in plotly.js-compatible HTML tags
bold <- function(x) {
  paste("<b>", x, "</b>")
}

italic <- function(x) {
  paste("<i>", x, "</i>")
}
# if a vector has one unique value (ignoring missings), return that value;
# otherwise, return the vector unchanged
uniq <- function(x) {
  vals <- unique(x)
  # preserve empty input and a lone logical NA as-is
  if (length(vals) == 0 || identical(vals, NA)) {
    return(vals)
  }
  non_missing <- vals[!is.na(vals)]
  if (length(non_missing) == 1) non_missing else x
}
# theme(strip.background) -> plotly.js rect shape
# Builds the shaded rectangle behind a facet strip label, extending the
# panel domain by the strip text size on the requested side(s).
make_strip_rect <- function(xdom, ydom, theme, side = "top") {
  shape <- rect2shape(theme[["strip.background"]])
  textX <- theme[["strip.text.x"]] %||% theme[["strip.text"]]
  textY <- theme[["strip.text.y"]] %||% theme[["strip.text"]]
  padX <- unitConvert(textX$size, "npc", "width")
  padY <- unitConvert(textY$size, "npc", "height")
  if ("right" %in% side) {
    # x-padding should be accounted for in `layout.margin.r`
    shape$x0 <- xdom[2]
    shape$x1 <- xdom[2] + padX
    shape$y0 <- ydom[1]
    shape$y1 <- ydom[2]
  }
  if ("top" %in% side) {
    shape$x0 <- xdom[1]
    shape$x1 <- xdom[2]
    shape$y0 <- ydom[2]
    shape$y1 <- ydom[2] + padY
  }
  list(shape)
}
# theme(panel.border) -> plotly.js rect shape
# Draws a rectangle around the full extent of one panel's x/y domain.
make_panel_border <- function(xdom, ydom, theme) {
  shape <- rect2shape(theme[["panel.border"]])
  shape$x0 <- xdom[1]
  shape$x1 <- xdom[2]
  shape$y0 <- ydom[1]
  shape$y1 <- ydom[2]
  list(shape)
}
# element_rect -> plotly.js rect shape
# Translates a ggplot2 rect theme element (fill/colour/size/linetype)
# into a plotly.js shape spec anchored to paper coordinates; the caller
# is expected to fill in the x0/x1/y0/y1 extent.
rect2shape <- function(rekt = ggplot2::element_rect()) {
  outline <- list(
    color = toRGB(rekt$colour),
    width = unitConvert(rekt, "pixels", "width"),
    linetype = lty2dash(rekt$linetype)
  )
  list(
    type = "rect",
    fillcolor = toRGB(rekt$fill),
    line = outline,
    yref = "paper",
    xref = "paper"
  )
}
# is the installed ggplot2 newer than release 2.2.1 (i.e., a dev version)?
is_dev_ggplot2 <- function() {
  installed <- utils::packageVersion("ggplot2")
  installed > package_version("2.2.1")
}
# We need access to internal ggplot2 functions in several places;
# this helps us import functions in a way that R CMD check won't cry about.
# Returns NULL (rather than erroring) when the function can't be found.
ggfun <- function(x) {
  tryCatch(
    utils::getFromNamespace(x, "ggplot2"),
    error = function(e) NULL
  )
}
# extract the ggplot2 "type" of a layer component, e.g.
# ggtype(layer, "geom") maps class "GeomPoint" to "point"
ggtype <- function(x, y = "geom") {
  cls <- tolower(class(x[[y]])[1])
  sub(y, "", cls)
}
# colourbar -> plotly.js colorbar
# Convert a ggplot2 guide definition into an invisible scatter trace whose
# marker carries the plotly.js colorbar.
#
# @param gdef a guide definition (dispatches on class "colorbar").
# @param theme the calculated plot theme, used for colorbar styling.
# @param gglayout the plotly.js layout built so far; the dummy point is
#   placed at the first x/y axis tick.
# @return a plotly trace list, or NULL for guide types with no plotly.js
#   equivalent (e.g., multiple legends).
gdef2trace <- function(gdef, theme, gglayout) {
  if (inherits(gdef, "colorbar")) {
    # sometimes the key has missing values, which we can ignore
    gdef$key <- gdef$key[!is.na(gdef$key$.value), ]
    # rescale bar/key values to a 0-1 scale, matching marker.color below
    rng <- range(gdef$bar$value)
    gdef$bar$value <- scales::rescale(gdef$bar$value, from = rng)
    gdef$key$.value <- scales::rescale(gdef$key$.value, from = rng)
    list(
      # anchor the invisible point at the first tick of each axis
      x = with(gglayout$xaxis, if (identical(tickmode, "auto")) ticktext else tickvals)[[1]],
      y = with(gglayout$yaxis, if (identical(tickmode, "auto")) ticktext else tickvals)[[1]],
      # essentially to prevent this getting merged at a later point
      name = gdef$hash,
      type = "scatter",
      mode = "markers",
      opacity = 0,
      hoverinfo = "none",
      showlegend = FALSE,
      # do everything on a 0-1 scale
      marker = list(
        color = c(0, 1),
        colorscale = setNames(gdef$bar[c("value", "colour")], NULL),
        colorbar = list(
          bgcolor = toRGB(theme$legend.background$fill),
          bordercolor = toRGB(theme$legend.background$colour),
          borderwidth = unitConvert(
            theme$legend.background$size, "pixels", "width"
          ),
          thickness = unitConvert(
            theme$legend.key.width, "pixels", "width"
          ),
          title = gdef$title,
          titlefont = text2font(gdef$title.theme %||% theme$legend.title),
          tickmode = "array",
          ticktext = gdef$key$.label,
          tickvals = gdef$key$.value,
          tickfont = text2font(gdef$label.theme %||% theme$legend.text),
          ticklen = 2,
          # half-height colorbar; overridden when multiple guides exist
          len = 1/2
        )
      )
    )
  } else {
    # if plotly.js gets better support for multiple legends,
    # that conversion should go here
    NULL
  }
}
#' Convert ggplot2 to plotly
#'
#' This function converts a [ggplot2::ggplot()] object to a
#' plotly object.
#'
#' @details Conversion of relative sizes depends on the size of the current
#' graphics device (if no device is open, width/height of a new (off-screen)
#' device defaults to 640/480). In other words, `height` and
#' `width` must be specified at runtime to ensure sizing is correct.
#'
#' @param p a ggplot object.
#' @param width Width of the plot in pixels (optional, defaults to automatic sizing).
#' @param height Height of the plot in pixels (optional, defaults to automatic sizing).
#' @param tooltip a character vector specifying which aesthetic mappings to show
#' in the tooltip. The default, "all", means show all the aesthetic mappings
#' (including the unofficial "text" aesthetic). The order of variables here will
#' also control the order they appear. For example, use
#' `tooltip = c("y", "x", "colour")` if you want y first, x second, and
#' colour last.
#' @param dynamicTicks should plotly.js dynamically generate axis tick labels?
#' Dynamic ticks are useful for updating ticks in response to zoom/pan
#' interactions; however, they can not always reproduce labels as they
#' would appear in the static ggplot2 image.
#' @param layerData data from which layer should be returned?
#' @param originalData should the "original" or "scaled" data be returned?
#' @param source a character string of length 1. Match the value of this string
#' with the source argument in [event_data()] to retrieve the
#' event data corresponding to a specific plot (shiny apps can have multiple plots).
#' @param ... arguments passed onto methods.
#' @export
#' @author Carson Sievert
#' @references \url{https://plot.ly/ggplot2}
#' @seealso [plot_ly()]
#' @examples \dontrun{
#' # simple example
#' ggiris <- qplot(Petal.Width, Sepal.Length, data = iris, color = Species)
#' ggplotly(ggiris)
#'
#' data(canada.cities, package = "maps")
#' viz <- ggplot(canada.cities, aes(long, lat)) +
#' borders(regions = "canada") +
#' coord_equal() +
#' geom_point(aes(text = name, size = pop), colour = "red", alpha = 1/2)
#' ggplotly(viz, tooltip = c("text", "size"))
#'
#' # linked scatterplot brushing
#' library(crosstalk)
#' d <- SharedData$new(mtcars)
#' subplot(
#' qplot(data = d, x = mpg, y = wt),
#' qplot(data = d, x = mpg, y = vs)
#' )
#'
#' # more brushing (i.e. highlighting) examples
#' demo("crosstalk-highlight-ggplotly", package = "plotly")
#'
#' # client-side linked brushing in a scatterplot matrix
#' SharedData$new(iris) %>%
#' GGally::ggpairs(aes(colour = Species), columns = 1:4) %>%
#' ggplotly(tooltip = c("x", "y", "colour"))
#' }
#'
ggplotly <- function(p = ggplot2::last_plot(), width = NULL, height = NULL,
                     tooltip = "all", dynamicTicks = FALSE,
                     layerData = 1, originalData = TRUE, source = "A", ...) {
  # S3 generic: dispatch on the class of `p` (ggplot, ggmatrix, or plotly)
  UseMethod("ggplotly", p)
}
#' @export
ggplotly.plotly <- function(p = ggplot2::last_plot(), width = NULL, height = NULL,
                            tooltip = "all", dynamicTicks = FALSE,
                            layerData = 1, originalData = TRUE, source = "A", ...) {
  # `p` is already a plotly object -- nothing to convert, return as-is
  p
}
#' @export
ggplotly.ggmatrix <- function(p = ggplot2::last_plot(), width = NULL,
                              height = NULL, tooltip = "all", dynamicTicks = FALSE,
                              layerData = 1, originalData = TRUE, source = "A", ...) {
  # Converts a GGally::ggmatrix by converting each panel with ggplotly()
  # and stitching the results together with subplot().
  dots <- list(...)
  # provide a sensible crosstalk if none is already provided (makes ggnostic() work at least)
  if (!crosstalk_key() %in% names(p$data)) {
    p$data[[crosstalk_key()]] <- p$data[[".rownames"]] %||% seq_len(nrow(p$data))
    attr(p$data, "set") <- dots[["set"]] %||% new_id()
  }
  subplotList <- list()
  # iterate column-by-column so each column can share an x-axis
  for (i in seq_len(p$ncol)) {
    columnList <- list()
    for (j in seq_len(p$nrow)) {
      thisPlot <- p[j, i]
      if (i == 1) {
        # should the first column contain axis labels?
        if (p$showYAxisPlotLabels %||% TRUE) thisPlot <- thisPlot + ylab(p$yAxisLabels[j])
      } else {
        # y-axes are never drawn on the interior, and diagonal plots are densities,
        # so it doesn't make sense to synch zoom actions on y
        thisPlot <- thisPlot + ylab(NULL) +
          theme(
            axis.ticks.y = element_blank(),
            axis.text.y = element_blank()
          )
      }
      columnList <- c(
        columnList, list(ggplotly(
          thisPlot, tooltip = tooltip, dynamicTicks = dynamicTicks,
          layerData = layerData, originalData = originalData, source = source,
          width = width, height = height
        ))
      )
    }
    # conditioned on a column in a ggmatrix, the x-axis should be on the
    # same scale.
    s <- subplot(columnList, nrows = p$nrow, margin = 0.01, shareX = TRUE,
                 titleY = TRUE, titleX = TRUE)
    subplotList <- c(subplotList, list(s))
  }
  # combine the per-column subplots into a single row, hide the legend,
  # and default to rectangular (select) dragmode for linked brushing
  s <- subplot(subplotList, nrows = 1, margin = 0.01,
               titleY = TRUE, titleX = TRUE) %>%
    hide_legend() %>%
    layout(dragmode = "select")
  if (nchar(p$title %||% "") > 0) {
    s <- layout(s, title = p$title)
  }
  # subplot() names axes xaxis, xaxis2, ... ; restore the column titles
  for (i in seq_along(p$xAxisLabels)) {
    s$x$layout[[sub("^xaxis1$", "xaxis", paste0("xaxis", i))]]$title <- p$xAxisLabels[[i]]
  }
  # extra left margin to make room for the y-axis labels
  if (length(p$yAxisLabels)) {
    s$x$layout$margin$l <- s$x$layout$margin$l + 50
  }
  config(s)
}
#' @export
ggplotly.ggplot <- function(p = ggplot2::last_plot(), width = NULL,
                            height = NULL, tooltip = "all", dynamicTicks = FALSE,
                            layerData = 1, originalData = TRUE, source = "A", ...) {
  # Build the plotly-ready list representation of the ggplot, then wrap
  # it as an htmlwidget with plotly's default configuration.
  built <- gg2list(
    p, width = width, height = height, tooltip = tooltip,
    dynamicTicks = dynamicTicks, layerData = layerData,
    originalData = originalData, source = source, ...
  )
  config(as_widget(built))
}
#' Convert a ggplot to a list.
#' @param p ggplot2 plot.
#' @param width Width of the plot in pixels (optional, defaults to automatic sizing).
#' @param height Height of the plot in pixels (optional, defaults to automatic sizing).
#' @param tooltip a character vector specifying which aesthetic mappings to show in the
#' tooltip. The default, "all", means show all the aesthetic mappings
#' (including the unofficial "text" aesthetic).
#' @param dynamicTicks accepts the following values: `FALSE`, `TRUE`, `"x"`, or `"y"`.
#' Dynamic ticks are useful for updating ticks in response to zoom/pan/filter
#' interactions; however, there is no guarantee they reproduce axis tick text
#' as they would appear in the static ggplot2 image.
#' @param layerData data from which layer should be returned?
#' @param originalData should the "original" or "scaled" data be returned?
#' @param source a character string of length 1. Match the value of this string
#' with the source argument in [event_data()] to retrieve the
#' event data corresponding to a specific plot (shiny apps can have multiple plots).
#' @param ... currently not used
#' @return a 'built' plotly object (list with names "data" and "layout").
#' @export
gg2list <- function(p, width = NULL, height = NULL,
tooltip = "all", dynamicTicks = FALSE,
layerData = 1, originalData = TRUE, source = "A", ...) {
# To convert relative sizes correctly, we use grid::convertHeight(),
# which may open a new *screen* device, if none is currently open.
# To avoid undesirable side effects, we may need to open a
# non-interactive device and close it on exit...
# https://github.com/att/rcloud.htmlwidgets/issues/2
# Note that we never have to open a non-interactive device
# in RStudio since it ships with one. Plus, calling dev.size()
# adds it to dev.list() & should ensure grid can query the correct device size
rStudioDevSize <- if (is_rstudio()) grDevices::dev.size("px")
if (is.null(grDevices::dev.list())) {
dev_fun <- if (system.file(package = "Cairo") != "") {
Cairo::Cairo
} else if (capabilities("png")) {
grDevices::png
} else if (capabilities("jpeg")) {
grDevices::jpeg
} else {
stop(
"No graphics device is currently open and no cairo or bitmap device is available.\n",
"A graphics device is required to convert sizes correctly. You have three options:",
" (1) Open a graphics device (with the desired size) using ggplotly()",
" (2) install.packages('Cairo')",
" (3) compile R to use a bitmap device (png or jpeg)",
call. = FALSE
)
}
dev_fun(file = tempfile(), width = width %||% 640, height = height %||% 480)
on.exit(grDevices::dev.off(), add = TRUE)
}
# check the value of dynamicTicks
dynamicValues <- c(FALSE, TRUE, "x", "y")
if (length(setdiff(dynamicTicks, dynamicValues))) {
stop(
sprintf(
"`dynamicValues` accepts the following values: '%s'",
paste(dynamicValues, collapse = "', '")
), call. = FALSE
)
}
# we currently support ggplot2 >= 2.2.1 (see DESCRIPTION)
# there are too many naming changes in 2.2.1.9000 to realistically support both versions
if (!is_dev_ggplot2()) {
message(
"We recommend that you use the dev version of ggplot2 with `ggplotly()`\n",
"Install it with: `devtools::install_github('tidyverse/ggplot2')`"
)
if (!identical(dynamicTicks, FALSE)) {
warning(
"You need the dev version of ggplot2 to use `dynamicTicks`", call. = FALSE
)
}
return(
gg2list_legacy(
p, width = width, height = height, tooltip = tooltip,
layerData = layerData, originalData = originalData, source = source, ...
)
)
}
# ------------------------------------------------------------------------
# Our internal version of ggplot2::ggplot_build(). Modified from
# https://github.com/hadley/ggplot2/blob/0cd0ba/R/plot-build.r#L18-L92
# ------------------------------------------------------------------------
plot <- ggfun("plot_clone")(p)
if (length(plot$layers) == 0) {
plot <- plot + geom_blank()
}
layers <- plot$layers
layer_data <- lapply(layers, function(y) y$layer_data(plot$data))
# save crosstalk sets before this attribute gets squashed
sets <- lapply(layer_data, function(y) attr(y, "set"))
scales <- plot$scales
# Apply function to layer and matching data
by_layer <- function(f) {
out <- vector("list", length(data))
for (i in seq_along(data)) {
out[[i]] <- f(l = layers[[i]], d = data[[i]])
}
out
}
# Initialise panels, add extra data for margins & missing facetting
# variables, and add on a PANEL variable to data
layout <- ggfun("create_layout")(plot$facet, plot$coordinates)
data <- layout$setup(layer_data, plot$data, plot$plot_env)
# save the domain of the group for display in tooltips
groupDomains <- Map(function(x, y) {
tryCatch(
eval(y$mapping[["group"]] %||% plot$mapping[["group"]], x),
error = function(e) NULL
)
}, data, layers)
# for simple (StatIdentity) geoms, add crosstalk key to aes mapping
# (effectively adding it as a group)
# later on, for more complicated geoms (w/ non-trivial summary statistics),
# we construct a nested key mapping (within group)
layers <- Map(function(x, y) {
if (crosstalk_key() %in% names(y) && !"key" %in% names(x[["mapping"]]) &&
inherits(x[["stat"]], "StatIdentity")) {
x[["mapping"]] <- c(x[["mapping"]], key = as.name(crosstalk_key()))
}
x
}, layers, layer_data)
# Compute aesthetics to produce data with generalised variable names
data <- by_layer(function(l, d) l$compute_aesthetics(d, plot))
# add frame to group if it exists
data <- lapply(data, function(d) {
if (!"frame" %in% names(d)) return(d)
d$group <- with(d, paste(group, frame, sep = "-"))
d
})
# The computed aesthetic codes the groups as integers
# Here we build a map each of the integer values to the group label
group_maps <- Map(function(x, y) {
tryCatch({
x_group <- x[["group"]]
names(x_group) <- y
x_group <- x_group[!duplicated(x_group)]
x_group
}, error = function(e) NULL
)
}, data, groupDomains)
# Before mapping x/y position, save the domain (for discrete scales)
# to display in tooltip.
data <- lapply(data, function(d) {
d[["x_plotlyDomain"]] <- d[["x"]]
d[["y_plotlyDomain"]] <- d[["y"]]
d
})
# Transform all scales
data <- lapply(data, ggfun("scales_transform_df"), scales = scales)
# Map and train positions so that statistics have access to ranges
# and all positions are numeric
scale_x <- function() scales$get_scales("x")
scale_y <- function() scales$get_scales("y")
layout$train_position(data, scale_x(), scale_y())
data <- layout$map_position(data)
# build a mapping between group and key
# if there are multiple keys within a group, the key is a list-column
reComputeGroup <- function(x, layer = NULL) {
# 1-to-1 link between data & visual marks -- group == key
if (inherits(layer$geom, "GeomDotplot")) {
x <- split(x, x[["PANEL"]])
x <- lapply(x, function(d) {
d[["group"]] <- do.call("order", d[c("x", "group")])
d
})
x <- dplyr::bind_rows(x)
}
if (inherits(layer$geom, "GeomSf")) {
x <- split(x, x[["PANEL"]])
x <- lapply(x, function(d) {
d[["group"]] <- seq_len(nrow(d))
d
})
# I think this is safe?
x <- suppressWarnings(dplyr::bind_rows(x))
}
x
}
nestedKeys <- Map(function(x, y, z) {
key <- y[[crosstalk_key()]]
if (is.null(key) || inherits(z[["stat"]], "StatIdentity")) return(NULL)
x <- reComputeGroup(x, z)
tib <- tibble::as_tibble(x[c("PANEL", "group")])
tib[["key"]] <- key
nested <- tidyr::nest(tib, key, .key = key)
# reduce the dimensions of list column elements from 2 to 1
nested$key <- lapply(nested$key, function(x) x[[1]])
nested
}, data, layer_data, layers)
# for some geoms (e.g. boxplots) plotly.js needs the "pre-statistics" data
# we also now provide the option to return one of these two
prestats_data <- data
data <- by_layer(function(l, d) l$compute_statistic(d, layout))
data <- by_layer(function(l, d) l$map_statistic(d, plot))
# Make sure missing (but required) aesthetics are added
ggfun("scales_add_missing")(plot, c("x", "y"), plot$plot_env)
# Reparameterise geoms from (e.g.) y and width to ymin and ymax
data <- by_layer(function(l, d) l$compute_geom_1(d))
# compute_geom_1 can reorder the rows from `data`, making groupDomains
# invalid. We rebuild groupDomains based on the current `data` and the
# group map we built before.
groupDomains <- Map(function(x, y) {
tryCatch({
names(y)[match(x$group, y)]
}, error = function(e) NULL
)
}, data, group_maps)
# there are some geoms (e.g. geom_dotplot()) where attaching the key
# before applying the statistic can cause problems, but there is still a
# 1-to-1 corresponding between graphical marks and
# Apply position adjustments
data <- by_layer(function(l, d) l$compute_position(d, layout))
# Reset position scales, then re-train and map. This ensures that facets
# have control over the range of a plot: is it generated from what's
# displayed, or does it include the range of underlying data
layout$reset_scales()
layout$train_position(data, scale_x(), scale_y())
layout$setup_panel_params()
data <- layout$map_position(data)
# Train and map non-position scales
npscales <- scales$non_position_scales()
if (npscales$n() > 0) {
lapply(data, ggfun("scales_train_df"), scales = npscales)
# this for loop is unique to plotly -- it saves the "domain"
# of each non-positional scale for display in tooltips
for (sc in npscales$scales) {
data <- lapply(data, function(d) {
# scale may not be relevant for every layer data
if (any(names(d) %in% sc$aesthetics)) {
d[paste0(sc$aesthetics, "_plotlyDomain")] <- d[sc$aesthetics]
}
d
})
}
data <- lapply(data, ggfun("scales_map_df"), scales = npscales)
}
# Fill in defaults etc.
data <- by_layer(function(l, d) l$compute_geom_2(d))
# Let layer stat have a final say before rendering
data <- by_layer(function(l, d) l$finish_statistics(d))
# Let Layout modify data before rendering
data <- layout$finish_data(data)
# ------------------------------------------------------------------------
# end of ggplot_build()
# ------------------------------------------------------------------------
# if necessary, attach key
data <- Map(function(x, y, z) {
if (!length(y)) return(x)
x <- reComputeGroup(x, z)
# dplyr issue??? https://github.com/tidyverse/dplyr/issues/2701
attr(y$group, "n") <- NULL
suppressMessages(dplyr::left_join(x, y))
}, data, nestedKeys, layers)
# initiate plotly.js layout with some plot-wide theming stuff
theme <- ggfun("plot_theme")(plot)
elements <- names(which(sapply(theme, inherits, "element")))
for (i in elements) {
theme[[i]] <- ggplot2::calc_element(i, theme)
}
# Translate plot wide theme elements to plotly.js layout
pm <- unitConvert(theme$plot.margin, "pixels")
gglayout <- list(
margin = list(t = pm[[1]], r = pm[[2]], b = pm[[3]], l = pm[[4]]),
plot_bgcolor = toRGB(theme$panel.background$fill),
paper_bgcolor = toRGB(theme$plot.background$fill),
font = text2font(theme$text)
)
# main plot title
if (nchar(plot$labels$title %||% "") > 0) {
gglayout$title <- faced(plot$labels$title, theme$plot.title$face)
gglayout$titlefont <- text2font(theme$plot.title)
gglayout$margin$t <- gglayout$margin$t + gglayout$titlefont$size
}
# ensure there's enough space for the modebar (this is based on a height of 1em)
# https://github.com/plotly/plotly.js/blob/dd1547/src/components/modebar/index.js#L171
gglayout$margin$t <- gglayout$margin$t + 16
# important stuff like layout$panel_params is already flipped, but
# plot$scales/plot$labels/data aren't. We flip x/y trace data at the very end
# and scales in the axis loop below.
if (inherits(plot$coordinates, "CoordFlip")) {
plot$labels[c("x", "y")] <- plot$labels[c("y", "x")]
}
# important panel summary stats
nPanels <- nrow(layout$layout)
nRows <- max(layout$layout$ROW)
nCols <- max(layout$layout$COL)
# panel -> plotly.js axis/anchor info
# (assume a grid layout by default)
layout$layout$xaxis <- layout$layout$COL
layout$layout$yaxis <- layout$layout$ROW
layout$layout$xanchor <- nRows
layout$layout$yanchor <- 1
if (inherits(plot$facet, "FacetWrap")) {
if (plot$facet$params$free$x) {
layout$layout$xaxis <- layout$layout$PANEL
layout$layout$xanchor <- layout$layout$ROW
}
if (plot$facet$params$free$y) {
layout$layout$yaxis <- layout$layout$PANEL
layout$layout$yanchor <- layout$layout$COL
layout$layout$xanchor <- nPanels
}
if (plot$facet$params$free$x && plot$facet$params$free$y) {
layout$layout$xaxis <- layout$layout$PANEL
layout$layout$yaxis <- layout$layout$PANEL
layout$layout$xanchor <- layout$layout$PANEL
layout$layout$yanchor <- layout$layout$PANEL
}
}
# format the axis/anchor to a format plotly.js respects
layout$layout$xaxis <- paste0("xaxis", sub("^1$", "", layout$layout$xaxis))
layout$layout$yaxis <- paste0("yaxis", sub("^1$", "", layout$layout$yaxis))
layout$layout$xanchor <- paste0("y", sub("^1$", "", layout$layout$xanchor))
layout$layout$yanchor <- paste0("x", sub("^1$", "", layout$layout$yanchor))
# for some layers2traces computations, we need the range of each panel
layout$layout$x_min <- sapply(layout$panel_params, function(z) min(z$x.range %||% z$x_range))
layout$layout$x_max <- sapply(layout$panel_params, function(z) max(z$x.range %||% z$x_range))
layout$layout$y_min <- sapply(layout$panel_params, function(z) min(z$y.range %||% z$y_range))
layout$layout$y_max <- sapply(layout$panel_params, function(z) max(z$y.range %||% z$y_range))
# layers -> plotly.js traces
plot$tooltip <- tooltip
data <- Map(function(x, y) {
tryCatch({ x$group_plotlyDomain <- y; x }, error = function(e) x)
}, data, groupDomains)
# reattach crosstalk key-set attribute
data <- Map(function(x, y) structure(x, set = y), data, sets)
traces <- layers2traces(data, prestats_data, layout, plot)
gglayout <- layers2layout(gglayout, layers, layout$layout)
# default to just the text in hover info, mainly because of this
# https://github.com/plotly/plotly.js/issues/320
traces <- lapply(traces, function(tr) {
tr$hoverinfo <- tr$hoverinfo %||%"text"
tr
})
# show only one legend entry per legendgroup
grps <- sapply(traces, "[[", "legendgroup")
traces <- Map(function(x, y) {
if (!is.null(x[["frame"]])) return(x)
x$showlegend <- isTRUE(x$showlegend) && y
x
}, traces, !duplicated(grps))
# ------------------------------------------------------------------------
# axis/facet/margin conversion
# ------------------------------------------------------------------------
# panel margins must be computed before panel/axis loops
# (in order to use get_domains())
panelMarginX <- unitConvert(
theme[["panel.spacing.x"]] %||% theme[["panel.spacing"]],
"npc", "width"
)
panelMarginY <- unitConvert(
theme[["panel.spacing.y"]] %||% theme[["panel.spacing"]],
"npc", "height"
)
# space for _interior_ facet strips
if (inherits(plot$facet, "FacetWrap")) {
stripSize <- unitConvert(
theme[["strip.text.x"]] %||% theme[["strip.text"]],
"npc", "height"
)
panelMarginY <- panelMarginY + stripSize
# space for ticks/text in free scales
if (plot$facet$params$free$x) {
axisTicksX <- unitConvert(
theme[["axis.ticks.x"]] %||% theme[["axis.ticks"]],
"npc", "height"
)
# allocate enough space for the _longest_ text label
axisTextX <- theme[["axis.text.x"]] %||% theme[["axis.text"]]
labz <- unlist(lapply(layout$panel_params, "[[", "x.labels"))
lab <- labz[which.max(nchar(labz))]
panelMarginY <- panelMarginY + axisTicksX +
bbox(lab, axisTextX$angle, unitConvert(axisTextX, "npc", "height"))[["height"]]
}
if (plot$facet$params$free$y) {
axisTicksY <- unitConvert(
theme[["axis.ticks.y"]] %||% theme[["axis.ticks"]],
"npc", "width"
)
# allocate enough space for the _longest_ text label
axisTextY <- theme[["axis.text.y"]] %||% theme[["axis.text"]]
labz <- unlist(lapply(layout$panel_params, "[[", "y.labels"))
lab <- labz[which.max(nchar(labz))]
panelMarginX <- panelMarginX + axisTicksY +
bbox(lab, axisTextY$angle, unitConvert(axisTextY, "npc", "width"))[["width"]]
}
}
margins <- c(
rep(panelMarginX, 2),
rep(panelMarginY, 2)
)
doms <- get_domains(nPanels, nRows, margins)
for (i in seq_len(nPanels)) {
lay <- layout$layout[i, ]
for (xy in c("x", "y")) {
# find axis specific theme elements that inherit from their parent
theme_el <- function(el) {
theme[[paste0(el, ".", xy)]] %||% theme[[el]]
}
axisTicks <- theme_el("axis.ticks")
axisText <- theme_el("axis.text")
axisTitle <- theme_el("axis.title")
axisLine <- theme_el("axis.line")
panelGrid <- theme_el("panel.grid.major")
stripText <- theme_el("strip.text")
axisName <- lay[, paste0(xy, "axis")]
anchor <- lay[, paste0(xy, "anchor")]
rng <- layout$panel_params[[i]]
# panel_params is quite different for "CoordSf"
if ("CoordSf" %in% class(p$coordinates)) {
# see CoordSf$render_axis_v
direction <- if (xy == "x") "E" else "N"
idx <- rng$graticule$type == direction & !is.na(rng$graticule$degree_label)
tickData <- rng$graticule[idx, ]
# TODO: how to convert a language object to unicode character string?
rng[[paste0(xy, ".labels")]] <- as.character(tickData[["degree_label"]])
rng[[paste0(xy, ".major")]] <- tickData[[paste0(xy, "_start")]]
# If it doesn't already exist (for this panel),
# generate graticule (as done in, CoordSf$render_bg)
isGrill <- vapply(traces, function(tr) {
identical(tr$xaxis, lay$xaxis) &&
identical(tr$yaxis, lay$yaxis) &&
isTRUE(tr$`_isGraticule`)
}, logical(1))
if (sum(isGrill) == 0) {
# TODO: reduce the number of points (via coord_munch?)
d <- expand(rng$graticule)
d$x <- scales::rescale(d$x, rng$x_range, from = c(0, 1))
d$y <- scales::rescale(d$y, rng$y_range, from = c(0, 1))
params <- list(
colour = theme$panel.grid.major$colour,
size = theme$panel.grid.major$size,
linetype = theme$panel.grid.major$linetype
)
grill <- geom2trace.GeomPath(d, params)
grill$hoverinfo <- "none"
grill$showlegend <- FALSE
grill$`_isGraticule` <- TRUE
grill$xaxis <- lay$xaxis
grill$yaxis <- lay$yaxis
traces <- c(list(grill), traces)
}
# if labels are empty, don't show axis ticks
tickExists <- with(rng$graticule, sapply(degree_label, is.language))
if (sum(tickExists) == 0) {
theme$axis.ticks.length <- 0
} else{
# convert the special *degree expression in plotmath to HTML entity
# TODO: can this be done more generally for all ?
rng[[paste0(xy, ".labels")]] <- sub(
"\\*\\s+degree[ ]?[\\*]?", "°", rng[[paste0(xy, ".labels")]]
)
}
}
# stuff like layout$panel_params is already flipped, but scales aren't
sc <- if (inherits(plot$coordinates, "CoordFlip")) {
scales$get_scales(setdiff(c("x", "y"), xy))
} else {
scales$get_scales(xy)
}
# type of unit conversion
type <- if (xy == "x") "height" else "width"
# get axis title
axisTitleText <- sc$name %||% plot$labels[[xy]] %||% ""
if (is_blank(axisTitle)) axisTitleText <- ""
# is this axis dynamic?
isDynamic <- isTRUE(dynamicTicks) || identical(dynamicTicks, xy)
if (isDynamic && !p$coordinates$is_linear()) {
warning(
"`dynamicTicks` is only supported for linear (i.e., cartesian) coordinates",
call. = FALSE
)
}
# determine axis types (note: scale_name may go away someday)
# https://github.com/hadley/ggplot2/issues/1312
isDate <- isTRUE(sc$scale_name %in% c("date", "datetime"))
isDateType <- isDynamic && isDate
isDiscrete <- identical(sc$scale_name, "position_d")
isDiscreteType <- isDynamic && isDiscrete
axisObj <- list(
# TODO: log type?
type = if (isDateType) "date" else if (isDiscreteType) "category" else "linear",
autorange = isDynamic,
range = rng[[paste0(xy, ".range")]] %||% rng[[paste0(xy, "_range")]],
tickmode = if (isDynamic) "auto" else "array",
ticktext = rng[[paste0(xy, ".labels")]],
tickvals = rng[[paste0(xy, ".major")]],
categoryorder = "array",
categoryarray = rng[[paste0(xy, ".labels")]],
nticks = nrow(rng),
ticks = if (is_blank(axisTicks)) "" else "outside",
tickcolor = toRGB(axisTicks$colour),
ticklen = unitConvert(theme$axis.ticks.length, "pixels", type),
tickwidth = unitConvert(axisTicks, "pixels", type),
showticklabels = !is_blank(axisText),
tickfont = text2font(axisText, type),
tickangle = - (axisText$angle %||% 0),
showline = !is_blank(axisLine),
linecolor = toRGB(axisLine$colour),
linewidth = unitConvert(axisLine, "pixels", type),
# TODO: always `showgrid=FALSE` and implement our own using traces
showgrid = !is_blank(panelGrid) && !"CoordSf" %in% class(p$coordinates),
domain = sort(as.numeric(doms[i, paste0(xy, c("start", "end"))])),
gridcolor = toRGB(panelGrid$colour),
gridwidth = unitConvert(panelGrid, "pixels", type),
zeroline = FALSE,
anchor = anchor,
title = faced(axisTitleText, axisTitle$face),
titlefont = text2font(axisTitle)
)
# set scaleanchor/scaleratio if these are fixed coordinates
fixed_coords <- c("CoordSf", "CoordFixed", "CoordMap", "CoordQuickmap")
if (inherits(p$coordinates, fixed_coords)) {
axisObj$scaleanchor <- anchor
ratio <- p$coordinates$ratio %||% 1
# a la CoordSf$aspect
if (isTRUE(sf::st_is_longlat(rng$crs))) {
ratio <- cos(mean(rng$y_range) * pi/180)
}
axisObj$scaleratio <- if (xy == "y") ratio else 1 / ratio
}
# TODO: should we implement aspect ratios?
if (!is.null(theme$aspect.ratio)) {
warning(
"Aspect ratios aren't yet implemented, but you can manually set",
" a suitable height/width", call. = FALSE
)
}
# tickvals are currently on 0-1 scale, but we want them on data scale
axisObj$tickvals <- scales::rescale(
axisObj$tickvals, to = axisObj$range, from = c(0, 1)
)
# inverse transform date data based on tickvals/ticktext
invert_date <- function(x, scale) {
if (inherits(scale, "ScaleContinuousDatetime")) {
as.POSIXct(x, origin = "1970-01-01", tz = scale$timezone)
} else {
as.Date(x, origin = "1970-01-01", tz = scale$timezone)
}
}
if (isDateType) {
axisObj$range <- invert_date(axisObj$range, sc)
traces <- lapply(traces, function(tr) {
tr[[xy]] <- invert_date(tr[[xy]], sc)
# TODO: are there other similar cases we need to handle?
if (identical("bar", tr$type)) {
tr[["width"]] <- invert_date(tr[["width"]], sc)
}
tr
})
}
# inverse transform categorical data based on tickvals/ticktext
if (isDiscreteType) {
traces <- lapply(traces, function(tr) {
# map x/y trace data back to the 'closest' ticktext label
# http://r.789695.n4.nabble.com/check-for-nearest-value-in-a-vector-td4369339.html
tr[[xy]]<- vapply(tr[[xy]], function(val) {
with(axisObj, ticktext[[which.min(abs(tickvals - val))]])
}, character(1))
tr
})
if ("dodge" %in% sapply(layers, ggtype, "position")) gglayout$barmode <- "dodge"
}
# attach axis object to the layout
gglayout[[axisName]] <- axisObj
# do some stuff that should be done once for the entire plot
if (i == 1) {
axisTickText <- axisObj$ticktext[which.max(nchar(axisObj$ticktext))]
side <- if (xy == "x") "b" else "l"
# account for axis ticks, ticks text, and titles in plot margins
# (apparently ggplot2 doesn't support axis.title/axis.text margins)
gglayout$margin[[side]] <- gglayout$margin[[side]] + axisObj$ticklen +
bbox(axisTickText, axisObj$tickangle, axisObj$tickfont$size)[[type]] +
bbox(axisTitleText, axisTitle$angle, unitConvert(axisTitle, "pixels", type))[[type]]
if (nchar(axisTitleText) > 0) {
axisTextSize <- unitConvert(axisText, "npc", type)
axisTitleSize <- unitConvert(axisTitle, "npc", type)
offset <-
(0 -
bbox(axisTickText, axisText$angle, axisTextSize)[[type]] -
bbox(axisTitleText, axisTitle$angle, axisTitleSize)[[type]] / 2 -
unitConvert(theme$axis.ticks.length, "npc", type))
}
# add space for exterior facet strips in `layout.margin`
if (has_facet(plot)) {
stripSize <- unitConvert(stripText, "pixels", type)
if (xy == "x") {
gglayout$margin$t <- gglayout$margin$t + stripSize
}
if (xy == "y" && inherits(plot$facet, "FacetGrid")) {
gglayout$margin$r <- gglayout$margin$r + stripSize
}
# facets have multiple axis objects, but only one title for the plot,
# so we empty the titles and try to draw the title as an annotation
if (nchar(axisTitleText) > 0) {
# npc is on a 0-1 scale of the _entire_ device,
# but these units _should_ be wrt to the plotting region
# multiplying the offset by 2 seems to work, but this is a terrible hack
x <- if (xy == "x") 0.5 else offset
y <- if (xy == "x") offset else 0.5
gglayout$annotations <- c(
gglayout$annotations,
make_label(
faced(axisTitleText, axisTitle$face), x, y, el = axisTitle,
xanchor = if (xy == "x") "center" else "right",
yanchor = if (xy == "x") "top" else "center",
annotationType = "axis"
)
)
}
}
}
if (has_facet(plot)) gglayout[[axisName]]$title <- ""
} # end of axis loop
# theme(panel.border = ) -> plotly rect shape
xdom <- gglayout[[lay[, "xaxis"]]]$domain
ydom <- gglayout[[lay[, "yaxis"]]]$domain
border <- make_panel_border(xdom, ydom, theme)
gglayout$shapes <- c(gglayout$shapes, border)
# facet strips -> plotly annotations
if (has_facet(plot)) {
col_vars <- ifelse(inherits(plot$facet, "FacetWrap"), "facets", "cols")
col_txt <- paste(
plot$facet$params$labeller(
lay[names(plot$facet$params[[col_vars]])]
), collapse = br()
)
if (is_blank(theme[["strip.text.x"]])) col_txt <- ""
if (inherits(plot$facet, "FacetGrid") && lay$ROW != 1) col_txt <- ""
if (nchar(col_txt) > 0) {
col_lab <- make_label(
col_txt, x = mean(xdom), y = max(ydom),
el = theme[["strip.text.x"]] %||% theme[["strip.text"]],
xanchor = "center", yanchor = "bottom"
)
gglayout$annotations <- c(gglayout$annotations, col_lab)
strip <- make_strip_rect(xdom, ydom, theme, "top")
gglayout$shapes <- c(gglayout$shapes, strip)
}
row_txt <- paste(
plot$facet$params$labeller(
lay[names(plot$facet$params$rows)]
), collapse = br()
)
if (is_blank(theme[["strip.text.y"]])) row_txt <- ""
if (inherits(plot$facet, "FacetGrid") && lay$COL != nCols) row_txt <- ""
if (nchar(row_txt) > 0) {
row_lab <- make_label(
row_txt, x = max(xdom), y = mean(ydom),
el = theme[["strip.text.y"]] %||% theme[["strip.text"]],
xanchor = "left", yanchor = "middle"
)
gglayout$annotations <- c(gglayout$annotations, row_lab)
strip <- make_strip_rect(xdom, ydom, theme, "right")
gglayout$shapes <- c(gglayout$shapes, strip)
}
}
} # end of panel loop
# ------------------------------------------------------------------------
# guide conversion
# Strategy: Obtain and translate the output of ggplot2:::guides_train().
# To do so, we borrow some of the body of ggplot2:::guides_build().
# ------------------------------------------------------------------------
# will there be a legend?
gglayout$showlegend <- sum(unlist(lapply(traces, "[[", "showlegend"))) >= 1
# legend styling
gglayout$legend <- list(
bgcolor = toRGB(theme$legend.background$fill),
bordercolor = toRGB(theme$legend.background$colour),
borderwidth = unitConvert(theme$legend.background$size, "pixels", "width"),
font = text2font(theme$legend.text)
)
# if theme(legend.position = "none") is used, don't show a legend _or_ guide
if (npscales$n() == 0 || identical(theme$legend.position, "none")) {
gglayout$showlegend <- FALSE
} else {
# by default, guide boxes are vertically aligned
theme$legend.box <- theme$legend.box %||% "vertical"
# size of key (also used for bar in colorbar guide)
theme$legend.key.width <- theme$legend.key.width %||% theme$legend.key.size
theme$legend.key.height <- theme$legend.key.height %||% theme$legend.key.size
# legend direction must be vertical
theme$legend.direction <- theme$legend.direction %||% "vertical"
if (!identical(theme$legend.direction, "vertical")) {
warning(
"plotly.js does not (yet) support horizontal legend items \n",
"You can track progress here: \n",
"https://github.com/plotly/plotly.js/issues/53 \n",
call. = FALSE
)
theme$legend.direction <- "vertical"
}
# justification of legend boxes
theme$legend.box.just <- theme$legend.box.just %||% c("center", "center")
# scales -> data for guides
gdefs <- ggfun("guides_train")(scales, theme, plot$guides, plot$labels)
if (length(gdefs) > 0) {
gdefs <- ggfun("guides_merge")(gdefs)
gdefs <- ggfun("guides_geom")(gdefs, layers, plot$mapping)
}
# colourbar -> plotly.js colorbar
colorbar <- compact(lapply(gdefs, gdef2trace, theme, gglayout))
nguides <- length(colorbar) + gglayout$showlegend
# If we have 2 or more guides, set x/y positions accordingly
if (nguides >= 2) {
# place legend at the bottom
gglayout$legend$y <- 1 / nguides
gglayout$legend$yanchor <- "top"
# adjust colorbar position(s)
for (i in seq_along(colorbar)) {
colorbar[[i]]$marker$colorbar$yanchor <- "top"
colorbar[[i]]$marker$colorbar$len <- 1 / nguides
colorbar[[i]]$marker$colorbar$y <- 1 - (i - 1) * (1 / nguides)
}
}
traces <- c(traces, colorbar)
# legend title annotation - https://github.com/plotly/plotly.js/issues/276
if (isTRUE(gglayout$showlegend)) {
legendTitles <- compact(lapply(gdefs, function(g) if (inherits(g, "legend")) g$title else NULL))
legendTitle <- paste(legendTitles, collapse = br())
titleAnnotation <- make_label(
legendTitle,
x = gglayout$legend$x %||% 1.02,
y = gglayout$legend$y %||% 1,
theme$legend.title,
xanchor = "left",
yanchor = "bottom",
# just so the R client knows this is a title
legendTitle = TRUE
)
gglayout$annotations <- c(gglayout$annotations, titleAnnotation)
# adjust the height of the legend to accommodate the title
# this assumes the legend always appears below colorbars
gglayout$legend$y <- (gglayout$legend$y %||% 1) -
length(legendTitles) * unitConvert(theme$legend.title$size, "npc", "height")
}
}
# flip x/y in traces for flipped coordinates
# (we've already done appropriate flipping for axis objects)
if (inherits(plot$coordinates, "CoordFlip")) {
for (i in seq_along(traces)) {
tr <- traces[[i]]
# flipping logic for bar positioning is in geom2trace.GeomBar
if (tr$type != "bar") traces[[i]][c("x", "y")] <- tr[c("y", "x")]
if (tr$type %in% "box") {
traces[[i]]$orientation <- "h"
traces[[i]]$hoverinfo <- "x"
}
names(traces[[i]])[grepl("^error_y$", names(tr))] <- "error_x"
names(traces[[i]])[grepl("^error_x$", names(tr))] <- "error_y"
}
}
# Error bar widths in ggplot2 are on the range of the x/y scale,
# but plotly wants them in pixels:
for (xy in c("x", "y")) {
type <- if (xy == "x") "width" else "height"
err <- if (xy == "x") "error_y" else "error_x"
for (i in seq_along(traces)) {
e <- traces[[i]][[err]]
if (!is.null(e)) {
# TODO: again, "npc" is on device scale...we really want plot scale
w <- grid::unit(e$width %||% 0, "npc")
traces[[i]][[err]]$width <- unitConvert(w, "pixels", type)
}
}
}
# try to merge marker/line traces that have the same values for these props
props <- c("x", "y", "text", "type", "xaxis", "yaxis", "name")
hashes <- vapply(traces, function(x) digest::digest(x[names(x) %in% props]), character(1))
modes <- vapply(traces, function(x) x$mode %||% "", character(1))
nhashes <- length(unique(hashes))
if (nhashes < length(traces)) {
mergedTraces <- vector("list", nhashes)
for (i in unique(hashes)) {
idx <- which(hashes %in% i)
mergedTraces[[i]] <- Reduce(modify_list, traces[idx])
mergedTraces[[i]]$mode <- paste(
unique(unlist(lapply(traces[idx], "[[", "mode"))),
collapse = "+"
)
# show one, show all
show <- vapply(traces[idx], function(tr) tr$showlegend %||% TRUE, logical(1))
if (any(show)) {
mergedTraces[[i]]$showlegend <- TRUE
}
}
traces <- mergedTraces
}
# better layout defaults (TODO: provide a mechanism for templating defaults)
gglayout$hovermode <- "closest"
ax <- grep("^[x-y]axis", names(gglayout))
for (i in ax) {
gglayout[[i]]$hoverformat <- ".2f"
}
# If a trace isn't named, it shouldn't have additional hoverinfo
traces <- lapply(compact(traces), function(x) { x$name <- x$name %||% ""; x })
gglayout$width <- width
gglayout$height <- height
gglayout$barmode <- gglayout$barmode %||% "relative"
l <- list(
data = setNames(traces, NULL),
layout = compact(gglayout),
# prevent autosize on doubleClick which clears ggplot2 margins
config = list(doubleClick = "reset"),
source = source
)
# strip any existing 'AsIs' list elements of their 'AsIs' status.
# this is necessary since ggplot_build(qplot(1:10, fill = I("red")))
# returns list element with their 'AsIs' class,
# which conflicts with our JSON unboxing strategy.
l <- rm_asis(l)
# start build a plotly object with meta information about the ggplot
# first, translate layer mappings -> plotly attrs
mappingFormulas <- lapply(layers, function(x) {
mappings <- c(x$mapping, if (isTRUE(x$inherit.aes)) plot$mapping)
if (originalData) {
lapply(mappings, lazyeval::f_new)
} else {
nms <- names(mappings)
setNames(lapply(nms, function(x) lazyeval::f_new(as.name(x))), nms)
}
})
return_dat <- if (originalData) layer_data else data
# translate group aesthetics to data attributes
return_dat <- Map(function(x, y) {
if (is.null(y[["group"]])) return(x)
dplyr::group_by_(x, y[["group"]])
}, return_dat, mappingFormulas)
# don't need to add group as an attribute anymore
mappingFormulas <- lapply(mappingFormulas, function(x) x[!grepl("^group$", names(x))])
ids <- lapply(seq_along(data), function(x) new_id())
l$attrs <- setNames(mappingFormulas, ids)
l$attrs <- lapply(l$attrs, function(x) structure(x, class = "plotly_eval"))
# the build step removes the first attrs if no type exists
l$attrs[[1]][["type"]] <- l$data[[1]][["type"]] %||% "scatter"
l$cur_data <- ids[[layerData]]
l$visdat <- setNames(lapply(return_dat, function(x) function(y) x), ids)
l
}
#-----------------------------------------------------------------------------
# ggplotly 'utility' functions
#-----------------------------------------------------------------------------
# convert ggplot2 sizes and grid unit(s) to pixels or normalized point coordinates
unitConvert <- function(u, to = c("npc", "pixels"), type = c("x", "y", "height", "width")) {
# Convert a ggplot2/grid size into either normalized parent coordinates
# ("npc", a 0-1 fraction of the device) or pixels.
#   u    -- a grid unit, a bare number, or a theme element (see verifyUnit())
#   to   -- target system; only the first element is used
#   type -- which dimension the value measures; selects the grid
#           conversion function (positions x/y vs extents width/height)
# Returns a plain numeric vector; results depend on the active graphics
# device at call time.
u <- verifyUnit(u)
convert <- switch(
type[1],
x = grid::convertX,
y = grid::convertY,
width = grid::convertWidth,
height = grid::convertHeight
)
# convert everything to npc first
if (inherits(u, "margin")) {
# margins consist of 4 parts: top, right, bottom, and left
# (elements 1/3 are heights, elements 2/4 are widths)
uh <- grid::convertHeight(u, "npc")
uw <- grid::convertWidth(u, "npc")
u <- grid::unit(c(uh[1], uw[2], uh[3], uw[4]), "npc")
} else {
u <- convert(u, "npc")
}
if (to[1] == "pixels") {
# NOTE(review): the npc rebuild above appears to drop the "margin"
# class from `u`, which would make this branch unreachable (margins
# would fall through to the else-branch and convert all four parts
# with a single conversion function) -- confirm against grid's
# margin class behavior
if (inherits(u, "margin")) {
uh <- mm2pixels(grid::convertHeight(uh, "mm"))
uw <- mm2pixels(grid::convertWidth(uw, "mm"))
u <- c(uh[1], uw[2], uh[3], uw[4])
} else {
u <- mm2pixels(convert(u, "mm"))
}
}
as.numeric(u)
}
# ggplot2 size is in millimeters. plotly is in pixels. To do this correctly,
# we need to know PPI/DPI of the display. I'm not sure of a decent way to do that
# from R, but it seems 96 is a reasonable assumption.
mm2pixels <- function(u) {
  # Convert a grid unit expressed in millimeters into pixels, assuming a
  # display density of 96 DPI (1 inch = 25.4 mm).
  u <- verifyUnit(u)
  unit_type <- attr(u, "unit")
  if (unit_type != "mm") {
    stop("Unit must be in millimeters")
  }
  96 * as.numeric(u) / 25.4
}
verifyUnit <- function(u) {
  # Coerce bare numbers and theme elements into grid units. ggplot2 sizes
  # default to millimeters, except element_text() sizes, which are points.
  # Inputs that already carry a "unit" attribute pass through untouched.
  if (!is.null(attr(u, "unit"))) {
    return(u)
  }
  if (inherits(u, "element")) {
    grid::unit(u$size %||% 0, "points")
  } else {
    grid::unit(u %||% 0, "mm")
  }
}
# detect a blank theme element
is_blank <- function(x) {
  # A blank theme element carries both classes; requiring the pair means
  # arbitrary objects classed "element_blank" alone do not qualify.
  inherits(x, "element") && inherits(x, "element_blank")
}
# given text, and x/y coordinates on 0-1 scale,
# convert ggplot2::element_text() to plotly annotation
make_label <- function(txt = "", x, y, el = ggplot2::element_text(), ...) {
  # Convert a ggplot2 text element into a plotly paper-referenced
  # annotation placed at (x, y) on a 0-1 scale.
  # Returns NULL (i.e. no annotation) for blank elements or empty text.
  # NOTE: length() must be tested before nchar(); for a zero-length txt
  # (character(0)), nchar(txt) == 0 is logical(0), and a zero-length
  # operand to `||` is an error.
  if (is_blank(el) || is.null(txt) || length(txt) == 0 || nchar(txt) == 0) {
    return(NULL)
  }
  angle <- el$angle %||% 0
  list(list(
    text = txt,
    x = x,
    y = y,
    showarrow = FALSE,
    # TODO: hjust/vjust?
    ax = 0,
    ay = 0,
    font = text2font(el),
    xref = "paper",
    yref = "paper",
    # plotly's textangle is measured in the opposite direction
    textangle = -angle,
    ...
  ))
}
has_facet <- function(x) {
  # TRUE when the ggplot object's facet spec is facet_grid() or facet_wrap()
  facet_classes <- c("FacetGrid", "FacetWrap")
  inherits(x$facet, facet_classes)
}
#' Estimate bounding box of a rotated string
#'
#' @param txt a character string of length 1
#' @param angle sets the angle of the tick labels with respect to the
#' horizontal (e.g., `tickangle` of -90 draws the tick labels vertically)
#' @param size vertical size of a character
#' @references
#' https://www.dropbox.com/s/nc6968prgw8ne4w/bbox.pdf?dl=0
bbox <- function(txt = "foo", angle = 0, size = 12) {
  # Estimate the bounding box (height/width) of `txt` rotated by `angle`
  # degrees, where `size` is the vertical extent of one character.
  # assuming the horizontal size of a character is roughly half of the vertical
  n <- nchar(txt)
  if (sum(n) == 0) return(list(height = 0, width = 0))
  w <- size * (nchar(txt) / 2)
  angle <- abs(angle %||% 0)
  # do the sensible thing in the majority of cases
  if (angle == 0) return(list(height = size, width = w))
  if (angle == 90) return(list(height = w, width = size))
  # first, compute the hypotenuse of the unrotated text box
  hyp <- sqrt(size ^ 2 + w ^ 2)
  # R's trig functions take radians, but `angle` is in degrees -- convert
  # before projecting the hypotenuse onto each axis (the original passed
  # degrees straight into cos/sin)
  theta <- (90 - angle) * pi / 180
  list(
    height = max(hyp * cos(theta), size),
    width = max(hyp * sin(theta), w)
  )
}
# create a plotly font object from ggplot2::element_text()
text2font <- function(x = ggplot2::element_text(), type = "height") {
  # Map an element_text() to a plotly font list; the point size is
  # converted to pixels via unitConvert().
  pt_size <- grid::unit(x$size %||% 0, "points")
  list(
    color = toRGB(x$colour),
    family = x$family,
    # TODO: what about the size of vertical text?
    size = unitConvert(pt_size, "pixels", type)
  )
}
# wrap text in bold/italics according to the text "face"
faced <- function(txt, face = "plain") {
  # Wrap text in bold/italic markup according to the element "face".
  # Missing or unrecognized faces fall back to plain text.
  if (is.null(face)) {
    face <- "plain"
  }
  switch(
    face,
    plain = txt,
    bold = bold(txt),
    italic = italic(txt),
    bold.italic = bold(italic(txt)),
    txt
  )
}
# Wrap text in plotly-understood HTML tags. Note that paste() inserts a
# space on each side of the wrapped text.
bold <- function(x) {
  paste("<b>", x, "</b>")
}
italic <- function(x) {
  paste("<i>", x, "</i>")
}
# if a vector that has one unique value (ignoring missings), return that value
uniq <- function(x) {
  # Collapse `x` to its single unique value when one exists (ignoring
  # missings); otherwise return `x` unchanged.
  vals <- unique(x)
  # zero-length input and the logical-NA singleton pass straight through
  if (length(vals) == 0 || identical(vals, NA)) {
    return(vals)
  }
  vals <- vals[!is.na(vals)]
  if (length(vals) != 1) {
    return(x)
  }
  vals
}
# theme(strip.background) -> plotly.js rect shape
make_strip_rect <- function(xdom, ydom, theme, side = "top") {
  # Build the plotly.js rect shape behind a facet strip label, styled via
  # theme(strip.background) and sized from the strip text size.
  rekt <- rect2shape(theme[["strip.background"]])
  stripX <- theme[["strip.text.x"]] %||% theme[["strip.text"]]
  stripY <- theme[["strip.text.y"]] %||% theme[["strip.text"]]
  textW <- unitConvert(stripX$size, "npc", "width")
  textH <- unitConvert(stripY$size, "npc", "height")
  if ("right" %in% side) {
    # strip runs along the right edge of the panel, one text-width wide
    # x-padding should be accounted for in `layout.margin.r`
    rekt[c("x0", "x1", "y0", "y1")] <- list(xdom[2], xdom[2] + textW, ydom[1], ydom[2])
  }
  if ("top" %in% side) {
    # strip sits above the panel, one text-height tall
    rekt[c("x0", "x1", "y0", "y1")] <- list(xdom[1], xdom[2], ydom[2], ydom[2] + textH)
  }
  list(rekt)
}
# theme(panel.border) -> plotly.js rect shape
make_panel_border <- function(xdom, ydom, theme) {
  # plotly.js rect shape spanning the full panel, styled via theme(panel.border)
  rekt <- rect2shape(theme[["panel.border"]])
  rekt[c("x0", "x1")] <- list(xdom[1], xdom[2])
  rekt[c("y0", "y1")] <- list(ydom[1], ydom[2])
  list(rekt)
}
# element_rect -> plotly.js rect shape
rect2shape <- function(rekt = ggplot2::element_rect()) {
  # Translate element_rect() fill/border styling into a paper-referenced
  # plotly.js rect shape; x0/x1/y0/y1 are filled in by the caller.
  border <- list(
    color = toRGB(rekt$colour),
    width = unitConvert(rekt, "pixels", "width"),
    linetype = lty2dash(rekt$linetype)
  )
  list(
    type = "rect",
    fillcolor = toRGB(rekt$fill),
    line = border,
    yref = "paper",
    xref = "paper"
  )
}
is_dev_ggplot2 <- function() {
  # TRUE when the installed ggplot2 is newer than the 2.2.1 CRAN release
  # (i.e., a development build).
  installed <- utils::packageVersion("ggplot2")
  installed > "2.2.1"
}
# We need access to internal ggplot2 functions in several places
# this helps us import functions in a way that R CMD check won't cry about
ggfun <- function(x) {
  # Fetch a (possibly unexported) ggplot2 function by name, returning
  # NULL when it -- or ggplot2 itself -- is unavailable. This keeps
  # R CMD check quiet about ::: access.
  out <- tryCatch(
    utils::getFromNamespace(x, "ggplot2"),
    error = function(e) NULL
  )
  out
}
ggtype <- function(x, y = "geom") {
  # Derive the short type name from a layer component's class,
  # e.g. class "GeomPoint" with y = "geom" -> "point".
  cls <- tolower(class(x[[y]])[1])
  sub(y, "", cls)
}
# colourbar -> plotly.js colorbar
gdef2trace <- function(gdef, theme, gglayout) {
# Translate one ggplot2 guide definition into a plotly.js trace.
# Only colorbar guides are supported: plotly.js attaches colorbars to
# traces, so we emit an invisible scatter trace whose marker carries the
# colorbar definition. Any other guide type yields NULL.
if (inherits(gdef, "colorbar")) {
# sometimes the key has missing values, which we can ignore
gdef$key <- gdef$key[!is.na(gdef$key$.value), ]
# rescale the bar/key values onto a 0-1 scale so they line up with the
# two-point marker colorscale defined below
rng <- range(gdef$bar$value)
gdef$bar$value <- scales::rescale(gdef$bar$value, from = rng)
gdef$key$.value <- scales::rescale(gdef$key$.value, from = rng)
list(
# anchor the invisible point at the first axis tick so it sits
# inside the plotting region
x = with(gglayout$xaxis, if (identical(tickmode, "auto")) ticktext else tickvals)[[1]],
y = with(gglayout$yaxis, if (identical(tickmode, "auto")) ticktext else tickvals)[[1]],
# essentially to prevent this getting merged at a later point
name = gdef$hash,
type = "scatter",
mode = "markers",
opacity = 0,
hoverinfo = "none",
showlegend = FALSE,
# do everything on a 0-1 scale
marker = list(
color = c(0, 1),
colorscale = setNames(gdef$bar[c("value", "colour")], NULL),
colorbar = list(
bgcolor = toRGB(theme$legend.background$fill),
bordercolor = toRGB(theme$legend.background$colour),
borderwidth = unitConvert(
theme$legend.background$size, "pixels", "width"
),
thickness = unitConvert(
theme$legend.key.width, "pixels", "width"
),
title = gdef$title,
titlefont = text2font(gdef$title.theme %||% theme$legend.title),
tickmode = "array",
ticktext = gdef$key$.label,
tickvals = gdef$key$.value,
tickfont = text2font(gdef$label.theme %||% theme$legend.text),
ticklen = 2,
# default: colorbar occupies half the plot height
len = 1/2
)
)
)
} else {
# if plotly.js gets better support for multiple legends,
# that conversion should go here
NULL
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/general_help_functions.R
\name{map_resource_use_categories}
\alias{map_resource_use_categories}
\title{Function to read the text form of resource use and replace it with
standard texts of resource use, i.e. someone can describe a GP visit as GP
surgery visit, surgery visit or general practitioner visit etc. Here all these
texts should be given in an Excel or csv file, and then the corresponding
standard form will be read from the file and used as the replacement.}
\usage{
map_resource_use_categories(
the_data,
service_actual,
new_column,
mapped_data,
mapped_use,
analysis,
replace_only,
relevant_column = NULL,
check_value_relevant = NULL,
nhs_use_column = NULL,
check_value_nhs_use = NULL
)
}
\arguments{
\item{the_data}{the data where the observations are held}
\item{service_actual}{column name of the actual service use}
\item{new_column}{the name of the column where the mapped resource use is to be stored}
\item{mapped_data}{data where the service name and mapped service name
have been stored}
\item{mapped_use}{column name of mapped resource use in mapped_data}
\item{replace_only}{if we want to replace only certain resource use}
\item{relevant_column}{the name of the column where the mapped resource use
is indicated as relevant or not}
\item{check_value_relevant}{the value by which the mapped resource use
is indicated as relevant}
\item{nhs_use_column}{the name of the column indicating whether the mapped
resource use comes under the NHS or not}
\item{check_value_nhs_use}{value that is used to indicate the NHS use}
\item{analysis}{base case or secondary}
\value{
the data with the mapped resource use column added
}
\description{
Function to read the text form of resource use and replace it with
standard texts of resource use, i.e. someone can describe a GP visit as GP
surgery visit, surgery visit or general practitioner visit etc. Here all these
texts should be given in an Excel or csv file, and then the corresponding
standard form will be read from the file and used as the replacement.
}
| /man/map_resource_use_categories.Rd | no_license | sheejamk/packDAMipd | R | false | true | 2,046 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/general_help_functions.R
\name{map_resource_use_categories}
\alias{map_resource_use_categories}
\title{Function to read the text form of resource use and replace it with
standard texts of resource use, i.e. someone can describe a GP visit as GP
surgery visit, surgery visit or general practitioner visit etc. Here all these
texts should be given in an Excel or csv file, and then the corresponding
standard form will be read from the file and used as the replacement.}
\usage{
map_resource_use_categories(
the_data,
service_actual,
new_column,
mapped_data,
mapped_use,
analysis,
replace_only,
relevant_column = NULL,
check_value_relevant = NULL,
nhs_use_column = NULL,
check_value_nhs_use = NULL
)
}
\arguments{
\item{the_data}{the data where the observations are held}
\item{service_actual}{column name of the actual service use}
\item{new_column}{the name of the column where the mapped resource use is to be stored}
\item{mapped_data}{data where the service name and mapped service name
have been stored}
\item{mapped_use}{column name of mapped resource use in mapped_data}
\item{replace_only}{if we want to replace only certain resource use}
\item{relevant_column}{the name of the column where the mapped resource use
is indicated as relevant or not}
\item{check_value_relevant}{the value by which the mapped resource use
is indicated as relevant}
\item{nhs_use_column}{the name of the column indicating whether the mapped
resource use comes under the NHS or not}
\item{check_value_nhs_use}{value that is used to indicate the NHS use}
\item{analysis}{base case or secondary}
\value{
the data with the mapped resource use column added
}
\description{
Function to read the text form of resource use and replace it with
standard texts of resource use, i.e. someone can describe a GP visit as GP
surgery visit, surgery visit or general practitioner visit etc. Here all these
texts should be given in an Excel or csv file, and then the corresponding
standard form will be read from the file and used as the replacement.
}
|
FG <- function(covmats, nvec, method = c('ML', 'LS'))
{
# Implementation of the FG algorithm as described in Flury 1988 (p 178),
# with Maximum Likelihood estimation. Least Squares estimation method as
# described in Beaghen (1997) Canonical variate analysis and related
# methods with longitudinal data (PhD dissertation, Appendix 2).
# covmats: array of covariance matrices to be simultaneously diagonalized,
# created by a command such as covmats <- array(NA, dim = c(p, p, k))
# nvec: vector of sample sizes for the covariance matrices in covmats
# method: estimation method, either Maximum Likelihood ('ML', the default)
# or Least Squares ('LS')
# Returns a list with B (the common orthogonal transformation, columns
# ordered by decreasing pooled diagonal values) and diagvals (the p x k
# matrix of diagonal elements of t(B) %*% covmats[,,i] %*% B).
p <- dim(covmats)[2]
# B accumulates the common rotation; start from the identity
B <- diag(p)
# start the convergence measure large so the loop runs at least once
DIFF <- 100
k <- dim(covmats)[3]
while(DIFF > 1e-09){
B.old <- B
T.mat <- array(NA, dim = c(2, 2, k))
# cyclic Jacobi-style sweep: solve a 2x2 rotation subproblem for every
# pair of columns (m, j) of B
for(m in 1:(p - 1)){
for(j in (m + 1):p){ # m<-1; j<-2
vek <- c(m, j)
for(i in 1:k){
# project each covariance matrix onto the current (m, j) column pair
T.mat[, , i] <- t(B[, vek]) %*% covmats[, , i] %*% B[, vek]
}
# G.algorithm (defined elsewhere in the package) returns the
# optimal 2x2 rotation for this pair
J <- G.algorithm(T.mat, nvec, method = method[1])
B[, vek] <- B[, vek] %*% J
}
}
# NOTE(review): DIFF is overwritten on every pass, so after these loops
# it equals abs(B[p, p] - B.old[p, p]) only -- convergence is judged on
# the last element rather than e.g. max(abs(B - B.old)); confirm whether
# that is intended
for(i in 1:p){
for(j in 1:p){
DIFF <- abs(B[i, j] - B.old[i, j])
}
}
}
# Order the columns of B
# (by pooled diagonal values across all k matrices, largest first)
diagvals <- 0
for(i in 1:k){
diagvals <- diagvals + diag(t(B) %*% covmats[, , i] %*% B)
}
B <- B[, order(diagvals, decreasing = TRUE)]
diagvalsmat <- matrix(NA, nrow = p, ncol = k)
for(i in 1:k){
diagvalsmat[, i] <- diag(t(B) %*% covmats[, , i] %*% B)
}
return(list(B = B, diagvals = diagvalsmat))
} | /R/FG.R | no_license | tpepler/cpc | R | false | false | 1,637 | r | FG <- function(covmats, nvec, method = c('ML', 'LS'))
{
# Implementation of the FG algorithm as described in Flury 1988 (p 178),
# with Maximum Likelihood estimation. Least Squares estimation method as
# described in Beaghen (1997) Canonical variate analysis and related
# methods with longitudinal data (PhD dissertation, Appendix 2).
# covmats: array of covariance matrices to be simultaneously diagonalized,
# created by a command such as covmats <- array(NA, dim = c(p, p, k))
# nvec: vector of sample sizes for the covariance matrices in covmats
# method: estimation method, either Maximum Likelihood ('ML', the default)
# or Least Squares ('LS')
p <- dim(covmats)[2]
B <- diag(p)
DIFF <- 100
k <- dim(covmats)[3]
while(DIFF > 1e-09){
B.old <- B
T.mat <- array(NA, dim = c(2, 2, k))
for(m in 1:(p - 1)){
for(j in (m + 1):p){ # m<-1; j<-2
vek <- c(m, j)
for(i in 1:k){
T.mat[, , i] <- t(B[, vek]) %*% covmats[, , i] %*% B[, vek]
}
J <- G.algorithm(T.mat, nvec, method = method[1])
B[, vek] <- B[, vek] %*% J
}
}
for(i in 1:p){
for(j in 1:p){
DIFF <- abs(B[i, j] - B.old[i, j])
}
}
}
# Order the columns of B
diagvals <- 0
for(i in 1:k){
diagvals <- diagvals + diag(t(B) %*% covmats[, , i] %*% B)
}
B <- B[, order(diagvals, decreasing = TRUE)]
diagvalsmat <- matrix(NA, nrow = p, ncol = k)
for(i in 1:k){
diagvalsmat[, i] <- diag(t(B) %*% covmats[, , i] %*% B)
}
return(list(B = B, diagvals = diagvalsmat))
} |
ftpList <- function(ftp, fileonly = FALSE)
{
  # List the contents of an FTP directory.
  #   ftp      -- directory URL; an "ftp://" prefix and a trailing "/" are
  #               added when missing
  #   fileonly -- if TRUE, return only regular files (name/size/date);
  #               otherwise return all entries except symbolic links
  # Returns a data.frame with columns name, mode, size, date.
  # ftp starts with ftp://
  if (!grepl("^ftp://", ftp)) {
    ftp <- paste0("ftp://", ftp)
  }
  # to avoid warnings, ftp should end in "/"
  if (!grepl("/$", ftp)) {
    ftp <- paste0(ftp, "/")
  }
  x <- try(getURL(ftp, ftp.use.epsv = FALSE), silent = TRUE)
  # inherits() is the robust way to test for try() failure;
  # class(x) == "try-error" misbehaves when x has multiple classes
  if (inherits(x, "try-error")) {
    stop("No directory found matching ", ftp)
  }
  zz <- textConnection(x)
  # in case spaces are in FILE names
  x2 <- read.table(zz, as.is = TRUE, fill = TRUE)
  close(zz)
  n <- ncol(x2)
  if (n > 9) {
    # re-join name fields that were split on embedded spaces and strip
    # any trailing whitespace
    x9 <- apply(x2[, 9:n], 1, paste, collapse = " ")
    x2[, 9] <- gsub(" *$", "", x9)
    x2 <- x2[, 1:9]
  }
  colnames(x2)[c(5, 9)] <- c("size", "name")
  # first character of the permission string: "-" file, "d" dir, "l" link
  x2$mode <- substr(x2[, 1], 1, 1)
  year <- format(Sys.Date(), "%Y")
  # ADD dates... listings show a time (e.g. "12:30") instead of a year for
  # entries from the current year
  x2$date <- as.Date(paste(x2[, 6], x2[, 7], ifelse(grepl(":", x2[, 8]), year, x2[, 8])), "%b %d %Y")
  x2 <- x2[, c(9, 10, 5, 11)]
  if (fileonly) {
    subset(x2, mode == "-", c(1, 3, 4))
  } else {
    subset(x2, mode != "l") # skip links?
  }
}
| /R/ftpList.R | no_license | cstubben/genomes2 | R | false | false | 1,045 | r | ftpList<-function(ftp, fileonly = FALSE)
{
# ftp starts with ftp://
if(!grepl("^ftp://", ftp)){ftp <- paste("ftp://", ftp, sep="")}
# to avoid warnings, ftp should end in "/"
if(!grepl("/$", ftp)){ftp <- paste(ftp, "/", sep="")}
x <- try( getURL(ftp, ftp.use.epsv = FALSE), silent=TRUE)
if(class(x) == "try-error"){stop("No directory found matching ", ftp) }
zz <- textConnection(x)
# in case spaces are in FILE names
x2 <- read.table(zz, as.is = TRUE, fill=TRUE)
close(zz)
n <- ncol(x2)
if(n > 9){
x9 <- apply(x2[,9:n], 1, paste, collapse=" ")
x2[,9] <- gsub(" *$", "", x9)
x2 <- x2[, 1:9]
}
colnames(x2)[c(5,9)] <- c("size", "name")
x2$mode <- substr(x2[,1], 1,1)
year <- format( Sys.Date(), "%Y")
# ADD dates... current year has time
x2$date <- as.Date( paste( x2[,6], x2[,7], ifelse( grepl(":", x2[,8]), year, x2[,8])), "%b %d %Y")
x2 <- x2[, c(9,10,5, 11)]
if(fileonly){
subset(x2, mode == "-", c(1,3,4))
}else{
subset(x2, mode != "l") # skip links?
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.