content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
setwd("~/sync/cw-race/figures")
### For forest plot of results
### ent and inst models
### Depends on model output from FCmodels.r in working environment (objects ent.results, inst.results)
library(ggplot2)
library(dplyr)
#source("~/Dropbox/cw-race/sim.R")
###merged results from Rubin combination - alt using posterior sim pooling in cw-forest-simuncertainty.r
### Count models
# Three outcome models per race group: caseload, entry, reunification
outcomes<-3
# One row per model term (minus one); assumes b.d.tab and the other *.tab
# objects (b.ent.tab, b.reun.tab, a.d.tab, a.ent.tab, a.reun.tab) exist in the workspace
predictors<-nrow(b.d.tab)-1
# Long-format table of point estimates with 95% Wald bounds (beta +/- 1.96*SE),
# stacked as: African American caseload/entry/reunification, then the Native
# American equivalents. "var" labels must match the row order of the *.tab tables.
forest.est<-data.frame("var"=rep(c("Intercept", "Incarceration", "Child poverty",
"Unemployment", "Single parent",
"Less than HS", "Percent pop",
"Leg ideology", "Arrest", "Year",
"TANF adeq", "TANF incl",
"Medicaid incl", "SNAP incl"),outcomes*2),
"Race"=c(rep("African American", predictors*outcomes), rep("Native American",predictors*outcomes)),
"Outcome"=c(rep("Caseload", predictors), rep("Entry", predictors), rep("Reunification", predictors),
rep("Native Am Caseload", predictors), rep("Native Am Entry", predictors), rep("Native Am Reun", predictors)),
"beta"=c(b.d.tab$Beta[1:predictors], b.ent.tab$Beta[1:predictors], b.reun.tab$Beta[1:predictors],
a.d.tab$Beta[1:predictors], a.ent.tab$Beta[1:predictors], a.reun.tab$Beta[1:predictors]),
"upper"=c(b.d.tab$Beta[1:predictors]+1.96*b.d.tab$SE[1:predictors], b.ent.tab$Beta[1:predictors]+1.96*b.ent.tab$SE[1:predictors], b.reun.tab$Beta[1:predictors]+1.96*b.reun.tab$SE[1:predictors],
a.d.tab$Beta[1:predictors]+1.96*a.d.tab$SE[1:predictors], a.ent.tab$Beta[1:predictors]+1.96*a.ent.tab$SE[1:predictors], a.reun.tab$Beta[1:predictors]+1.96*a.reun.tab$SE[1:predictors]),
"lower"=c(b.d.tab$Beta[1:predictors]-1.96*b.d.tab$SE[1:predictors], b.ent.tab$Beta[1:predictors]-1.96*b.ent.tab$SE[1:predictors], b.reun.tab$Beta[1:predictors]-1.96*b.reun.tab$SE[1:predictors],
a.d.tab$Beta[1:predictors]-1.96*a.d.tab$SE[1:predictors], a.ent.tab$Beta[1:predictors]-1.96*a.ent.tab$SE[1:predictors], a.reun.tab$Beta[1:predictors]-1.96*a.reun.tab$SE[1:predictors]))
# Drop terms not displayed in the forest plot
forest.est<-forest.est%>%filter(var!="Intercept")%>%
filter(var!="Percent pop")%>%filter(var!="Year")
#
# Fix display order of terms.
# NOTE(review): "Percent pop" appears twice in this levels vector; factor()
# rejects duplicated levels in modern R -- confirm and deduplicate.
forest.est$var<-factor(forest.est$var, levels=c("Intercept", "Incarceration", "Child poverty",
"Unemployment", "Single parent",
"Less than HS", "Percent pop",
"Arrest",
"TANF adeq", "TANF incl",
"Medicaid incl", "SNAP incl",
"Leg ideology","Year", "Percent pop"))
# Reverse so the first term appears at the top of a horizontal forest plot
forest.est$var = with(forest.est, factor(var, levels = rev(levels(var))))
# Race-specific subsets for separate panels
forest.b<-forest.est%>%filter(Race=="African American")
forest.a<-forest.est%>%filter(Race=="Native American")
# forest.est$varname<-factor(forest.est$varname, levels(forest.est$varname)[c(1:9, 18,10:14, 15:17)])
|
/cw-forest.r
|
no_license
|
f-edwards/cw-race
|
R
| false
| false
| 3,298
|
r
|
setwd("~/sync/cw-race/figures")
### For forest plot of results
### ent and inst models
### Depends on model output from FCmodels.r in working environment (objects ent.results, inst.results)
library(ggplot2)
library(dplyr)
#source("~/Dropbox/cw-race/sim.R")
###merged results from Rubin combination - alt using posterior sim pooling in cw-forest-simuncertainty.r
### Count models
# Three outcome models per race group: caseload, entry, reunification
outcomes<-3
# One row per model term (minus one); assumes b.d.tab and the other *.tab
# objects (b.ent.tab, b.reun.tab, a.d.tab, a.ent.tab, a.reun.tab) exist in the workspace
predictors<-nrow(b.d.tab)-1
# Long-format table of point estimates with 95% Wald bounds (beta +/- 1.96*SE),
# stacked as: African American caseload/entry/reunification, then the Native
# American equivalents. "var" labels must match the row order of the *.tab tables.
forest.est<-data.frame("var"=rep(c("Intercept", "Incarceration", "Child poverty",
"Unemployment", "Single parent",
"Less than HS", "Percent pop",
"Leg ideology", "Arrest", "Year",
"TANF adeq", "TANF incl",
"Medicaid incl", "SNAP incl"),outcomes*2),
"Race"=c(rep("African American", predictors*outcomes), rep("Native American",predictors*outcomes)),
"Outcome"=c(rep("Caseload", predictors), rep("Entry", predictors), rep("Reunification", predictors),
rep("Native Am Caseload", predictors), rep("Native Am Entry", predictors), rep("Native Am Reun", predictors)),
"beta"=c(b.d.tab$Beta[1:predictors], b.ent.tab$Beta[1:predictors], b.reun.tab$Beta[1:predictors],
a.d.tab$Beta[1:predictors], a.ent.tab$Beta[1:predictors], a.reun.tab$Beta[1:predictors]),
"upper"=c(b.d.tab$Beta[1:predictors]+1.96*b.d.tab$SE[1:predictors], b.ent.tab$Beta[1:predictors]+1.96*b.ent.tab$SE[1:predictors], b.reun.tab$Beta[1:predictors]+1.96*b.reun.tab$SE[1:predictors],
a.d.tab$Beta[1:predictors]+1.96*a.d.tab$SE[1:predictors], a.ent.tab$Beta[1:predictors]+1.96*a.ent.tab$SE[1:predictors], a.reun.tab$Beta[1:predictors]+1.96*a.reun.tab$SE[1:predictors]),
"lower"=c(b.d.tab$Beta[1:predictors]-1.96*b.d.tab$SE[1:predictors], b.ent.tab$Beta[1:predictors]-1.96*b.ent.tab$SE[1:predictors], b.reun.tab$Beta[1:predictors]-1.96*b.reun.tab$SE[1:predictors],
a.d.tab$Beta[1:predictors]-1.96*a.d.tab$SE[1:predictors], a.ent.tab$Beta[1:predictors]-1.96*a.ent.tab$SE[1:predictors], a.reun.tab$Beta[1:predictors]-1.96*a.reun.tab$SE[1:predictors]))
# Drop terms not displayed in the forest plot
forest.est<-forest.est%>%filter(var!="Intercept")%>%
filter(var!="Percent pop")%>%filter(var!="Year")
#
# Fix display order of terms.
# NOTE(review): "Percent pop" appears twice in this levels vector; factor()
# rejects duplicated levels in modern R -- confirm and deduplicate.
forest.est$var<-factor(forest.est$var, levels=c("Intercept", "Incarceration", "Child poverty",
"Unemployment", "Single parent",
"Less than HS", "Percent pop",
"Arrest",
"TANF adeq", "TANF incl",
"Medicaid incl", "SNAP incl",
"Leg ideology","Year", "Percent pop"))
# Reverse so the first term appears at the top of a horizontal forest plot
forest.est$var = with(forest.est, factor(var, levels = rev(levels(var))))
# Race-specific subsets for separate panels
forest.b<-forest.est%>%filter(Race=="African American")
forest.a<-forest.est%>%filter(Race=="Native American")
# forest.est$varname<-factor(forest.est$varname, levels(forest.est$varname)[c(1:9, 18,10:14, 15:17)])
|
#' @useDynLib crestree
NULL
##' Sample pptree objects using different seeds
##'
##' Runs ppt.tree several times with different random seeds, in parallel.
##' @param ... parameters passed on to ppt.tree
##' @param n.cores number of cores used for parallel execution
##' @param n.samples a number of seed samplings.
##' @param seed optional seed that makes the sampling of seeds reproducible
##' @param seeds a vector of seeds to use. Overwrites n.samples.
##' @return a list of pptree objects
##' @export
mppt.tree <- function( ... , n.cores=parallel::detectCores()/2,n.samples=n.cores, seed=NULL,seeds=NULL) {
  if(!is.null(seed)) {
    set.seed(seed)
  }
  # draw per-run seeds over the full integer range unless given explicitly
  if(is.null(seeds)) {
    seeds <- round(runif(n.samples,0,.Machine$integer.max))
  }
  # qualify mclapply with its namespace: the 'parallel' package is never attached
  # by this file (bootstrap.ppt below already uses parallel::mclapply)
  parallel::mclapply(seeds,function(i) ppt.tree(..., seed=i),mc.cores=n.cores)
}
##' Sample pptree objects using bootstrap
##'
##' Fits ppt.tree on bootstrap resamples of the cells (columns) of X.
##' @param ... parameters passed on to ppt.tree
##' @param X expression matrix of genes (rows) and cells (columns).
##' @param M number of principal points of pptree.
##' @param n.cores number of cores used for parallel execution
##' @param n.samples a number of seed samplings.
##' @param seed optional seed that makes the resampling reproducible
##' @param replace sampling with replacement (logical).
##' @return a list of pptree objects
##' @export
bootstrap.ppt <- function( ..., X, M=ncol(X),n.cores=parallel::detectCores()/2,n.samples=n.cores, seed=NULL,replace=TRUE) {
  if(!is.null(seed)) {
    set.seed(seed)
  }
  parallel::mclapply(seq_len(n.samples),function(i) {
    # bootstrap sample of M cells, also used to initialize the principal points
    b.X <- X[,sample(1:ncol(X),M,replace=replace)]
    ppt.tree(..., X=b.X, M=M, init=b.X)
  },mc.cores=n.cores)
}
##' Pairwise Pearson correlations between columns of matrices A and B
##'
##' Columns are centered once and the full correlation matrix is obtained with
##' a single cross-product, avoiding a loop over column pairs.
##' @export
cor.mat <- function(A,B){
  Ac <- sweep(A, 2, colMeans(A), "-")
  Bc <- sweep(B, 2, colMeans(B), "-")
  numerator <- crossprod(Ac, Bc)
  denominator <- sqrt(tcrossprod(colSums(Ac^2), colSums(Bc^2)))
  return(numerator/denominator)
}
##' Pairwise euclidean distances between columns of matrices A and B
##'
##' Uses the expansion ||a-b||^2 = ||a||^2 + ||b||^2 - 2<a,b>; tiny negative
##' values caused by floating point cancellation yield NaN under sqrt and are
##' reset to zero.
euclidean.mat <- function(A,B){
  a2 <- colSums(A^2)
  b2 <- colSums(B^2)
  suppressWarnings(dmat <- sqrt(outer(a2, b2, "+") - 2*crossprod(A,B)))
  dmat[is.na(dmat)] <- 0
  return(dmat)
}
##' Weighted correlation between each column of matrix X and a vector y,
##' using observation weights w (normalized internally to sum to one).
wcr <- function(X,y,w){
  wn <- w/sum(w)
  # weighted means of every column of X and of y
  xbar <- colSums(X*wn)
  ybar <- sum(y*wn)
  # center around the weighted means
  Xc <- sweep(X, 2, xbar, "-")
  yc <- y - ybar
  covs <- (yc*wn) %*% Xc
  norms <- sqrt(colSums(Xc^2*wn)*sum(yc^2*wn))
  ratio <- covs/norms
  return(ratio[1,])
}
##' Reconstruction of the tree
##'
##' Using SimplePPT approach to model principal tree (pptree) of the data
##' @name ppt.tree
##' @param X gene (row) vs cell (columns) expression matrix
##' @param W optional gene vs cell weight matrix; the scalar NA default disables weighting
##' @param emb embdedding to visalize cells and principal tree together
##' @param M number of principal points to use (more than zero, no more than number of cells)
##' @param init matrix of initial gene coordinates of principal points
##' @param plot plot or not intermediate trees
##' @param output logical; print iteration number and convergence error
##' @param lambda penalty for the tree length, as used in SimplePPT
##' @param sigma parameter as used in SimplePPT
##' @param seed used to make initial assignment of principal points to a subset of cells
##' @param n.steps number of iteraions
##' @param err.cut stop algorithm if proximity of principal points between iterations less than err.cut
##' @param metrics metrics used to calculated distances between cells or principal points. "euclidean" or "cosine"
##' @param p.power if cosine metrics used, option p.power allows to use (1-cor)^p.power (p.power=1 by default)
##' @param knn if not NULL, retain only the knn largest responsibilities per principal point
##' @return pptree object
##' @export
ppt.tree <- function(X,W=NA,emb=NA,M,init=NULL,plot=TRUE,output=TRUE,lambda=1e1,sigma=0.1,seed=NULL,n.steps=50,err.cut = 5e-2,metrics="cosine",p.power=1,knn=NULL,...) {
  if ( metrics!="euclidean" & metrics!="cosine" ){ stop("metrics parameter is neither 'euclidean' nor 'cosine'") }
  # fix: also reject M == 0 (docs require M > 0)
  if ( M <= 0 | M > ncol(X)) { stop("M should be more than zero and less or equal than the number of cells") }
  # emb is either the scalar NA default or a coordinate matrix/data.frame;
  # test the scalar case explicitly so `if` always gets a length-1 condition
  # (is.na(emb) on a matrix errors in `if` under R >= 4.2)
  if (!(length(emb) == 1 && is.na(emb[1]))){
    if ( sum(!colnames(X)%in%rownames(emb))>0 ) { stop("column names of gene expression matrix (X) are not consistent with row names of embedding (emb)") }
  }
  X <- as.matrix(X)
  # weighted mode unless W is left at its scalar NA default (same length-1 reasoning)
  wt <- !(length(W) == 1 && is.na(W[1]))
  if (!wt) {
    W <- matrix(1,nrow=nrow(X),ncol=ncol(X))
  }else{
    W <- as.matrix(W[rownames(X),colnames(X)])
  }
  # initialize principal points from a random subset of cells unless init is given
  if(is.null(init)){
    if(!is.null(seed)){
      set.seed(seed)
    }
    F.mat <- X[,sample(1:ncol(X),M)]; rownames(F.mat) <- NULL; colnames(F.mat) <- NULL;
  } else {
    F.mat <- init;
  }
  # rwm keeps the raw row sums of W (used for weighted PP-PP correlations below);
  # W itself is row-normalized
  rwm <- matrix(rowSums(W),nrow=nrow(F.mat),ncol(F.mat))
  W <- W/rowSums(W)*ncol(W);
  # iterate until convergence (err <= err.cut) or n.steps is reached
  j=1; err=100;
  while(j <= n.steps & err > err.cut) {
    # calculate R: soft responsibilities of cells (rows) for principal points (columns)
    if (metrics=="euclidean"){
      # plain or column-wise weighted euclidean distance
      if (wt==FALSE) {
        R <- euclidean.mat(F.mat,X)^p.power
      }else{
        R <- do.call(cbind,lapply(1:ncol(X),function(i) {
          sqrt(colSums(((F.mat-X[,i])^2)*W[,i]))^p.power
        }))
      }
      R <- t(exp(-R/sigma))
    }else if(metrics=="cosine"){
      # simple correlation or column-wise weighted correlation.
      if (wt==FALSE) {
        cordist <- (1-cor.mat(F.mat,X))^p.power
      }else{
        cordist <- do.call(cbind,lapply(1:ncol(X),function(i) {
          (1-matWVCorr(F.mat,X[,i],W[,i]))^p.power
          #(1-wcr(F.mat,X[,i],W[,i]))^p.power
        }))
        colnames(cordist) <- colnames(X)
      }
      cordist <- (cordist-mean(cordist))
      R <- t(exp( -(cordist)/sigma ))
    }
    R[is.na(R) | is.nan(R)] <- 0
    # optionally keep only the knn strongest responsibilities per principal point
    if (!is.null(knn)){
      R = apply(R,2,function(x){
        x[ x < sort(x,decreasing = TRUE)[knn] ] <- 0
        x
      })
    }
    # normalize responsibilities per cell
    R <- R/rowSums(R)
    R[is.na(R) | is.nan(R)] <- 0
    # calculate distance between principal points
    if (metrics=="euclidean"){
      d <- euclidean.mat(F.mat,F.mat)
    }else if (metrics=="cosine"){
      if (wt==FALSE) {
        d <- 1-cor.mat(F.mat,F.mat)
      }
      else{
        d <- do.call(cbind,lapply(1:ncol(F.mat),function(i) {
          (1-matWVCorr(F.mat,F.mat[,i],rwm[,i]))^p.power
          #(1-wcr(F.mat,F.mat[,i],rwm[,i]))^p.power
        }))
      }
      d <- abs(d)^p.power*sign(d)
    }
    # tree backbone: minimum spanning tree over principal point distances
    bt <- minimum.spanning.tree(graph.adjacency(as.matrix(d),weighted=T,mode="undirected"))
    B <- as.matrix(get.adjacency(bt))
    D <- diag(nrow(B))*rowSums(B)
    L <- D-B # graph Laplacian of the tree
    M <- L*lambda + diag(ncol(R))*colSums(R)
    old.F <- F.mat;
    #F.mat <- (X%*%R) %*% chol2inv(chol(M))
    F.mat <- t(solve( t(M),t((X*W)%*%R) ))# slightly faster, 15%
    F.mat <- as.matrix(F.mat)
    # relative displacement of principal points between iterations; computed
    # unconditionally so the err.cut stopping rule also works with output=FALSE
    # (fix: square the differences before summing, not the column sums)
    err <- max(sqrt(colSums((F.mat-old.F)^2))/apply(F.mat,2,function(x)sqrt(sum(x^2))))
    if (plot==TRUE){plotppt(list(F=F.mat,B=B,R=R,L=L,lambda=lambda,sigma=sigma),emb,...)}
    if (output==TRUE){
      cat(j,":")
      cat("\n")
      cat(err,"\n")
    }
    j=j+1
  }
  if (plot==TRUE){plotppt(list(F=F.mat,B=B,R=R,L=L,lambda=lambda,sigma=sigma),emb,...)}
  g = graph.adjacency(B,mode="undirected");tips = V(g)[igraph::degree(g)==1];forks = V(g)[igraph::degree(g)>2]
  # score: data fit, responsibility entropy, and tree length penalty
  score = c( sum( t(1-cor.mat(F.mat,X))*R)/nrow(R), sigma/nrow(R)*sum(R*log(R),na.rm=T),lambda/2*sum(d*B))
  colnames(R) <- colnames(F.mat) <- rownames(B) <- colnames(B) <- as.character(1:nrow(B))
  # fix: the original returned `cells=vi`, an undefined name inside this function;
  # the cells used for reconstruction are the columns of X.
  # NOTE: M here is the linear system matrix of the last iteration (it shadows the
  # principal point count argument).
  invisible(list(score=score,F=F.mat,B=B,R=R,L=L,DT=d,lambda=lambda,sigma=sigma,n.steps=n.steps,metrics=metrics,M=M,cells=colnames(X),tips=tips,forks=forks))
}
##' Estimate optimal sigma parameter.
##'
##' Using cross-validation criteria to select sigma parameter.
##' @param X gene (rows) vs cell (columns) expression matrix
##' @param W optional weight matrix passed on to ppt.tree
##' @param M number of principal points in pptree modeling
##' @param n.sample number of sampled trees per each sigma
##' @param sig.lims a vector of sigma for which cross-validation estimated
##' @param metrics similarity measure. "cosine" or "euclidean"
##' @param plot logical; show the CV curve against sigma
##' @param n.cores number of cores used to explore sigma values in parallel
##' @return optimal sigma parameter
##' @export
sig.explore <- function(X,W=NA,M=as.integer(ncol(X)/2),n.sample=1,sig.lims=seq(0.01,0.2,0.03),metrics="cosine",p.power = 1,plot=TRUE,err.cut=5e-1,n.steps=20,n.cores=1){
  # fix: X has no default, so is.na(X) on a missing argument (or a matrix) errors;
  # check for absence explicitly instead
  if (missing(X)) {stop("matrix X should be specified")}
  if (is.na(M)) {stop("number of principal points M should be specified")}
  for (i in seq_len(n.sample)){
    # for each sigma fit an unpenalized tree (lambda=0) and score how well the
    # principal points explain the cells: per-cell best correlation with any PP
    cv <- do.call(rbind,parallel::mclapply(sig.lims,function(sig){
      x <- ppt.tree(X = X,W = W,M=M,err.cut=err.cut,metrics=metrics,n.steps=n.steps,p.power = p.power,lambda=0,sigma=sig,plot=FALSE,output=FALSE,seed=sample(100,1))
      y <- cor(X,x$F)
      apply(y,1,max)
    },mc.cores = n.cores))
    # accumulate scores over the repeated samples
    if (i==1){
      cv.tot <- cv
    }else{
      cv.tot <- cv.tot + cv
    }
  }
  cv.tot <- cv.tot/n.sample
  # optimal sigma maximizes the average CV score across cells
  sig.opt <- sig.lims[which.max(apply(cv.tot,1,mean))]
  if (plot==TRUE){
    par(mfrow=c(1,1),mar=c(5,5,1,1))
    plot( sig.lims, apply(cv.tot,1,mean),lty=2,lwd=2,type="l",xlab="sigma",ylab="CV",cex.lab=1.5)
    points( sig.lims, apply(cv.tot,1,mean),pch=19,cex=1)
    abline(v=sig.opt,col="red",lty=2)
  }
  return(sig.opt)
}
##' Explore lambda
##'
##' Explores multiple lambda and choose the optimal
##' @param X gene (rows) vs cell (columns) expression matrix
##' @param M number of principal points in pptree modeling
##' @param sigma fixed parameter sigma used in pptree modeling
##' @param emb embdedding to visalize cells and principal tree together. If emb is given than pptrees for a range of lambda are shown
##' @return matrix of explored lambda values with tips, tree length and entropy per row
##' @export
lambda.explore <- function(X=NA,M=ncol(X),sigma=0.1,emb=NA,metrics="cosine",tips.min=2,tips.max=10,base=2,lambda.init=100,err.cut=5e-3,n.steps=40,p.power=1){
  # X defaults to scalar NA; with a matrix supplied, is.na(X) is a matrix and
  # errors inside `if` (R >= 4.2), so test the scalar case explicitly
  if (length(X) == 1 && is.na(X[1])) {stop("matrix X should be specified")}
  if (is.na(M)) {stop("number of principal points M should be specified")}
  cells <- colnames(X)
  min.reached <- FALSE;max.reached <- FALSE
  lambda <- round(lambda.init)
  tr.list <- list()
  # scan lambda geometrically (factor `base`) until tip counts hit both extremes:
  # at least tips.max tips at the small end, at most tips.min at the large end
  while (min.reached==FALSE | max.reached==FALSE){
    print(paste("lambda:",round(lambda,2) ))
    # fix: pass the sigma argument through (the original referenced an undefined `sig`)
    tr <- ppt.tree(X=X,M=M,lambda=lambda,sigma=sigma,err.cut=err.cut,metrics=metrics,n.steps=n.steps,p.power = p.power,plot=FALSE,output=FALSE,seed=sample(100,1))
    tr <- setroot(tr,root=as.character(tr$tips[1]))
    tr.list[[as.character(round(lambda,1))]] <- tr
    tips <- length(tr$tips);
    len <- sum(tr$pp.segments$d)
    # entropy-like summary of the segment length distribution
    entropy.ind <- sum(tr$pp.segments$d*log(tr$pp.segments$d))
    # add entry to the lambda.info matrix, keeping rows sorted by lambda
    if (lambda == lambda.init){
      lambda.info <- matrix(c(lambda=lambda,tips=tips,length=len,entropy=entropy.ind),nrow=1,ncol=4)
    }else{
      if (lambda < lambda.info[1,1]){
        lambda.info <- rbind(c(lambda=lambda,tips=tips,length=len,entropy=entropy.ind),lambda.info)
      }else{
        lambda.info <- rbind(lambda.info,c(lambda=lambda,tips=tips,length=len,entropy=entropy.ind))
      }
    }
    # update lambda: first decrease until tips.max is reached, then increase until tips.min
    if (min.reached == FALSE & tips < tips.max){
      lambda <- lambda/base
    }else if (min.reached == FALSE & tips >= tips.max){
      min.reached <- TRUE
      lambda <- lambda.info[nrow(lambda.info),1]*base
    }else if (tips <= tips.min ){
      max.reached <- TRUE
    }else{
      lambda <- lambda.info[nrow(lambda.info),1]*base
    }
  }
  # optimal lambda minimizes entropy per tip
  ent.per.tip <- lambda.info[,4]/lambda.info[,2]
  i.opt <- which.min(ent.per.tip)
  # same scalar-NA check as for X: emb may be a coordinate matrix
  if (!(length(emb) == 1 && is.na(emb[1]))){
    par(mfrow=c(2,2))
    par(mar=c(5,5,1,1))
    plot( lambda.info[,1], ent.per.tip,log="x",lty=2,lwd=2,type="l",xlab="lambda",ylab="entropy per tip",cex.lab=1.5)
    points(lambda.info[,1], ent.per.tip,pch=19,cex=1)
    abline(v=lambda.info[i.opt,1],col="red",lty=2)
    par(mar=rep(1,4))
    # show the optimal tree (red box) plus one below and one above the optimum
    lamb <- lambda.info[i.opt,1]; lamb <- round(lamb,1)
    plotppt(tr.list[[as.character(lamb)]],emb,cex.tree = 0.1,lwd.tree = 3,main=paste("lambda =",lamb))
    box(col="red",lwd=3);
    lamb <- lambda.info[median(1:i.opt),1]; lamb <- round(lamb,1)
    plotppt(tr.list[[as.character(lamb)]],emb,cex.tree = 0.1,lwd.tree = 3,main=paste("lambda =",lamb))
    lamb <- lambda.info[median((i.opt+1):nrow(lambda.info)),1]; lamb <- round(lamb,1)
    plotppt(tr.list[[as.character(lamb)]],emb,cex.tree = 0.1,lwd.tree = 3,main=paste("lambda =",lamb))
  }
  return(lambda.info)
}
##' Visualize pptree onto embedding
##'
##' Projects pptree onto embedding (e.g. tSNE). Cells can be colored by an
##' expression pattern; principal points can be colored by a per-point pattern.
##' @name plotppt
##' @param r - pptree object
##' @param emb - (x,y) coordinates data frame (e.g Rtsne $Y result)
##' @param F - coordinates of principal points (optional; otherwise computed as responsibility-weighted averages of cell positions)
##' @param gene - a gene to show expression of (optional)
##' @param mat - gene vs cell expression matrix (needed if option 'gene' is activated)
##' @param pattern.cell - numeric profile of a quantity for each cell (e.g. expression of a gene or cell cycle stage)
##' @param pattern.tree - numeric profile of a quantity for each principal point (e.g. expression of a gene or cell cycle stage)
##' @param cex.main - cex of points
##' @param cex.col - color of points
##' @param cex.title - cex of title
##' @param cex.tree - cex of principal points
##' @param tips - logical, to draw indecies of tips of the tree. Usefull before usage of cleanup.branches()
##' @param forks - logical, to draw indices of fork principal points
##' @param subtree - optional subtree (with $seg) to highlight; the rest of the tree is greyed out
##' @param pallete - color palette function used for pattern coloring; defaults to a blue-grey-red ramp
##' @export
plotppt <- function(r,emb,F=NULL, gene=NULL, main=gene, mat=NULL, pattern.cell=NULL, pattern.tree=NULL,
cex.col=NA, tree.col = NULL,
cex.main=0.5, cex.title=1,
cex.tree=1.5,lwd.tree=1,par=TRUE,tips=FALSE,forks=FALSE,subtree=NA,pallete=NULL,...) {
# sanity check: every cell used in the tree must be present in the embedding
if ( sum(!rownames(r$R)%in%rownames(emb))>0 ) { stop("cell names used for tree reconstruction are not consistent with row names of embedding (emb)") }
# default cell color when none supplied
if (sum(!is.na(cex.col))==0 ) {cex.col=rep("grey70",nrow(emb)); names(cex.col) <- rownames(emb)}
# vi marks embedding rows that participated in tree reconstruction
vi = rownames(emb)%in%rownames(r$R); names(vi) <- rownames(emb)
# principal point 2D coordinates: responsibility-weighted mean of cell positions
if(is.null(F)) { F <- t(t(t(emb[rownames(r$R),])%*%r$R)/colSums(r$R)) }
# derive pattern.cell from a single gene of mat if requested
if ( is.null(pattern.cell) & !is.null(gene) ){
if (is.null(mat)) { stop("mat expression matrix should be defined together with gene parameter") }
if (gene %in% rownames(mat) == FALSE) { stop("gene is not in mat matrix") }
if ( sum(!rownames(r$R) %in% colnames(mat)) > 0 ) { stop("cell names used for tree reconstruction are not consistent with mat column names") }
pattern.cell = mat[gene,rownames(r$R)]#mat[gene,rownames(r$R)]
}
# 1024-level palette shared by cell and principal point coloring
if (is.null(pallete)) {pallete <- colorRampPalette(c("blue","gray50","red"))(1024)}else{pallete <- pallete(1024)}
if ( !is.null(pattern.tree) & length(pattern.tree) != ncol(r$R) ) { stop("length of pattern.tree vector is inconsistent with cell number used for tree reconstruction") }
# aggregate a per-cell pattern into a per-principal-point pattern via responsibilities
if ( !is.null(pattern.cell) & is.null(pattern.tree) ){
if ( sum(!names(pattern.cell) %in% rownames(r$R)) > 0 ){ stop("pattern.cell vector should contain names for all cells used to reconstruct the tree")}
pattern.cell <- pattern.cell[rownames(r$R)] ## is it correct?
aggr <- colSums(r$R)
pattern.tree <- t(r$R)%*%pattern.cell[rownames(r$R)]/aggr
pattern.tree[aggr==0] <- NA
}
if (is.null(tree.col)) {tree.col = "black"}
# map pattern.cell values onto the palette (linear rescale to 1..1024)
if( !is.null(pattern.cell) ){
cex.col <- rep("black",nrow(emb)); names(cex.col) <- rownames(emb)
cex.col[names(pattern.cell)] <- pallete[round((pattern.cell-min(pattern.cell))/diff(range(pattern.cell))*1023)+1]
#cex.col <- colorRampPalette(c("blue","gray50","red"))(1024)[round((pattern.cell-min(pattern.cell))/diff(range(pattern.cell))*1023)+1]
}
# map pattern.tree values onto the palette (NA-safe range)
if ( !is.null(pattern.tree) ){
tree.col <- pallete[round((pattern.tree-min(pattern.tree,na.rm=T))/diff(range(pattern.tree,na.rm = T))*1023)+1]
#r$fitting$pp.fitted[gene,]
}
# grey out principal points and hide cells outside the requested subtree
# NOTE(review): subtree is tested with is.na(); a list-valued subtree makes this a
# length>1 condition -- confirm callers only rely on the scalar NA default here
if (!is.na(subtree)){
#cex.col[rownames(r$cell.summary)][!r$cell.summary$seg %in% subtree$seg] <- "black"
tree.col[!r$pp.info$seg %in% subtree$seg] <- "grey80"
vi[vi==TRUE][rownames(r$cell.summary)][!r$cell.summary$seg %in% subtree$seg] <- FALSE
}
if ( sum(names(cex.col)%in%rownames(emb))==0 ) {stop('cex.col names do not match row names of emb')}
# final per-cell colors aligned with the embedding rows
cols <- rep("black",nrow(emb)); names(cols) <- rownames(emb)
cols[ intersect(names(cex.col),rownames(emb)) ] <- cex.col[intersect(names(cex.col),rownames(emb))]
if (par==TRUE) {par(mar=rep(1,4))}
# draw cells (filled circles = cells used in the tree; faded when a tree pattern is shown)
plot(emb,pch=ifelse(vi,19,1),cex=cex.main,col = adjustcolor(cols,ifelse(is.null(pattern.tree),1,0.1)),xlab=NA,ylab=NA,xaxt='n',yaxt='n',main=main,cex.main=cex.title,font.main=1)
# then the tree: edges first, principal points on top
al <- get.edgelist(graph.adjacency(r$B>0))
al <- matrix(as.integer(al),ncol=2)
segments(F[1,al[,1]],F[2,al[,1]],F[1,al[,2]],F[2,al[,2]],lwd=lwd.tree)
points(t(F),pch=21,
col=tree.col,bg=tree.col,cex=cex.tree)
# optionally label tip principal points, offset away from their single neighbor
if (tips==TRUE){
coord = do.call(rbind,lapply(r$tips,function(tip){
x1 = F[1,tip]; y1 = F[2,tip]
x2 = F[1,which(r$B[tip,]>0)]; y2 = F[2,which(r$B[tip,]>0)]
xnew = x1 + 1.5*sign(x1-x2)#(1+sign(x1-x2)/0.5)*sign(x1-x2)#alpha*(x1-x2)
ynew = y1 + 1.5*sign(y1-y2)#xnew*(y2-y1)/(x2-x1) + (y1*x2-y2*x1)/(x2-x1)
c(xnew,ynew)
}))
text((coord),col=1,cex=1,adj=c(0,0),labels=r$tips,font=2);#text(t(F[, r$tips ]),col=1,cex=1.2,adj=c(0,0),labels=r$tips);
}
# optionally label fork principal points at their own position
if (forks==TRUE & length(r$forks) > 0){
coord = do.call(rbind,lapply(r$forks,function(fork){
x1 = F[1,fork]; y1 = F[2,fork]
x2 = F[1,which(r$B[fork,]>0)]; y2 = F[2,which(r$B[fork,]>0)]
xnew = x1 #+ 1.5*sign(x1-x2)#(1+sign(x1-x2)/0.5)*sign(x1-x2)#alpha*(x1-x2)
ynew = y1 #+ 1.5*sign(y1-y2)#xnew*(y2-y1)/(x2-x1) + (y1*x2-y2*x1)/(x2-x1)
c(xnew,ynew)
}))
text((coord),col=1,cex=1,adj=c(0,0),labels=r$forks,font=2);#text(t(F[, r$tips ]),col=1,cex=1.2,adj=c(0,0),labels=r$tips);
}
#legend(x="bottomright",legend=c(paste("lambda=",r$lambda[1],sep=""),paste("sigma=",r$sigma[1],sep="")))
}
##' Visualize list of pptree objects onto embedding
##'
##' Draws the embedding once, then overlays the edges of every tree in the list
##' (e.g. the result of bootstrap.ppt or mppt.tree) on top of it.
##' @param rl list of pptree objects (as calculated using bootstrap.tree or mppt.tree)
##' @param emb (x,y) coordinates data frame (e.g Rtsne $Y result)
##' @param cols vector of colors for cells in emb.
##' @export
plotpptl <- function(rl,emb, cols=adjustcolor(1,alpha=0.3),alpha=1, lwd =1, ...) {
  # single panel with compact margins
  par(mfrow=c(1,1), mar = c(3.5,3.5,2.0,0.5), mgp = c(2,0.65,0), cex = 0.8);
  plot(emb,col=cols,cex=1,pch=19,xlab="",ylab="", ...)
  lapply(rl,function(tree) {
    # principal point coordinates: responsibility-weighted average of cell positions
    pp.xy <- t(t(t(emb[rownames(tree$R),])%*%tree$R)/colSums(tree$R))
    edge.list <- get.edgelist(graph.adjacency(tree$B>0))
    edge.list <- matrix(as.integer(edge.list),ncol=2)
    # overlay the tree edges
    segments(pp.xy[1,edge.list[,1]],pp.xy[2,edge.list[,1]],pp.xy[1,edge.list[,2]],pp.xy[2,edge.list[,2]],lwd=lwd,col=adjustcolor("black",alpha))
  })
}
##' Remove spurious branches of pptree
##'
##' Iteratively prunes short terminal branches (leaf-to-fork paths) and
##' explicitly requested tips until no prunable branch remains.
##' @param r ppt.tree result
##' @param tips.remove vector of tips indices to remove
##' @param min.branch.length remove all branches with length less or equal than min.branch.length principal points
##' @return modified ppt.tree object with cleaned up structure
##' @export
cleanup.branches <- function(r,tips.remove=NULL,min.branch.length=3) {
#colnames(r$F) <- NULL; colnames(r$B) <- rownames(r$B) <- NULL;
# prune repeatedly: removing one branch can expose new short branches
repeat {
g <- graph.adjacency(r$B>0,mode="undirected")
# leaves: degree-1 principal points; branches: forks (degree > 2)
leaves <- V(g)[igraph::degree(g)==1]
branches <- V(g)[igraph::degree(g)>2]
# graph distance from every leaf to every fork
bd <-shortest.paths(g,v=leaves,to=branches)
# leaves within min.branch.length of a fork, plus explicitly requested tips
ivi <- which(apply(bd,1,min)<=min.branch.length)
ivi <- unique( c(ivi, which( leaves %in% tips.remove) ) )
if(length(ivi)==0) { break }
# collect every principal point on each leaf-to-nearest-fork path,
# excluding the fork itself (which stays in the tree)
toremove <- c();
for(x in ivi) {
bdp <- get.shortest.paths(g,leaves[x],to=branches[which.min(bd[x,])])
toremove <- c(toremove,bdp$vpath[[1]][-length(bdp$vpath[[1]])])
}
# remove from the graph (B)
r$B <- r$B[-toremove,-toremove]
# remove from F
r$F <- r$F[,-toremove];
# remove from lRu
# NOTE(review): lRu is not created by ppt.tree in this file -- confirm which
# upstream step adds it before relying on this line
r$lRu <- r$lRu[,-toremove]
# remove from R and renormalize
r$R <- r$R[,-toremove];
r$R <- r$R/rowSums(r$R);
}
# relabel remaining principal points 1..n and recompute tips/forks
colnames(r$F) <- colnames(r$B) <- rownames(r$B) <- as.character(1:nrow(r$B));
g = graph.adjacency(r$B,mode="undirected");r$tips = V(g)[igraph::degree(g)==1];r$forks = V(g)[igraph::degree(g)>2]
r
}
##' Orient the tree by setting up the root
##'
##' Assign root, pseudotime and segment to each principal point of the tree
##' @param r pptree object
##' @param root root principal point (plotppt(tips=TRUE,..) can be used to visualize candidate tips for a root)
##' @param plot currently unused by this function
##' @return modified ppt.tree object with new fields r$pp.info (estimated pseudotime and branch of principal points), r$pp.segments (segments information), r$root (root id).
##' @export
setroot <- function(r,root=NULL,plot=TRUE) {
if (is.null(root)) { stop("Assign correct root number") }
if ( ! root %in% r$tips ) {stop("Root should be one of the tree tips")}
# calculate time of each PP
# edge lengths: euclidean or offset cosine distance between principal points;
# small constants keep the weights strictly positive for the weighted graph
if (r$metrics=="euclidean"){d <- 1e-6+euclidean.mat(r$F,r$F)
}else if (r$metrics=="cosine"){
d <- abs( 1e-2 + 1-cor.mat(r$F,r$F))
}
g <- graph.adjacency(r$B*d,weighted=T,mode="undirected")
# pseudotime = weighted shortest-path distance from the root; seg is filled below
pp.info <- data.frame( cbind( V(g),as.double(shortest.paths(g,root,V(g))),rep(0,length(V(g))) ));
colnames(pp.info)=c("PP","time","seg")
# infer all segments (and put in segs) of the tree:
# a segment is a path between two nodes of degree != 2 (tips or forks)
# that contains no other such node in its interior
nodes <- V(g)[ igraph::degree(g)!=2 ]
pp.segs = data.frame(n=numeric(),from=character(),to=character(),d=numeric())
for (i in 1:(length(nodes)-1) ){
for (j in (i+1):length(nodes)){
node1 = nodes[i];node2=nodes[j];
path12 = unlist(get.shortest.paths(g,from=as.character(node1),to=as.character(node2)))
# exactly two degree!=2 nodes on the path means node1-node2 bound one segment
if ( sum(nodes %in% path12) == 2 ) {
from = node1$name;to=node2$name
# orient the segment from the endpoint nearer the root
if ( !is.null(root)){
path_root = shortest.paths(g,root,c(node1,node2))
from = colnames(path_root)[which.min(path_root)]
to = colnames(path_root)[which.max(path_root)]
}
pp.info[path12,]$seg = nrow(pp.segs)+1
pp.segs=rbind(pp.segs,data.frame(n=nrow(pp.segs)+1,from=from,to=to,d=shortest.paths(g,as.character(node1),as.character(node2))[1]))
}}}
# one distinct color per segment, propagated to its member principal points
pp.segs$color=rainbow(nrow(pp.segs))
pp.info$color=pp.segs$color[pp.info$seg]
r$pp.segments <- pp.segs;
r$root <- root;
r$pp.info <- pp.info
r
}
##' Project cells onto the principal tree
##'
##' Places every cell on an edge of the tree and assigns it a pseudotime and
##' a segment; optionally repeats the mapping stochastically to estimate
##' per-cell pseudotime uncertainty.
##' @param r pptree object
##' @param emb if not NULL than cell branch assignment and color code of branches are shown
##' @param n.mapping number of probabilistic mapping of cells onto the tree to use. If n.mapping=1 then likelihood cell mapping is used.
##' @return modified pptree object with new fields r$cell.summary, r$cell.info and r$img.list. r$cell.summary contains information about cells projected onto the tree, including pseudotime and branch.
##' @export
project.cells.onto.ppt <- function(r,emb=NULL,n.mapping=1) {
if (is.null(r$root)) { stop("Assign root first") }
g <- graph.adjacency(r$B,weighted=TRUE,mode="undirected")
# one data frame per mapping: the first uses the most likely assignment,
# subsequent ones sample assignments from the responsibilities
df.list <- pblapply(1:n.mapping,function(nm){
#print(paste("mapping",nm))
# assign nearest principal point for each cell
if (nm > 1){
# stochastic draw proportional to the cell's responsibilities
rrm = apply(r$R,1,function(v){sample(1:length(v),size=1,prob=v/sum(v))})
}else{
rrm <- apply(r$R,1,which.max)
}
# idenfity edge onto which each cell lies
df <- do.call(rbind,lapply(1:ncol(r$R),function(v) {
vcells <- which(rrm==v);
if(length(vcells)>0) {
# determine which edge the cells belong to neighboring PPs
nv <- as.integer(neighborhood(g,1,nodes=c(v))[[1]])
nvd <- shortest.paths(g,v,nv)
# second-best principal point among the neighbors selects the edge
spi <- apply(r$R[vcells,nv[-1],drop=FALSE],1,which.max)+1
ndf <- data.frame(cell=vcells,v0=v,v1=nv[spi],d=nvd[spi])
p0 <- r$R[vcells,v]
p1 <- unlist(lapply(1:length(vcells),function(i) r$R[vcells[i],ndf$v1[i]] ))
# random position along the edge
# NOTE(review): f is computed but never used -- confirm whether t was
# meant to interpolate with f rather than alpha
alpha <- runif(length(vcells))
f <- abs( (sqrt(alpha*p1^2+(1-alpha)*p0^2)-p0)/(p1-p0) )
# pseudotime interpolated between the two edge endpoints
ndf$t <- r$pp.info[ndf$v0,]$time+(r$pp.info[ndf$v1,]$time-r$pp.info[ndf$v0,]$time)*alpha
# forks belong to several segments, so take the segment of the non-fork endpoint
ndf$seg <- ifelse( r$pp.info[ndf$v0,]$PP %in% r$forks,r$pp.info[ndf$v1,]$seg,r$pp.info[ndf$v0,]$seg)
ndf$color <- ifelse( r$pp.info[ndf$v0,]$PP %in% r$forks,r$pp.info[ndf$v1,]$color,r$pp.info[ndf$v0,]$color)
ndf
} else {
return(NULL);
}
}))
# canonical (sorted) edge id "a|b" and cells ordered by pseudotime
df$edge <- apply(df,1,function(x) paste(sort(as.numeric(x[c(2,3)])),collapse="|"))
df <- df[order(df$t,decreasing=FALSE),]
### assign data from ndf table of z.ensemble1
#ndf <- z.ensemble1[[nm]]$ndf[,1:5]
#ndf[,6:8] <- z.ensemble1[[nm]]$cell.pseudotime[match(z.ensemble1[[nm]]$ndf$cell,z.ensemble1[[nm]]$cell.pseudotime$cell),2:4]
#colnames(ndf)[6] <- "t"
#rownames(ndf) <- nc.cells[ndf$cell]
#df <- ndf
#df <- df[order(df$t,decreasing=FALSE),]
return(df)
})
# generate graph of cells and PPs for each mapping:
# each PP-PP edge is replaced by a chain PP - cell - ... - cell - PP,
# with cells inserted in pseudotime order along the edge
img.list <- pblapply(df.list,function(df){
img <- g#graph.adjacency(r$B,weighted=TRUE,mode="undirected")
img <- set.vertex.attribute(img,"type",value="pp")
for(e in unique(df$edge)){
ii <- which(df$edge==e);
vc <- as.integer(strsplit(e,'\\|')[[1]]);
# imin picks the edge endpoint with smaller pseudotime so cells are chained
# in the correct direction
imin <- which.min(r$pp.info$time[vc])
#print(imin)
#imin <- 1
#print(c(imin,3-imin))
# insert the cells
if (imin==1){
img <- add_vertices(img,length(ii),type="cell",name=paste('c',df[ii,]$cell,sep=''))
}else{
img <- add_vertices(img,length(ii),type="cell",name=paste('c',rev(df[ii,]$cell),sep=''))
}
# split the original edge weight across the chain proportionally to
# consecutive pseudotime gaps
tw <- 1-E(g,path=c(vc[1],vc[2]))$weight
img <- delete_edges(img,e)
if (imin==1){
img <- add_edges(img,c(vc[1],rep(paste0('c',df$cell[ii]),each=2),vc[2]), weight=1-tw*diff(c(0,df$t[ii],1)) )
}else{
img <- add_edges(img,c(vc[1],rep(paste0('c',rev(df$cell[ii])),each=2),vc[2]), weight=1-tw*diff(c(0,df$t[ii],1)) )
}
}
return(img)
})
# per-cell pseudotime standard deviation across the stochastic mappings
if (n.mapping > 1) {
df.sd <- apply(do.call(cbind,lapply(df.list,function(el)el[rownames(r$R),]$t)),1,sd)
}else {df.sd <- NA}
df.summary <- cbind(df.list[[1]],t.sd=df.sd)
# optional visualization colored by segment
if (!is.null(emb)){
cols <- adjustcolor(df.summary[rownames(r$R),]$color,0.2); names(cols) <- rownames(r$R)
plotppt(r,emb,cex.col=cols, tree.col = r$pp.info$color,cex.main=0.5, cex.title=1,cex.tree=1,lwd.tree=1)
}
r$cell.summary <- df.summary
r$cell.info <- df.list
r$img.list <- img.list
#r$mg <- mg;
return(invisible(r))
}
##' Determine a set of genes significantly associated with the tree
##' @param r pptree object
##' @param X expressinon matrix of genes (row) vs cells (column)
##' @param fdr.cut FDR (Benjamini-Hochberg adjustment) cutoff on significance; significance if FDR < fdr.cut
##' @param A.cut cmplitude cutoff on significance; significance if A > A.cut
##' @param st.cut cutoff on stability (fraction of mappings with significant (fdr,A) pair) of association; significance, significance if A > A.cut
##' @param summary show plot of amplitude vs FDR of each gene's association. By default FALSE.
##' @param subtree restrict statistical assesment to a subtree
##' @param fdr.method a method to adjust for multiple testing. Default - Bonferroni. Alternatively, "BH" can be used.
##' @return modified pptree object with a new field r$stat.association that includes pvalue, amplitude, fdr, stability and siginificane (TRUE/FALSE) of gene associations
##' @export
test.associated.genes <- function(r,X,n.map=1,n.cores=(parallel::detectCores()/2),spline.df=3,fdr.cut=1e-4,A.cut=1,st.cut=0.8,summary=FALSE,subtree=NA,fdr.method=NULL, ...) {
  ## Test association of each gene's expression with position on the tree.
  ## For each mapping, every gene is fitted per-segment with a spline of
  ## pseudotime and compared against a flat (intercept-only) model via an
  ## F-test; per-gene statistics are medians across mappings.
  if (is.null(r$root)) {stop("assign root first")}
  if (is.null(r$cell.summary) | is.null(r$cell.info)) {stop("project cells onto the tree first")}
  X <- X[,intersect(colnames(X),rownames(r$cell.summary))]
  if (sum(!colnames(X) %in% rownames(r$cell.summary)) > 0) {stop( paste("Expression matrix X contains cells not mapped onto the tree, e.g. cell",colnames(X)[!colnames(X) %in% rownames(r$cell.summary)][1]) )}
  # was `n.map < 0`: that let n.map = 0 through despite the message, breaking 1:n.map below
  if (n.map < 1 | n.map > length(r$cell.info)) {stop("n.map should be more than 0 and less than number of mappings")}
  genes <- rownames(X)
  # restrict assessment to the segments of a subtree if one was supplied;
  # subtree is a list as returned by extract.subtree(), NA means the whole tree
  # (was `!is.na(subtree)`, a vector condition when subtree is a list)
  subseg <- unique(r$cell.summary$seg)
  if (is.list(subtree)) {subseg <- subtree$segs}
  gtl <- lapply(1:n.map,function(ix){
    print(paste("mapping",ix,"of",n.map))
    inf <- if (n.map==1) r$cell.summary else r$cell.info[[ix]]
    # per-gene association statistics for this mapping
    gt <- do.call(rbind,mclapply(genes,function(gene) {
      sdf <- inf[inf$seg%in%subseg,]; sdf$exp <- X[gene,rownames(sdf)]
      # per-segment spline fits of expression vs pseudotime
      # (argument was misspelled `familly`; mgcv::gam silently absorbed it in `...`)
      mdl <- tapply(1:nrow(sdf),as.factor(sdf$seg),function(ii) {
        # TODO: adjust df according to branch length?
        m <- mgcv::gam(exp~s(t,k=spline.df),data=sdf[ii,],family=gaussian())
        list(d=deviance(m),df=df.residual(m),p=predict(m))
      })
      mdf <- data.frame(do.call(rbind,lapply(mdl,function(x) c(d=x$d,df=x$df))))
      # background (flat) model; residual df corrected for multiple segments
      odf <- sum(mdf$df)-nrow(mdf)
      m0 <- mgcv::gam(exp~1,data=sdf,family=gaussian())
      if (sum(mdf$d)==0){ fstat <- 0 }else{
        fstat <- (deviance(m0) - sum(mdf$d))/(df.residual(m0)-odf)/(sum(mdf$d)/odf)
      }
      pval <- pf(fstat,df.residual(m0)-odf,odf,lower.tail = FALSE)
      pr <- unlist(lapply(mdl,function(x) x$p))
      # A: amplitude (dynamic range) of the fitted expression along the tree
      c(pval=pval,A=max(pr)-min(pr))
    },mc.cores=n.cores,mc.preschedule=TRUE))
    gt <- data.frame(gt); rownames(gt) <- genes
    # NOTE(review): p.adjust()'s default method is "holm", not Bonferroni as the
    # roxygen once claimed; kept as-is to preserve results — pass fdr.method to override.
    if (is.null(fdr.method)) {
      gt$fdr <- p.adjust(gt$pval)
    }else{
      gt$fdr <- p.adjust(gt$pval,method=fdr.method)
    }
    gt
  })
  # median statistics across mappings; st = fraction of mappings in which the
  # gene passed both the fdr and the amplitude cutoffs
  stat.association <- data.frame(cbind( apply(do.call(cbind,lapply(gtl,function(gt)gt$pval)),1,median),
                                        apply(do.call(cbind,lapply(gtl,function(gt)gt$A)),1,median),
                                        apply(do.call(cbind,lapply(gtl,function(gt)gt$fdr)),1,median),
                                        apply(do.call(cbind,lapply(gtl,function(gt) gt$fdr < fdr.cut & gt$A > A.cut )),1,sum)/length(gtl)
  ))
  rownames(stat.association) <- genes; colnames(stat.association) <- c("pval","A","fdr","st")
  stat.association$sign <- stat.association$fdr < fdr.cut & stat.association$A > A.cut & stat.association$st > st.cut
  # plot amplitude vs FDR, coloring significantly associated genes in red
  if (summary==TRUE){
    par(mfrow=c(1,1),mar=c(4.5,4.5,1,1))
    plot(stat.association$A,stat.association$fdr,xlab="Amplitude",ylab="FDR, log",log="y",pch=19,cex=0.5,
         col=adjustcolor( ifelse(stat.association$sign==TRUE,"red","black") ,0.4),cex.lab=1.5)
    legend("bottomleft", legend=c( paste("DE,",sum(stat.association$sign)), paste("non-DE,",sum(!stat.association$sign))),
           col=c("red", "black"), bty="n",pch=19,cex=1,pt.cex=1)
  }
  # whole-tree call: store results on the tree object; subtree call: return the table
  if (!is.list(subtree)){
    r$stat.association <- stat.association
    return(r)
  }else{
    return(stat.association)
  }
}
##' Model gene expression levels as a function of tree positions.
##' @param r pptree object
##' @param X expression matrix of genes (rows) vs cells (columns)
##' @param n.map number of probabilistic cell-to-tree mappings to use
##' @param method method of modeling. 'ts' (shrinkage splines) is the supported option; presumably 'sf' and 'av' are experimental alternatives — see fit.ts.
##' @param knn use expression averaging among knn cells
##' @param gamma stringency of penalty.
##' @return modified pptree object with new fields r$fit.list, r$fit.summary and r$fit.pattern. r$fit.pattern contains matrix of fitted gene expression levels
##' @export
fit.associated.genes <- function(r,X,n.map=1,n.cores=parallel::detectCores()/2,method="ts",knn=1,gamma=1.5) {
  ## Fit expression of the significantly tree-associated genes (per
  ## r$stat.association$sign) as smooth functions of tree position, and
  ## average the fitted matrices across n.map probabilistic mappings.
  if (is.null(r$root)) {stop("assign root first")}
  if (is.null(r$cell.summary) | is.null(r$cell.info)) {stop("project cells onto the tree first")}
  X <- X[,intersect(colnames(X),rownames(r$cell.summary))]
  if (sum(!colnames(X) %in% rownames(r$cell.summary)) > 0) {stop( paste("Expression matrix X contains cells not mapped onto the tree, e.g. cell",colnames(X)[!colnames(X) %in% rownames(r$cell.summary)][1]) )}
  # was `n.map < 0`: that let n.map = 0 through despite the message, breaking downstream 1:n.map loops
  if (n.map < 1 | n.map > length(r$cell.info)) {stop("n.map should be more than 0 and less than number of mappings")}
  if ( is.null(r$stat.association) ) {stop("identify significantly associated genes using test.associated.genes()")}
  # only fit genes previously called significantly associated with the tree
  genes <- intersect(rownames(X),rownames(r$stat.association)[r$stat.association$sign])
  if (method=="ts"){
    gtl <- fit.ts(r,X[genes,],n.map,n.cores,gamma,knn)
  }else if (method=="sf"){
    gtl <- t.fit.sf(r,X[genes,],n.map,n.cores,gamma)
  }else if (method=="av"){
    gtl <- t.fit.av(r,X[genes,],n.map,n.cores)
  }else{stop("please choose correct method name")}
  # element-wise average of the per-mapping fitted matrices (genes x cells);
  # replaces the previous explicit accumulation loop, dimnames come from gtl[[1]]
  ft.summary <- Reduce(`+`, gtl)/length(gtl)
  r$fit.list <- gtl
  r$fit.summary <- ft.summary
  r$fit.pattern <- classify.genes(r)
  print(table(r$fit.pattern))
  return(r)
}
##' Model gene expression levels as a brancing spline function of tree positions.
##' @param r pptree object
##' @param X expression matrix of genes (rows) vs cells (columns)
##' @param n.map number of probabilistic cell-to-tree mappings to use
##' @param knn use expression averaging among knn cells
##' @param gamma stringency of penalty.
##' @return matrix of fitted gene expression levels to the tree
##' @export
fit.ts <- function(r,X,n.map,n.cores=parallel::detectCores()/2,gamma=1.5,knn=1) {
  # For each mapping, fit each gene's expression as a shrinkage ("ts") spline
  # of pseudotime along every root-to-tip path of the tree, then smooth the
  # fitted values among knn neighbours measured along the tree graph.
  # Returns a list (one element per mapping) of fitted genes x cells matrices.
  ix <- 1
  # reference mapping (first one): used to define prediction coordinates and knn
  img = r$img.list[[ix]];
  root = r$root
  tips = r$tips[r$tips != root]
  # reference branch table: one row per (cell, root-to-tip path) membership;
  # cell vertices in the graph are named "c<id>"
  branches.ll = do.call(rbind,lapply(tips, function(tip){
    b = get.shortest.paths(img,from=as.character(root),to=as.character(tip))$vpath[[1]]$name
    b = b[grepl("^c",b)]
    ind <- paste('c',r$cell.info[[ix]]$cell,sep="") %in% b
    cbind( ids=rownames(r$cell.info[[ix]])[ind], r$cell.info[[ix]][ind,],branch=rep( which(tips==tip),length(b)) )
  }))
  # calculate knn for each vertex along the tree:
  # drop principal points so only cell vertices remain, take pairwise
  # graph distances, and binarize to an adjacency of cells within knn steps
  for (v in r$pp.info$PP){img <- delete_vertices(img,as.character(v))}
  dst.tree <- distances(img,v=V(img),to=V(img));
  dst.tree <- dst.tree[ paste("c",r$cell.summary$cell,sep=""),paste("c",r$cell.summary$cell,sep="") ]
  rownames(dst.tree) <- colnames(dst.tree) <- rownames(r$cell.summary)
  dst.tree[dst.tree <= knn] <- 1; dst.tree[dst.tree > knn] <- 0
  gtl <- lapply(1:n.map,function(ix){
    print(paste("fit gene expression for mapping",ix))
    # rebuild the branch table for this mapping (cells may map differently)
    img = r$img.list[[ix]];
    root = r$root
    tips = r$tips[r$tips != root]
    branches = do.call(rbind,lapply(tips, function(tip){
      b = get.shortest.paths(img,from=as.character(root),to=as.character(tip))$vpath[[1]]$name
      b = b[grepl("^c",b)]
      ind <- paste('c',r$cell.info[[ix]]$cell,sep="") %in% b
      cbind( ids=rownames(r$cell.info[[ix]])[ind], r$cell.info[[ix]][ind,],branch=rep( which(tips==tip),length(b)) )
    }))
    #branches.ll <- branches
    #genes <- intersect(rownames(X),rownames(r$stat.association)[r$stat.association$sign])
    genes <- rownames(X)
    gt <- do.call(rbind,mclapply(genes,function(gene) {
      # fit one spline per branch on this mapping's pseudotimes, then predict
      # at the reference (branches.ll) pseudotimes so all mappings align
      expr.fitted <- unlist(lapply(unique(branches$branch),function(br){
        branches1 <- branches[branches$branch==br,]
        expr <- X[gene,as.character(branches1$ids)]
        #gene.fit1 = gam( expr ~ s( branches1$time,k=length(branches1$time),bs="ts"),knots=list(branches1$time) )
        tt <- branches1$t
        #tt <- 1:length(tt)
        gene.fit1 = mgcv::gam( expr ~ s(tt,bs="ts"),gamma=gamma)
        #ggplot()+geom_point(aes(tt,expr))+geom_line(aes(tt,gene.fit1$fitted.values))
        td <- data.frame(matrix(branches.ll[branches.ll$branch==br,]$t,nrow=sum(branches.ll$branch==br)));
        rownames(td) <- branches.ll[branches.ll$branch==br,]$ids; colnames(td) <- "tt"
        predict(gene.fit1,td )
      }))
      # old version - averaging along shared branches
      #for( cell in names(which(table(branches.ll$ids) > 1))){
      #  expr.fitted[branches.ll$ids==cell] <- mean(expr.fitted[branches.ll$ids==cell])
      #}
      # new version - knn smoothing, where knns are estimated along the tree.
      expr.fitted <- (dst.tree[names(expr.fitted),names(expr.fitted)] %*% expr.fitted) / (apply(dst.tree[names(expr.fitted),names(expr.fitted)],1,sum))
      expr.fitted <- expr.fitted[,1]
      # cells on shared (progenitor) segments appear once per path; keep first
      return(expr.fitted[!duplicated(names(expr.fitted))])
    },mc.cores = n.cores))
    rownames(gt) <- genes
    return(gt)
  })
  return(gtl)
}
##' Classify tree-associated genes
##'
##' Tree-associated genes are classified in branch-monotonous, transiently expressed and having complex patterns.
##' @param r pptree object with fitted gene expression (r$fit.summary)
##' @param n.cores number of cores (currently unused)
##' @param cutoff expression in a local optimum should be higher/lower than both terminal branch values by cutoff.
##' @return vector of predicted classification for fitted genes.
##' @export
classify.genes <- function(r,n.cores=parallel::detectCores()/2,cutoff=0.2) {
  ## Classify fitted tree-associated genes by the number of inner local optima
  ## of their fitted expression curve across tree segments:
  ## 0 -> "branch-monotonous", 1 -> "transiently expressed", >1 -> "complex patterns".
  if (is.null(r$fit.summary)) {stop("fit gene expression to the tree first")}
  a <- do.call(cbind,lapply(unique(r$cell.summary$seg),function(seg){
    seg.summary <- r$cell.summary[r$cell.summary$seg==seg,]
    # fitted expression of all genes along this segment, ordered by pseudotime
    tt <- r$fit.summary[,rownames(seg.summary)][,order(seg.summary$t)]
    # count inner local optima per gene; `cutoff` was previously not passed
    # to loc.opt, silently using its default of 0.1
    apply(tt,1,function(x) {
      res <- loc.opt(x,cutoff=cutoff)
      if ( sum(!is.na(res))==0 ){0}else{nrow(res)}
    })
  }))
  # total number of inner optima over all segments determines the class
  apply(a,1,function(v){
    if (sum(v)==0) {return("branch-monotonous")}else
    if (sum(v)==1) {return("transiently expressed")}else
    if (sum(v)>1) {return("complex patterns")}
  })
}
##' Identify all local optima for a time series data
##' @name loc.opt
##' @param series - time series data
##' @param cutoff - expression in a local optimum should be at least cutoff higher/lower than nearby local optima; this eliminates small local optima that are likely artifacts
##' @return data frame containing type of local optima (min/max) and time index.
##' @export
loc.opt <- function(series,cutoff=0.1){
  ## Locate inner local optima of a time series: positions where the first
  ## difference changes sign and the rise/fall on both sides exceeds `cutoff`.
  ## Returns a data frame with columns "type" ("max"/"min") and "index",
  ## or NA when no optimum passes the filters.
  dx <- diff(series)
  # candidate interior points: sign change of the first difference
  cand <- (-dx[1:(length(dx)-1)]*dx[2:length(dx)]) > 0
  # of two adjacent candidates (rupture artifacts), drop the first one
  head.idx <- 1:(length(cand)-1)
  cand[head.idx][cand[head.idx] & cand[head.idx+1]] <- FALSE
  if (sum(cand) > 0){
    # bracket candidates with the series endpoints and measure jumps between them
    cand <- c(TRUE,cand,TRUE)
    ds <- diff(series[cand])
    k <- sum(cand)
    opt.type <- unlist(lapply(1:(k-2),function(i){
      rise <- ds[i]
      fall <- -ds[i+1]
      if (rise > cutoff && fall > cutoff) return("max")
      if (rise < -cutoff && fall < -cutoff) return("min")
      NA
    }))
    keep <- !is.na(opt.type)
    if (any(keep)){
      opt.inf <- data.frame(cbind( opt.type[keep], as.numeric(which(cand))[2:(k-1)][keep]),stringsAsFactors=FALSE)
      colnames(opt.inf) <- c("type","index")
      opt.inf$index <- as.numeric(opt.inf$index)
      return(opt.inf)
    }
  }
  NA
}
##' Visualize branching trajectories of a particular gene.
##' @param r pptree object
##' @param gene gene name
##' @param X matrix with a single row containing a gene expression levels (could be a vector of gene's expression). Columns of X reflect gene names.
##' @param cex.cell size of cells
##' @param cex.lab size of axis titles
##' @param cex.axis size of axis labels
##' @param cex.main size of title showing a gene name
##' @param lwd.t1 width of the main branching trajectory
##' @param lwd.t2 width of ensemble trajectories, typically thinner than that of the main trajectory.
##' @param lwd.erbar width of error bars for uncertainty of cell pseudotime assignment
##' @param subtree visualise trajectory along a given subtree
##' @export
visualise.trajectory = function(r,gene,X,cex.cell=0.3,cex.lab=2,cex.axis=1.5,cex.main=1,lwd.erbar=0.0,lwd.t1=3,lwd.t2=0.2,switch.point=NA,subtree=NA){
  ## Plot a gene's expression against pseudotime: cells colored by segment,
  ## optional pseudotime-uncertainty error bars, the ensemble of per-mapping
  ## fitted trajectories, and the main (consensus) fitted trajectory.
  if (is.null(dim(X))){
    Xgene <- X
  }else{
    if ( gene %in% rownames(X) == FALSE ) {stop("gene is not in matrix X")}
    Xgene <- X[gene,]
  }
  Xgene <- Xgene[intersect(names(Xgene),rownames(r$cell.summary))]
  if ( sum(!names(Xgene)%in%rownames(r$cell.summary)) > 0 ) {stop("matrix/vector X does not contain some cells used to reconstruct tree")}
  segs <- unique(r$cell.summary$seg)
  # restrict considered segments to subtree if given
  # (was `!is.na(subtree)`: is.na() on a subtree list yields a vector
  # condition, which fails in if() for multi-element lists)
  if (is.list(subtree)){
    segs <- intersect(segs,subtree$seg)
  }
  par(mar=c(5,5,3,1))
  # draw cells
  ind <- r$cell.summary$seg%in%segs
  plot(r$cell.summary$t[ind],Xgene[rownames(r$cell.summary)][ind],type = "n",
       xlab="pseudotime",ylab="expression",cex.axis=cex.axis,cex.lab=cex.lab,main=gene,font.main=3,cex.main=cex.main)
  grid(5,5,lwd=1.5)
  points(r$cell.summary$t[ind],Xgene[rownames(r$cell.summary)][ind],col=adjustcolor(r$cell.summary$color[ind],0.5),pch=19,cex=cex.cell)
  # draw error bars of pseudotime uncertainty if given
  if ( sum(!is.na(r$cell.summary$t.sd))>0 ){
    segments( r$cell.summary$t[ind]-r$cell.summary$t.sd[ind], Xgene[rownames(r$cell.summary)][ind], r$cell.summary$t[ind]+r$cell.summary$t.sd[ind], y1 = Xgene[rownames(r$cell.summary)][ind],
              col=adjustcolor(r$cell.summary$color[ind],0.1),lwd=lwd.erbar)
  }
  # draw ensemble of sampled trajectories if given (mapping 1 is the consensus)
  if (length(r$fit.list)>1){
    for (j in 2:length(r$fit.list)){
      for(seg in segs ){
        ind <- r$cell.summary$seg == seg
        t.ord <- order(r$cell.summary$t[ind])
        lines(r$cell.summary$t[ind][t.ord],r$fit.list[[j]][gene,rownames(r$cell.summary)][ind][t.ord],
              col=adjustcolor(r$cell.summary$color[ind][t.ord],0.4),lwd=lwd.t2)
      }
    }
  }
  # draw the main (consensus) trajectory
  for(seg in segs ){
    ind <- r$cell.summary$seg == seg
    t.ord <- order(r$cell.summary$t[ind])
    lines(r$cell.summary$t[ind][t.ord],r$fit.summary[gene,rownames(r$cell.summary)][ind][t.ord],
          col=r$cell.summary$color[ind][t.ord],lwd=lwd.t1)
  }
  # optional vertical marker (e.g. bifurcation point)
  if (!is.na(switch.point)){
    abline(v=switch.point,lty=1,lwd=3,col=adjustcolor("black",0.5))
  }
  # connect the earliest cell of each segment to the latest upstream cell on
  # its root path, so trajectories of adjacent branches appear continuous
  g <- r$img.list[[1]]
  for (seg in segs){
    ind <- r$cell.summary$seg==seg
    c2.name <- rownames(r$cell.summary[ind,])[which.min(r$cell.summary$t[ind])]
    c2 <- r$cell.summary$cell[ind][which.min(r$cell.summary$t[ind])]
    c2.seg <- r$cell.summary$seg[ind][which.min(r$cell.summary$t[ind])]
    c2.path <- names(shortest_paths(g,r$root,paste("c",c2,sep="") )$vpath[[1]])
    c2.path <- c2.path[unlist(lapply(1:length(c2.path),function(i) grepl("c",c2.path[i])))]
    c2.path <- as.numeric(unlist(lapply(strsplit(c2.path,"c"),function(x)x[2])))
    ind <- r$cell.summary$cell %in% c2.path & r$cell.summary$cell != c2
    if (sum(ind)>0){
      c1.name <- rownames(r$cell.summary[ind,])[which.max(r$cell.summary$t[ind])]
      segments(r$cell.summary[c(c1.name),]$t,r$fit.summary[gene,c(c1.name)],r$cell.summary[c(c2.name),]$t,r$fit.summary[gene,c(c2.name)],
               col=r$cell.summary[c2.name,]$color,lwd=lwd.t1)
    }
  }
}
##' Visualize clusters of genes using heatmap and consensus tree-projected pattern.
##' @param r pptree object
##' @param emb cells embedding
##' @param clust a vector of cluster numbers named by genes
##' @param n.best show n.best the most representative genes on the heatmap for each cluster
##' @param best.method use method to select the most representative genes. Current options: "pca" selects genes with the highest loading on pc1 component reconstructed using genes from a cluster, "cor" selects genes that have the highest average correlation with other genes from a cluster.
##' @param cex.gene size of gene names
##' @param cex.cell size of cells on embedding
##' @param cex.tree width of line of tree on embedding
##' @param reclust whether to reorder genes inside individual clusters on the heatmap according to hierarchical clustering using Ward linkage and 1-Pearson as a distance between genes. By default TRUE.
##' @param subtree visualize clusters for a given subtree
##' @export
visualise.clusters <-function(r,emb,clust=NA,clust.n=5,n.best=4,best.method="cor",cex.gene=1,cex.cell=0.1,cex.tree=2,subtree=NA, reclust=TRUE){
  ## For each gene cluster, draw its consensus tree-projected pattern on the
  ## embedding next to a z-scored expression heatmap, labeling the n.best
  ## most representative genes per cluster.
  # was `!is.na(clust) & ...`: is.na() on a full clustering vector yields a
  # vector condition, which fails in if(); treat a single NA as "not given"
  clust.given <- !(length(clust)==1 && all(is.na(clust)))
  if ( clust.given && sum(!names(clust)%in%rownames(r$fit.summary))>0) {stop( paste("Expression is not fitted for",sum(!names(clust)%in%rownames(r$fit.summary)),"genes" ))}
  if (best.method!="pca" & best.method!="cor") {stop(paste("incorrect best.method option",best.method) )}
  # order cells by (mean segment pseudotime, cell pseudotime)
  tseg <- unlist(lapply( unique(r$cell.summary$seg),function(seg)mean(r$cell.summary$t[r$cell.summary$seg==seg]))); names(tseg) <- unique(r$cell.summary$seg)
  tseg <- tseg[as.character(r$cell.summary$seg)]
  # was `ppt$fit.summary`: referenced a global object instead of argument r
  gns <- rownames(r$fit.summary)
  if (clust.given){gns <- names(clust)}
  emat <- r$fit.summary[gns,rownames(r$cell.summary)][,order(tseg,r$cell.summary$t)]
  emat <- t(apply(emat,1,function(x) (x-mean(x))/sd(x) ))   # z-score per gene
  cols <- r$cell.summary$col[order(tseg,r$cell.summary$t)]
  # (was `!is.na(subtree)`: vector condition when subtree is a list)
  subcells = TRUE; if (is.list(subtree)){subcells <- r$cell.summary$seg[order(tseg,r$cell.summary$t)]%in%subtree$seg}
  # cluster genes if no clustering was supplied
  if (!clust.given){
    gns <- rownames(emat)
    dst.cor <- 1-cor(t(emat[gns,]))
    hcl <- hclust(as.dist(dst.cor),method="ward.D")
    clust <- cutree(hcl,clust.n)
  }
  k <- length(unique(clust))
  # pick the n.best most representative genes per cluster
  genes.show <- unlist(lapply(1:k,function(i){
    n <- n.best; if ( sum(clust==i) < n) {n <- sum(clust==i)}
    if (best.method=="pca"){
      # genes with the highest loadings on PC1 of the cluster
      pr <- pca(t(emat[clust==i,]),center = TRUE, scale = "uv")
      pr.best <- rep(i,n); names(pr.best) <- names(sort(pr@loadings[,1],decreasing = T))[1:n]
      return(pr.best)
    }else if (best.method=="cor"){
      # genes with the highest mean correlation to the rest of the cluster
      cr <- cor(t(emat[clust==i,]))
      cr.best <- rep(i,n); names(cr.best) <- names(sort(apply(cr,1,mean),decreasing = TRUE))[1:n]
      return(cr.best)
    }
  }))
  # layout: per cluster, (embedding | branch color bar + heatmap | gene labels)
  nf <- layout( matrix(unlist(lapply(1:k,function(i) 5*(i-1)+c(1,2,3,1,4,5))),2*k,3, byrow=T),respect = T,width=c(1,1,0.1),heights=rep(c(0.1,1),k) )
  for (cln in 1:k){
    # recluster genes inside module if necessary
    gns <- names(clust)[clust==cln]
    if (reclust==TRUE){
      dst.cor <- 1-cor(t(emat[gns,]))
      hclust.cor <- hclust(as.dist(dst.cor),method="ward.D")
      gns <- gns[hclust.cor$order]
    }
    # draw cluster-wise pattern
    par(mar=c(0.3,0.1,0.0,0.2))
    plotppt(r,emb,pattern.cell = apply(emat[clust==cln,],2,mean),cex.main=cex.cell,cex.tree = cex.tree,lwd.tree = 0.1,subtree=subtree)
    # draw color-scheme for branches
    par(mar=c(0.0,0.0,0.0,0))
    col.ind <- 1:length(unique(cols)); names(col.ind) = unique(cols)
    image( t(rbind( col.ind[cols[subcells]] )),axes=FALSE,col=(unique(cols[subcells])) )
    box()
    par(mar=c(0.0,0.0,0.0,0))
    plot(0.2,0.2,ylim=c(0.05,0.95),xlim=c(0,1),xaxt='n',yaxt='n',pch='',ylab='',xlab='',bty='n')
    par(mar=c(0.3,0.0,0.0,0))
    image( t(emat[gns,subcells]),axes=FALSE,col=colorRampPalette(c("blue","grey80","red"))(n = 60))
    box()
    gns[! gns %in% names(genes.show)[genes.show==cln] ] <- ""
    # spread gene labels apart with quadratic programming so they don't overlap:
    # minimize label displacement subject to a minimal spacing `del` and [0,1] bounds
    coord <- which( names(clust)[clust==cln] %in% names(genes.show)[genes.show==cln] )/sum(clust==cln)
    del <- 1/(sum(genes.show==cln))
    Dmat <- diag(1,length(coord),length(coord))
    dvec <- rep(0,length(coord))
    Amat <- matrix(0,nrow= 3*length(coord)-1,ncol=length(coord)); bvec = rep(0,3*length(coord)-1)
    for (i in 1:(length(coord)-1)){Amat[i,i] <- -1; Amat[i,i+1] <- 1; bvec[i] <- del - (coord[i+1]-coord[i])}
    for (i in 1:(length(coord))){j <- i+length(coord)-1; Amat[j,i] <- 1; bvec[j] <- -coord[i]+0 }
    for (i in 1:(length(coord))){j <- i+2*length(coord)-1; Amat[j,i] <- -1; bvec[j] <- coord[i]-1}
    qp = solve.QP(Dmat, dvec, t(Amat), bvec, meq=0, factorized=FALSE)
    coord_new = qp$solution + coord
    par(mar=c(0.3,0,0,0))
    plot(0.2,0.2,ylim=c(0.0,1),xlim=c(0,1),xaxt='n',yaxt='n',pch='',ylab='',xlab='',bty='n')
    axis(side = 4, at = coord_new,lwd=0.0,lwd.ticks=0,font=3,cex.axis=cex.gene,labels=gns[gns!=""],tck=0.0,hadj=0.0,line=-0.9,las=1)
    # leader lines from heatmap rows to the displaced labels
    for (i in 1:length(coord)){
      arrows( 0,coord[i],1,coord_new[i],length=0.0,lwd=0.7 )
    }
  }
}
##' Determine genes differentially upregulated after bifurcation point
##' @param r pptree object
##' @param mat expression matrix of genes (rows) and cells (columns)
##' @param root a principal point of fork root
##' @param leaves vector of two principal points of fork leaves
##' @param genes optional set of genes to estimate association with fork
##' @param n.mapping number of probabilistic cell-to-tree projections to use for robustness
##' @param n.mapping.up number of probabilistic cell-to-tree projections to estimate the amount of upregulation relative to progenitor branch
##' @return summary statistics of size effect and p-value of association with bifurcation fork.
##' @export
test.fork.genes <- function(r,mat,matw=NULL,root,leaves,genes=rownames(mat),n.mapping=1,n.mapping.up=1,n.cores=parallel::detectCores()/2) {
  ## Test per-gene differential expression between the two post-bifurcation
  ## branches (branch-by-pseudotime spline model) and estimate upregulation of
  ## each derivative branch relative to the progenitor branch.
  g <- graph.adjacency(r$B>0,mode="undirected")
  vpath = get.shortest.paths(g,root,leaves)
  interPP = intersection(vpath$vpath[[1]],vpath$vpath[[2]])
  # re-root the two paths at the bifurcation point: the latest (by pseudotime)
  # principal point shared by both root-to-leaf paths
  # (a stray no-op `which.max(...)` statement was removed here)
  vpath = get.shortest.paths(g, r$pp.info[interPP,]$PP[which.max(r$pp.info[interPP,]$time)],leaves)
  cat("testing differential expression between branches ..");cat("\n")
  gtll <- lapply( 1:n.mapping,function(nm){
    cat("mapping ");cat(nm);cat("\n")
    cell.info <- r$cell.info[[nm]]
    # cells of each post-bifurcation branch (i = 1, 2)
    brcells = do.call(rbind,lapply( 1:length(vpath$vpath), function(i){
      x=vpath$vpath[[i]]
      segs = as.numeric(names(table(r$pp.info[x,]$seg))[table(r$pp.info[x,]$seg)>1])
      return(cbind(cell.info[cell.info$seg %in% segs,],i))
    }))
    # for every gene: effect size and branch-term p-value
    gtl <- do.call(rbind,mclapply(genes,function(gene) {
      brcells$exp <- mat[gene,rownames(brcells)]
      if (is.null(matw)) {brcells$w = 1
      }else {brcells$w <- matw[gene,r$cells][as.integer(gsub("c","",brcells$node))]}
      # branch-specific smooth of pseudotime plus a branch offset
      # (argument was misspelled `familly`; mgcv::gam silently absorbed it in `...`)
      m <- mgcv::gam(exp ~ s(t)+s(t,by=as.factor(i))+as.factor(i),data=brcells,family=gaussian(),weights=brcells$w)
      return( c(mean(brcells$exp[brcells$i==1])-mean(brcells$exp[brcells$i==2]) , min(summary(m)$p.pv[2]) ) )
    },mc.cores=n.cores,mc.preschedule=TRUE));
    colnames(gtl) = c("effect","p"); rownames(gtl) = genes; gtl = as.data.frame(gtl)
    return(gtl)
  })
  # medians across mappings: effect size, p-value, BH FDR, and stability
  # fractions (share of mappings passing p / FDR < 0.05)
  effect = do.call(cbind,lapply(gtll,function(gtl) gtl$effect ))
  if (length(gtll) > 1) {effect <- apply(effect,1,median)}
  pval = do.call(cbind,lapply(gtll,function(gtl) gtl$p ))
  if (length(gtll) > 1) {pval <- apply(pval,1,median)}
  fdr = do.call(cbind,lapply(gtll,function(gtl) p.adjust(gtl$p,"BH") ))
  if (length(gtll) > 1) {fdr <- apply(fdr,1,median)}
  st = do.call(cbind,lapply(gtll,function(gtl) gtl$p < 5e-2 ))
  if (length(gtll) > 1) {st <- apply(st,1,mean)}
  stf = do.call(cbind,lapply(gtll,function(gtl) p.adjust(gtl$p,"BH") < 5e-2 ))
  if (length(gtll) > 1) {stf <- apply(stf,1,mean)}
  # estimate the amount of upregulation relative to the progenitor branch:
  # slope (and p-value) of expression vs pseudotime along each root->leaf path
  cat("testing upregulation in derivative relative to progenitor branch ..");cat("\n")
  eu <- do.call(cbind,lapply(leaves[1:2],function(leave){
    # was `extract.subtree(ppt, ...)`: referenced a global instead of argument r
    segs = extract.subtree(r,c(root,leave))
    posit = do.call(rbind,(mclapply(genes,function(gene){
      eu <- do.call(rbind,lapply(1:n.mapping.up,function(j){
        cells = rownames(r$cell.info[[j]])[r$cell.info[[j]]$seg %in% segs$segs]
        ft = lm( mat[gene,cells] ~ r$cell.info[[j]][cells,]$t )
        return( c(ft$coefficients[2],summary(ft)$coefficients[2,4] ) )
      }))
      if (n.mapping.up > 1) {eu <- apply(eu,2,median)}
      return(eu)
    },mc.cores = n.cores,mc.preschedule = TRUE)))
  }))
  colnames(eu) <- c("pd1.a","pd1.p","pd2.a","pd2.p")
  res <- as.data.frame(cbind(effect = effect, p = pval, fdr = fdr, st = st,stf = stf))
  colnames(res) <- c("effect","p","fdr","st","stf")
  rownames(res) <- genes
  res <- cbind(res,eu)
  return(res)
}
##' Assign genes differentially expressed between two post-bifurcation branches
##' @param fork.de statistics on expression differences between post-bifurcation branches, return of test.fork.genes
##' @param stf.cut fraction of projections when gene passed fdr < 0.05
##' @param effect.b1 expression differences to call gene as differentially upregulated at branch 1
##' @param effect.b2 expression differences to call gene as differentially upregulated at branch 2
##' @param pd.a minimum expression increase at derivative compared to progenitor branches to call gene as branch-specific
##' @param pd.p p-value of expression changes of derivative compared to progenitor branches to call gene as branch-specific
##' @return table fork.de with added column state, which classifies genes in branch-specific (1 or 2) and non-branch-specific (0)
##' @export
branch.specific.genes <- function(fork.de,stf.cut = 0.7, effect.b1 = 0.1,effect.b2 = 0.3, pd.a = 0, pd.p = 5e-2){
  ## Assign each gene a state: 1 (branch-1-specific), 2 (branch-2-specific)
  ## or 0 (not branch-specific). A gene is branch-specific when it is stable
  ## across mappings (stf), its effect size favors that branch, and it is
  ## upregulated relative to the progenitor branch (pd*.a, pd*.p).
  robust <- fork.de$stf >= stf.cut
  up1 <- robust & fork.de$effect > effect.b1 & fork.de$pd1.a > pd.a & fork.de$pd1.p < pd.p
  up2 <- robust & fork.de$effect < -effect.b2 & fork.de$pd2.a > pd.a & fork.de$pd2.p < pd.p
  state <- rep(0,nrow(fork.de))
  names(state) <- rownames(fork.de)
  state[rownames(fork.de)[up1]] <- 1
  state[rownames(fork.de)[up2]] <- 2
  cbind(fork.de,state)
}
##' Estimate optimum of expression and time of activation
##' @param r ppt.tree object
##' @param mat expression matrix
##' @param root root of progenitor branch of bifurcation
##' @param leave leaf (tip) of the derivative branch of the bifurcation
##' @param genes genes to estimate parameters
##' @param deriv.cutoff a first passage of derivative through cutoff 'deriv.cutoff' to predict activation timing
##' @param gamma gamma parameter in gam function
##' @param n.mapping results are averaged among n.mapping number of probabilistic cell projections
##' @param n.cores number of cores to use
##' @return per gene timing of optimum and activation
##' @export
activation.statistics <- function(r,mat,root,leave,genes=rownames(mat),deriv.cutoff = 0.015,gamma=1,n.mapping=1,n.cores=parallel::detectCores()/2){
  ## For each gene, along the root->leave path of the tree, estimate the
  ## pseudotime of the fitted expression optimum and of activation (first
  ## passage of the range-normalized derivative above deriv.cutoff);
  ## both are medians across n.mapping cell-to-tree projections.
  xx = do.call(rbind,(mclapply(genes,function(gene){
    gres <- do.call(rbind,lapply(1:n.mapping,function(i){
      # was `extract.subtree(ppt, ...)`: referenced a global instead of argument r
      segs = extract.subtree(r,c(root,leave))
      cell.summary <- r$cell.info[[i]]
      cells <- rownames(cell.summary)[cell.summary$seg %in% segs$segs]
      # was a bare `gam(`; namespaced as mgcv::gam like the rest of the file
      ft = mgcv::gam( mat[gene,cells] ~ s(cell.summary[cells,]$t),gamma=gamma)
      ord <- order(cell.summary[cells,]$t)
      # derivative of the fit, normalized by the fit's dynamic range
      deriv.n <- ft$fitted.values[ord][-1]-ft$fitted.values[ord][-length(ord)]
      deriv.d <- max(ft$fitted.values[ord]) - min(ft$fitted.values[ord])
      deriv <- deriv.n/deriv.d
      # (optimum pseudotime, earliest pseudotime where derivative exceeds cutoff;
      # falls back to the last pseudotime if the cutoff is never reached)
      c(cell.summary[cells,]$t[which.max(ft$fitted.values)],
        min(c(cell.summary[cells,]$t[-1][ deriv > deriv.cutoff ],max(cell.summary[cells,]$t))) )
    }))
    c( median(gres[,1]),median(gres[,2]) )
  },mc.cores = n.cores,mc.preschedule = TRUE)))
  rownames(xx) <- genes
  colnames(xx) <- c("optimum","activation")
  return(xx)
}
##' Estimate optimum of expression and time of activation
##' @param r ppt.tree object
##' @param fork.de outcome of test.fork.genes function
##' @param mat expression matrix
##' @param root root of progenitor branch of bifurcation
##' @param leaves leaves of derivative branches of bifurcation
##' @param deriv.cutoff a first passage of derivative through cutoff 'deriv.cutoff' to predict activation timing
##' @param gamma gamma parameter in gam function
##' @param n.mapping results are averaged among n.mapping number of probabilistic cell projections
##' @param n.cores number of cores to use
##' @return table fork.de with added per gene timing of optimum and activation
##' @export
activation.fork <- function(r,fork.de,mat,root,leaves,deriv.cutoff = 0.015,gamma=1,n.mapping=1,n.cores=parallel::detectCores()/2){
  ## Add per-gene optimum/activation pseudotime estimates for branch-specific
  ## genes (state 1 and 2, as assigned by branch.specific.genes) to fork.de;
  ## genes that are not branch-specific get NA.
  cat("estimate activation patterns .. branch 1"); cat("\n")
  gg1 <- rownames(fork.de)[fork.de$state==1]
  act1 <- activation.statistics(r,mat,root,leaves[1],genes=gg1,deriv.cutoff = deriv.cutoff,gamma=gamma,n.mapping=n.mapping,n.cores=n.cores)
  cat("estimate activation patterns .. branch 2"); cat("\n")
  gg2 <- rownames(fork.de)[fork.de$state==2]
  # was `activation.statistics(r, fpm, ...)`: referenced a global expression
  # matrix instead of the `mat` argument
  act2 <- activation.statistics(r,mat,root,leaves[2],genes=gg2,deriv.cutoff = deriv.cutoff,gamma=gamma,n.mapping=n.mapping,n.cores=n.cores)
  act <- cbind( rep(NA,nrow(fork.de)),rep(NA,nrow(fork.de)) );
  rownames(act) <- rownames(fork.de); colnames(act) <- colnames(act1)
  act[gg1,] <- act1
  act[gg2,] <- act2
  return( cbind(fork.de,act) )
}
##' Extract subtree of the tree
##' @param r ppt.tree object
##' @param nodes set tips or internal nodes (bifurcations) to extract subtree
##' @return list of segments comprising a subtree.
##' @export
extract.subtree = function(r,nodes){
  ## Collect the tree segments spanned by shortest paths between the given
  ## terminal/fork nodes of the principal tree.
  g <- graph.adjacency(r$B>0,mode="undirected")
  if ( sum(!nodes%in%V(g)) > 0 ) {stop(paste("the following nodes are not in the tree:",nodes[!nodes%in%V(g)],collapse = " ") )}
  # was `V(g)[V(g)==2]` in the message, which compared vertex ids (not degrees)
  # and so reported the wrong offending nodes; "nethier" typo also fixed
  if ( sum( igraph::degree(g)==2 & (V(g) %in% nodes) ) > 0 ) {stop( paste("the following nodes are neither terminal nor fork:",nodes[nodes %in% which(igraph::degree(g)==2)],collapse=" ") )}
  vpath = get.shortest.paths(g,nodes[1],nodes)
  # union of principal points on all paths from nodes[1] to each node
  v = c()
  for (i in 1:length(vpath$vpath)){
    v=c(v,unlist(vpath$vpath[[i]]))
  }
  v=unique(v)
  segs = r$pp.info$seg[r$pp.info$PP %in% v]
  # keep only segments with at least two principal points on the paths
  segs = segs[segs %in% names(table(segs))[table(segs) > 1]]
  list( segs = unique(segs) )
}
##' Pseudotime landmarks of a bifurcation
##' @param r ppt.tree object
##' @param root root principal point of the tree
##' @param leaves a pair of leaves delimiting the two post-bifurcation branches
##' @return named vector of pseudotimes of the root, the bifurcation point and both leaves.
##' @export
fork.pt = function(r,root,leaves){
  ## Pseudotime landmarks of a bifurcation: the subtrees from the root to each
  ## leaf share the progenitor segments; the remainder are branch-specific.
  sub1 <- extract.subtree(r,c(root,leaves[1]))
  sub2 <- extract.subtree(r,c(root,leaves[2]))
  shared <- intersect(sub1$segs,sub2$segs)
  only1 <- setdiff(sub1$segs,shared)
  only2 <- setdiff(sub2$segs,shared)
  tm <- r$pp.info$time
  sg <- r$pp.info$seg
  # earliest/latest time on the shared trunk, latest time on each branch
  time.stat <- c( min(tm[sg %in% shared]),
                  max(tm[sg %in% shared]),
                  max(tm[sg %in% only1]),
                  max(tm[sg %in% only2]) )
  names(time.stat) <- c("root","bifurcation","leave 1","leave 2")
  time.stat
}
##' Predict regulatory impact (activity) of transcription factors
##' @param em matrix of expression levels
##' @param motmat matrix of target-TF scores
##' @param perm boolean, do permutations if TRUE.
##' @param n.cores number of cores to use
##' @return matrix of predited TF activities in cells.
##' @export
activity.lasso <- function(em,motmat,perm=FALSE,n.cores=1){
  ## Predict per-cell TF activities by lasso regression (no intercept) of
  ## centered expression onto centered TF-target scores.
  ## em: genes x cells expression; motmat: genes (targets) x TFs score matrix.
  ## perm: if TRUE, permute gene labels to obtain a background estimate.
  gns <- intersect(rownames(em),rownames(motmat))
  # center each gene (row) of the expression matrix
  em.norm <- em[gns,]-apply(em[gns,],1,mean)
  # center each TF column of the score matrix.
  # BUG FIX: the previous code subtracted the length-ncol vector of column
  # means directly from the matrix, which recycles it along ROWS and
  # mis-centers; transpose so each column mean is removed from its own column.
  motmat.norm <- t(t(motmat[gns,])-apply(motmat[gns,],2,mean))
  poss <- 1:nrow(em.norm)
  if (perm==TRUE) {poss <- sample(1:nrow(em.norm))}
  # cross-validated lasso per cell; coefficients at lambda.min = TF activities
  cv.lasso <- do.call(cbind, mclapply(1:ncol(em.norm),function(i){
    cv.lasso <- cv.glmnet( motmat.norm,em.norm[poss,i],alpha=1,intercept=FALSE, standardize=TRUE)
    return( coef(cv.lasso,s=cv.lasso$lambda.min)[2:(ncol(motmat)+1),1] )
  },mc.cores = n.cores))
  rownames(cv.lasso) <- colnames(motmat); colnames(cv.lasso) <- colnames(em.norm)
  return(cv.lasso)
}
##' Decompose a number by degrees of 2.
##' @param n number
decompose <- function(n){
  # Greedy binary expansion: repeatedly strip the highest power of two and
  # record its exponent. Returns exponents in decreasing order
  # (NULL for n == 0), e.g. 13 -> c(3, 2, 0).
  powers <- c()
  repeat {
    if (n <= 0) break
    k <- as.integer(log2(n))
    powers <- c(powers,k)
    n <- n - 2^k
  }
  return(powers)
}
|
/R/crestree.functions.R
|
no_license
|
hms-dbmi/crestree
|
R
| false
| false
| 61,222
|
r
|
#' @useDynLib crestree
NULL
##' Sample pptree objects using different seeds
##' @param n.samples a number of seed samplings.
##' @param seeds a vector of seeds to use. Overwrites n.samples.
##' @return a list of pptree objects
##' @export
mppt.tree <- function( ... , n.cores=parallel::detectCores()/2,n.samples=n.cores, seed=NULL,seeds=NULL) {
  ## Fit several ppt.tree objects in parallel under different RNG seeds.
  ## '...' is forwarded to ppt.tree(); 'seed' fixes the sampling of the
  ## per-tree seeds themselves; an explicit 'seeds' vector overrides n.samples.
  if(!is.null(seed)) {
    set.seed(seed);
  }
  # sample one seed per requested tree unless an explicit vector was given
  if(is.null(seeds)) {
    seeds <- round(runif(n.samples,0,.Machine$integer.max))
  }
  # namespace-qualified for consistency with bootstrap.ppt()
  parallel::mclapply(seeds,function(i) ppt.tree(..., seed=i),mc.cores=n.cores)
}
##' Sample pptree objects using bootstrap
##' @param X expression matrix of genes (rows) and cells (columns).
##' @param M number of principal points of pptree.
##' @param n.samples a number of seed samplings.
##' @param replace sampling with replacement (logical).
##' @return a list of pptree objects
##' @export
bootstrap.ppt <- function( ..., X, M=ncol(X),n.cores=parallel::detectCores()/2,n.samples=n.cores, seed=NULL,replace=TRUE) {
  ## Fit ppt.tree on bootstrap resamples of cells (columns of X).
  ## '...' is forwarded to ppt.tree(); 'replace' toggles sampling with
  ## replacement (default TRUE — spelled out instead of the reassignable 'T').
  if(!is.null(seed)) {
    set.seed(seed);
  }
  parallel::mclapply(1:n.samples,function(i) {
    # draw M cells and re-fit the tree, initializing principal points
    # at the sampled cells
    b.X <- X[,sample(1:ncol(X),M,replace=replace)];
    ppt.tree(..., X=b.X, M=M, init=b.X)
  },mc.cores=n.cores)
}
##' Calculate pairwise Pearson correlations between columns of matrices A and B
##' @export
cor.mat <- function(A,B){
  ## Pearson correlations between every column of A and every column of B,
  ## computed via cross-products of the column-centered matrices.
  Ac <- sweep(A,2,colMeans(A))
  Bc <- sweep(B,2,colMeans(B))
  crossprod(Ac,Bc)/sqrt(tcrossprod(colSums(Ac^2),colSums(Bc^2)))
}
##' Calculate pairwise euclidean distances between columns of matrices A and B
euclidean.mat <- function(A,B){
  ## Euclidean distances between columns of A and columns of B using the
  ## identity ||a - b||^2 = ||a||^2 + ||b||^2 - 2<a,b>.
  d2 <- outer(colSums(A^2),colSums(B^2),"+") - 2*crossprod(A,B)
  res <- suppressWarnings(sqrt(d2))
  # tiny negative d2 from floating-point cancellation gives NaN; zero it out
  res[is.na(res)] <- 0
  return(res)
}
##' calculate weighted correlation between columns of a matrix and a given vector
wcr <- function(X,y,w){
  ## Weighted Pearson correlation between each column of X and the vector y,
  ## with per-observation (row) weights w.
  wn <- w/sum(w)
  # weighted column means of X and weighted mean of y
  mu.x <- colSums(X*wn)
  mu.y <- sum(y*wn)
  Xc <- sweep(X,2,mu.x)
  yc <- y - mu.y
  # weighted covariance over sqrt of weighted variances
  num <- (yc*wn)%*%Xc
  den <- sqrt(colSums(Xc^2*wn)*sum(yc^2*wn))
  (num/den)[1,]
}
##' Reconstruction of the tree
##'
##' Using SimplePPT approach to model principal tree (pptree) of the data
##' @name ppt.tree
##' @param X gene (row) vs cell (columns) expression matrix
##' @param emb embedding to visualize cells and principal tree together
##' @param M number of principal points to use (more than zero, no more than number of cells)
##' @param init matrix of initial gene coordinates of principal points
##' @param plot plot or not intermediate trees
##' @param lambda penalty for the tree length, as used in SimplePPT
##' @param sigma parameter as used in SimplePPT
##' @param seed used to make initial assignment of principal points to a subset of cells
##' @param n.steps number of iteraions
##' @param metrics metrics used to calculated distances between cells or principal points. "euclidean" or "cosine"
##' @param p.power if cosine metrics used, option p.power allows to use (1-cor)^p.power (p.power=1 by default)
##' @param err.cut stop algorithm if proximity of principal points between iterations less than err.cut
##' @return pptree object
##' @export
ppt.tree <- function(X,W=NA,emb=NA,M,init=NULL,plot=TRUE,output=TRUE,lambda=1e1,sigma=0.1,seed=NULL,n.steps=50,err.cut = 5e-2,metrics="cosine",p.power=1,knn=NULL,...) {
  ## Fit a principal tree (SimplePPT) to the genes x cells matrix X.
  ## Alternates soft assignment of cells to principal points (R), a minimum
  ## spanning tree over the points (B), and a penalized update of the point
  ## coordinates (F), until displacement < err.cut or n.steps is reached.
  ## Returns (invisibly) a pptree object (score, F, B, R, L, DT, tips, forks, ...).
  if ( metrics!="euclidean" & metrics!="cosine" ){ stop("metrics parameter is neither 'euclidean' nor 'cosine'") }
  if ( M < 0 | M > ncol(X)) { stop("M should be more than zero and less or equal than the number of cells") }
  # ROBUSTNESS: emb defaults to scalar NA but is otherwise a matrix; test the
  # scalar sentinel explicitly so if() never sees a multi-element condition.
  if (!(length(emb) == 1 && is.na(emb[1]))){
    if ( sum(!colnames(X)%in%rownames(emb))>0 ) { stop("column names of gene expression matrix (X) are not consistent with row names of embedding (emb)") }
  }
  X <- as.matrix(X)
  wt <- TRUE
  # same scalar-sentinel treatment for the optional gene x cell weight matrix W
  if (length(W) == 1 && is.na(W[1])) {
    wt <- FALSE
    W <- matrix(1,nrow=nrow(X),ncol=ncol(X))
  }else{
    W <- as.matrix(W[rownames(X),colnames(X)])
  }
  # initialize principal points at a random subset of cells unless init given
  if(is.null(init)){
    if(!is.null(seed)){
      set.seed(seed);
    }
    F.mat <- X[,sample(1:ncol(X),M)]; rownames(F.mat) <- NULL; colnames(F.mat) <- NULL;
  } else {
    F.mat <- init;
  }
  # per-gene total weights, used for weighted PP-PP correlations;
  # computed BEFORE the row-normalization of W below
  rwm <- matrix(rowSums(W),nrow=nrow(F.mat),ncol(F.mat))
  W <- W/rowSums(W)*ncol(W);
  # repeat until convergence
  j=1; err=100;
  while(j <= n.steps & err > err.cut) {
    # R: soft assignment of cells (rows) to principal points (columns)
    if (metrics=="euclidean"){
      # plain or column-wise weighted euclidean distance
      if (wt==FALSE) {
        R <- euclidean.mat(F.mat,X)^p.power
      }else{
        R <- do.call(cbind,lapply(1:ncol(X),function(i) {
          sqrt(colSums(((F.mat-X[,i])^2)*W[,i]))^p.power
        }))
      }
      R <- t(exp(-R/sigma))
    }else if(metrics=="cosine"){
      # simple correlation or column-wise weighted correlation
      if (wt==FALSE) {
        cordist <- (1-cor.mat(F.mat,X))^p.power
      }else{
        cordist <- do.call(cbind,lapply(1:ncol(X),function(i) {
          (1-matWVCorr(F.mat,X[,i],W[,i]))^p.power
        }))
        colnames(cordist) <- colnames(X)
      }
      cordist <- (cordist-mean(cordist))
      R <- t(exp( -(cordist)/sigma ))
    }
    R[is.na(R) | is.nan(R)] <- 0
    # optionally keep only the knn largest responsibilities per principal point
    if (!is.null(knn)){
      R = apply(R,2,function(x){
        x[ x < sort(x,decreasing = TRUE)[knn] ] <- 0
        x
      })
    }
    R <- R/rowSums(R)
    R[is.na(R) | is.nan(R)] <- 0
    # pairwise distances between principal points
    if (metrics=="euclidean"){
      d <- euclidean.mat(F.mat,F.mat)
    }else if (metrics=="cosine"){
      if (wt==FALSE) {
        d <- 1-cor.mat(F.mat,F.mat)
      }
      else{
        d <- do.call(cbind,lapply(1:ncol(F.mat),function(i) {
          (1-matWVCorr(F.mat,F.mat[,i],rwm[,i]))^p.power
        }))
      }
      d <- abs(d)^p.power*sign(d)
    }
    # tree topology: minimum spanning tree over principal points
    bt <- minimum.spanning.tree(graph.adjacency(as.matrix(d),weighted=T,mode="undirected"))
    B <- as.matrix(get.adjacency(bt))
    D <- diag(nrow(B))*rowSums(B)
    L <- D-B  # graph Laplacian of the tree
    # system matrix of the penalized coordinate update
    M <- L*lambda + diag(ncol(R))*colSums(R)
    old.F <- F.mat;
    F.mat <- t(solve( t(M),t((X*W)%*%R) ))# slightly faster than chol2inv(chol(M)), 15%
    F.mat <- as.matrix(F.mat)
    if (plot==TRUE){plotppt(list(F=F.mat,B=B,R=R,L=L,lambda=lambda,sigma=sigma),emb,...)}
    # relative displacement of principal points between iterations.
    # BUG FIX: (i) per-gene differences are squared BEFORE summing and taking
    # the root (was sqrt(colSums(.)^2) == |colSums(.)|); (ii) err is updated
    # outside the output block, so convergence is also checked when output=FALSE.
    err = max(sqrt(colSums((F.mat-old.F)^2))/apply(F.mat,2,function(x)sqrt(sum(x^2))))
    if (output==TRUE){
      cat(j,":")
      cat("\n")
      cat(err,"\n")
    }
    j=j+1
  }
  if (plot==TRUE){plotppt(list(F=F.mat,B=B,R=R,L=L,lambda=lambda,sigma=sigma),emb,...)}
  g = graph.adjacency(B,mode="undirected");tips = V(g)[igraph::degree(g)==1];forks = V(g)[igraph::degree(g)>2]
  # objective components: data fit, soft-assignment entropy, tree-length penalty
  score = c( sum( t(1-cor.mat(F.mat,X))*R)/nrow(R), sigma/nrow(R)*sum(R*log(R),na.rm=T),lambda/2*sum(d*B))
  colnames(R) <- colnames(F.mat) <- rownames(B) <- colnames(B) <- as.character(1:nrow(B))
  # BUG FIX: 'cells' previously referenced an undefined object 'vi';
  # report the cells the tree was actually fitted on.
  invisible(list(score=score,F=F.mat,B=B,R=R,L=L,DT=d,lambda=lambda,sigma=sigma,n.steps=n.steps,metrics=metrics,M=M,cells=colnames(X),tips=tips,forks=forks))
}
##' Estimate optimal sigma parameter.
##'
##' Using cross-validation criteria to select sigma parameter.
##' @param X gene (rows) vs cell (columns) expression matrix
##' @param M number of principal points in pptree modeling
##' @param n.sample number of sampled trees per each sigma
##' @param sig.lims a vector of sigma for which cross-validation estimated
##' @param metrics similarity measure. "cosine" or "euclidean"
##' @return optimal sigma parameter
##' @export
sig.explore <- function(X,W=NA,M=as.integer(ncol(X)/2),n.sample=1,sig.lims=seq(0.01,0.2,0.03),metrics="cosine",p.power = 1,plot=TRUE,err.cut=5e-1,n.steps=20,n.cores=1){
  ## Explore the sigma bandwidth: for each candidate sigma fit an
  ## unconstrained tree (lambda=0) and score how well principal points
  ## explain the cells (per-cell maximum correlation), averaged over samples.
  # BUG FIX: is.na(X) on a matrix yields a matrix (invalid if() condition)
  # and, with X missing, errors before the intended message; test for a
  # missing argument instead.
  if (missing(X)) {stop("matrix X should be specified")}
  if (is.na(M)) {stop("number of principal points M should be specified")}
  cells <- colnames(X)
  for (i in 1:n.sample){
    cv <- do.call(rbind,mclapply(sig.lims,function(sig){
      # lambda=0: pure clustering quality, no tree-length penalty
      x <- ppt.tree(X = X,W,M=M,err.cut=err.cut,metrics=metrics,n.steps=n.steps,p.power = p.power,lambda=0,sigma=sig,plot=FALSE,output=FALSE,seed=sample(100,1))
      y <- cor(X,x$F)
      # per-cell maximum correlation with any principal point
      apply(y,1,max)
    },mc.cores = n.cores))
    # accumulate scores across samples
    if (i==1){
      cv.tot <- cv
    }
    else{
      cv.tot <- cv.tot + cv
    }
  }
  cv.tot <- cv.tot/n.sample
  # sigma maximizing the mean per-cell score
  sig.opt <- sig.lims[which.max(apply(cv.tot,1,mean))]
  if (plot==TRUE){
    par(mfrow=c(1,1),mar=c(5,5,1,1))
    plot( sig.lims, apply(cv.tot,1,mean),lty=2,lwd=2,type="l",xlab="sigma",ylab="CV",cex.lab=1.5)
    points( sig.lims, apply(cv.tot,1,mean),pch=19,cex=1)
    abline(v=sig.opt,col="red",lty=2)
  }
  return(sig.opt)
}
##' Explore lambda
##'
##' Explores multiple lambda and choose the optimal
##' @param X gene (rows) vs cell (columns) expression matrix
##' @param M number of principal points in pptree modeling
##' @param sigma fixed parameter sigma used in pptree modeling
##' @param emb embedding to visualize cells and principal tree together. If emb is given then pptrees for a range of lambda are shown
##' @export
lambda.explore <- function(X=NA,M=ncol(X),sigma=0.1,emb=NA,metrics="cosine",tips.min=2,tips.max=10,base=2,lambda.init=100,err.cut=5e-3,n.steps=40,p.power=1){
  ## Scan lambda geometrically (factor 'base') around lambda.init: decrease
  ## until a tree with >= tips.max tips appears, then increase until tips drop
  ## to <= tips.min; record per-lambda tip count, total length and an
  ## entropy-like statistic of segment lengths.
  # BUG FIX: is.na(X) on a matrix is an invalid if() condition; detect the
  # unspecified sentinel robustly.
  if (length(X) == 1 && is.na(X[1])) {stop("matrix X should be specified")}
  if (is.na(M)) {stop("number of principal points M should be specified")}
  cells <- colnames(X)
  min.reached <- FALSE;max.reached <- FALSE
  lambda <- round(lambda.init)
  tr.list <- list()
  while (min.reached==FALSE | max.reached==FALSE){
    print(paste("lambda:",round(lambda,2) ))
    # BUG FIX: the fit previously referenced an undefined object 'sig';
    # pass this function's 'sigma' argument.
    tr <- ppt.tree(X=X,M=M,lambda=lambda,sigma=sigma,err.cut=err.cut,metrics=metrics,n.steps=n.steps,p.power = p.power,plot=FALSE,output=FALSE,seed=sample(100,1))
    tr <- setroot(tr,root=as.character(tr$tips[1]))
    tr.list[[as.character(round(lambda,1))]] <- tr
    tips <- length(tr$tips);
    len <- sum(tr$pp.segments$d)
    # entropy-like statistic of segment lengths (lower per tip = more balanced)
    entropy.ind <- sum(tr$pp.segments$d*log(tr$pp.segments$d))
    # add entry to the lambda.info matrix, keeping rows ordered by lambda
    if (lambda == lambda.init){
      lambda.info <- matrix(c(lambda=lambda,tips=tips,length=len,entropy=entropy.ind),nrow=1,ncol=4)
    }else{
      if (lambda < lambda.info[1,1]){
        lambda.info <- rbind(c(lambda=lambda,tips=tips,length=len,entropy=entropy.ind),lambda.info)
      }else{
        lambda.info <- rbind(lambda.info,c(lambda=lambda,tips=tips,length=len,entropy=entropy.ind))
      }
    }
    # update lambda: decrease until tips.max reached, then increase to tips.min
    if (min.reached == FALSE & tips < tips.max){
      lambda <- lambda/base
    }else if (min.reached == FALSE & tips >= tips.max){
      min.reached <- TRUE
      lambda <- lambda.info[nrow(lambda.info),1]*base
    }else if (tips <= tips.min ){
      max.reached <- TRUE
    }else{
      lambda <- lambda.info[nrow(lambda.info),1]*base
    }
  }
  ent.per.tip <- lambda.info[,4]/lambda.info[,2]
  i.opt <- which.min(ent.per.tip)
  # visual summary: entropy-per-tip curve plus trees at optimal/flanking lambdas.
  # ROBUSTNESS: scalar-sentinel check instead of is.na() on a matrix.
  if (!(length(emb) == 1 && is.na(emb[1]))){
    par(mfrow=c(2,2))
    par(mar=c(5,5,1,1))
    plot( lambda.info[,1], ent.per.tip,log="x",lty=2,lwd=2,type="l",xlab="lambda",ylab="entropy per tip",cex.lab=1.5)
    points(lambda.info[,1], ent.per.tip,pch=19,cex=1)
    abline(v=lambda.info[i.opt,1],col="red",lty=2)
    par(mar=rep(1,4))
    lamb <- lambda.info[i.opt,1]; lamb <- round(lamb,1)
    plotppt(tr.list[[as.character(lamb)]],emb,cex.tree = 0.1,lwd.tree = 3,main=paste("lambda =",lamb))
    box(col="red",lwd=3);
    lamb <- lambda.info[median(1:i.opt),1]; lamb <- round(lamb,1)
    plotppt(tr.list[[as.character(lamb)]],emb,cex.tree = 0.1,lwd.tree = 3,main=paste("lambda =",lamb))
    lamb <- lambda.info[median((i.opt+1):nrow(lambda.info)),1]; lamb <- round(lamb,1)
    plotppt(tr.list[[as.character(lamb)]],emb,cex.tree = 0.1,lwd.tree = 3,main=paste("lambda =",lamb))
  }
  return(lambda.info)
}
##' Visualize pptree onto embedding
##'
##' Projects pptree onto embedding (e.g. tSNE)
##' @name plotppt
##' @param r - pptree object
##' @param emb - (x,y) coordinates data frame (e.g Rtsne $Y result)
##' @param F - coordinates of principal points (optional)
##' @param gene - a gene to show expression of (optional)
##' @param mat - gene vs cell expression matrix (needed if option 'gene' is activated)
##' @param pattern.cell - numeric profile of a quantity for each cell (e.g. expression of a gene or cell cycle stage)
##' @param pattern.tree - numeric profile of a quantity for each principal point (e.g. expression of a gene or cell cycle stage)
##' @param cex.main - cex of points
##' @param cex.col - color of points
##' @param cex.title - cex of title
##' @param cex.tree - cex of principal points
##' @param tips - logical, to draw indecies of tips of the tree. Usefull before usage of cleanup.branches()
##' @export
plotppt <- function(r,emb,F=NULL, gene=NULL, main=gene, mat=NULL, pattern.cell=NULL, pattern.tree=NULL,
                    cex.col=NA, tree.col = NULL,
                    cex.main=0.5, cex.title=1,
                    cex.tree=1.5,lwd.tree=1,par=TRUE,tips=FALSE,forks=FALSE,subtree=NA,pallete=NULL,...) {
  # Draw the cells of 'emb' with the principal tree overlaid; optionally color
  # cells and/or principal points by gene expression or an arbitrary pattern.
  if ( sum(!rownames(r$R)%in%rownames(emb))>0 ) { stop("cell names used for tree reconstruction are not consistent with row names of embedding (emb)") }
  # default cell color when none was supplied
  if (sum(!is.na(cex.col))==0 ) {cex.col=rep("grey70",nrow(emb)); names(cex.col) <- rownames(emb)}
  # vi: which embedding rows correspond to cells used to fit the tree
  vi = rownames(emb)%in%rownames(r$R); names(vi) <- rownames(emb)
  # project principal points into the embedding as R-weighted means of cells
  if(is.null(F)) { F <- t(t(t(emb[rownames(r$R),])%*%r$R)/colSums(r$R)) }
  # derive the per-cell pattern from a gene's expression row if requested
  if ( is.null(pattern.cell) & !is.null(gene) ){
    if (is.null(mat)) { stop("mat expression matrix should be defined together with gene parameter") }
    if (gene %in% rownames(mat) == FALSE) { stop("gene is not in mat matrix") }
    if ( sum(!rownames(r$R) %in% colnames(mat)) > 0 ) { stop("cell names used for tree reconstruction are not consistent with mat column names") }
    pattern.cell = mat[gene,rownames(r$R)]#mat[gene,rownames(r$R)]
  }
  # 1024-level palette; 'pallete' (sic, kept for interface stability) may be a
  # colorRampPalette-style generator
  if (is.null(pallete)) {pallete <- colorRampPalette(c("blue","gray50","red"))(1024)}else{pallete <- pallete(1024)}
  if ( !is.null(pattern.tree) & length(pattern.tree) != ncol(r$R) ) { stop("length of pattern.tree vector is inconsistent with cell number used for tree reconstruction") }
  # aggregate a per-cell pattern into a per-principal-point pattern via R weights
  if ( !is.null(pattern.cell) & is.null(pattern.tree) ){
    if ( sum(!names(pattern.cell) %in% rownames(r$R)) > 0 ){ stop("pattern.cell vector should contain names for all cells used to reconstruct the tree")}
    pattern.cell <- pattern.cell[rownames(r$R)] ## is it correct?
    aggr <- colSums(r$R)
    pattern.tree <- t(r$R)%*%pattern.cell[rownames(r$R)]/aggr
    pattern.tree[aggr==0] <- NA
  }
  if (is.null(tree.col)) {tree.col = "black"}
  # map the per-cell pattern onto the palette (min..max -> indices 1..1024)
  if( !is.null(pattern.cell) ){
    cex.col <- rep("black",nrow(emb)); names(cex.col) <- rownames(emb)
    cex.col[names(pattern.cell)] <- pallete[round((pattern.cell-min(pattern.cell))/diff(range(pattern.cell))*1023)+1]
    #cex.col <- colorRampPalette(c("blue","gray50","red"))(1024)[round((pattern.cell-min(pattern.cell))/diff(range(pattern.cell))*1023)+1]
  }
  # same palette mapping for the per-principal-point pattern
  if ( !is.null(pattern.tree) ){
    tree.col <- pallete[round((pattern.tree-min(pattern.tree,na.rm=T))/diff(range(pattern.tree,na.rm = T))*1023)+1]
    #r$fitting$pp.fitted[gene,]
  }
  # grey out principal points and hide cells outside the requested subtree
  if (!is.na(subtree)){
    #cex.col[rownames(r$cell.summary)][!r$cell.summary$seg %in% subtree$seg] <- "black"
    tree.col[!r$pp.info$seg %in% subtree$seg] <- "grey80"
    vi[vi==TRUE][rownames(r$cell.summary)][!r$cell.summary$seg %in% subtree$seg] <- FALSE
  }
  if ( sum(names(cex.col)%in%rownames(emb))==0 ) {stop('cex.col names do not match row names of emb')}
  # final per-cell color vector aligned with emb rows
  cols <- rep("black",nrow(emb)); names(cols) <- rownames(emb)
  cols[ intersect(names(cex.col),rownames(emb)) ] <- cex.col[intersect(names(cex.col),rownames(emb))]
  if (par==TRUE) {par(mar=rep(1,4))}
  # cells: filled points for those used in the fit, open circles otherwise;
  # faded when a tree pattern is shown so the tree colors dominate
  plot(emb,pch=ifelse(vi,19,1),cex=cex.main,col = adjustcolor(cols,ifelse(is.null(pattern.tree),1,0.1)),xlab=NA,ylab=NA,xaxt='n',yaxt='n',main=main,cex.main=cex.title,font.main=1)
  # tree edges between projected principal points
  al <- get.edgelist(graph.adjacency(r$B>0))
  al <- matrix(as.integer(al),ncol=2)
  segments(F[1,al[,1]],F[2,al[,1]],F[1,al[,2]],F[2,al[,2]],lwd=lwd.tree)
  points(t(F),pch=21,
         col=tree.col,bg=tree.col,cex=cex.tree)
  # tip labels, offset away from the adjacent principal point
  if (tips==TRUE){
    coord = do.call(rbind,lapply(r$tips,function(tip){
      x1 = F[1,tip]; y1 = F[2,tip]
      x2 = F[1,which(r$B[tip,]>0)]; y2 = F[2,which(r$B[tip,]>0)]
      xnew = x1 + 1.5*sign(x1-x2)#(1+sign(x1-x2)/0.5)*sign(x1-x2)#alpha*(x1-x2)
      ynew = y1 + 1.5*sign(y1-y2)#xnew*(y2-y1)/(x2-x1) + (y1*x2-y2*x1)/(x2-x1)
      c(xnew,ynew)
    }))
    text((coord),col=1,cex=1,adj=c(0,0),labels=r$tips,font=2);#text(t(F[, r$tips ]),col=1,cex=1.2,adj=c(0,0),labels=r$tips);
  }
  # fork (bifurcation) labels, drawn at the fork position
  if (forks==TRUE & length(r$forks) > 0){
    coord = do.call(rbind,lapply(r$forks,function(fork){
      x1 = F[1,fork]; y1 = F[2,fork]
      x2 = F[1,which(r$B[fork,]>0)]; y2 = F[2,which(r$B[fork,]>0)]
      xnew = x1 #+ 1.5*sign(x1-x2)#(1+sign(x1-x2)/0.5)*sign(x1-x2)#alpha*(x1-x2)
      ynew = y1 #+ 1.5*sign(y1-y2)#xnew*(y2-y1)/(x2-x1) + (y1*x2-y2*x1)/(x2-x1)
      c(xnew,ynew)
    }))
    text((coord),col=1,cex=1,adj=c(0,0),labels=r$forks,font=2);#text(t(F[, r$tips ]),col=1,cex=1.2,adj=c(0,0),labels=r$tips);
  }
  #legend(x="bottomright",legend=c(paste("lambda=",r$lambda[1],sep=""),paste("sigma=",r$sigma[1],sep="")))
}
##' Visualize list of pptree objects onto embedding
##'
##' Projects pptree objects onto embedding (e.g. tSNE)
##' @param rl list of pptree objects (as calculated using bootstrap.tree or mppt.tree)
##' @param emb (x,y) coordinates data frame (e.g Rtsne $Y result)
##' @param cols vector of colors for cells in emb.
##' @export
plotpptl <- function(rl,emb, cols=adjustcolor(1,alpha=0.3),alpha=1, lwd =1, ...) {
  ## Overlay a collection of fitted trees on one embedding: draw the cells
  ## once, then each tree's edges as segments between projected principal points.
  par(mfrow=c(1,1), mar = c(3.5,3.5,2.0,0.5), mgp = c(2,0.65,0), cex = 0.8);
  plot(emb,col=cols,cex=1,pch=19,xlab="",ylab="", ...)
  lapply(rl,function(tree) {
    # project principal points into the embedding as R-weighted cell averages
    pp.xy <- t(t(t(emb[rownames(tree$R),])%*%tree$R)/colSums(tree$R))
    edges <- get.edgelist(graph.adjacency(tree$B>0))
    edges <- matrix(as.integer(edges),ncol=2)
    segments(pp.xy[1,edges[,1]],pp.xy[2,edges[,1]],pp.xy[1,edges[,2]],pp.xy[2,edges[,2]],lwd=lwd,col=adjustcolor("black",alpha))
  })
}
##' Remove spurious branches of pptree
##' @param r ppt.tree result
##' @param tips.number select and retain only fixed number of tips (tips.number) that explain the most cell-cell variation.
##' @param tips.remove vector of tips indices to remove
##' @param min.branch.length remove all branches with length less or equal than min.branch.length principal points
##' @return modified ppt.tree object with cleaned up structure
##' @export
cleanup.branches <- function(r,tips.remove=NULL,min.branch.length=3) {
  # Iteratively prune short branches (and explicitly requested tips): each pass
  # removes, for every offending tip, the path from the tip up to (but not
  # including) its nearest fork; repeats until nothing qualifies.
  #colnames(r$F) <- NULL; colnames(r$B) <- rownames(r$B) <- NULL;
  repeat {
    g <- graph.adjacency(r$B>0,mode="undirected")
    leaves <- V(g)[igraph::degree(g)==1]
    branches <- V(g)[igraph::degree(g)>2]
    # distance (in principal points) from each leaf to each fork
    bd <-shortest.paths(g,v=leaves,to=branches)
    # leaves whose nearest fork is too close, plus user-requested tips
    ivi <- which(apply(bd,1,min)<=min.branch.length)
    ivi <- unique( c(ivi, which( leaves %in% tips.remove) ) )
    if(length(ivi)==0) { break }
    toremove <- c();
    for(x in ivi) {
      # path from the offending leaf to its nearest fork; the fork itself is kept
      bdp <- get.shortest.paths(g,leaves[x],to=branches[which.min(bd[x,])])
      toremove <- c(toremove,bdp$vpath[[1]][-length(bdp$vpath[[1]])])
    }
    # remove the pruned principal points from the graph (B)
    r$B <- r$B[-toremove,-toremove]
    # remove from F (principal point coordinates)
    r$F <- r$F[,-toremove];
    # remove from lRu
    # NOTE(review): r$lRu is not among the fields returned by ppt.tree() in this
    # file — presumably attached elsewhere; confirm it exists before calling.
    r$lRu <- r$lRu[,-toremove]
    # remove from R and renormalize the cell-to-PP assignments
    r$R <- r$R[,-toremove];
    r$R <- r$R/rowSums(r$R);
  }
  # relabel remaining principal points 1..M and refresh tips/forks
  colnames(r$F) <- colnames(r$B) <- rownames(r$B) <- as.character(1:nrow(r$B));
  g = graph.adjacency(r$B,mode="undirected");r$tips = V(g)[igraph::degree(g)==1];r$forks = V(g)[igraph::degree(g)>2]
  r
}
##' Orient the tree by setting up the root
##'
##' Assign root, pseudotime and segment to each principal point of the tree
##' @param r pptree object
##' @param root root principal point (plotppt(tips=TRUE,..) can be used to visualize candidate tips for a root)
##' @return modified ppt.tree object with new fields r$pp.info (estimated pseudotime and branch of principal points), r$pp.segments (segments information), r$root (root id).
##' @export
setroot <- function(r,root=NULL,plot=TRUE) {
  # Orient the tree: pseudotime of every principal point is its graph distance
  # from the chosen root; principal points are partitioned into segments
  # between non-intermediate nodes (tips and forks).
  if (is.null(root)) { stop("Assign correct root number") }
  if ( ! root %in% r$tips ) {stop("Root should be one of the tree tips")}
  # calculate time of each PP; small offsets keep all edge weights positive
  if (r$metrics=="euclidean"){d <- 1e-6+euclidean.mat(r$F,r$F)
  }else if (r$metrics=="cosine"){
    d <- abs( 1e-2 + 1-cor.mat(r$F,r$F))
  }
  # weighted tree graph; pseudotime = shortest-path distance from the root
  g <- graph.adjacency(r$B*d,weighted=T,mode="undirected")
  pp.info <- data.frame( cbind( V(g),as.double(shortest.paths(g,root,V(g))),rep(0,length(V(g))) ));
  colnames(pp.info)=c("PP","time","seg")
  # infer all segments (and put in segs) of the tree: a segment is a path
  # between two non-intermediate nodes containing no other such node
  nodes <- V(g)[ igraph::degree(g)!=2 ]
  pp.segs = data.frame(n=numeric(),from=character(),to=character(),d=numeric())
  for (i in 1:(length(nodes)-1) ){
    for (j in (i+1):length(nodes)){
      node1 = nodes[i];node2=nodes[j];
      path12 = unlist(get.shortest.paths(g,from=as.character(node1),to=as.character(node2)))
      # exactly two non-intermediate nodes on the path => a proper segment
      if ( sum(nodes %in% path12) == 2 ) {
        from = node1$name;to=node2$name
        # orient the segment away from the root (from = the endpoint nearer it)
        if ( !is.null(root)){
          path_root = shortest.paths(g,root,c(node1,node2))
          from = colnames(path_root)[which.min(path_root)]
          to = colnames(path_root)[which.max(path_root)]
        }
        pp.info[path12,]$seg = nrow(pp.segs)+1
        pp.segs=rbind(pp.segs,data.frame(n=nrow(pp.segs)+1,from=from,to=to,d=shortest.paths(g,as.character(node1),as.character(node2))[1]))
      }}}
  # one color per segment, propagated to the member principal points
  pp.segs$color=rainbow(nrow(pp.segs))
  pp.info$color=pp.segs$color[pp.info$seg]
  r$pp.segments <- pp.segs;
  r$root <- root;
  r$pp.info <- pp.info
  r
}
##' Project cells onto the principal tree
##' @param r pptree object
##' @param emb if not NULL than cell branch assignment and color code of branches are shown
##' @param n.mapping number of probabilistic mapping of cells onto the tree to use. If n.mapping=1 then likelihood cell mapping is used.
##' @return modified pptree object with new fields r$cell.summary, r$cell.info and r$img.list. r$cell.summary contains information about cells projected onto the tree, including pseudotime and branch.
##' @export
project.cells.onto.ppt <- function(r,emb=NULL,n.mapping=1) {
  # Map each cell onto an edge of the principal tree, assigning a pseudotime,
  # a segment and a color. The first mapping is deterministic (most likely
  # principal point per cell); additional mappings sample principal points
  # from the soft-assignment matrix R. Also builds, per mapping, a joint
  # graph of cells and principal points (img.list).
  if (is.null(r$root)) { stop("Assign root first") }
  g <- graph.adjacency(r$B,weighted=TRUE,mode="undirected")
  df.list <- pblapply(1:n.mapping,function(nm){
    #print(paste("mapping",nm))
    # assign nearest principal point for each cell
    # (probabilistic draw from R for mappings after the first)
    if (nm > 1){
      rrm = apply(r$R,1,function(v){sample(1:length(v),size=1,prob=v/sum(v))})
    }else{
      rrm <- apply(r$R,1,which.max)
    }
    # identify edge onto which each cell lies
    df <- do.call(rbind,lapply(1:ncol(r$R),function(v) {
      vcells <- which(rrm==v);
      if(length(vcells)>0) {
        # determine which edge the cells belong to among neighboring PPs
        nv <- as.integer(neighborhood(g,1,nodes=c(v))[[1]])
        nvd <- shortest.paths(g,v,nv)
        # for each cell, the best-scoring neighbor PP defines the edge
        spi <- apply(r$R[vcells,nv[-1],drop=FALSE],1,which.max)+1
        ndf <- data.frame(cell=vcells,v0=v,v1=nv[spi],d=nvd[spi])
        p0 <- r$R[vcells,v]
        p1 <- unlist(lapply(1:length(vcells),function(i) r$R[vcells[i],ndf$v1[i]] ))
        # random position along the edge; NOTE(review): 'f' is computed but
        # the pseudotime below uses 'alpha' directly — confirm intended.
        alpha <- runif(length(vcells))
        f <- abs( (sqrt(alpha*p1^2+(1-alpha)*p0^2)-p0)/(p1-p0) )
        # pseudotime: linear interpolation between the edge endpoints
        ndf$t <- r$pp.info[ndf$v0,]$time+(r$pp.info[ndf$v1,]$time-r$pp.info[ndf$v0,]$time)*alpha
        # segment/color inherited from the non-fork endpoint of the edge
        ndf$seg <- ifelse( r$pp.info[ndf$v0,]$PP %in% r$forks,r$pp.info[ndf$v1,]$seg,r$pp.info[ndf$v0,]$seg)
        ndf$color <- ifelse( r$pp.info[ndf$v0,]$PP %in% r$forks,r$pp.info[ndf$v1,]$color,r$pp.info[ndf$v0,]$color)
        ndf
      } else {
        return(NULL);
      }
    }))
    # canonical edge id "min|max" and cells ordered by pseudotime
    df$edge <- apply(df,1,function(x) paste(sort(as.numeric(x[c(2,3)])),collapse="|"))
    df <- df[order(df$t,decreasing=FALSE),]
    ### assign data from ndf table of z.ensemble1
    #ndf <- z.ensemble1[[nm]]$ndf[,1:5]
    #ndf[,6:8] <- z.ensemble1[[nm]]$cell.pseudotime[match(z.ensemble1[[nm]]$ndf$cell,z.ensemble1[[nm]]$cell.pseudotime$cell),2:4]
    #colnames(ndf)[6] <- "t"
    #rownames(ndf) <- nc.cells[ndf$cell]
    #df <- ndf
    #df <- df[order(df$t,decreasing=FALSE),]
    return(df)
  })
  # generate graph of cells and PPs for each mapping: every tree edge is
  # subdivided by the cells lying on it, ordered along the edge
  img.list <- pblapply(df.list,function(df){
    img <- g#graph.adjacency(r$B,weighted=TRUE,mode="undirected")
    img <- set.vertex.attribute(img,"type",value="pp")
    for(e in unique(df$edge)){
      ii <- which(df$edge==e);
      vc <- as.integer(strsplit(e,'\\|')[[1]]);
      # orient insertion from the earlier-pseudotime endpoint
      imin <- which.min(r$pp.info$time[vc])
      #print(imin)
      #imin <- 1
      #print(c(imin,3-imin))
      # insert the cells
      if (imin==1){
        img <- add_vertices(img,length(ii),type="cell",name=paste('c',df[ii,]$cell,sep=''))
      }else{
        img <- add_vertices(img,length(ii),type="cell",name=paste('c',rev(df[ii,]$cell),sep=''))
      }
      # replace the original edge with a chain pp - c... - pp whose weights
      # split the original edge weight by the cells' relative positions
      tw <- 1-E(g,path=c(vc[1],vc[2]))$weight
      img <- delete_edges(img,e)
      if (imin==1){
        img <- add_edges(img,c(vc[1],rep(paste0('c',df$cell[ii]),each=2),vc[2]), weight=1-tw*diff(c(0,df$t[ii],1)) )
      }else{
        img <- add_edges(img,c(vc[1],rep(paste0('c',rev(df$cell[ii])),each=2),vc[2]), weight=1-tw*diff(c(0,df$t[ii],1)) )
      }
    }
    return(img)
  })
  # per-cell pseudotime standard deviation across mappings (NA if only one)
  if (n.mapping > 1) {
    df.sd <- apply(do.call(cbind,lapply(df.list,function(el)el[rownames(r$R),]$t)),1,sd)
  }else {df.sd <- NA}
  df.summary <- cbind(df.list[[1]],t.sd=df.sd)
  # optional visualization of cells colored by their assigned segment
  if (!is.null(emb)){
    cols <- adjustcolor(df.summary[rownames(r$R),]$color,0.2); names(cols) <- rownames(r$R)
    plotppt(r,emb,cex.col=cols, tree.col = r$pp.info$color,cex.main=0.5, cex.title=1,cex.tree=1,lwd.tree=1)
  }
  r$cell.summary <- df.summary
  r$cell.info <- df.list
  r$img.list <- img.list
  #r$mg <- mg;
  return(invisible(r))
}
##' Determine a set of genes significantly associated with the tree
##' @param r pptree object
##' @param X expression matrix of genes (row) vs cells (column)
##' @param fdr.cut FDR (Benjamini-Hochberg adjustment) cutoff on significance; significance if FDR < fdr.cut
##' @param A.cut amplitude cutoff on significance; significance if A > A.cut
##' @param st.cut cutoff on stability (fraction of mappings with significant (fdr,A) pair) of association; significance, significance if A > A.cut
##' @param summary show plot of amplitude vs FDR of each gene's association. By default FALSE.
##' @param subtree restrict statistical assesment to a subtree
##' @param fdr.method a method to adjust for multiple testing, passed to p.adjust (default is p.adjust's default, Holm). Alternatively, "BH" can be used.
##' @return modified pptree object with a new field r$stat.association that includes pvalue, amplitude, fdr, stability and siginificane (TRUE/FALSE) of gene associations
##' @export
test.associated.genes <- function(r,X,n.map=1,n.cores=(parallel::detectCores()/2),spline.df=3,fdr.cut=1e-4,A.cut=1,st.cut=0.8,summary=FALSE,subtree=NA,fdr.method=NULL, ...) {
  ## Per gene, test whether expression depends on tree position: per-segment
  ## spline fits are compared with a constant model via an F-test; genes must
  ## also pass an amplitude cutoff (A.cut) and be stable across mappings (st.cut).
  if (is.null(r$root)) {stop("assign root first")}
  if (is.null(r$cell.summary) | is.null(r$cell.info)) {stop("project cells onto the tree first")}
  X <- X[,intersect(colnames(X),rownames(r$cell.summary))]
  if (sum(!colnames(X) %in% rownames(r$cell.summary)) > 0) {stop( paste("Expression matrix X contains cells not mapped onto the tree, e.g. cell",colnames(X)[!colnames(X) %in% rownames(r$cell.summary)][1]) )}
  if (n.map < 0 | n.map > length(r$cell.info)) {stop("n.map should be more than 0 and less than number of mappings")}
  genes <- rownames(X)
  # optionally restrict the test to a subtree's segments
  subseg <- unique(r$cell.summary$seg);
  if (!is.na(subtree)) {subseg <- subtree$segs}
  # per-mapping association statistics
  gtl <- lapply(1:n.map,function(ix){
    print(paste("mapping",ix,"of",n.map))
    if (n.map==1){ inf <- r$cell.summary}else{
      inf <- r$cell.info[[ix]]
    }
    gt <- do.call(rbind,mclapply(genes,function(gene) {
      sdf <- inf[inf$seg%in%subseg,]; sdf$exp <- X[gene,rownames(sdf)]
      # per-segment smooth fits of expression vs pseudotime.
      # BUG FIX: the gam() argument was misspelled 'familly' (silently
      # swallowed by '...'); gaussian() is gam's default so fits are
      # unchanged, but the intent is now actually passed.
      mdl <- tapply(1:nrow(sdf),as.factor(sdf$seg),function(ii) {
        # TODO: adjust df according to branch length?
        m <- mgcv::gam(exp~s(t,k=spline.df),data=sdf[ii,],family=gaussian())
        rl <- list(d=deviance(m),df=df.residual(m))
        rl$p <- predict(m);
        return(rl)
      })
      mdf <- data.frame(do.call(rbind,lapply(mdl,function(x) c(d=x$d,df=x$df))))
      # background (constant) model; residual df corrected for multiple segments
      odf <- sum(mdf$df)-nrow(mdf);
      m0 <- mgcv::gam(exp~1,data=sdf,family=gaussian())
      if (sum(mdf$d)==0){ fstat <- 0}else{
        fstat <- (deviance(m0) - sum(mdf$d))/(df.residual(m0)-odf)/(sum(mdf$d)/odf)
      }
      pval <- pf(fstat,df.residual(m0)-odf,odf,lower.tail = FALSE);
      # amplitude: range of the fitted values across all segments
      pr <- unlist(lapply(mdl,function(x) x$p))
      return(c(pval=pval,A=max(pr)-min(pr)))
    },mc.cores=n.cores,mc.preschedule=TRUE))
    gt <- data.frame(gt); rownames(gt) <- genes
    # multiple-testing correction (p.adjust's default method if none given)
    if (is.null(fdr.method)) {
      gt$fdr <- p.adjust(gt$pval)
    }else{
      gt$fdr <- p.adjust(gt$pval,method=fdr.method)
    }
    gt
  })
  # aggregate across mappings: median statistics + stability (fraction significant)
  stat.association <- data.frame(cbind( apply(do.call(cbind,lapply(gtl,function(gt)gt$pval)),1,median),
                                        apply(do.call(cbind,lapply(gtl,function(gt)gt$A)),1,median),
                                        apply(do.call(cbind,lapply(gtl,function(gt)gt$fdr)),1,median),
                                        apply(do.call(cbind,lapply(gtl,function(gt) gt$fdr < fdr.cut & gt$A > A.cut )),1,sum)/length(gtl)
  ))
  rownames(stat.association) <- genes; colnames(stat.association) <- c("pval","A","fdr","st")
  stat.association$sign <- stat.association$fdr < fdr.cut & stat.association$A > A.cut & stat.association$st > st.cut
  # optional summary plot: amplitude vs FDR, significant genes in red
  if (summary==TRUE){
    par(mfrow=c(1,1),mar=c(4.5,4.5,1,1))
    plot(stat.association$A,stat.association$fdr,xlab="Amplitude",ylab="FDR, log",log="y",pch=19,cex=0.5,
         col=adjustcolor( ifelse(stat.association$sign==TRUE,"red","black") ,0.4),cex.lab=1.5)
    legend("bottomleft", legend=c( paste("DE,",sum(stat.association$sign)), paste("non-DE,",sum(!stat.association$sign))),
           col=c("red", "black"), bty="n",pch=19,cex=1,pt.cex=1)
  }
  # attach to the tree object for whole-tree tests; return the bare table
  # for subtree-restricted tests
  if (is.na(subtree)){
    r$stat.association <- stat.association
    return(r)
  }else{
    return(stat.association)
  }
}
##' Model gene expression levels as a function of tree positions.
##' @param r pptree object
##' @param X expression matrix of genes (rows) vs cells (columns)
##' @param n.map number of probabilistic cell-to-tree mappings to use
##' @param method method of modeling. Currently only splines with option 'ts' are supported.
##' @param knn use expression averaging among knn cells
##' @param gamma stringency of penalty.
##' @return modified pptree object with new fields r$fit.list, r$fit.summary and r$fit.pattern. r$fit.pattern contains matrix of fitted gene expression levels
##' @export
fit.associated.genes <- function(r,X,n.map=1,n.cores=parallel::detectCores()/2,method="ts",knn=1,gamma=1.5) {
  ## Fit tree-dependent expression models for the significantly associated
  ## genes, average the fits across mappings, and classify fitted patterns.
  if (is.null(r$root)) {stop("assign root first")}
  if (is.null(r$cell.summary) | is.null(r$cell.info)) {stop("project cells onto the tree first")}
  X <- X[,intersect(colnames(X),rownames(r$cell.summary))]
  if (sum(!colnames(X) %in% rownames(r$cell.summary)) > 0) {stop( paste("Expression matrix X contains cells not mapped onto the tree, e.g. cell",colnames(X)[!colnames(X) %in% rownames(r$cell.summary)][1]) )}
  if (n.map < 0 | n.map > length(r$cell.info)) {stop("n.map should be more than 0 and less than number of mappings")}
  if ( is.null(r$stat.association) ) {stop("identify significantly associated genes using test.associated.genes()")}
  # restrict fitting to genes flagged significant by test.associated.genes()
  genes <- intersect(rownames(X),rownames(r$stat.association)[r$stat.association$sign])
  # dispatch on the modeling method; one fitted matrix per mapping
  fit.list <- switch(method,
                     ts = fit.ts(r,X[genes,],n.map,n.cores,gamma,knn),
                     sf = t.fit.sf(r,X[genes,],n.map,n.cores,gamma),
                     av = t.fit.av(r,X[genes,],n.map,n.cores),
                     stop("please choose correct method name"))
  # elementwise average of the per-mapping fits
  avg <- matrix(0,nrow=nrow(fit.list[[1]]),ncol=ncol(fit.list[[1]]))
  rownames(avg) <- rownames(fit.list[[1]]); colnames(avg) <- colnames(fit.list[[1]])
  for (k in seq_along(fit.list)){
    avg <- avg + fit.list[[k]]
  }
  avg <- avg/length(fit.list)
  r$fit.list <- fit.list
  r$fit.summary <- avg
  # classify.genes() reads r$fit.summary, which is set just above
  r$fit.pattern <- classify.genes(r)
  print(table(r$fit.pattern))
  return(r)
}
##' Model gene expression levels as a branching spline function of tree positions.
##' @param r pptree object
##' @param X expression matrix of genes (rows) vs cells (columns)
##' @param n.map number of probabilistic cell-to-tree mappings to use
##' @param n.cores number of cores to use
##' @param gamma stringency of penalty.
##' @param knn use expression averaging among knn cells
##' @return list (one element per mapping) of matrices of fitted gene expression levels along the tree
##' @export
fit.ts <- function(r,X,n.map,n.cores=parallel::detectCores()/2,gamma=1.5,knn=1) {
# reference mapping: results of all mappings are predicted at its pseudotimes
ix <- 1
img = r$img.list[[ix]];
root = r$root
tips = r$tips[r$tips != root]
# reference branch table: for each root->tip path, the cells lying on that branch
branches.ll = do.call(rbind,lapply(tips, function(tip){
b = get.shortest.paths(img,from=as.character(root),to=as.character(tip))$vpath[[1]]$name
# keep only cell vertices (names prefixed "c"), dropping principal points
b = b[grepl("^c",b)]
ind <- paste('c',r$cell.info[[ix]]$cell,sep="") %in% b
cbind( ids=rownames(r$cell.info[[ix]])[ind], r$cell.info[[ix]][ind,],branch=rep( which(tips==tip),length(b)) )
}))
# calculate knn for each vertex along the tree
for (v in r$pp.info$PP){img <- delete_vertices(img,as.character(v))}
dst.tree <- distances(img,v=V(img),to=V(img));
dst.tree <- dst.tree[ paste("c",r$cell.summary$cell,sep=""),paste("c",r$cell.summary$cell,sep="") ]
rownames(dst.tree) <- colnames(dst.tree) <- rownames(r$cell.summary)
# binarize: 1 if two cells are within knn steps of each other along the tree
dst.tree[dst.tree <= knn] <- 1; dst.tree[dst.tree > knn] <- 0
gtl <- lapply(1:n.map,function(ix){
print(paste("fit gene expression for mapping",ix))
img = r$img.list[[ix]];
root = r$root
tips = r$tips[r$tips != root]
# branch table for the current mapping (same construction as branches.ll)
branches = do.call(rbind,lapply(tips, function(tip){
b = get.shortest.paths(img,from=as.character(root),to=as.character(tip))$vpath[[1]]$name
b = b[grepl("^c",b)]
ind <- paste('c',r$cell.info[[ix]]$cell,sep="") %in% b
cbind( ids=rownames(r$cell.info[[ix]])[ind], r$cell.info[[ix]][ind,],branch=rep( which(tips==tip),length(b)) )
}))
genes <- rownames(X)
gt <- do.call(rbind,mclapply(genes,function(gene) {
# per branch: fit a penalized ("ts" basis) spline of expression vs pseudotime
expr.fitted <- unlist(lapply(unique(branches$branch),function(br){
branches1 <- branches[branches$branch==br,]
expr <- X[gene,as.character(branches1$ids)]
tt <- branches1$t
gene.fit1 = mgcv::gam( expr ~ s(tt,bs="ts"),gamma=gamma)
# predict at the reference mapping's pseudotimes so mappings are comparable
td <- data.frame(matrix(branches.ll[branches.ll$branch==br,]$t,nrow=sum(branches.ll$branch==br)));
rownames(td) <- branches.ll[branches.ll$branch==br,]$ids; colnames(td) <- "tt"
predict(gene.fit1,td )
}))
# knn smoothing, where knns are estimated along the tree
expr.fitted <- (dst.tree[names(expr.fitted),names(expr.fitted)] %*% expr.fitted) / (apply(dst.tree[names(expr.fitted),names(expr.fitted)],1,sum))
expr.fitted <- expr.fitted[,1]
# cells shared by several branches appear multiple times; keep first occurrence
return(expr.fitted[!duplicated(names(expr.fitted))])
},mc.cores = n.cores))
rownames(gt) <- genes
return(gt)
})
return(gtl)
}
##' Classify tree-associated genes
##'
##' Tree-associated genes are classified into branch-monotonous, transiently expressed and
##' having complex patterns, based on the number of interior local optima of the fitted
##' expression profile across all tree segments.
##' @param r pptree object with fitted expression (r$fit.summary)
##' @param n.cores number of cores (currently unused; kept for interface compatibility)
##' @param cutoff expression in a local optimum should be higher/lower than both flanking values by cutoff (forwarded to loc.opt).
##' @return vector of predicted classification for fitted genes.
##' @export
classify.genes <- function(r,n.cores=parallel::detectCores()/2,cutoff=0.2) {
  if (is.null(r$fit.summary)) {stop("fit gene expression to the tree first")}
  # per segment, count interior local optima of each gene's fitted profile
  a <- do.call(cbind,lapply(unique(r$cell.summary$seg),function(seg){
    seg.summary <- r$cell.summary[r$cell.summary$seg==seg,]
    tt <- r$fit.summary[,rownames(seg.summary)][,order(seg.summary$t)]
    apply(tt,1,function(x) {
      # bug fix: `cutoff` was accepted but never forwarded, so loc.opt's
      # default (0.1) was silently used instead of the documented 0.2
      res <- loc.opt(x,cutoff=cutoff)
      if ( sum(!is.na(res))==0 ){0}else{nrow(res)}
    })
  }))
  # total number of optima over all segments determines the pattern class
  apply(a,1,function(v){
    if (sum(v)==0) {return("branch-monotonous")}else
    if (sum(v)==1) {return("transiently expressed")}else
    if (sum(v)>1) {return("complex patterns")}
  })
}
##' Identify all local optima for a time series data
##' @name loc.opt
##' @param series numeric vector ordered in time
##' @param cutoff a local optimum must differ from both flanking reference points by more than cutoff; eliminates small optima that are likely artifacts
##' @return data frame with columns type ("min"/"max") and index (position in series), or NA when no optimum passes the cutoff.
##' @export
loc.opt <- function(series,cutoff=0.1){
  # fewer than 3 points cannot contain an interior optimum.
  # bug fix: such inputs previously fell through to index juggling that either
  # errored ("NAs are not allowed in subscripted assignments") or misbehaved
  if (length(series) < 3) { return(NA) }
  dx <- diff(series)
  # candidate optimum wherever consecutive differences change sign
  cand <- (-dx[1:(length(dx)-1)]*dx[2:length(dx)]) > 0
  # remove multiple rupture-related optima: of two adjacent candidates drop the first.
  # guarded for length-1 cand (series of length 3), where the original indexing broke
  if (length(cand) > 1){
    cand[1:(length(cand)-1)][cand[1:(length(cand)-1)]&cand[2:length(cand)]] <- FALSE
  }
  if (sum(cand)>0){
    # include both endpoints as reference points flanking the candidates
    cand <- c(TRUE,cand,TRUE)
    ds <- diff(series[cand])
    opt.type <- unlist(lapply(1:(sum(cand)-2),function(i){
      if (ds[i] > cutoff & (-ds[i+1]) > cutoff ) {
        "max"
      }else if (ds[i] < -cutoff & (-ds[i+1]) < -cutoff ){
        "min"
      }else{
        NA
      }
    }))
    if ( sum(!is.na(opt.type))>0 ){
      opt.inf <- data.frame(cbind( opt.type[!is.na(opt.type)],as.numeric(which(cand))[2:(sum(cand)-1)][!is.na(opt.type)]),stringsAsFactors=FALSE)
      colnames(opt.inf) <- c("type","index"); opt.inf$index <- as.numeric(opt.inf$index)
      return(opt.inf)
    }
  }
  return(NA)
}
##' Visualize branching trajectories of a particular gene.
##' @param r pptree object
##' @param gene gene name
##' @param X expression matrix with genes as rows and cells as columns; a named per-cell expression vector for one gene also works.
##' @param cex.cell size of cells
##' @param cex.lab size of axis titles
##' @param cex.axis size of axis labels
##' @param cex.main size of title showing a gene name
##' @param lwd.erbar width of error bars for uncertainty of cell pseudotime assignment
##' @param lwd.t1 width of the main branching trajectory
##' @param lwd.t2 width of ensemble trajectories, typically thinner than that of main trajectory.
##' @param switch.point optional pseudotime value marked by a vertical line
##' @param subtree visualise trajectory along a given subtree
##' @export
visualise.trajectory = function(r,gene,X,cex.cell=0.3,cex.lab=2,cex.axis=1.5,cex.main=1,lwd.erbar=0.0,lwd.t1=3,lwd.t2=0.2,switch.point=NA,subtree=NA){
# accept either an expression matrix or a per-cell expression vector
if (is.null(dim(X))){
Xgene <- X
}else{
if ( gene %in% rownames(X) == FALSE ) {stop("gene is not in matrix X")}
Xgene <- X[gene,]
}
Xgene <- Xgene[intersect(names(Xgene),rownames(r$cell.summary))]
if ( sum(!names(Xgene)%in%rownames(r$cell.summary)) > 0 ) {stop("matrix/vector X does not contain some cells used to recostruct tree")}
segs <- unique(r$cell.summary$seg)
# restrict considered segments to subtree if given
if (!is.na(subtree)){
segs <- intersect(segs,subtree$seg)
}
par(mar=c(5,5,3,1))
# draw cells
ind <- r$cell.summary$seg%in%segs
plot(r$cell.summary$t[ind],Xgene[rownames(r$cell.summary)][ind],type = "n",
xlab="pseudotime",ylab="expression",cex.axis=cex.axis,cex.lab=cex.lab,main=gene,font.main=3,cex.main=cex.main)
grid(5,5,lwd=1.5)
points(r$cell.summary$t[ind],Xgene[rownames(r$cell.summary)][ind],col=adjustcolor(r$cell.summary$color[ind],0.5),pch=19,cex=cex.cell)
# draw error bars of pseudotime uncertainty if given
if ( sum(!is.na(r$cell.summary$t.sd))>0 ){
segments( r$cell.summary$t[ind]-r$cell.summary$t.sd[ind], Xgene[rownames(r$cell.summary)][ind], r$cell.summary$t[ind]+r$cell.summary$t.sd[ind], y1 = Xgene[rownames(r$cell.summary)][ind],
col=adjustcolor(r$cell.summary$color[ind],0.1),lwd=lwd.erbar)
}
# draw ensemble of sampled trajectories if available (first fit is the consensus, drawn below)
if (length(r$fit.list)>1){
for (j in 2:length(r$fit.list)){
for(seg in segs ){
ind <- r$cell.summary$seg == seg
t.ord <- order(r$cell.summary$t[ind])
lines(r$cell.summary$t[ind][t.ord],r$fit.list[[j]][gene,rownames(r$cell.summary)][ind][t.ord],
col=adjustcolor(r$cell.summary$color[ind][t.ord],0.4),lwd=lwd.t2)
}
}
}
# draw likelihood trajectory (consensus fit), one line per segment
for(seg in segs ){
ind <- r$cell.summary$seg == seg
t.ord <- order(r$cell.summary$t[ind])
lines(r$cell.summary$t[ind][t.ord],r$fit.summary[gene,rownames(r$cell.summary)][ind][t.ord],
col=r$cell.summary$color[ind][t.ord],lwd=lwd.t1)
}
# mark switch point with a vertical line if given
if (!is.na(switch.point)){
abline(v=switch.point,lty=1,lwd=3,col=adjustcolor("black",0.5))
}
# connect boundary cells from different branches
g <- r$img.list[[1]]
for (seg in segs){
ind <- r$cell.summary$seg==seg
# c2: earliest cell of the segment; search back along the root path for its predecessor c1
c2.name <- rownames(r$cell.summary[ind,])[which.min(r$cell.summary$t[ind])]
c2 <- r$cell.summary$cell[ind][which.min(r$cell.summary$t[ind])]
c2.seg <- r$cell.summary$seg[ind][which.min(r$cell.summary$t[ind])]
c2.path <- names(shortest_paths(g,r$root,paste("c",c2,sep="") )$vpath[[1]])
c2.path <- c2.path[unlist(lapply(1:length(c2.path),function(i) grepl("c",c2.path[i])))]
c2.path <- as.numeric(unlist(lapply(strsplit(c2.path,"c"),function(x)x[2])))
ind <- r$cell.summary$cell %in% c2.path & r$cell.summary$cell != c2
if (sum(ind)>0){
# c1: latest cell on the path before c2; join the two fitted values
c1.name <- rownames(r$cell.summary[ind,])[which.max(r$cell.summary$t[ind])]
segments(r$cell.summary[c(c1.name),]$t,r$fit.summary[gene,c(c1.name)],r$cell.summary[c(c2.name),]$t,r$fit.summary[gene,c(c2.name)],
col=r$cell.summary[c2.name,]$color,lwd=lwd.t1)
}
}
}
##' Visualize clusters of genes using heatmap and consensus tree-projected pattern.
##' @param r pptree object
##' @param emb cells embedding
##' @param clust a vector of cluster numbers named by genes; if NA, genes are clustered de novo into clust.n clusters
##' @param clust.n number of clusters to cut when clust is not supplied
##' @param n.best show n.best the most representative genes on the heatmap for each cluster
##' @param best.method use method to select the most representative genes. Current options: "pca" selects genes with the highest loading on pc1 component reconstructed using genes from a cluster, "cor" selects genes that have the highest average correlation with other genes from a cluster.
##' @param cex.gene size of gene names
##' @param cex.cell size of cells on embedding
##' @param cex.tree width of line of tree on embedding
##' @param subtree visualize clusters for a given subtree
##' @param reclust whether to reorder genes inside individual clusters on heatmap according to hierarchical clustering using Ward linkage and 1-Pearson as a distance between genes.
##' @export
visualise.clusters <-function(r,emb,clust=NA,clust.n=5,n.best=4,best.method="cor",cex.gene=1,cex.cell=0.1,cex.tree=2,subtree=NA, reclust=TRUE){
  # scalar flag: was a clustering supplied?
  # bug fix: `!is.na(clust)` is a vector for vector-valued clust and errors in if()
  clust.given <- !(length(clust) == 1 && all(is.na(clust)))
  if ( clust.given && sum(!names(clust)%in%rownames(r$fit.summary))>0) {stop( paste("Expression is not fitted for",sum(!names(clust)%in%rownames(r$fit.summary)),"genes" ))}
  if (best.method!="pca" & best.method!="cor") {stop(paste("incorrect best.method option",best.method) )}
  # order cells by (mean segment pseudotime, cell pseudotime)
  tseg <- unlist(lapply( unique(r$cell.summary$seg),function(seg)mean(r$cell.summary$t[r$cell.summary$seg==seg]))); names(tseg) <- unique(r$cell.summary$seg)
  tseg <- tseg[as.character(r$cell.summary$seg)]
  # bug fix: previously read global `ppt$fit.summary` instead of argument `r`
  gns <- rownames(r$fit.summary)
  if (clust.given){gns <- names(clust)}
  emat <- r$fit.summary[gns,rownames(r$cell.summary)][,order(tseg,r$cell.summary$t)]
  # z-score each gene's fitted profile
  emat <- t(apply(emat,1,function(x) (x-mean(x))/sd(x) ))
  cols <- r$cell.summary$col[order(tseg,r$cell.summary$t)]
  subcells = TRUE; if (!is.na(subtree)){subcells <- r$cell.summary$seg[order(tseg,r$cell.summary$t)]%in%subtree$seg}
  # cluster genes if no clustering was provided
  if (!clust.given){
    gns <- rownames(emat)
    dst.cor <- 1-cor(t(emat[gns,]))
    hcl <- hclust(as.dist(dst.cor),method="ward.D")
    clust <- cutree(hcl,clust.n)
  }
  k <- length(unique(clust))
  # pick up to n.best representative genes per cluster
  genes.show <- unlist(lapply(1:k,function(i){
    n <- n.best; if ( sum(clust==i) < n) {n <- sum(clust==i)}
    if (best.method=="pca"){
      pr <- pca(t(emat[clust==i,]),center = TRUE, scale = "uv")
      pr.best <- rep(i,n); names(pr.best) <- names(sort(pr@loadings[,1],decreasing = T))[1:n]
      return(pr.best)
    }else if (best.method=="cor"){
      cr <- cor(t(emat[clust==i,]))
      cr.best <- rep(i,n); names(cr.best) <- names(sort(apply(cr,1,mean),decreasing = TRUE))[1:n]
      return(cr.best)
    }
  }))
  # panel layout per cluster: tree pattern + branch color bar + heatmap + gene labels
  nf <- layout( matrix(unlist(lapply(1:k,function(i) 5*(i-1)+c(1,2,3,1,4,5))),2*k,3, byrow=T),respect = T,width=c(1,1,0.1),heights=rep(c(0.1,1),k) )
  for (cln in 1:k){
    # recluster genes inside module if necessary
    gns <- names(clust)[clust==cln]
    if (reclust==TRUE){
      dst.cor <- 1-cor(t(emat[gns,]))
      hclust.cor <- hclust(as.dist(dst.cor),method="ward.D")
      gns <- gns[hclust.cor$order]
    }
    # draw cluster-wise pattern
    par(mar=c(0.3,0.1,0.0,0.2))
    plotppt(r,emb,pattern.cell = apply(emat[clust==cln,],2,mean),cex.main=cex.cell,cex.tree = cex.tree,lwd.tree = 0.1,subtree=subtree)
    # draw color-scheme for branches
    par(mar=c(0.0,0.0,0.0,0))
    col.ind <- 1:length(unique(cols)); names(col.ind) = unique(cols)
    image( t(rbind( col.ind[cols[subcells]] )),axes=FALSE,col=(unique(cols[subcells])) )
    box()
    par(mar=c(0.0,0.0,0.0,0))
    plot(0.2,0.2,ylim=c(0.05,0.95),xlim=c(0,1),xaxt='n',yaxt='n',pch='',ylab='',xlab='',bty='n')
    par(mar=c(0.3,0.0,0.0,0))
    image( t(emat[gns,subcells]),axes=FALSE,col=colorRampPalette(c("blue","grey80","red"))(n = 60))
    box()
    gns[! gns %in% names(genes.show)[genes.show==cln] ] <- ""
    # spread label coordinates of shown genes via quadratic programming to avoid overlap
    coord <- which( names(clust)[clust==cln] %in% names(genes.show)[genes.show==cln] )/sum(clust==cln)
    del <- 1/(sum(genes.show==cln))
    Dmat <- diag(1,length(coord),length(coord))
    dvec <- rep(0,length(coord))
    Amat <- matrix(0,nrow= 3*length(coord)-1,ncol=length(coord)); bvec = rep(0,3*length(coord)-1)
    for (i in 1:(length(coord)-1)){Amat[i,i] <- -1; Amat[i,i+1] <- 1; bvec[i] <- del - (coord[i+1]-coord[i])}
    for (i in 1:(length(coord))){j <- i+length(coord)-1; Amat[j,i] <- 1; bvec[j] <- -coord[i]+0 }
    for (i in 1:(length(coord))){j <- i+2*length(coord)-1; Amat[j,i] <- -1; bvec[j] <- coord[i]-1}
    qp = solve.QP(Dmat, dvec, t(Amat), bvec, meq=0, factorized=FALSE)
    coord_new = qp$solution + coord
    par(mar=c(0.3,0,0,0))
    plot(0.2,0.2,ylim=c(0.0,1),xlim=c(0,1),xaxt='n',yaxt='n',pch='',ylab='',xlab='',bty='n')
    axis(side = 4, at = coord_new,lwd=0.0,lwd.ticks=0,font=3,cex.axis=cex.gene,labels=gns[gns!=""],tck=0.0,hadj=0.0,line=-0.9,las=1)
    # leader lines from original to adjusted label positions
    for (i in 1:length(coord)){
      arrows( 0,coord[i],1,coord_new[i],length=0.0,lwd=0.7 )
    }
  }
}
##' Determine genes differentially upregulated after bifurcation point
##' @param r pptree object
##' @param mat expression matrix of genes (rows) and cells (columns)
##' @param matw optional matrix of per-gene, per-cell weights matching mat
##' @param root a principal point of fork root
##' @param leaves vector of two principal points of fork leaves
##' @param genes optional set of genes to estimate association with fork
##' @param n.mapping number of probabilistic cell-to-tree projections to use for robustness
##' @param n.mapping.up number of probabilistic cell-to-tree projections to estimate the amount of upregulation relative to progenitor branch
##' @param n.cores number of cores to use
##' @return summary statistics of size effect and p-value of association with bifurcation fork.
##' @export
test.fork.genes <- function(r,mat,matw=NULL,root,leaves,genes=rownames(mat),n.mapping=1,n.mapping.up=1,n.cores=parallel::detectCores()/2) {
  g <- graph.adjacency(r$B>0,mode="undirected")
  vpath = get.shortest.paths(g,root,leaves)
  # bifurcation point = latest principal point shared by both root->leaf paths;
  # re-root the two tested paths there
  interPP = intersection(vpath$vpath[[1]],vpath$vpath[[2]])
  vpath = get.shortest.paths(g, r$pp.info[interPP,]$PP[which.max(r$pp.info[interPP,]$time)],leaves)
  cat("testing differential expression between branches ..");cat("\n")
  gtll <- lapply( 1:n.mapping,function(nm){
    cat("mapping ");cat(nm);cat("\n")
    cell.info <- r$cell.info[[nm]]
    # cells of the two post-bifurcation branches, labeled by branch index i
    brcells = do.call(rbind,lapply( 1:length(vpath$vpath), function(i){
      x=vpath$vpath[[i]]
      segs = as.numeric(names(table(r$pp.info[x,]$seg))[table(r$pp.info[x,]$seg)>1])
      return(cbind(cell.info[cell.info$seg %in% segs,],i))
    }))
    # for every gene
    gtl <- do.call(rbind,mclapply(genes,function(gene) {
      brcells$exp <- mat[gene,rownames(brcells)]
      if (is.null(matw)) {brcells$w = 1
      }else {brcells$w <- matw[gene,r$cells][as.integer(gsub("c","",brcells$node))]}
      # time-based model with branch-specific smooths.
      # bug fix: argument was misspelled `familly` and silently swallowed by "..."
      # (gaussian is gam's default, so fitted results are unchanged)
      m <- mgcv::gam(exp ~ s(t)+s(t,by=as.factor(i))+as.factor(i),data=brcells,family=gaussian(),weights=brcells$w)
      return( c(mean(brcells$exp[brcells$i==1])-mean(brcells$exp[brcells$i==2]) , min(summary(m)$p.pv[2]) ) )
    },mc.cores=n.cores,mc.preschedule=T));
    colnames(gtl) = c("effect","p"); rownames(gtl) = genes; gtl = as.data.frame(gtl)
    return(gtl)
  })
  # aggregate per-mapping statistics (median / fraction across mappings)
  effect = do.call(cbind,lapply(gtll,function(gtl) gtl$effect ))
  if (length(gtll) > 1) {effect <- apply(effect,1,median)}
  pval = do.call(cbind,lapply(gtll,function(gtl) gtl$p ))
  if (length(gtll) > 1) {pval <- apply(pval,1,median)}
  fdr = do.call(cbind,lapply(gtll,function(gtl) p.adjust(gtl$p,"BH") ))
  if (length(gtll) > 1) {fdr <- apply(fdr,1,median)}
  st = do.call(cbind,lapply(gtll,function(gtl) gtl$p < 5e-2 ))
  if (length(gtll) > 1) {st <- apply(st,1,mean)}
  stf = do.call(cbind,lapply(gtll,function(gtl) p.adjust(gtl$p,"BH") < 5e-2 ))
  if (length(gtll) > 1) {stf <- apply(stf,1,mean)}
  # estimate the amount of upregulation relative to progenitor branch
  cat("testing upregulation in derivative relative to progenitor branch ..");cat("\n")
  eu <- do.call(cbind,lapply(leaves[1:2],function(leave){
    # bug fix: previously called extract.subtree on global `ppt` instead of `r`
    segs = extract.subtree(r,c(root,leave))
    posit = do.call(rbind,(mclapply(genes,function(gene){
      eu <- do.call(rbind,lapply(1:n.mapping.up,function(j){
        cells = rownames(r$cell.info[[j]])[r$cell.info[[j]]$seg %in% segs$segs]
        # linear trend of expression along pseudotime of root->leaf branch
        ft = lm( mat[gene,cells] ~ r$cell.info[[j]][cells,]$t )
        return( c(ft$coefficients[2],summary(ft)$coefficients[2,4] ) )
      }))
      if (n.mapping.up > 1) {eu <- apply(eu,2,median)}
      return(eu)
    },mc.cores = n.cores,mc.preschedule = TRUE)))
  }))
  colnames(eu) <- c("pd1.a","pd1.p","pd2.a","pd2.p")
  res <- as.data.frame(cbind(effect = effect, p = pval, fdr = fdr, st = st,stf = stf))
  colnames(res) <- c("effect","p","fdr","st","stf")
  rownames(res) <- genes
  res <- cbind(res,eu)
  return(res)
}
##' Assign genes differentially expressed between two post-bifurcation branches
##' @param fork.de statistics on expression differences between post-bifurcation branches, return of test.fork.genes
##' @param stf.cut minimum fraction of projections in which the gene passed fdr < 0.05
##' @param effect.b1 expression difference required to call a gene upregulated at branch 1
##' @param effect.b2 expression difference required to call a gene upregulated at branch 2
##' @param pd.a minimum expression increase in the derivative relative to progenitor branch to call a gene branch-specific
##' @param pd.p p-value threshold of expression change in the derivative relative to progenitor branch
##' @return table fork.de with an added column state, classifying genes as branch-specific (1 or 2) or non-branch-specific (0)
##' @export
branch.specific.genes <- function(fork.de,stf.cut = 0.7, effect.b1 = 0.1,effect.b2 = 0.3, pd.a = 0, pd.p = 5e-2){
  robust <- fork.de$stf >= stf.cut
  # branch-1 specific: robust, positive effect, upregulated vs progenitor
  b1.genes <- rownames(fork.de)[robust & fork.de$effect > effect.b1 &
                                  fork.de$pd1.a > pd.a & fork.de$pd1.p < pd.p]
  # branch-2 specific: robust, negative effect, upregulated vs progenitor
  b2.genes <- rownames(fork.de)[robust & fork.de$effect < -effect.b2 &
                                  fork.de$pd2.a > pd.a & fork.de$pd2.p < pd.p]
  # default state 0 (not branch-specific), then mark each branch's genes
  state <- setNames(rep(0, nrow(fork.de)), rownames(fork.de))
  state[b1.genes] <- 1
  state[b2.genes] <- 2
  return(cbind(fork.de,state))
}
##' Estimate optimum of expression and time of activation
##' @param r ppt.tree object
##' @param mat expression matrix
##' @param root root of progenitor branch of bifurcation
##' @param leave leaf of the derivative branch of bifurcation
##' @param genes genes to estimate parameters
##' @param deriv.cutoff a first passage of derivative through cutoff 'deriv.cutoff' to predict activation timing
##' @param gamma gamma parameter in gam function
##' @param n.mapping results are averaged among n.mapping number of probabilistic cell projections
##' @param n.cores number of cores to use
##' @return per gene timing of optimum and activation
##' @export
activation.statistics <- function(r,mat,root,leave,genes=rownames(mat),deriv.cutoff = 0.015,gamma=1,n.mapping=1,n.cores=parallel::detectCores()/2){
  # branch segments are identical for every gene and mapping: compute once.
  # bug fix: previously called extract.subtree on global `ppt` instead of `r`
  segs = extract.subtree(r,c(root,leave))
  xx = do.call(rbind,(mclapply(genes,function(gene){
    gres <- do.call(rbind,lapply(1:n.mapping,function(i){
      cell.summary <- r$cell.info[[i]]
      cells <- rownames(cell.summary)[cell.summary$seg %in% segs$segs]
      # smooth expression along pseudotime of the root->leave branch
      ft = mgcv::gam( mat[gene,cells] ~ s(cell.summary[cells,]$t),gamma=gamma)
      ord <- order(cell.summary[cells,]$t)
      # discrete derivative of the fitted curve, normalized by its dynamic range
      deriv.n <- ft$fitted.values[ord][-1]-ft$fitted.values[ord][-length(ord)]
      deriv.d <- max(ft$fitted.values[ord]) - min(ft$fitted.values[ord])
      deriv <- deriv.n/deriv.d
      # optimum: pseudotime of the fitted maximum;
      # activation: first pseudotime where derivative exceeds cutoff (branch end if never)
      c(cell.summary[cells,]$t[which.max(ft$fitted.values)],
        min(c(cell.summary[cells,]$t[-1][ deriv > deriv.cutoff ],max(cell.summary[cells,]$t))) )
    }))
    # median over mappings
    c( median(gres[,1]),median(gres[,2]) )
  },mc.cores = n.cores,mc.preschedule = TRUE)))
  rownames(xx) <- genes
  colnames(xx) <- c("optimum","activation")
  return(xx)
}
##' Estimate optimum of expression and time of activation for both branches of a fork
##' @param r ppt.tree object
##' @param fork.de outcome of test.fork.genes function, with branch assignments in column state
##' @param mat expression matrix
##' @param root root of progenitor branch of bifurcation
##' @param leaves leaves of derivative branches of bifurcation
##' @param deriv.cutoff a first passage of derivative through cutoff 'deriv.cutoff' to predict activation timing
##' @param gamma gamma parameter in gam function
##' @param n.mapping results are averaged among n.mapping number of probabilistic cell projections
##' @param n.cores number of cores to use
##' @return table fork.de with added per gene timing of optimum and activation
##' @export
activation.fork <- function(r,fork.de,mat,root,leaves,deriv.cutoff = 0.015,gamma=1,n.mapping=1,n.cores=parallel::detectCores()/2){
  cat("estimate activation patterns .. branch 1"); cat("\n")
  gg1 <- rownames(fork.de)[fork.de$state==1]
  act1 <- activation.statistics(r,mat,root,leaves[1],genes=gg1,deriv.cutoff = deriv.cutoff,gamma=gamma,n.mapping=n.mapping,n.cores=n.cores)
  cat("estimate activation patterns .. branch 2"); cat("\n")
  gg2 <- rownames(fork.de)[fork.de$state==2]
  # bug fix: branch 2 was computed on a global matrix `fpm` instead of argument `mat`
  act2 <- activation.statistics(r,mat,root,leaves[2],genes=gg2,deriv.cutoff = deriv.cutoff,gamma=gamma,n.mapping=n.mapping,n.cores=n.cores)
  # align per-branch results with rows of fork.de; genes of the other branch stay NA
  act <- cbind( rep(NA,nrow(fork.de)),rep(NA,nrow(fork.de)) );
  rownames(act) <- rownames(fork.de); colnames(act) <- colnames(act1)
  act[gg1,] <- act1
  act[gg2,] <- act2
  return( cbind(fork.de,act) )
}
##' Extract subtree of the tree
##' @param r ppt.tree object
##' @param nodes set of tips or internal nodes (bifurcations) delimiting the subtree
##' @return list of segments comprising a subtree.
##' @export
extract.subtree = function(r,nodes){
  g <- graph.adjacency(r$B>0,mode="undirected")
  if ( sum(!nodes%in%V(g)) > 0 ) {stop(paste("the following nodes are not in the tree:",paste(nodes[!nodes%in%V(g)],collapse = " ")) )}
  # nodes of degree 2 are neither terminal (degree 1) nor fork (degree >= 3).
  # bug fix: offending nodes were selected with `V(g)==2` instead of degree(g)==2,
  # so the error message listed the wrong nodes; also fixed "nethier" typo
  if ( sum( igraph::degree(g)==2 & (V(g) %in% nodes) ) > 0 ) {stop( paste("the following nodes are neither terminal nor fork:",paste(nodes[nodes %in% which(igraph::degree(g)==2)],collapse=" ")) )}
  # union of shortest paths from the first node to every other node
  vpath = get.shortest.paths(g,nodes[1],nodes)
  v = c()
  for (i in 1:length(vpath$vpath)){
    v=c(v,unlist(vpath$vpath[[i]]))
  }
  v=unique(v)
  # keep segments represented by more than one principal point along the paths
  segs = r$pp.info$seg[r$pp.info$PP %in% v]
  segs = segs[segs %in% names(table(segs))[table(segs) > 1]]
  list( segs = unique(segs) )
}
##' Pseudotime landmarks of a bifurcation fork
##'
##' NOTE(review): roxygen previously duplicated extract.subtree's docs; corrected to match the signature.
##' @param r ppt.tree object
##' @param root principal point of the fork root
##' @param leaves vector of two principal points of the fork leaves
##' @return named vector of pseudotimes of the fork root, the bifurcation point and both leaves.
##' @export
fork.pt = function(r,root,leaves){
# root->leaf branches; their shared segments form the progenitor branch
b1 <- extract.subtree(r,c(root,leaves[1]))
b2 <- extract.subtree(r,c(root,leaves[2]))
segs.prog <- intersect(b1$segs,b2$segs)
segs.b1 <- setdiff(b1$segs,segs.prog)
segs.b2 <- setdiff(b2$segs,segs.prog)
# landmarks: start/end of progenitor branch, end of each derivative branch
time.stat <- c( min(r$pp.info$time[r$pp.info$seg %in% segs.prog]),
max(r$pp.info$time[r$pp.info$seg %in% segs.prog]),
max(r$pp.info$time[r$pp.info$seg %in% segs.b1]),
max(r$pp.info$time[r$pp.info$seg %in% segs.b2])
)
names(time.stat) <- c("root","bifurcation","leave 1","leave 2")
return(time.stat)
}
##' Predict regulatory impact (activity) of transcription factors
##' @param em matrix of expression levels (genes x cells)
##' @param motmat matrix of target-TF scores (genes/targets x TFs)
##' @param perm boolean, permute gene labels if TRUE (background/control run).
##' @param n.cores number of cores to use
##' @return matrix of predicted TF activities (TFs x cells).
##' @export
activity.lasso <- function(em,motmat,perm=FALSE,n.cores=1){
  gns <- intersect(rownames(em),rownames(motmat))
  # center each gene's expression across cells (row centering)
  em.norm = sweep(em[gns,,drop=FALSE],1,rowMeans(em[gns,,drop=FALSE]))
  # center each TF's target scores (column centering).
  # bug fix: `motmat - apply(motmat,2,mean)` recycled the column-means vector
  # down the rows, subtracting the wrong mean from almost every entry;
  # sweep() subtracts each column's own mean
  motmat.norm <- sweep(motmat[gns,,drop=FALSE],2,colMeans(motmat[gns,,drop=FALSE]))
  poss <- 1:nrow(em.norm)
  if (perm==TRUE) {poss <- sample(1:nrow(em.norm))}
  # lasso regression per cell: cell expression ~ TF target scores
  cv.lasso = do.call(cbind, mclapply(1:ncol(em.norm),function(i){
    cv.lasso <- cv.glmnet( motmat.norm,em.norm[poss,i],alpha=1,intercept=FALSE, standardize=TRUE)
    # coefficients at the CV-optimal lambda, dropping the intercept row
    return( coef(cv.lasso,s=cv.lasso$lambda.min)[2:(ncol(motmat)+1),1] )
  },mc.cores = n.cores))
  rownames(cv.lasso) = colnames(motmat); colnames(cv.lasso) = colnames(em.norm)
  return(cv.lasso)
}
##' Decompose a number by degrees of 2.
##'
##' Returns the exponents of the binary expansion of n in decreasing order,
##' i.e. sum(2^result) == n; returns NULL for n <= 0.
##' @param n number
decompose <- function(n){
  exponents <- c()
  repeat {
    if (n <= 0) break
    # strip the highest power of 2 that still fits in the remainder
    e <- as.integer(log2(n))
    exponents <- c(exponents, e)
    n <- n - 2^e
  }
  return(exponents)
}
# ---- end of tree-fitting helpers; standalone LPA-MI classification-accuracy script follows ----
# ---- LPA-MI classification-accuracy script: setup ----
rm(list = ls())  # NOTE(review): clears the workspace; assumes this script runs standalone
library(plyr)
library(tidyverse)
library(data.table)
library(lpa.mi.src)
library(mice)
library(dplyr)
library(doParallel)
library(foreach)
library(doRNG)
library(snow)
library(doSNOW)
library(pbapply)
# Run configuration
computer_name = "MC1"
Processors = 10
z_vec = 1:40
# Directories
dropbox_wd = "D:/Dropbox"
#dropbox_wd = "C:/Users/marcu/Dropbox"
results_wd = paste0(dropbox_wd, "/Dissertation/lpa-mi-impute/stage4c-combine-results")
stage6_wd = paste0(dropbox_wd, "/Dissertation/lpa-mi-impute/stage6-classification-accuracy")
environment_wd = paste0(dropbox_wd,"/Dissertation/environmental-variables/")
pingpong_wd = paste0("S:/ping-pong")
# Clear local scratch directories.
# bug fix: second call was "rm-r" (missing space), so the delete silently failed
system("rm -r H:\\rdata-files")
system("rm -r H:\\classify-accuracy-files")
# Parallel backend
cl<-makeSOCKcluster(Processors)
doSNOW::registerDoSNOW(cl)
# Load pooled parameter estimates
setwd(results_wd)
load(file ="parameters-combined-results-lpa-mi-impute.RData")
# Complete/observed data have no imputation variant; encode them as pva = -1 / 0
parameters_combined_df$pva[parameters_combined_df$data_type=="Complete data"] = -1
parameters_combined_df$pva[parameters_combined_df$data_type=="Observed data"] = 0
for(rep_x in sample(1:500,500,replace=F)){
if( !(paste0("rep",rep_x,".csv")%in%list.files(path = pingpong_wd)) ){
print(paste0("Replication: ", rep_x))
tic = proc.time()
write.csv(x = data.frame(computer = computer_name, total_time = NA), file = paste0(pingpong_wd,"/rep",rep_x,".csv"), row.names = FALSE)
# Recreate per-replication scratch directories.
# bug fix: "rm-r" (missing space) meant the stale directory was never deleted
system("rm -r H:\\rdata-files")
system('mkdir H:\\rdata-files')
system("rm -r H:\\classify-accuracy-files")
system('mkdir H:\\classify-accuracy-files')
# Copy over the complete data files
system(paste0('xcopy "S:\\rdata-files\\list-complete rep',rep_x,' *.RData" H:\\rdata-files'))
# copy over the observed data files
system(paste0('xcopy "S:\\rdata-files\\list-observed rep',rep_x,' *.RData" H:\\rdata-files'))
# copy over the imputed data files
system(paste0('xcopy "S:\\rdata-files\\list-imputed rep',rep_x,' *.RData" H:\\rdata-files'))
# create a subpopulation list
list_subpop<-
lapply(X = z_vec, FUN = function(zz){
tmp1 = "complete"; tmp2=".RData";
load(paste0("H:/rdata-files/list-",tmp1," rep",rep_x," z",zz,tmp2))
return(list_complete$dfcom %>% select("subpop"))
})
out_x = expand.grid(z = z_vec, pva = c(-1:4), pm=1) %>% data.frame() %>% transform(data_type = NA)
out_x$data_type[out_x$pva==-1] = "Complete data"
out_x$data_type[out_x$pva==0] = "Observed data"
out_x$data_type[out_x$pva>0] = "Imputation"
out_x = out_x %>% transform(kappa1c1=NA, kappa1c2=NA, kappa1c3=NA, kappa2c1=NA, kappa2c2=NA, kappa2c3=NA, kappa3c1=NA, kappa3c2=NA, kappa3c3=NA)
pb <- pbapply::timerProgressBar(max = nrow(out_x), style = 1, width = getOption("width")/4)
progress <- function(x){setTimerProgressBar(pb, x)}
opts <- list(progress = progress)
outlist_x<-
foreach(x = 1:nrow(out_x),
.packages = c("mice","plyr","tidyverse","data.table","dplyr","lpa.mi.src"),
.inorder = TRUE,
.options.snow = opts) %dopar% {
#for(x in 1:nrow(out_x)){print(x)
z_x = out_x$z[x]; pva_x = out_x$pva[x]; pm_x = out_x$pm[x]; type_x = out_x$data_type[x];
# Get the parameters
parameters_x = parameters_combined_df %>%
filter(rep==rep_x & z==z_x & pm==pm_x & data_type==type_x & pva==pva_x)
if(nrow(parameters_x)>0){
Qlist_x <- parameters_x %>%
select(paramHeader,param,LatentClass,est) %>%
Mplus2Qlist()
# Load the data
if(type_x=="Complete data"){tmp1 = "complete"; tmp2=".RData"}
if(type_x=="Observed data"){tmp1 = "observed"; tmp2=paste0(" pm",pm_x,".RData")}
if(type_x=="Imputation"){tmp1 = "imputed"; tmp2 = paste0(" pm",pm_x," pva",pva_x,".RData")}
load(paste0("H:/rdata-files/list-",tmp1," rep",rep_x," z",z_x,tmp2))
if(type_x!="Imputation"){
if(type_x=="Complete data"){Y_x = list_complete$dfcom %>% select(starts_with("Y"))}
if(type_x=="Observed data"){Y_x = list_observed$list_obsdf$pm1 %>% select(starts_with("Y"))}
cprob_x <- lpa.mi.src::cprobs(Y_i = Y_x, pi_vec = Qlist_x$pi, mu_mat = Qlist_x$mu, S_array = Qlist_x$S)
} else {
tmp_mids = list_imputed$obj_call[[pm_x]][[1]]
tmp_cprobs<-
lapply(X = 1:tmp_mids$m,
FUN = function(m){
Y_x = mice::complete(tmp_mids, action = m) %>% select(starts_with("Y"));
cprob_x = lpa.mi.src::cprobs(Y_i = Y_x, pi_vec = Qlist_x$pi, mu_mat = Qlist_x$mu, S_array = Qlist_x$S) %>% data.frame() %>% transform(id = 1:nrow(Y_x), m = m)
return(cprob_x)
} ) %>% data.table::rbindlist()
cprob_x = tmp_cprobs %>% group_by(id) %>% summarise(X1 = mean(X1), X2 = mean(X2), X3 = mean(X3)) %>% select(X1,X2,X3) %>% data.frame()
}
modal_x = apply(cprob_x, 1, which.max)
table_x = table(list_subpop[[z_x]]$subpop, modal_x)
out_x$kappa1c1[x] = table_x[1,1]
out_x$kappa1c2[x] = table_x[1,2]
out_x$kappa1c3[x] = table_x[1,3]
out_x$kappa2c1[x] = table_x[2,1]
out_x$kappa2c2[x] = table_x[2,2]
out_x$kappa2c3[x] = table_x[2,3]
out_x$kappa3c1[x] = table_x[3,1]
out_x$kappa3c2[x] = table_x[3,2]
out_x$kappa3c3[x] = table_x[3,3]
return(out_x[x, ])
} #if(nrow(parameters_x)>0)
}#end for(x = )
out_x<-rbindlist(outlist_x) %>% data.frame()
out_x$pva[out_x$data_type!="Imputation"] = NA
save(out_x, file = paste0("H:/classify-accuracy-files/classify-accuracy-rep",rep_x,".RData"))
system(paste0('xcopy H:\\classify-accuracy-files\\classify-accuracy-rep',rep_x,'.RData S:\\classify-accuracy-files /J /Y'))
toc = proc.time()-tic; toc = round(toc[[3]],0)
write.csv(x = data.frame(computer = computer_name, total_time = toc), file = paste0(pingpong_wd,"/rep",rep_x,".csv"), row.names = FALSE)
# Clean up
system("rm -r H:\\rdata-files")
system("rm-r H:\\classify-accuracy-files")
}#end if(ping)
}# end for rep=
stopCluster(cl)
|
/stage6-classification-accuracy/archieve/ver1/MC1-stage6-classification-accuracy.R
|
no_license
|
marcus-waldman/lpa-mi-impute
|
R
| false
| false
| 7,757
|
r
|
rm(list = ls())  # NOTE(review): clears the workspace; assumed intentional for a standalone batch run
# library() (not require()) so a missing dependency fails loudly at startup.
library(plyr)
library(tidyverse)
library(data.table)
library(lpa.mi.src)
library(mice)
library(dplyr)
library(doParallel)
library(foreach)
library(doRNG)
library(snow)
library(doSNOW)
library(pbapply)

# Machine / simulation configuration.
computer_name <- "MC1"
Processors <- 10        # worker count for the SOCK cluster (was assigned twice)
z_vec <- 1:40           # data-generation conditions

# Directories.
dropbox_wd <- "D:/Dropbox"
#dropbox_wd = "C:/Users/marcu/Dropbox"
results_wd <- paste0(dropbox_wd, "/Dissertation/lpa-mi-impute/stage4c-combine-results")
stage6_wd <- paste0(dropbox_wd, "/Dissertation/lpa-mi-impute/stage6-classification-accuracy")
environment_wd <- paste0(dropbox_wd, "/Dissertation/environmental-variables/")
pingpong_wd <- paste0("S:/ping-pong")

# Start from clean local scratch directories.
# NOTE: fixed "rm-r" -> "rm -r"; the original command was a typo and never ran.
system("rm -r H:\\rdata-files")
system("rm -r H:\\classify-accuracy-files")

# Worker cluster used by the foreach() loop below.
cl <- makeSOCKcluster(Processors)
doSNOW::registerDoSNOW(cl)

# Load the combined parameter estimates produced by stage 4c.
setwd(results_wd)
load(file = "parameters-combined-results-lpa-mi-impute.RData")

# Sentinel "pva" codes so all three data types can be matched against the
# pva grid constructed below: -1 = complete data, 0 = observed data,
# pva > 0 = imputation method.
parameters_combined_df$pva[parameters_combined_df$data_type == "Complete data"] <- -1
parameters_combined_df$pva[parameters_combined_df$data_type == "Observed data"] <- 0

# Visit the 500 replications in random order.  The rep<k>.csv files in
# pingpong_wd act as a cross-machine claim/lock: a replication is skipped
# when another worker has already written its marker file.
for (rep_x in sample(1:500, 500, replace = FALSE)) {
  if (!(paste0("rep", rep_x, ".csv") %in% list.files(path = pingpong_wd))) {
    print(paste0("Replication: ", rep_x))
    tic <- proc.time()
    # Claim this replication immediately; total_time is filled in on completion.
    write.csv(x = data.frame(computer = computer_name, total_time = NA),
              file = paste0(pingpong_wd, "/rep", rep_x, ".csv"), row.names = FALSE)
    # Recreate the local scratch directories (same "rm -r" fix as above).
    system("rm -r H:\\rdata-files")
    system('mkdir H:\\rdata-files')
    system("rm -r H:\\classify-accuracy-files")
    system('mkdir H:\\classify-accuracy-files')
    # Copy the complete, observed, and imputed data files for this replication
    # from the shared S: drive to local H: scratch.
    system(paste0('xcopy "S:\\rdata-files\\list-complete rep', rep_x, ' *.RData" H:\\rdata-files'))
    system(paste0('xcopy "S:\\rdata-files\\list-observed rep', rep_x, ' *.RData" H:\\rdata-files'))
    system(paste0('xcopy "S:\\rdata-files\\list-imputed rep', rep_x, ' *.RData" H:\\rdata-files'))
    # True subpopulation labels for each data-generation condition z; used
    # below to cross-tabulate against the modal class assignments.
    list_subpop <-
      lapply(X = z_vec, FUN = function(zz) {
        tmp1 <- "complete"; tmp2 <- ".RData"
        load(paste0("H:/rdata-files/list-", tmp1, " rep", rep_x, " z", zz, tmp2))
        return(list_complete$dfcom %>% select("subpop"))
      })
    # One row per (condition z, pva code) cell; the kappa<r>c<c> columns will
    # hold the 3x3 subpopulation-by-modal-class contingency counts.
    out_x <- expand.grid(z = z_vec, pva = c(-1:4), pm = 1) %>% data.frame() %>% transform(data_type = NA)
    out_x$data_type[out_x$pva == -1] <- "Complete data"
    out_x$data_type[out_x$pva == 0] <- "Observed data"
    out_x$data_type[out_x$pva > 0] <- "Imputation"
    out_x <- out_x %>% transform(kappa1c1 = NA, kappa1c2 = NA, kappa1c3 = NA,
                                 kappa2c1 = NA, kappa2c2 = NA, kappa2c3 = NA,
                                 kappa3c1 = NA, kappa3c2 = NA, kappa3c3 = NA)
    pb <- pbapply::timerProgressBar(max = nrow(out_x), style = 1, width = getOption("width") / 4)
    progress <- function(x) { pbapply::setTimerProgressBar(pb, x) }
    opts <- list(progress = progress)
    outlist_x <-
      foreach(x = 1:nrow(out_x),
              .packages = c("mice", "plyr", "tidyverse", "data.table", "dplyr", "lpa.mi.src"),
              .inorder = TRUE,
              .options.snow = opts) %dopar% {
        z_x <- out_x$z[x]; pva_x <- out_x$pva[x]; pm_x <- out_x$pm[x]; type_x <- out_x$data_type[x]
        # Estimated model parameters for this cell (empty -> NULL result,
        # which rbindlist() below silently drops).
        parameters_x <- parameters_combined_df %>%
          filter(rep == rep_x & z == z_x & pm == pm_x & data_type == type_x & pva == pva_x)
        if (nrow(parameters_x) > 0) {
          Qlist_x <- parameters_x %>%
            select(paramHeader, param, LatentClass, est) %>%
            Mplus2Qlist()
          # Load the matching data set.
          if (type_x == "Complete data") { tmp1 <- "complete"; tmp2 <- ".RData" }
          if (type_x == "Observed data") { tmp1 <- "observed"; tmp2 <- paste0(" pm", pm_x, ".RData") }
          if (type_x == "Imputation")    { tmp1 <- "imputed";  tmp2 <- paste0(" pm", pm_x, " pva", pva_x, ".RData") }
          load(paste0("H:/rdata-files/list-", tmp1, " rep", rep_x, " z", z_x, tmp2))
          if (type_x != "Imputation") {
            if (type_x == "Complete data") { Y_x <- list_complete$dfcom %>% select(starts_with("Y")) }
            if (type_x == "Observed data") { Y_x <- list_observed$list_obsdf$pm1 %>% select(starts_with("Y")) }
            cprob_x <- lpa.mi.src::cprobs(Y_i = Y_x, pi_vec = Qlist_x$pi, mu_mat = Qlist_x$mu, S_array = Qlist_x$S)
          } else {
            # Imputed data: average the posterior class probabilities over
            # the m imputations before classifying.
            tmp_mids <- list_imputed$obj_call[[pm_x]][[1]]
            tmp_cprobs <-
              lapply(X = 1:tmp_mids$m,
                     FUN = function(m) {
                       Y_x <- mice::complete(tmp_mids, action = m) %>% select(starts_with("Y"))
                       cprob_x <- lpa.mi.src::cprobs(Y_i = Y_x, pi_vec = Qlist_x$pi, mu_mat = Qlist_x$mu, S_array = Qlist_x$S) %>%
                         data.frame() %>% transform(id = 1:nrow(Y_x), m = m)
                       return(cprob_x)
                     }) %>% data.table::rbindlist()
            cprob_x <- tmp_cprobs %>% group_by(id) %>%
              summarise(X1 = mean(X1), X2 = mean(X2), X3 = mean(X3)) %>%
              select(X1, X2, X3) %>% data.frame()
          }
          # Modal class assignment, cross-tabulated with the true subpopulation.
          # NOTE(review): assumes every class is modal for at least one case so
          # that table_x is 3x3 -- confirm for small-n conditions.
          modal_x <- apply(cprob_x, 1, which.max)
          table_x <- table(list_subpop[[z_x]]$subpop, modal_x)
          out_x$kappa1c1[x] <- table_x[1, 1]
          out_x$kappa1c2[x] <- table_x[1, 2]
          out_x$kappa1c3[x] <- table_x[1, 3]
          out_x$kappa2c1[x] <- table_x[2, 1]
          out_x$kappa2c2[x] <- table_x[2, 2]
          out_x$kappa2c3[x] <- table_x[2, 3]
          out_x$kappa3c1[x] <- table_x[3, 1]
          out_x$kappa3c2[x] <- table_x[3, 2]
          out_x$kappa3c3[x] <- table_x[3, 3]
          return(out_x[x, ])
        } # if (nrow(parameters_x) > 0)
      } # end foreach
    out_x <- rbindlist(outlist_x) %>% data.frame()
    out_x$pva[out_x$data_type != "Imputation"] <- NA
    save(out_x, file = paste0("H:/classify-accuracy-files/classify-accuracy-rep", rep_x, ".RData"))
    system(paste0('xcopy H:\\classify-accuracy-files\\classify-accuracy-rep', rep_x, '.RData S:\\classify-accuracy-files /J /Y'))
    toc <- proc.time() - tic; toc <- round(toc[[3]], 0)
    # Record elapsed seconds in the claim file.
    write.csv(x = data.frame(computer = computer_name, total_time = toc),
              file = paste0(pingpong_wd, "/rep", rep_x, ".csv"), row.names = FALSE)
    # Clean up local scratch (fixed "rm-r" typo here as well).
    system("rm -r H:\\rdata-files")
    system("rm -r H:\\classify-accuracy-files")
  } # end if (replication not yet claimed)
} # end for rep_x
stopCluster(cl)
|
#' Quality control samples (QCs) checking
#'
#' Quality control samples (QCs) are checked for data irregularities. It is used for data from untargeted metabolomic analysis.
#' @param data Data table with variables (metabolites) in columns. Samples in rows are sorted according to specific groups.
#' @param name A character string or expression indicating a name of data set. It occurs in names of every output.
#' @param groupnames A character vector defining specific groups in data. Every string must be specific for each group and they must not overlap.
#' @details Values of QCs are evaluated and questionable values for particular variables are denoted. There are two steps of evaluation: 1. QCs with completely higher values than the maximum of data, 2. QCs higher than majority of data.
#' @details Up to twenty different groups can be distinguished in data (including QCs).
#' @return Boxplots of QCs and the other data groups.
#' @return Excel file with the list of questionable variables from two steps of evaluation.
#' @import openxlsx
#' @examples data=metabol
#' name="Metabolomics" #name of the project
#' groupnames=c("Con","Pat","QC")
#' bigQC(data,name,groupnames)
#' @export
bigQC=function(data,name,groupnames){
################################################################################################################################
#data=as.matrix(data)
##########################################################################################################################
# Fixed palette (20 colours) and plotting symbols (16 marks) indexed by group.
basecolor=c("blue","magenta","forestgreen","darkorange","deepskyblue","mediumaquamarine","lightslateblue","saddlebrown",
"gray40","darkslateblue","firebrick","darkcyan","darkmagenta", "deeppink1","limegreen","gold2","bisque2",
"lightcyan3","red","darkolivegreen3") # Basic colours from: http://www.stat.columbia.edu/~tzheng/files/Rcolor.pdf
basemarks=c(15,17,18,8,11,2,0,16,5,6,4,10,3,7,9,12)
groupnames=groupnames # no-op self-assignment; placeholder for the automatic detection commented out below
#groupnames=unique(gsub("[[:digit:]]","",rownames(data)))
count=length(groupnames)
# Build per-sample vectors of group index, plotting symbol, and colour.
# Samples are matched to groups by grep() on rownames, which is why the
# group name strings must not overlap (see @param groupnames).
groups=NULL
marks=NULL
color=NULL
for (i in 1:count){
Gr=grep(groupnames[i],rownames(data))
gr=rep(i,length(Gr))
groups=c(groups,gr)
zn=rep(basemarks[i],length(Gr))
marks=c(marks,zn)
cl=rep(basecolor[i],length(Gr))
color=c(color,cl)
}
################################################################################################################################
# denoting of QCs
QCi=grep("QC",rownames(data))
dataQC=data[QCi,]
################################################################################################################################
# rule 1 - comparison of maximum of samples and minimum of QCs
# A variable is flagged when the QC boxplot's lower whisker (b$stats[1,1])
# lies above the maximum of all non-QC samples, i.e. the QC values sit
# completely above the rest of the data.
rule1=matrix(rep(NA,ncol(data)),ncol=1)
for(i in 1:ncol(data)){
maxs=max(data[-QCi,i])
b=boxplot(data[QCi,i] ~ groups[QCi], names=groupnames[1],main=colnames(data)[i],notch=FALSE,plot=FALSE)
minQC=b$stats[1,1]
if (maxs<minQC){
rule1[i,1]=1
} else {
rule1[i,1]=0
}
}
rownames(rule1)=colnames(data)
#head(rule1)
idxrule1 = which(rule1 == 1)
if (length(idxrule1)!=0){
# Split into passing variables (data2) and flagged ones (dataout); flagged
# variables are exported to Excel and plotted with sample labels.
data2=data[,-idxrule1]
dataout=matrix(rep(0,nrow(data)*length(idxrule1)),nrow=nrow(data))
rownames(dataout)=rownames(data)
for (k in 1:length(idxrule1)){
dataout[,k]=data[,idxrule1[k]]
colnames(dataout)=colnames(data)[idxrule1]
}
write.xlsx(dataout,file = paste("Box_out_rule_1_",name,".xlsx",sep=""),sheetName="Out",
col.names=TRUE, row.names=TRUE, append=FALSE, showNA=TRUE)
labels=rownames(dataout)
pdf(paste("Box_out_rule_1_",name,".pdf",sep=""))
for(i in 1:ncol(dataout)){
boxplot(dataout[,i] ~ groups, names=groupnames,main=colnames(dataout)[i],notch=TRUE,outpch = NA)
text(groups,dataout[,i],label=labels,col="red",cex=0.5)
}
dev.off()
}else{
data2 = data
print("No questionable QCs in rule 1.")
}
#unique(gsub("[[:digit:]]","",rownames(dataSet)))
################################################################################################################################
# rule 2 - QCs higher than majority of data (some samples are higher than QCs)
# A variable is flagged when the lower notch bound of the QC group
# (b$conf[1,qc]) exceeds the largest upper hinge among the non-QC groups
# (max of b$stats[4,-qc]).
rule2=matrix(rep(NA,ncol(data2)*1),ncol=1)
for(i in 1:ncol(data2)){
b=boxplot(data2[,i] ~ groups, names=groupnames,main=colnames(data2)[i],notch=FALSE,plot=FALSE)
qc=grep("QC",groupnames)
cAQC=b$conf[1,qc]
cBs=max(b$stats[4,-qc])
if (cAQC>cBs){
rule2[i,1]=1
} else {
rule2[i,1]=0
}
}
rownames(rule2)=colnames(data2)
#head(rule2)
idxrule2 = which(apply(rule2,1,sum) == 1)
if (length(idxrule2)!=0){
# As for rule 1: flagged variables (dataout2) go to Excel and a labelled
# PDF; the remaining variables (data3) get standard boxplots with jittered
# points in a separate "rest" PDF.
data3=data2[,-idxrule2]
dataout2=matrix(rep(0,nrow(data2)*length(idxrule2)),nrow=nrow(data2))
rownames(dataout2)=rownames(data)
for (k in 1:length(idxrule2)){
dataout2[,k]=data2[,idxrule2[k]]
colnames(dataout2)=colnames(data2)[idxrule2]
}
write.xlsx(dataout2,file = paste("Box_out_rule_2_",name,".xlsx",sep=""),sheetName="Out",
col.names=TRUE, row.names=TRUE, append=FALSE, showNA=TRUE)
labels=rownames(dataout2)
pdf(paste("Box_out_rule_2_",name,".pdf",sep=""))
for(i in 1:ncol(dataout2)){
b=boxplot(dataout2[,i] ~ groups, names=groupnames,main=colnames(dataout2)[i],notch=TRUE,outpch = NA)
text(groups,dataout2[,i],label=labels,col="red",cex=0.5)
}
dev.off()
pdf(paste("Box_rest_",name,".pdf",sep=""))
for(i in 1:ncol(data3)){
b=boxplot(data3[,i] ~ groups, names=groupnames,main=colnames(data3)[i],notch=TRUE,outpch = NA)
stripchart(data3[,i] ~ groups, vertical = TRUE, method = "jitter",pch = unique(marks), col = unique(color), add = TRUE)
}
dev.off()
}else{
data3 = data2
print("No questionable QCs in rule 2.")
}
}
|
/R/bigQC.R
|
no_license
|
AlzbetaG/Metabol
|
R
| false
| false
| 5,909
|
r
|
#' Quality control samples (QCs) checking
#'
#' Quality control samples (QCs) are checked for data irregularities. It is used for data from untargeted metabolomic analysis.
#' @param data Data table with variables (metabolites) in columns. Samples in rows are sorted according to specific groups.
#' @param name A character string or expression indicating a name of data set. It occurs in names of every output.
#' @param groupnames A character vector defining specific groups in data. Every string must be specific for each group and they must not overlap.
#' @details Values of QCs are evaluated and questionable values for particular variables are denoted. There are two steps of evaluation: 1. QCs with completely higher values than the maximum of data, 2. QCs higher than majority of data.
#' @details Up to twenty different groups can be distinguished in data (including QCs).
#' @return Boxplots of QCs and the other data groups.
#' @return Excel file with the list of questionable variables from two steps of evaluation.
#' @import openxlsx
#' @examples data=metabol
#' name="Metabolomics" #name of the project
#' groupnames=c("Con","Pat","QC")
#' bigQC(data,name,groupnames)
#' @export
bigQC=function(data,name,groupnames){
################################################################################################################################
#data=as.matrix(data)
##########################################################################################################################
# Fixed palette (20 colours) and plotting symbols (16 marks) indexed by group.
basecolor=c("blue","magenta","forestgreen","darkorange","deepskyblue","mediumaquamarine","lightslateblue","saddlebrown",
"gray40","darkslateblue","firebrick","darkcyan","darkmagenta", "deeppink1","limegreen","gold2","bisque2",
"lightcyan3","red","darkolivegreen3") # Basic colours from: http://www.stat.columbia.edu/~tzheng/files/Rcolor.pdf
basemarks=c(15,17,18,8,11,2,0,16,5,6,4,10,3,7,9,12)
groupnames=groupnames # no-op self-assignment; placeholder for the automatic detection commented out below
#groupnames=unique(gsub("[[:digit:]]","",rownames(data)))
count=length(groupnames)
# Build per-sample vectors of group index, plotting symbol, and colour.
# Samples are matched to groups by grep() on rownames, which is why the
# group name strings must not overlap (see @param groupnames).
groups=NULL
marks=NULL
color=NULL
for (i in 1:count){
Gr=grep(groupnames[i],rownames(data))
gr=rep(i,length(Gr))
groups=c(groups,gr)
zn=rep(basemarks[i],length(Gr))
marks=c(marks,zn)
cl=rep(basecolor[i],length(Gr))
color=c(color,cl)
}
################################################################################################################################
# denoting of QCs
QCi=grep("QC",rownames(data))
dataQC=data[QCi,]
################################################################################################################################
# rule 1 - comparison of maximum of samples and minimum of QCs
# A variable is flagged when the QC boxplot's lower whisker (b$stats[1,1])
# lies above the maximum of all non-QC samples, i.e. the QC values sit
# completely above the rest of the data.
rule1=matrix(rep(NA,ncol(data)),ncol=1)
for(i in 1:ncol(data)){
maxs=max(data[-QCi,i])
b=boxplot(data[QCi,i] ~ groups[QCi], names=groupnames[1],main=colnames(data)[i],notch=FALSE,plot=FALSE)
minQC=b$stats[1,1]
if (maxs<minQC){
rule1[i,1]=1
} else {
rule1[i,1]=0
}
}
rownames(rule1)=colnames(data)
#head(rule1)
idxrule1 = which(rule1 == 1)
if (length(idxrule1)!=0){
# Split into passing variables (data2) and flagged ones (dataout); flagged
# variables are exported to Excel and plotted with sample labels.
data2=data[,-idxrule1]
dataout=matrix(rep(0,nrow(data)*length(idxrule1)),nrow=nrow(data))
rownames(dataout)=rownames(data)
for (k in 1:length(idxrule1)){
dataout[,k]=data[,idxrule1[k]]
colnames(dataout)=colnames(data)[idxrule1]
}
write.xlsx(dataout,file = paste("Box_out_rule_1_",name,".xlsx",sep=""),sheetName="Out",
col.names=TRUE, row.names=TRUE, append=FALSE, showNA=TRUE)
labels=rownames(dataout)
pdf(paste("Box_out_rule_1_",name,".pdf",sep=""))
for(i in 1:ncol(dataout)){
boxplot(dataout[,i] ~ groups, names=groupnames,main=colnames(dataout)[i],notch=TRUE,outpch = NA)
text(groups,dataout[,i],label=labels,col="red",cex=0.5)
}
dev.off()
}else{
data2 = data
print("No questionable QCs in rule 1.")
}
#unique(gsub("[[:digit:]]","",rownames(dataSet)))
################################################################################################################################
# rule 2 - QCs higher than majority of data (some samples are higher than QCs)
# A variable is flagged when the lower notch bound of the QC group
# (b$conf[1,qc]) exceeds the largest upper hinge among the non-QC groups
# (max of b$stats[4,-qc]).
rule2=matrix(rep(NA,ncol(data2)*1),ncol=1)
for(i in 1:ncol(data2)){
b=boxplot(data2[,i] ~ groups, names=groupnames,main=colnames(data2)[i],notch=FALSE,plot=FALSE)
qc=grep("QC",groupnames)
cAQC=b$conf[1,qc]
cBs=max(b$stats[4,-qc])
if (cAQC>cBs){
rule2[i,1]=1
} else {
rule2[i,1]=0
}
}
rownames(rule2)=colnames(data2)
#head(rule2)
idxrule2 = which(apply(rule2,1,sum) == 1)
if (length(idxrule2)!=0){
# As for rule 1: flagged variables (dataout2) go to Excel and a labelled
# PDF; the remaining variables (data3) get standard boxplots with jittered
# points in a separate "rest" PDF.
data3=data2[,-idxrule2]
dataout2=matrix(rep(0,nrow(data2)*length(idxrule2)),nrow=nrow(data2))
rownames(dataout2)=rownames(data)
for (k in 1:length(idxrule2)){
dataout2[,k]=data2[,idxrule2[k]]
colnames(dataout2)=colnames(data2)[idxrule2]
}
write.xlsx(dataout2,file = paste("Box_out_rule_2_",name,".xlsx",sep=""),sheetName="Out",
col.names=TRUE, row.names=TRUE, append=FALSE, showNA=TRUE)
labels=rownames(dataout2)
pdf(paste("Box_out_rule_2_",name,".pdf",sep=""))
for(i in 1:ncol(dataout2)){
b=boxplot(dataout2[,i] ~ groups, names=groupnames,main=colnames(dataout2)[i],notch=TRUE,outpch = NA)
text(groups,dataout2[,i],label=labels,col="red",cex=0.5)
}
dev.off()
pdf(paste("Box_rest_",name,".pdf",sep=""))
for(i in 1:ncol(data3)){
b=boxplot(data3[,i] ~ groups, names=groupnames,main=colnames(data3)[i],notch=TRUE,outpch = NA)
stripchart(data3[,i] ~ groups, vertical = TRUE, method = "jitter",pch = unique(marks), col = unique(color), add = TRUE)
}
dev.off()
}else{
data3 = data2
print("No questionable QCs in rule 2.")
}
}
|
# Constant definitions --------------------------------------------------------
date_col <- 'datetime'
data_url <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip'
downloaded_file <- 'data.zip'
date_format <- "%d/%m/%Y %H:%M:%S"
file_name <- 'household_power_consumption.txt'
begin <- "1/2/2007 00:00:00"
end <- "3/2/2007 00:00:00"
par(mar = (c(4, 4, 4, 1) + 0.1))
# Data loading ----------------------------------------------------------------
# Parse the semicolon-separated power file; '?' marks missing values.
load_file <- function(file = file_name) {
  read.table(file, header = TRUE, na.strings = "?", sep = ";")
}
# Download the archive once, then read the data file straight out of the zip.
load_file_from_the_internet <- function() {
  if (!file.exists(downloaded_file)) {
    download.file(data_url, downloaded_file)
  }
  archive_con <- unz(downloaded_file, file_name)
  load_file(archive_con)
}
# Date handling ----------------------------------------------------------------
format_date <- function(date) {
  as.POSIXct(date, format = date_format)
}
update_date <- function(data) {
  with(data, format_date(paste(Date, Time)))
}
begin <- format_date(begin)
end <- format_date(end)
# Append a POSIXct timestamp column built from the Date and Time columns.
add_date_col <- function(data) {
  data[, date_col] <- update_date(data)
  data
}
# Keep only the rows whose timestamp falls inside the [begin, end) window.
select_data <- function(data) {
  stamps <- data[, date_col]
  data[which(stamps >= begin & stamps < end), ]
}
# Plot 1: histogram of global active power.
plot_global_active_power_hist <- function(data) {
  hist(data$Global_active_power, main = "Global Active Power",
       xlab = "Global active power (kilowatts)", col = 'red')
}
# Plot 2: global active power over time.
plot_global_active_power <- function(data) {
  plot(data[, date_col], data$Global_active_power, type = 'l',
       xlab = "", ylab = "Global active power (kilowatts)", main = "")
}
# Plot 3: the three sub-metering series overlaid, with a legend.
plot_submeterings <- function(data, box_type = "n") {
  stamps <- data[, date_col]
  plot(stamps, data$Sub_metering_1, type = 'l',
       xlab = "", ylab = "Energy sub metering", main = "")
  lines(stamps, data$Sub_metering_2, col = 'red')
  lines(stamps, data$Sub_metering_3, col = 'blue')
  legend("topright",
         c("Sub_meterings_1", "Sub_meterings_2", "Sub_meterings_3"),
         col = c("black", "red", "blue"), lwd = c(1, 1, 1), bty = box_type)
}
# Plot 4.1: voltage over time.
plot_voltage <- function(data) {
  plot(data[, date_col], data$Voltage, type = 'l',
       xlab = 'datetime', ylab = 'Voltage')
}
# Plot 4.2: global reactive power over time.
plot_global_reactive_power <- function(data) {
  plot(data[, date_col], data$Global_reactive_power, type = 'l',
       xlab = 'datetime', ylab = 'Global reactive power')
}
# Plot 4: 2x2 collage of the four panels above.
plot_collage <- function(data) {
  par(mfrow = c(2, 2))
  plot_global_active_power(data)
  plot_voltage(data)
  plot_submeterings(data)
  plot_global_reactive_power(data)
}
# Render a plot function to a PNG file; extra args go to the plot function.
plot_to_png <- function(data, plot_function, filename, ...) {
  png(filename = filename)
  plot_function(data, ...)
  dev.off()
}
# Produce all four expected assignment figures.
plot_exercise <- function(data) {
  plot_to_png(data, plot_global_active_power_hist, 'plot1.png')
  plot_to_png(data, plot_global_active_power, 'plot2.png')
  plot_to_png(data, plot_submeterings, 'plot3.png', box_type = "o")
  plot_to_png(data, plot_collage, 'plot4.png')
}
# Script entry point: load, timestamp, filter, and render plot 2.
data <- load_file_from_the_internet()
data <- add_date_col(data)
data <- select_data(data)
plot_to_png(data, plot_global_active_power, 'plot2.png')
|
/plot2.R
|
no_license
|
JorgeMonforte/ExData_Plotting1
|
R
| false
| false
| 2,988
|
r
|
# Constant definitions --------------------------------------------------------
date_col <- 'datetime'
data_url <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip'
downloaded_file <- 'data.zip'
date_format <- "%d/%m/%Y %H:%M:%S"
file_name <- 'household_power_consumption.txt'
begin <- "1/2/2007 00:00:00"
end <- "3/2/2007 00:00:00"
par(mar = (c(4, 4, 4, 1) + 0.1))
# Data loading ----------------------------------------------------------------
# Parse the semicolon-separated power file; '?' marks missing values.
load_file <- function(file = file_name) {
  read.table(file, header = TRUE, na.strings = "?", sep = ";")
}
# Download the archive once, then read the data file straight out of the zip.
load_file_from_the_internet <- function() {
  if (!file.exists(downloaded_file)) {
    download.file(data_url, downloaded_file)
  }
  archive_con <- unz(downloaded_file, file_name)
  load_file(archive_con)
}
# Date handling ----------------------------------------------------------------
format_date <- function(date) {
  as.POSIXct(date, format = date_format)
}
update_date <- function(data) {
  with(data, format_date(paste(Date, Time)))
}
begin <- format_date(begin)
end <- format_date(end)
# Append a POSIXct timestamp column built from the Date and Time columns.
add_date_col <- function(data) {
  data[, date_col] <- update_date(data)
  data
}
# Keep only the rows whose timestamp falls inside the [begin, end) window.
select_data <- function(data) {
  stamps <- data[, date_col]
  data[which(stamps >= begin & stamps < end), ]
}
# Plot 1: histogram of global active power.
plot_global_active_power_hist <- function(data) {
  hist(data$Global_active_power, main = "Global Active Power",
       xlab = "Global active power (kilowatts)", col = 'red')
}
# Plot 2: global active power over time.
plot_global_active_power <- function(data) {
  plot(data[, date_col], data$Global_active_power, type = 'l',
       xlab = "", ylab = "Global active power (kilowatts)", main = "")
}
# Plot 3: the three sub-metering series overlaid, with a legend.
plot_submeterings <- function(data, box_type = "n") {
  stamps <- data[, date_col]
  plot(stamps, data$Sub_metering_1, type = 'l',
       xlab = "", ylab = "Energy sub metering", main = "")
  lines(stamps, data$Sub_metering_2, col = 'red')
  lines(stamps, data$Sub_metering_3, col = 'blue')
  legend("topright",
         c("Sub_meterings_1", "Sub_meterings_2", "Sub_meterings_3"),
         col = c("black", "red", "blue"), lwd = c(1, 1, 1), bty = box_type)
}
# Plot 4.1: voltage over time.
plot_voltage <- function(data) {
  plot(data[, date_col], data$Voltage, type = 'l',
       xlab = 'datetime', ylab = 'Voltage')
}
# Plot 4.2: global reactive power over time.
plot_global_reactive_power <- function(data) {
  plot(data[, date_col], data$Global_reactive_power, type = 'l',
       xlab = 'datetime', ylab = 'Global reactive power')
}
# Plot 4: 2x2 collage of the four panels above.
plot_collage <- function(data) {
  par(mfrow = c(2, 2))
  plot_global_active_power(data)
  plot_voltage(data)
  plot_submeterings(data)
  plot_global_reactive_power(data)
}
# Render a plot function to a PNG file; extra args go to the plot function.
plot_to_png <- function(data, plot_function, filename, ...) {
  png(filename = filename)
  plot_function(data, ...)
  dev.off()
}
# Produce all four expected assignment figures.
plot_exercise <- function(data) {
  plot_to_png(data, plot_global_active_power_hist, 'plot1.png')
  plot_to_png(data, plot_global_active_power, 'plot2.png')
  plot_to_png(data, plot_submeterings, 'plot3.png', box_type = "o")
  plot_to_png(data, plot_collage, 'plot4.png')
}
# Script entry point: load, timestamp, filter, and render plot 2.
data <- load_file_from_the_internet()
data <- add_date_col(data)
data <- select_data(data)
plot_to_png(data, plot_global_active_power, 'plot2.png')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/requests.R
\name{functional_annotation}
\alias{functional_annotation}
\title{Retrieving functional annotation}
\source{
https://string-db.org/cgi/help.pl?subpage=api%23retrieving-functional-annotation
}
\usage{
functional_annotation(identifiers = NULL, species = 9606,
allow_pubmed = 0, caller_identity)
}
\arguments{
\item{identifiers}{A \code{character} string.}
\item{species}{A \code{numeric}.}
\item{allow_pubmed}{A \code{logical} flag in the form '0' (default) or '1'.}
\item{caller_identity}{A \code{character} string.}
}
\value{
A \code{tibble}.
\describe{
\item{category}{term category (e.g. GO Process, KEGG pathways)}
\item{term}{enriched term (GO term, domain or pathway)}
\item{number_of_genes}{number of genes in your input list with the term
assigned}
\item{ratio_in_set}{ratio of the proteins in your input list with the
term assigned}
\item{ncbiTaxonId}{NCBI taxon identifier}
\item{inputGenes}{gene names from your input}
\item{preferredNames}{common protein names (in the same order as your
input Genes)}
\item{description}{description of the enriched term}
}
}
\description{
Gets the functional annotation (Gene Ontology, UniProt Keywords, PFAM,
INTERPRO and SMART domains) of your list of proteins.
}
\examples{
\dontrun{
# make a functional_annotation request
functional_annotation(identifiers = 'cdk1')
}
}
\seealso{
\code{\link{get_string_ids}}
\code{\link{network}}
\code{\link{interaction_partners}}
\code{\link{homology}}
\code{\link{homology_best}}
\code{\link{enrichment}}
\code{\link{ppi_enrichment}}
Other API methods: \code{\link{enrichment}},
\code{\link{get_string_ids}},
\code{\link{homology_best}}, \code{\link{homology}},
\code{\link{interaction_partners}},
\code{\link{network}}, \code{\link{ppi_enrichment}}
}
\concept{API methods}
|
/man/functional_annotation.Rd
|
no_license
|
abifromr/stringapi
|
R
| false
| true
| 1,870
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/requests.R
\name{functional_annotation}
\alias{functional_annotation}
\title{Retrieving functional annotation}
\source{
https://string-db.org/cgi/help.pl?subpage=api%23retrieving-functional-annotation
}
\usage{
functional_annotation(identifiers = NULL, species = 9606,
allow_pubmed = 0, caller_identity)
}
\arguments{
\item{identifiers}{A \code{character} string.}
\item{species}{A \code{numeric}.}
\item{allow_pubmed}{A \code{logical} flag in the form '0' (default) or '1'.}
\item{caller_identity}{A \code{character} string.}
}
\value{
A \code{tibble}.
\describe{
\item{category}{term category (e.g. GO Process, KEGG pathways)}
\item{term}{enriched term (GO term, domain or pathway)}
\item{number_of_genes}{number of genes in your input list with the term
assigned}
\item{ratio_in_set}{ratio of the proteins in your input list with the
term assigned}
\item{ncbiTaxonId}{NCBI taxon identifier}
\item{inputGenes}{gene names from your input}
\item{preferredNames}{common protein names (in the same order as your
input Genes)}
\item{description}{description of the enriched term}
}
}
\description{
Gets the functional annotation (Gene Ontology, UniProt Keywords, PFAM,
INTERPRO and SMART domains) of your list of proteins.
}
\examples{
\dontrun{
# make a functional_annotation request
functional_annotation(identifiers = 'cdk1')
}
}
\seealso{
\code{\link{get_string_ids}}
\code{\link{network}}
\code{\link{interaction_partners}}
\code{\link{homology}}
\code{\link{homology_best}}
\code{\link{enrichment}}
\code{\link{ppi_enrichment}}
Other API methods: \code{\link{enrichment}},
\code{\link{get_string_ids}},
\code{\link{homology_best}}, \code{\link{homology}},
\code{\link{interaction_partners}},
\code{\link{network}}, \code{\link{ppi_enrichment}}
}
\concept{API methods}
|
# context = NULL means no context
# Evaluate every feature function in `funs` over the candidate continuations,
# returning one tibble column per function.
get_features_for_continuations <- function(funs,
                                           context,
                                           continuations,
                                           force_vectorise = TRUE) {
  feature_cols <- purrr::map(funs,
                             get_feature_for_continuation,
                             context,
                             continuations,
                             force_vectorise)
  tibble::as_tibble(feature_cols)
}
# Dispatch on whether the function cares about the preceding context.
get_feature_for_continuation <- function(fun, context, continuations, force_vectorise) {
  if (seqopt::is_context_sensitive(fun)) {
    gffc_context_sensitive(fun, context, continuations, force_vectorise)
  } else {
    gffc_context_insensitive(fun, continuations)
  }
}
# Context-free case: apply the function to each continuation on its own.
gffc_context_insensitive <- function(fun, continuations) {
  purrr::map_dbl(continuations, function(cont) as.numeric(fun(cont)))
}
# Context-sensitive case: without a context the feature is undefined (NA).
gffc_context_sensitive <- function(fun, context, continuations, force_vectorise) {
  if (is.null(context)) {
    gffc_cs_no_context(continuations)
  } else {
    gffc_cs_with_context(fun, context, continuations, force_vectorise)
  }
}
gffc_cs_no_context <- function(continuations) {
  rep(NA_real_, times = length(continuations))
}
# Choose between the vectorised and element-wise evaluation paths.
gffc_cs_with_context <- function(fun, context, continuations, force_vectorise) {
  if (seqopt::is_vectorised(fun)) {
    gffc_cs_wc_vectorised(fun, context, continuations)
  } else {
    gffc_cs_wc_unvectorised(fun, context, continuations, force_vectorise)
  }
}
# Element-wise fallback; forbidden when the caller demands vectorisation.
gffc_cs_wc_unvectorised <- function(fun, context, continuations, force_vectorise) {
  if (force_vectorise) {
    stop("if force_vectorise is TRUE, all context-sensitive functions ",
         "must be vectorised")
  }
  purrr::map_dbl(continuations, function(cont) as.numeric(fun(context, cont)))
}
# Vectorised call: symmetric functions ignore argument order; otherwise the
# function must support a `reverse` flag to swap its arguments.
gffc_cs_wc_vectorised <- function(fun, context, continuations) {
  if (seqopt::is_symmetric(fun)) {
    return(fun(continuations, context))
  }
  if (seqopt::has_reverse(fun)) {
    return(fun(continuations, context, reverse = TRUE))
  }
  stop("cannot use a vectorised cost function that is neither symmetric ",
       "nor has a reverse option")
}
|
/R/analyse-continuations.R
|
permissive
|
pmcharrison/voicer
|
R
| false
| false
| 2,040
|
r
|
# context = NULL means no context
# Evaluate every feature function in `funs` over the candidate continuations,
# returning one tibble column per function.
get_features_for_continuations <- function(funs,
                                           context,
                                           continuations,
                                           force_vectorise = TRUE) {
  feature_cols <- purrr::map(funs,
                             get_feature_for_continuation,
                             context,
                             continuations,
                             force_vectorise)
  tibble::as_tibble(feature_cols)
}
# Dispatch on whether the function cares about the preceding context.
get_feature_for_continuation <- function(fun, context, continuations, force_vectorise) {
  if (seqopt::is_context_sensitive(fun)) {
    gffc_context_sensitive(fun, context, continuations, force_vectorise)
  } else {
    gffc_context_insensitive(fun, continuations)
  }
}
# Context-free case: apply the function to each continuation on its own.
gffc_context_insensitive <- function(fun, continuations) {
  purrr::map_dbl(continuations, function(cont) as.numeric(fun(cont)))
}
# Context-sensitive case: without a context the feature is undefined (NA).
gffc_context_sensitive <- function(fun, context, continuations, force_vectorise) {
  if (is.null(context)) {
    gffc_cs_no_context(continuations)
  } else {
    gffc_cs_with_context(fun, context, continuations, force_vectorise)
  }
}
gffc_cs_no_context <- function(continuations) {
  rep(NA_real_, times = length(continuations))
}
# Choose between the vectorised and element-wise evaluation paths.
gffc_cs_with_context <- function(fun, context, continuations, force_vectorise) {
  if (seqopt::is_vectorised(fun)) {
    gffc_cs_wc_vectorised(fun, context, continuations)
  } else {
    gffc_cs_wc_unvectorised(fun, context, continuations, force_vectorise)
  }
}
# Element-wise fallback; forbidden when the caller demands vectorisation.
gffc_cs_wc_unvectorised <- function(fun, context, continuations, force_vectorise) {
  if (force_vectorise) {
    stop("if force_vectorise is TRUE, all context-sensitive functions ",
         "must be vectorised")
  }
  purrr::map_dbl(continuations, function(cont) as.numeric(fun(context, cont)))
}
# Vectorised call: symmetric functions ignore argument order; otherwise the
# function must support a `reverse` flag to swap its arguments.
gffc_cs_wc_vectorised <- function(fun, context, continuations) {
  if (seqopt::is_symmetric(fun)) {
    return(fun(continuations, context))
  }
  if (seqopt::has_reverse(fun)) {
    return(fun(continuations, context, reverse = TRUE))
  }
  stop("cannot use a vectorised cost function that is neither symmetric ",
       "nor has a reverse option")
}
|
### formula helper
# Parse a formula of the form `class ~ pred1 + pred2` (or `class ~ .`) against
# `data`, which may be a data.frame or an arules itemMatrix (transactions).
# Returns a list with the column ids and names of the class and predictors,
# plus the parsed formula itself.
.parseformula <- function(formula, data) {
formula <- as.formula(formula)
vars <- all.vars(formula)
### class
# for transactions, class can match multiple items!
class <- vars[1]
if(is(data, "itemMatrix")) {
# NOTE(review): the class name is used as an unescaped regex prefix, so
# names containing regex metacharacters may over-match -- confirm inputs.
class_ids <- which(grepl(paste0("^", class), colnames(data)))
} else {
# pmatch() performs partial but unambiguous matching on the column name.
class_ids <- pmatch(class, colnames(data))
}
if(any(is.na(class_ids)) || length(class_ids) == 0)
stop("Cannot identify column specified as class in the formula.")
class_names <- colnames(data)[class_ids]
if(!is(data, "itemMatrix") && !is.factor(data[[class_ids]]))
stop("class variable needs to be a factor!")
### predictors
# "." on the RHS means: all columns except the class (data.frame case is
# additionally restricted to numeric columns).
vars <- vars[-1]
if(is(data, "itemMatrix")) {
if(length(vars) == 1 && vars == ".") var_ids <- setdiff(seq(ncol(data)), class_ids)
else var_ids <- which(grepl(paste0("^", vars, collapse = "|"), colnames(data)))
} else {
if(length(vars) == 1 && vars == ".") var_ids <- setdiff(which(sapply(data, is.numeric)), class_ids)
else var_ids <- pmatch(vars, colnames(data))
}
if(any(is.na(var_ids))) stop(paste("Cannot identify term", vars[is.na(var_ids)], "in data! "))
var_names <- colnames(data)[var_ids]
list(class_ids = class_ids, class_names = class_names,
var_ids = var_ids, var_names = var_names,
formula = formula)
}
|
/R/formula.R
|
no_license
|
tylergiallanza/arulesCWAR
|
R
| false
| false
| 1,325
|
r
|
### formula helper
### Duplicate copy of .parseformula (this dump stores each snippet twice).
### Resolves `class ~ predictors` against `data` and returns matching
### column ids/names plus the parsed formula.
.parseformula <- function(formula, data) {
  formula <- as.formula(formula)
  vars <- all.vars(formula)
  ### class
  # for transactions, class can match multiple items!
  class <- vars[1]
  if(is(data, "itemMatrix")) {
    # NOTE(review): unescaped regex prefix match on the class name
    class_ids <- which(grepl(paste0("^", class), colnames(data)))
  } else {
    class_ids <- pmatch(class, colnames(data))
  }
  if(any(is.na(class_ids)) || length(class_ids) == 0)
    stop("Cannot identify column specified as class in the formula.")
  class_names <- colnames(data)[class_ids]
  if(!is(data, "itemMatrix") && !is.factor(data[[class_ids]]))
    stop("class variable needs to be a factor!")
  ### predictors; "." selects all remaining (numeric) columns
  vars <- vars[-1]
  if(is(data, "itemMatrix")) {
    if(length(vars) == 1 && vars == ".") var_ids <- setdiff(seq(ncol(data)), class_ids)
    else var_ids <- which(grepl(paste0("^", vars, collapse = "|"), colnames(data)))
  } else {
    if(length(vars) == 1 && vars == ".") var_ids <- setdiff(which(sapply(data, is.numeric)), class_ids)
    else var_ids <- pmatch(vars, colnames(data))
  }
  if(any(is.na(var_ids))) stop(paste("Cannot identify term", vars[is.na(var_ids)], "in data! "))
  var_names <- colnames(data)[var_ids]
  list(class_ids = class_ids, class_names = class_names,
       var_ids = var_ids, var_names = var_names,
       formula = formula)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_function.R
\name{fars_map_state}
\alias{fars_map_state}
\title{Map the FARS data for a state in a given year}
\usage{
fars_map_state(state.num, year)
}
\arguments{
\item{state.num}{An integer representing the state ID}
\item{year}{A year}
}
\value{
A map object or NULL if there are no accidents to report
}
\description{
This function maps the accident data for a state-year combination
}
\details{
If the state.num is invalid, an error will be thrown specifying this.
If there is no data associated with the state, a message "no accidents to plot" will be shown,
and an invisible NULL is returned.
If there is some data, points representing where the accidents occur is shown on a map
}
\examples{
\dontrun{
fars_map_state(1, 2013)
}
}
|
/man/fars_map_state.Rd
|
permissive
|
xxxw567/CourseraRpackagesFinal
|
R
| false
| true
| 823
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_function.R
\name{fars_map_state}
\alias{fars_map_state}
\title{Map the FARS data for a state in a given year}
\usage{
fars_map_state(state.num, year)
}
\arguments{
\item{state.num}{An integer representing the state ID}
\item{year}{A year}
}
\value{
A map object or NULL if there are no accidents to report
}
\description{
This function maps the accident data for a state-year combination
}
\details{
If the state.num is invalid, an error will be thrown specifying this.
If there is no data associated with the state, a message "no accidents to plot" will be shown,
and an invisible NULL is returned.
If there is some data, points representing where the accidents occur is shown on a map
}
\examples{
\dontrun{
fars_map_state(1, 2013)
}
}
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shinydashboard)
library(mvtnorm)
library(scatterplot3d)
library(ellipse)
library(rgl)
# Slider bounds for the univariate (BOUND1) and bivariate (BOUND2) tabs.
BOUND1<-1.5
BOUND2<-1.5
# Dashboard UI: a shared sample-size slider plus two tabs (univariate and
# bivariate Gaussian mixtures).
ui <- dashboardPage(
  dashboardHeader(title="InfoF422"),
  dashboardSidebar(
    sidebarMenu(
      sliderInput("N",
                  "Number of samples:",
                  min = 1,
                  max = 1000,
                  value = 100,step=2),
      menuItem("Univariate mixture", tabName = "Univariatemixture", icon = icon("th")),
      menuItem("Bivariate mixture", tabName = "Bivariatemixture", icon = icon("th"))
    )
  ),
  dashboardBody(
    tabItems(
      # First tab content
      tabItem(tabName = "Univariatemixture",
              fluidRow(
                # NOTE(review): value = -2 / value = 2 lie outside
                # [-BOUND1, BOUND1] = [-1.5, 1.5]; Shiny clamps the initial
                # value to the slider range — confirm this is intended.
                box(width=4,sliderInput("mean1","Mean1:",min = -BOUND1, max = BOUND1 ,
                                        value = -2,step=0.1),
                    sliderInput("variance1","Variance1:",min = 0.5,max = 2, value = 0.75),
                    sliderInput("mean2","Mean2:",min = -BOUND1, max = BOUND1 ,
                                value = 2,step=0.1),
                    sliderInput("variance2","Variance2:",min = 0.5,max = 2, value = 0.75,step=0.05),
                    sliderInput("p1","P1:",min = 0, max = 1 ,
                                value = 0.5)),
                box(width=6,title = "Distribution",collapsible = TRUE,plotOutput("uniPlotP"))),
              fluidRow( box(width=6,title = "Data",plotOutput("uniPlotD"))
              )
      ),
      # Second tab content: each component is parameterised by a rotation
      # angle and two principal-axis lengths.
      tabItem(tabName = "Bivariatemixture",
              fluidRow(
                box(width=4,sliderInput("rot1","Rotation 1:", min = -3.14,max = 3.14, value = 0),
                    sliderInput("ax11","Axis1 1:",min = 0.01,max = BOUND2,value = 3,step=0.05),
                    sliderInput("ax21","Axis2 1:", min = 0.01, max = BOUND2, value = 0.15,step=0.05),
                    sliderInput("rot2","Rotation 2:", min = -3.14,max = 3.14, value = 0),
                    sliderInput("ax12","Axis1 2:",min = 0.01,max = BOUND2,value = 0.15,step=0.05),
                    sliderInput("ax22","Axis2 2:", min = 0.01, max = BOUND2, value = 3,step=0.05),
                    sliderInput("P1","P1:",min = 0, max = 1 ,value = 0.5),
                    textOutput("textB")),
                #rglwidgetOutput("biPlotP")
                box(width=8,title = "Distribution",collapsible = TRUE,plotOutput("biPlotP"))
              ),
              fluidRow( box(width=12,title = "Data",plotOutput("biPlotD")))
      )
    )
  )
) # ui
# Globals written from inside reactives via <<- so they persist across renders.
D<-NULL ## Univariate dataset
E<-NULL ## Bivariate eigenvalue matrix
server<-function(input, output,session) {
  set.seed(122)
  histdata <- rnorm(500)  # NOTE(review): unused leftover from the dashboard template
  # Class-conditional densities and the posterior of class 1 for the
  # univariate two-Gaussian mixture.
  output$uniPlotP <- renderPlot( {
    # touching the inputs establishes the reactive dependency
    input$variance1+input$variance2+input$p1
    input$N
    xaxis=seq(min(input$mean1,input$mean2)-BOUND1,max(input$mean1,input$mean2)+BOUND1,by=0.01)
    # NOTE(review): dnorm's third argument is the standard deviation, but the
    # sliders are labelled "Variance" — confirm which is intended.
    redp=dnorm(xaxis,input$mean1,input$variance1)
    greenp=dnorm(xaxis,input$mean2,input$variance2)
    plot(xaxis,redp,col="red",type="l",lwd=2,ylim=c(-0.1,1.1))
    lines(xaxis,greenp,col="green",lwd=2)
    # posterior P(class 1 | x) by Bayes' rule
    postp=(redp*input$p1)/(redp*input$p1+greenp*(1-input$p1))
    lines(xaxis,postp,col="red",type="l",lwd=4)
    lines(xaxis,0.5*(numeric(length(xaxis))+1),lwd=1)  # 0.5 decision line
  })
  # Samples from each component, drawn on two slightly offset rows.
  output$uniPlotD <- renderPlot( {
    input$variance1+input$variance2+input$p1
    input$N
    D1<-rnorm(input$N,input$mean1,input$variance1)
    D2<-rnorm(input$N,input$mean2,input$variance2)
    # keep a fraction p1 of component 1 and (1-p1) of component 2
    I1<-sample(1:input$N,round(input$p1*input$N))
    I2<-sample(1:input$N,round((1-input$p1)*input$N))
    D1<-D1[I1]
    D2<-D2[I2]
    xl=min(input$mean1,input$mean2)-BOUND1
    xu=max(input$mean1,input$mean2)+BOUND1
    plot(D1,0*D1,xlim=c(xl,xu),col="red")
    points(D2,0.01*(numeric(length(D2))+1),xlim=c(xl,xu),col="green")
  })
  # 3D surface of the bivariate mixture density on a grid.
  output$biPlotP <- renderPlot({
    x <- seq(-BOUND2, BOUND2, by= .2)
    y <- x
    z<-array(0,dim=c(length(x),length(y)))
    #th : rotation angle of the first principal axis
    #ax1: length principal axis 1
    #ax2: length principal axis 2
    ax1<-input$ax11
    th=input$rot1
    ax2<-input$ax21
    Rot<-array(c(cos(th), -sin(th), sin(th), cos(th)),dim=c(2,2)); #rotation matrix
    A<-array(c(ax1, 0, 0, ax2),dim=c(2,2))
    Sigma<-(Rot%*%A)%*%t(Rot)  # covariance from rotation + axis lengths
    E<<-eigen(Sigma)
    ax1<-input$ax12
    th=input$rot2
    ax2<-input$ax22
    Rot<-array(c(cos(th), -sin(th), sin(th), cos(th)),dim=c(2,2)); #rotation matrix
    A<-array(c(ax1, 0, 0, ax2),dim=c(2,2))
    Sigma2<-(Rot%*%A)%*%t(Rot)
    # NOTE(review): this overwrites the decomposition stored just above, so
    # textB only ever reports Sigma2's eigenvalues — confirm intended.
    E<<-eigen(Sigma2)
    for (i in 1:length(x)){
      for (j in 1:length(y)){
        z[i,j]<-(input$P1)*dmvnorm(c(x[i],y[j]),sigma=Sigma)+(1-input$P1)*dmvnorm(c(x[i],y[j]),sigma=Sigma2)
      }
    }
    z[is.na(z)] <- 1
    op <- par(bg = "white")
    prob.z<-z
    open3d(useNULL =TRUE)
    bg3d("white")
    material3d(col = "black")
    persp3d(x, y, prob.z, aspect = c(1, 1, 0.5), col = "lightblue")
    #persp(x, y, prob.z, theta = 30, phi = 30, expand = 0.5, col = "lightblue")
    #scatterplot3d(x, y, prob.z) #, theta = 30, phi = 30, expand = 0.5, col = "red")
  })
  # Scatter of samples from both components with their covariance ellipses.
  output$biPlotD <- renderPlot( {
    th=input$rot1
    Rot<-array(c(cos(th), -sin(th), sin(th), cos(th)),dim=c(2,2)); #rotation matrix
    A<-array(c(input$ax11, 0, 0, input$ax21),dim=c(2,2))
    Sigma<-(Rot%*%A)%*%t(Rot)
    D1=rmvnorm(input$N,sigma=Sigma)
    th=input$rot2
    Rot<-array(c(cos(th), -sin(th), sin(th), cos(th)),dim=c(2,2)); #rotation matrix
    A<-array(c(input$ax12, 0, 0, input$ax22),dim=c(2,2))
    Sigma2<-(Rot%*%A)%*%t(Rot)
    D2=rmvnorm(input$N,sigma=Sigma2)
    I1<-sample(1:input$N,round(input$P1*input$N))
    I2<-sample(1:input$N,round((1-input$P1)*input$N))
    D<<-rbind(D1[I1,],D2[I2,])
    plot(D[,1],D[,2],xlim=c(-BOUND2,BOUND2),ylim=c(-BOUND2,BOUND2))
    lines(ellipse(Sigma))
    lines(ellipse(Sigma2))
  })
  output$textB <- renderText({
    # NOTE(review): input$rot / input$ax1 / input$ax2 do not exist in the UI
    # (the slider ids are rot1/rot2, ax11, ...), so this output never
    # invalidates when those sliders move — confirm the intended ids.
    input$rot
    input$ax1
    input$ax2
    paste("Eigen1=", E$values[1], "\n Eigen2=", E$values[2])
  })
  # NOTE(review): there is no plotOutput("triPlotD") in the UI and the inputs
  # rotx/roty/rotz/ax31/ax32/ax33 are not defined anywhere — dead code?
  output$triPlotD <- renderPlot({
    Rotx<-array(c(1,0,0,0, cos(input$rotx), sin(input$rotx), 0, -sin(input$rotx), cos(input$rotx)),dim=c(3,3)); #rotation matrix
    Roty<-array(c(cos(input$roty), 0, -sin(input$roty), 0, 1,0, sin(input$roty), 0, cos(input$roty)),dim=c(3,3));
    Rotz<-array(c(cos(input$rotz), sin(input$rotz), 0, -sin(input$rotz), cos(input$rotz),0, 0, 0, 1),dim=c(3,3));
    A<-array(c(input$ax31, 0, 0, 0, input$ax32,0, 0,0,input$ax33 ),dim=c(3,3))
    Rot=Rotx%*%Roty%*%Rotz
    Sigma<-(Rot%*%A)%*%t(Rot)
    D3=rmvnorm(round(input$N/2),sigma=Sigma)
    s3d<-scatterplot3d(D3,xlim=c(-BOUND2,BOUND2),ylim=c(-BOUND2,BOUND2),zlim=c(-BOUND2,BOUND2),xlab="x",ylab="y",zlab="z")
    D3bis=rmvnorm(round(input$N/2),sigma=Sigma)
    s3d$points3d(D3bis,col="red")
  })
}
shinyApp(ui, server)
|
/inst/shiny/classif.R
|
no_license
|
niuneo/gbcode
|
R
| false
| false
| 7,139
|
r
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shinydashboard)
library(mvtnorm)
library(scatterplot3d)
library(ellipse)
library(rgl)
# Duplicate copy of the dashboard UI above (this dump stores each snippet twice).
# Slider bounds for the univariate (BOUND1) and bivariate (BOUND2) tabs.
BOUND1<-1.5
BOUND2<-1.5
ui <- dashboardPage(
  dashboardHeader(title="InfoF422"),
  dashboardSidebar(
    sidebarMenu(
      sliderInput("N",
                  "Number of samples:",
                  min = 1,
                  max = 1000,
                  value = 100,step=2),
      menuItem("Univariate mixture", tabName = "Univariatemixture", icon = icon("th")),
      menuItem("Bivariate mixture", tabName = "Bivariatemixture", icon = icon("th"))
    )
  ),
  dashboardBody(
    tabItems(
      # First tab content
      tabItem(tabName = "Univariatemixture",
              fluidRow(
                # NOTE(review): mean defaults (-2, 2) are outside the slider range
                box(width=4,sliderInput("mean1","Mean1:",min = -BOUND1, max = BOUND1 ,
                                        value = -2,step=0.1),
                    sliderInput("variance1","Variance1:",min = 0.5,max = 2, value = 0.75),
                    sliderInput("mean2","Mean2:",min = -BOUND1, max = BOUND1 ,
                                value = 2,step=0.1),
                    sliderInput("variance2","Variance2:",min = 0.5,max = 2, value = 0.75,step=0.05),
                    sliderInput("p1","P1:",min = 0, max = 1 ,
                                value = 0.5)),
                box(width=6,title = "Distribution",collapsible = TRUE,plotOutput("uniPlotP"))),
              fluidRow( box(width=6,title = "Data",plotOutput("uniPlotD"))
              )
      ),
      # Second tab content
      tabItem(tabName = "Bivariatemixture",
              fluidRow(
                box(width=4,sliderInput("rot1","Rotation 1:", min = -3.14,max = 3.14, value = 0),
                    sliderInput("ax11","Axis1 1:",min = 0.01,max = BOUND2,value = 3,step=0.05),
                    sliderInput("ax21","Axis2 1:", min = 0.01, max = BOUND2, value = 0.15,step=0.05),
                    sliderInput("rot2","Rotation 2:", min = -3.14,max = 3.14, value = 0),
                    sliderInput("ax12","Axis1 2:",min = 0.01,max = BOUND2,value = 0.15,step=0.05),
                    sliderInput("ax22","Axis2 2:", min = 0.01, max = BOUND2, value = 3,step=0.05),
                    sliderInput("P1","P1:",min = 0, max = 1 ,value = 0.5),
                    textOutput("textB")),
                #rglwidgetOutput("biPlotP")
                box(width=8,title = "Distribution",collapsible = TRUE,plotOutput("biPlotP"))
              ),
              fluidRow( box(width=12,title = "Data",plotOutput("biPlotD")))
      )
    )
  )
) # ui
# Duplicate copy of the Shiny server above (this dump stores each snippet twice).
D<-NULL ## Univariate dataset
E<-NULL ## Bivariate eigenvalue matrix
server<-function(input, output,session) {
  set.seed(122)
  histdata <- rnorm(500)  # NOTE(review): unused template leftover
  output$uniPlotP <- renderPlot( {
    # touching the inputs establishes the reactive dependency
    input$variance1+input$variance2+input$p1
    input$N
    xaxis=seq(min(input$mean1,input$mean2)-BOUND1,max(input$mean1,input$mean2)+BOUND1,by=0.01)
    # NOTE(review): dnorm's third argument is the SD, sliders say "Variance"
    redp=dnorm(xaxis,input$mean1,input$variance1)
    greenp=dnorm(xaxis,input$mean2,input$variance2)
    plot(xaxis,redp,col="red",type="l",lwd=2,ylim=c(-0.1,1.1))
    lines(xaxis,greenp,col="green",lwd=2)
    postp=(redp*input$p1)/(redp*input$p1+greenp*(1-input$p1))
    lines(xaxis,postp,col="red",type="l",lwd=4)
    lines(xaxis,0.5*(numeric(length(xaxis))+1),lwd=1)
  })
  output$uniPlotD <- renderPlot( {
    input$variance1+input$variance2+input$p1
    input$N
    D1<-rnorm(input$N,input$mean1,input$variance1)
    D2<-rnorm(input$N,input$mean2,input$variance2)
    I1<-sample(1:input$N,round(input$p1*input$N))
    I2<-sample(1:input$N,round((1-input$p1)*input$N))
    D1<-D1[I1]
    D2<-D2[I2]
    xl=min(input$mean1,input$mean2)-BOUND1
    xu=max(input$mean1,input$mean2)+BOUND1
    plot(D1,0*D1,xlim=c(xl,xu),col="red")
    points(D2,0.01*(numeric(length(D2))+1),xlim=c(xl,xu),col="green")
  })
  output$biPlotP <- renderPlot({
    x <- seq(-BOUND2, BOUND2, by= .2)
    y <- x
    z<-array(0,dim=c(length(x),length(y)))
    #th : rotation angle of the first principal axis
    #ax1: length principal axis 1
    #ax2: length principal axis 2
    ax1<-input$ax11
    th=input$rot1
    ax2<-input$ax21
    Rot<-array(c(cos(th), -sin(th), sin(th), cos(th)),dim=c(2,2)); #rotation matrix
    A<-array(c(ax1, 0, 0, ax2),dim=c(2,2))
    Sigma<-(Rot%*%A)%*%t(Rot)
    E<<-eigen(Sigma)
    ax1<-input$ax12
    th=input$rot2
    ax2<-input$ax22
    Rot<-array(c(cos(th), -sin(th), sin(th), cos(th)),dim=c(2,2)); #rotation matrix
    A<-array(c(ax1, 0, 0, ax2),dim=c(2,2))
    Sigma2<-(Rot%*%A)%*%t(Rot)
    # NOTE(review): overwrites the eigen decomposition stored just above
    E<<-eigen(Sigma2)
    for (i in 1:length(x)){
      for (j in 1:length(y)){
        z[i,j]<-(input$P1)*dmvnorm(c(x[i],y[j]),sigma=Sigma)+(1-input$P1)*dmvnorm(c(x[i],y[j]),sigma=Sigma2)
      }
    }
    z[is.na(z)] <- 1
    op <- par(bg = "white")
    prob.z<-z
    open3d(useNULL =TRUE)
    bg3d("white")
    material3d(col = "black")
    persp3d(x, y, prob.z, aspect = c(1, 1, 0.5), col = "lightblue")
    #persp(x, y, prob.z, theta = 30, phi = 30, expand = 0.5, col = "lightblue")
    #scatterplot3d(x, y, prob.z) #, theta = 30, phi = 30, expand = 0.5, col = "red")
  })
  output$biPlotD <- renderPlot( {
    th=input$rot1
    Rot<-array(c(cos(th), -sin(th), sin(th), cos(th)),dim=c(2,2)); #rotation matrix
    A<-array(c(input$ax11, 0, 0, input$ax21),dim=c(2,2))
    Sigma<-(Rot%*%A)%*%t(Rot)
    D1=rmvnorm(input$N,sigma=Sigma)
    th=input$rot2
    Rot<-array(c(cos(th), -sin(th), sin(th), cos(th)),dim=c(2,2)); #rotation matrix
    A<-array(c(input$ax12, 0, 0, input$ax22),dim=c(2,2))
    Sigma2<-(Rot%*%A)%*%t(Rot)
    D2=rmvnorm(input$N,sigma=Sigma2)
    I1<-sample(1:input$N,round(input$P1*input$N))
    I2<-sample(1:input$N,round((1-input$P1)*input$N))
    D<<-rbind(D1[I1,],D2[I2,])
    plot(D[,1],D[,2],xlim=c(-BOUND2,BOUND2),ylim=c(-BOUND2,BOUND2))
    lines(ellipse(Sigma))
    lines(ellipse(Sigma2))
  })
  output$textB <- renderText({
    # NOTE(review): input$rot/ax1/ax2 do not exist in the UI (ids are rot1,
    # ax11, ...), so these establish no working reactive dependency
    input$rot
    input$ax1
    input$ax2
    paste("Eigen1=", E$values[1], "\n Eigen2=", E$values[2])
  })
  # NOTE(review): no matching plotOutput("triPlotD") in the UI; inputs
  # rotx/roty/rotz/ax31/ax32/ax33 are undefined — dead code?
  output$triPlotD <- renderPlot({
    Rotx<-array(c(1,0,0,0, cos(input$rotx), sin(input$rotx), 0, -sin(input$rotx), cos(input$rotx)),dim=c(3,3)); #rotation matrix
    Roty<-array(c(cos(input$roty), 0, -sin(input$roty), 0, 1,0, sin(input$roty), 0, cos(input$roty)),dim=c(3,3));
    Rotz<-array(c(cos(input$rotz), sin(input$rotz), 0, -sin(input$rotz), cos(input$rotz),0, 0, 0, 1),dim=c(3,3));
    A<-array(c(input$ax31, 0, 0, 0, input$ax32,0, 0,0,input$ax33 ),dim=c(3,3))
    Rot=Rotx%*%Roty%*%Rotz
    Sigma<-(Rot%*%A)%*%t(Rot)
    D3=rmvnorm(round(input$N/2),sigma=Sigma)
    s3d<-scatterplot3d(D3,xlim=c(-BOUND2,BOUND2),ylim=c(-BOUND2,BOUND2),zlim=c(-BOUND2,BOUND2),xlab="x",ylab="y",zlab="z")
    D3bis=rmvnorm(round(input$N/2),sigma=Sigma)
    s3d$points3d(D3bis,col="red")
  })
}
shinyApp(ui, server)
|
# Course project plot4.png: 2x2 panel of power-consumption plots for
# 2007-02-01 and 2007-02-02, filtered at read time via sqldf.
if(!require('sqldf')){
  install.packages('sqldf')
}
library(sqldf)
# first two columns (Date, Time) are text; the remaining 7 are numeric
colclasses = c("character", "character", rep("numeric",7))
# dates in the file are d/m/Y, so '1/2/2007' is 1 Feb 2007
sql <- "SELECT * FROM file WHERE Date='1/2/2007' OR Date='2/2/2007'"
data <- read.csv.sql("household_power_consumption.txt", sql, sep=';',
                     colClasses=colclasses, header = T)
# Convert ? to NA since read.csv.sql function doesn't have na.strings var
# NOTE(review): the measurement columns were already declared numeric above,
# so a literal "?" should not survive into them — confirm this cleanup
# still has any effect for these two days.
data[data == "?"] = NA
# Combine Date and Time column into new column, convert it POSIX time
DateTime <- paste(data$Date, data$Time)
DateTime <- strptime(DateTime,"%d/%m/%Y %H:%M:%S")
data <- cbind(DateTime, data)
png(file = "plot4.png", width = 480, height = 480)
# 2x2 grid, filled column-first
par(mfcol = c(2, 2), mar = c(5, 4, 4, 2))
with(data, plot(DateTime, Global_active_power, type="l",
                ylab="Global Active Power", xlab=""))
# empty frame, then one line per sub-meter
with(data, plot(DateTime, Sub_metering_1, type="n",
                ylab="Energy sub metering", xlab=""))
with(data, points(DateTime, Sub_metering_1, type = "l"))
with(data, points(DateTime, Sub_metering_2, type = "l", col = "Red"))
with(data, points(DateTime, Sub_metering_3, type = "l", col = "Blue"))
# columns 8:10 after the cbind are Sub_metering_1..3
legend("topright", bty = "n", lwd = 1,
       col = c("Black", "Red", "Blue"), legend = names(data)[8:10])
with(data, plot(DateTime, Voltage, type="l", xlab="datetime"))
with(data, plot(DateTime, Global_reactive_power, type="l", xlab="datetime"))
dev.off()
|
/plot4.R
|
no_license
|
sven700c/ExData_Plotting1
|
R
| false
| false
| 1,399
|
r
|
# Duplicate copy of plot4.R (this dump stores each snippet twice).
if(!require('sqldf')){
  install.packages('sqldf')
}
library(sqldf)
colclasses = c("character", "character", rep("numeric",7))
# dates in the file are d/m/Y, so these select 1-2 Feb 2007
sql <- "SELECT * FROM file WHERE Date='1/2/2007' OR Date='2/2/2007'"
data <- read.csv.sql("household_power_consumption.txt", sql, sep=';',
                     colClasses=colclasses, header = T)
# Convert ? to NA since read.csv.sql function doesn't have na.strings var
data[data == "?"] = NA
# Combine Date and Time column into new column, convert it POSIX time
DateTime <- paste(data$Date, data$Time)
DateTime <- strptime(DateTime,"%d/%m/%Y %H:%M:%S")
data <- cbind(DateTime, data)
png(file = "plot4.png", width = 480, height = 480)
par(mfcol = c(2, 2), mar = c(5, 4, 4, 2))
with(data, plot(DateTime, Global_active_power, type="l",
                ylab="Global Active Power", xlab=""))
with(data, plot(DateTime, Sub_metering_1, type="n",
                ylab="Energy sub metering", xlab=""))
with(data, points(DateTime, Sub_metering_1, type = "l"))
with(data, points(DateTime, Sub_metering_2, type = "l", col = "Red"))
with(data, points(DateTime, Sub_metering_3, type = "l", col = "Blue"))
legend("topright", bty = "n", lwd = 1,
       col = c("Black", "Red", "Blue"), legend = names(data)[8:10])
with(data, plot(DateTime, Voltage, type="l", xlab="datetime"))
with(data, plot(DateTime, Global_reactive_power, type="l", xlab="datetime"))
dev.off()
|
################ plotting distributions to compare GO and cluster #################
# Compares the size distribution of GO terms (restricted to genes present in
# the consensus network) against MCL cluster size distributions.
setwd("~/ferdig_rotation/regulon_validation/original_nets/consensus_network/clustering_output/")
library(igraph)
###################### GO distribution ##########################
###lets first load in the GO file and get the dist for number of genes with GO terms with 1 gene, 2 genes, ... n genes
GO_data <- read.csv("../../GO_file/PID_6_10_NEW.csv", as.is=T)
GO_data[1:5,1:5]  # interactive sanity peek
row.names(GO_data) <- GO_data[,1]
GO_data[1:5,1:5]
GO_data1 <- GO_data[,-1]  # drop the gene-id column, keep the gene x GO indicator matrix
GO_data1[1:5,1:5]
###now lets take only the rows (genes) that are also in the consensus network
#read in edgelist, convert to graph, get node names
cons_el <- read.csv("../consensus_edges.csv", as.is = T)
cons_graph <- graph_from_edgelist(as.matrix(cons_el[,c(1,2)]), directed=F)
cons_nodes <- V(cons_graph)$name
#find the rows of the GO matrix that intersect the consensus nodes
GO_cons <- GO_data1[row.names(GO_data1) %in% cons_nodes,]
###now we want to get the column sums - how many genes are in GO1, GO2, GO3 ...
go_sizes <- colSums(GO_cons)
#remove any zeros because we don't care about these terms - they aren't in the network
final_go_sizes <- go_sizes[go_sizes > 0]
#check what the max is
max(final_go_sizes) #109
hist(final_go_sizes, breaks = seq(0,109,by=1), col="cadetblue2", xlab = "Number of genes (k)", ylab = "Number of GO terms with k genes", main="GO Term Size Distribution")
# how many singleton / pair GO terms there are
length(which(final_go_sizes == 1))
length(which(final_go_sizes == 2))
############# now lets look at some of our clustering results ##############
#lets do 1.9 (most GO terms) and 3.1 (highest LOO precision)
cluster_data <- read.csv("my_format/consnet_MCL_i1.9_KM.csv")
my_table <- table(cluster_data[,2])  # cluster id -> number of genes
tail(sort(my_table), 5)
max(my_table) #235
hist(my_table, breaks = seq(0,235,by=1), col="cadetblue2", xlab = "Number of genes (k)", ylab = "Number of clusters with k genes", main="Cluster Size Distribution (MCL i=1.9)")
#now for i=3.1
cluster_data <- read.csv("my_format/consnet_MCL_i3.1_KM.csv")
my_table <- table(cluster_data[,2])
max(my_table) #814
my_table2 <- sort(my_table)
tail(sort(my_table), 5)
# drop the few huge clusters (indices past 373) so the histogram is readable
hist(my_table2[1:373], breaks = seq(0,39,by=1), col="cadetblue2", xlab = "Number of genes (k)", ylab = "Number of clusters with k genes", main="Cluster Size Distribution (MCL i=3.1)")
|
/network_validation_codes/GO_cluster_distributions.R
|
no_license
|
katiemeis/code_gradlab
|
R
| false
| false
| 2,390
|
r
|
################ plotting distributions to compare GO and cluster #################
# Duplicate copy of the GO/cluster distribution script (dump stores each snippet twice).
setwd("~/ferdig_rotation/regulon_validation/original_nets/consensus_network/clustering_output/")
library(igraph)
###################### GO distribution ##########################
###lets first load in the GO file and get the dist for number of genes with GO terms with 1 gene, 2 genes, ... n genes
GO_data <- read.csv("../../GO_file/PID_6_10_NEW.csv", as.is=T)
GO_data[1:5,1:5]
row.names(GO_data) <- GO_data[,1]
GO_data[1:5,1:5]
GO_data1 <- GO_data[,-1]
GO_data1[1:5,1:5]
###now lets take only the rows (genes) that are also in the consensus network
#read in edgelist, convert to graph, get node names
cons_el <- read.csv("../consensus_edges.csv", as.is = T)
cons_graph <- graph_from_edgelist(as.matrix(cons_el[,c(1,2)]), directed=F)
cons_nodes <- V(cons_graph)$name
#find the rows of the GO matrix that intersect the consensus nodes
GO_cons <- GO_data1[row.names(GO_data1) %in% cons_nodes,]
###now we want to get the column sums - how many genes are in GO1, GO2, GO3 ...
go_sizes <- colSums(GO_cons)
#remove any zeros because we don't care about these terms - they aren't in the network
final_go_sizes <- go_sizes[go_sizes > 0]
#check what the max is
max(final_go_sizes) #109
hist(final_go_sizes, breaks = seq(0,109,by=1), col="cadetblue2", xlab = "Number of genes (k)", ylab = "Number of GO terms with k genes", main="GO Term Size Distribution")
length(which(final_go_sizes == 1))
length(which(final_go_sizes == 2))
############# now lets look at some of our clustering results ##############
#lets do 1.9 (most GO terms) and 3.1 (highest LOO precision)
cluster_data <- read.csv("my_format/consnet_MCL_i1.9_KM.csv")
my_table <- table(cluster_data[,2])
tail(sort(my_table), 5)
max(my_table) #235
hist(my_table, breaks = seq(0,235,by=1), col="cadetblue2", xlab = "Number of genes (k)", ylab = "Number of clusters with k genes", main="Cluster Size Distribution (MCL i=1.9)")
#now for i=3.1
cluster_data <- read.csv("my_format/consnet_MCL_i3.1_KM.csv")
my_table <- table(cluster_data[,2])
max(my_table) #814
my_table2 <- sort(my_table)
tail(sort(my_table), 5)
hist(my_table2[1:373], breaks = seq(0,39,by=1), col="cadetblue2", xlab = "Number of genes (k)", ylab = "Number of clusters with k genes", main="Cluster Size Distribution (MCL i=3.1)")
|
library(shiny)
# Define UI for application that draws a histogram
# Course-project UI: the user types the x and y fields of the `trees` data
# set, applies the choice, and the server fits a linear model whose slope and
# intercept are shown in the sidebar.
shinyUI(fluidPage(
  # Application title
  # FIX: corrected the user-facing typo "Producs" -> "Products".
  titlePanel("Developing Data Products Course Project"),
  # Sidebar with a slider input for number of bins
  sidebarLayout(
    sidebarPanel(
      #Slope of the linear model calculated
      h3("Slope"),
      textOutput("slopeOutput"),
      #intercept of the linear model calculated
      h3("Intercept"),
      textOutput("intOutput"),
      hr(),
      #Text input for the x variable in the tree data
      textInput("textX",label = 'Type the X field for the plot, between "Girth", "Height", "Volume"', value="Type your text here..."),
      #Text input for the y variable in the tree data
      textInput("textY",label = 'Type the y field for the plot, between "Girth", "Height", "Volume"',value = "Type your text here..."),
      #action button, I decided to use the actionButton because that way the program has to wait for
      #the values to be entered in order to start doing the calculations and so on.
      actionButton("button","Apply changes!")
    ),
    # Show a plot of the generated distribution
    mainPanel(
      tabsetPanel(type = "tabs",
                  # brushing on the plot lets the server refit on the selected points
                  tabPanel("Plot",br(),plotOutput("plot1", brush = brushOpts(id="brush1"))),
                  tabPanel("Documentation",br(),("When entering the values of x and y and hitting the apply changes button, a graph of the relationship between the two data that was entered is created, and it is also possible to select a set of points greater than 2 within the graph to get the slope and intercept of the selected data"))
      )
    )
  )
))
|
/ui.R
|
no_license
|
EdgardoDiBello/Developing-Data-Products-Course-Project
|
R
| false
| false
| 1,831
|
r
|
library(shiny)
# Define UI for application that draws a histogram
# Duplicate copy of the course-project UI (this dump stores each snippet twice).
shinyUI(fluidPage(
  # Application title
  # NOTE(review): "Producs" is a typo in the user-facing title ("Products")
  titlePanel("Developing Data Producs Course Project"),
  # Sidebar with a slider input for number of bins
  sidebarLayout(
    sidebarPanel(
      #Slope of the linear model calculated
      h3("Slope"),
      textOutput("slopeOutput"),
      #intercept of the linear model calculated
      h3("Intercept"),
      textOutput("intOutput"),
      hr(),
      #Text input for the x variable in the tree data
      textInput("textX",label = 'Type the X field for the plot, between "Girth", "Height", "Volume"', value="Type your text here..."),
      #Text input for the y variable in the tree data
      textInput("textY",label = 'Type the y field for the plot, between "Girth", "Height", "Volume"',value = "Type your text here..."),
      #action button, I decided to use the actionButton because that way the program has to wait for
      #the values to be entered in order to start doing the calculations and so on.
      actionButton("button","Apply changes!")
    ),
    # Show a plot of the generated distribution
    mainPanel(
      tabsetPanel(type = "tabs",
                  tabPanel("Plot",br(),plotOutput("plot1", brush = brushOpts(id="brush1"))),
                  tabPanel("Documentation",br(),("When entering the values of x and y and hitting the apply changes button, a graph of the relationship between the two data that was entered is created, and it is also possible to select a set of points greater than 2 within the graph to get the slope and intercept of the selected data"))
      )
    )
  )
))
|
library(drake)
### Name: file_in
### Title: Declare the file inputs of a workflow plan command.
### Aliases: file_in
### ** Examples
## Not run:
##D test_with_dir("Contain side effects", {
##D # The `file_out()` and `file_in()` functions
##D # just takes in strings and returns them.
##D file_out("summaries.txt")
##D # Their main purpose is to orchestrate your custom files
##D # in your workflow plan data frame.
##D suppressWarnings(
##D plan <- drake_plan(
##D write.csv(mtcars, file_out("mtcars.csv")),
##D contents = read.csv(file_in("mtcars.csv")),
##D strings_in_dots = "literals" # deprecated but useful: no single quotes needed. # nolint
##D )
##D )
##D plan
##D # drake knows "\"mtcars.csv\"" is the first target
##D # and a dependency of `contents`. See for yourself:
##D make(plan)
##D file.exists("mtcars.csv")
##D # See also `knitr_in()`. `knitr_in()` is like `file_in()`
##D # except that it analyzes active code chunks in your `knitr`
##D # source file and detects non-file dependencies.
##D # That way, updates to the right dependencies trigger rebuilds
##D # in your report.
##D })
## End(Not run)
|
/data/genthat_extracted_code/drake/examples/file_in.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 1,141
|
r
|
library(drake)
### Name: file_in
### Title: Declare the file inputs of a workflow plan command.
### Aliases: file_in
### ** Examples
## Not run:
##D test_with_dir("Contain side effects", {
##D # The `file_out()` and `file_in()` functions
##D # just takes in strings and returns them.
##D file_out("summaries.txt")
##D # Their main purpose is to orchestrate your custom files
##D # in your workflow plan data frame.
##D suppressWarnings(
##D plan <- drake_plan(
##D write.csv(mtcars, file_out("mtcars.csv")),
##D contents = read.csv(file_in("mtcars.csv")),
##D strings_in_dots = "literals" # deprecated but useful: no single quotes needed. # nolint
##D )
##D )
##D plan
##D # drake knows "\"mtcars.csv\"" is the first target
##D # and a dependency of `contents`. See for yourself:
##D make(plan)
##D file.exists("mtcars.csv")
##D # See also `knitr_in()`. `knitr_in()` is like `file_in()`
##D # except that it analyzes active code chunks in your `knitr`
##D # source file and detects non-file dependencies.
##D # That way, updates to the right dependencies trigger rebuilds
##D # in your report.
##D })
## End(Not run)
|
### load libraries
# Getting-and-Cleaning-Data course project: download the UCI HAR data set,
# merge test/train, keep mean/std measures, tidy names, and compute per
# subject/activity averages.
library(dplyr)
#load datasets
url <- 'https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip'
#set local directory
dest <- '~/Documents/5th Year/Getting and Cleaning Data/UCI.zip'
#download
download.file(url,dest,method='curl')
# NOTE(review): the archive is downloaded but never unzip()ed here; the
# read.table() calls below assume the files were already extracted into
# .../UCI/ with a flat layout — confirm, or add the extraction step.
#assign tables
features <- read.table('~/Documents/5th Year/Getting and Cleaning Data/UCI/features.txt',col.names=c('count','functions'))
activity <- read.table('~/Documents/5th Year/Getting and Cleaning Data/UCI/activity_labels.txt',col.names=c('code','activity'))
subject_test <- read.table('~/Documents/5th Year/Getting and Cleaning Data/UCI/subject_test.txt', col.names = 'subject')
x_test <- read.table('~/Documents/5th Year/Getting and Cleaning Data/UCI/X_test.txt', col.names = features$functions)
y_test <- read.table('~/Documents/5th Year/Getting and Cleaning Data/UCI/y_test.txt', col.names = "code")
subject_train <- read.table('~/Documents/5th Year/Getting and Cleaning Data/UCI/subject_train.txt', col.names = "subject")
x_train <- read.table('~/Documents/5th Year/Getting and Cleaning Data/UCI/X_train.txt', col.names = features$functions)
y_train <- read.table('~/Documents/5th Year/Getting and Cleaning Data/UCI/y_train.txt', col.names = "code")
#now they're all read into our environment
###Step 1: Merge into one dataset
#merge the Xs (both test and train)
X_data <- rbind(x_test,x_train)
#merge the ys
y_data <- rbind(y_test,y_train)
#merge the subject
subject_data <- rbind(subject_test,subject_train)
#put it all together!
df <- cbind(X_data,y_data,subject_data)
### Step 2: Select Mean and Standard Deviation measures
#only want mean and std columns (plus the subject and activity code keys)
df_tidier <- select(df,subject,code,contains('mean'),contains('std'))
### Steps 3 and 4: tidy up the variables
# replace the numeric activity code with its descriptive label
df_tidier$code <- activity[df_tidier$code, 2]
#prefix t means time domain
names(df_tidier) <- gsub("^t", "Time", names(df_tidier))
#prefix f means frequency domain
names(df_tidier) <- gsub("^f", "Freq", names(df_tidier))
#no double body
names(df_tidier) <- gsub("^BodyBody", "Body", names(df_tidier))
#set all to lowercase
names(df_tidier) <- tolower(names(df_tidier))
### Step 5: New dataset with the mean of every variable per subject/activity
# FIX: funs() is deprecated in dplyr; summarize_all(mean) is the
# equivalent modern spelling.
df_tidiest <- df_tidier %>% group_by(subject,code) %>%
  summarize_all(mean)
|
/run_analysis.R
|
no_license
|
klydon1/cleaningdata
|
R
| false
| false
| 2,307
|
r
|
### load libraries
# Duplicate copy of run_analysis.R (this dump stores each snippet twice).
library(dplyr)
#load datasets
url <- 'https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip'
#set local directory
dest <- '~/Documents/5th Year/Getting and Cleaning Data/UCI.zip'
#download
download.file(url,dest,method='curl')
#assign tables
# NOTE(review): the zip is never extracted in this script; the paths below
# assume a pre-existing flat UCI/ directory — confirm.
features <- read.table('~/Documents/5th Year/Getting and Cleaning Data/UCI/features.txt',col.names=c('count','functions'))
activity <- read.table('~/Documents/5th Year/Getting and Cleaning Data/UCI/activity_labels.txt',col.names=c('code','activity'))
subject_test <- read.table('~/Documents/5th Year/Getting and Cleaning Data/UCI/subject_test.txt', col.names = 'subject')
x_test <- read.table('~/Documents/5th Year/Getting and Cleaning Data/UCI/X_test.txt', col.names = features$functions)
y_test <- read.table('~/Documents/5th Year/Getting and Cleaning Data/UCI/y_test.txt', col.names = "code")
subject_train <- read.table('~/Documents/5th Year/Getting and Cleaning Data/UCI/subject_train.txt', col.names = "subject")
x_train <- read.table('~/Documents/5th Year/Getting and Cleaning Data/UCI/X_train.txt', col.names = features$functions)
y_train <- read.table('~/Documents/5th Year/Getting and Cleaning Data/UCI/y_train.txt', col.names = "code")
#now they're all read into our environment
###Step 1: Merge into one dataset
#merge the Xs (both test and train)
X_data <- rbind(x_test,x_train)
#merge the ys
y_data <- rbind(y_test,y_train)
#merge the subject
subject_data <- rbind(subject_test,subject_train)
#put it all together!
df <- cbind(X_data,y_data,subject_data)
### Step 2: Select Mean and Standard Deviation measures
#now we need to use the select function
#only want mean and std
df_tidier <- select(df,subject,code,contains('mean'),contains('std'))
### Steps 3 and 4: tidy up the variables
df_tidier$code <- activity[df_tidier$code, 2]
#ones starting with t should be
names(df_tidier) <- gsub("^t", "Time", names(df_tidier))
#starting with f go to Freq
names(df_tidier) <- gsub("^f", "Freq", names(df_tidier))
#no double body
names(df_tidier) <- gsub("^BodyBody", "Body", names(df_tidier))
#set all to lowercase
names(df_tidier) <- tolower(names(df_tidier))
### Step 5: New dataset with means
# NOTE(review): funs() is deprecated in current dplyr; summarize_all(mean)
# is the modern equivalent.
df_tidiest <- df_tidier %>% group_by(subject,code) %>%
  summarize_all(funs(mean))
|
#' Drop-in replacement for base::stop() that attaches an exit code.
#'
#' Shadows base::stop() in this file's scope: signals an rlang error
#' condition whose subclass is taken from `exit_code`.
#' NOTE(review): rlang's `.subclass` expects a character class name; the
#' numeric default 1 relies on implicit coercion -- confirm intended.
#'
#' @param message character error message to signal (default "")
#' @param exit_code value used as the condition subclass (default 1)
#' @importFrom rlang error_cnd
stop <- function(
  message = "",
  exit_code = 1) {
  base::stop(error_cnd(.subclass = exit_code, message = message))
}
|
/R/stop.R
|
no_license
|
slkarkar/RGCCA
|
R
| false
| false
| 169
|
r
|
#' Drop-in replacement for base::stop() that attaches an exit code.
#'
#' Shadows base::stop() in this file's scope: signals an rlang error
#' condition whose subclass is taken from `exit_code`.
#' NOTE(review): rlang's `.subclass` expects a character class name; the
#' numeric default 1 relies on implicit coercion -- confirm intended.
#'
#' @param message character error message to signal (default "")
#' @param exit_code value used as the condition subclass (default 1)
#' @importFrom rlang error_cnd
stop <- function(
  message = "",
  exit_code = 1) {
  base::stop(error_cnd(.subclass = exit_code, message = message))
}
|
# Auto-generated fuzz/regression input (valgrind test case) for
# DLMtool::LBSPRgen. The argument list deliberately mixes denormal/extreme
# doubles, zero-length vectors, and huge integer counts to probe crashes.
testlist <- list(Beta = 0, CVLinf = -2.36101987400524e-268, FM = 3.81959242373749e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 537479424L, rLens = numeric(0))
# invoke the compiled entry point with the fuzzed arguments
result <- do.call(DLMtool::LBSPRgen,testlist)
# inspect whatever comes back; this is a run-to-completion test, no assertion
str(result)
|
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615827869-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 487
|
r
|
# Auto-generated fuzz/regression input (valgrind test case) for
# DLMtool::LBSPRgen. The argument list deliberately mixes denormal/extreme
# doubles, zero-length vectors, and huge integer counts to probe crashes.
testlist <- list(Beta = 0, CVLinf = -2.36101987400524e-268, FM = 3.81959242373749e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 537479424L, rLens = numeric(0))
# invoke the compiled entry point with the fuzzed arguments
result <- do.call(DLMtool::LBSPRgen,testlist)
# inspect whatever comes back; this is a run-to-completion test, no assertion
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/isNumberOrNanVectorOrNull.R
\name{isNumberOrNanVectorOrNull}
\alias{isNumberOrNanVectorOrNull}
\title{Wrapper for the checkarg function, using specific parameter settings.}
\usage{
isNumberOrNanVectorOrNull(argument, default = NULL, stopIfNot = FALSE,
n = NA, message = NULL, argumentName = NULL)
}
\arguments{
\item{argument}{See checkarg function.}
\item{default}{See checkarg function.}
\item{stopIfNot}{See checkarg function.}
\item{n}{See checkarg function.}
\item{message}{See checkarg function.}
\item{argumentName}{See checkarg function.}
}
\value{
See checkarg function.
}
\description{
This function can be used in 3 ways:\enumerate{
\item Return TRUE or FALSE depending on whether the argument checks are
passed. This is suitable e.g. for if statements that take further action
if the argument does not pass the checks.\cr
\item Throw an exception if the argument does not pass the checks. This is
suitable e.g. when no further action needs to be taken other than
throwing an exception if the argument does not pass the checks.\cr
\item Same as (2) but by supplying a default value, a default can be assigned
in a single statement, when the argument is NULL. The checks are still
performed on the returned value, and an exception is thrown when not
passed.\cr
}
}
\details{
Actual call to checkarg: checkarg(argument, "N", default = default, stopIfNot = stopIfNot, nullAllowed = TRUE, n = NA, zeroAllowed = TRUE, negativeAllowed = TRUE, positiveAllowed = TRUE, nonIntegerAllowed = TRUE, naAllowed = FALSE, nanAllowed = TRUE, infAllowed = FALSE, message = message, argumentName = argumentName)
}
\examples{
isNumberOrNanVectorOrNull(2)
# returns TRUE (argument is valid)
isNumberOrNanVectorOrNull("X")
# returns FALSE (argument is invalid)
#isNumberOrNanVectorOrNull("X", stopIfNot = TRUE)
# throws exception with message defined by message and argumentName parameters
isNumberOrNanVectorOrNull(2, default = 1)
# returns 2 (the argument, rather than the default, since it is not NULL)
#isNumberOrNanVectorOrNull("X", default = 1)
# throws exception with message defined by message and argumentName parameters
isNumberOrNanVectorOrNull(NULL, default = 1)
# returns 1 (the default, rather than the argument, since it is NULL)
}
|
/man/isNumberOrNanVectorOrNull.Rd
|
no_license
|
cran/checkarg
|
R
| false
| true
| 2,438
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/isNumberOrNanVectorOrNull.R
\name{isNumberOrNanVectorOrNull}
\alias{isNumberOrNanVectorOrNull}
\title{Wrapper for the checkarg function, using specific parameter settings.}
\usage{
isNumberOrNanVectorOrNull(argument, default = NULL, stopIfNot = FALSE,
n = NA, message = NULL, argumentName = NULL)
}
\arguments{
\item{argument}{See checkarg function.}
\item{default}{See checkarg function.}
\item{stopIfNot}{See checkarg function.}
\item{n}{See checkarg function.}
\item{message}{See checkarg function.}
\item{argumentName}{See checkarg function.}
}
\value{
See checkarg function.
}
\description{
This function can be used in 3 ways:\enumerate{
\item Return TRUE or FALSE depending on whether the argument checks are
passed. This is suitable e.g. for if statements that take further action
if the argument does not pass the checks.\cr
\item Throw an exception if the argument does not pass the checks. This is
suitable e.g. when no further action needs to be taken other than
throwing an exception if the argument does not pass the checks.\cr
\item Same as (2) but by supplying a default value, a default can be assigned
in a single statement, when the argument is NULL. The checks are still
performed on the returned value, and an exception is thrown when not
passed.\cr
}
}
\details{
Actual call to checkarg: checkarg(argument, "N", default = default, stopIfNot = stopIfNot, nullAllowed = TRUE, n = NA, zeroAllowed = TRUE, negativeAllowed = TRUE, positiveAllowed = TRUE, nonIntegerAllowed = TRUE, naAllowed = FALSE, nanAllowed = TRUE, infAllowed = FALSE, message = message, argumentName = argumentName)
}
\examples{
isNumberOrNanVectorOrNull(2)
# returns TRUE (argument is valid)
isNumberOrNanVectorOrNull("X")
# returns FALSE (argument is invalid)
#isNumberOrNanVectorOrNull("X", stopIfNot = TRUE)
# throws exception with message defined by message and argumentName parameters
isNumberOrNanVectorOrNull(2, default = 1)
# returns 2 (the argument, rather than the default, since it is not NULL)
#isNumberOrNanVectorOrNull("X", default = 1)
# throws exception with message defined by message and argumentName parameters
isNumberOrNanVectorOrNull(NULL, default = 1)
# returns 1 (the default, rather than the argument, since it is NULL)
}
|
#'@title createSavedPlot
#'@description Save a plot to BG.library/data/plotList. These plots can be executed with
#'either `executeSavePlot()` or `shinyPlot()`
#'@param libraryPath character string path to BG.library code
#'@param plotName character string to assign as name of saved plot
#'@param plotType character string indicating which plotting function to use,
#'options include 'plotLine_ly','summaryPlot_ly', and 'heatMap_ly'
#'@param description character string description of saved plot, will be added to plot as text
#'@param paramList list of all parameters needed to execute the plot
#'@param default TRUE/FALSE make this plot part of the default list of plots
#'allowing restoration of the original default plotList, by saving only default plots back
#'to the plotList
#'@examples
#'plotName<-"meanSGheat_hist"
#'plotType<-"heatMap_ly"
#'description<-"Heat map of mean hourly SG values per day with histogram of groups."
#'paramList<-list(brks = c(0,50,80,150,240,300,400,500),
#'                brewerPallete = "RdBu",
#'                revPallete = TRUE,
#'                textCol = "black",
#'                tcol = "time2",
#'                dcol = "Date2",
#'                valueVar = "Sensor.Glucose..mg.dL.",
#'                sumFunc = "mean",
#'                naRemove = TRUE,
#'                includeTotals = TRUE,
#'                filterCond = "")
#'
#'createSavedPlot(libraryPath, plotName,plotType,description, paramList)
createSavedPlot<-function(libraryPath, plotName,plotType,
                          description, paramList, default = TRUE){
  #file path to plotList
  plotListFile<-paste0(libraryPath,"/data/plotList")
  #load plotList (brings `plotList` into this function's environment)
  load(file = plotListFile)
  #create new plot entry for the list
  plotListSub<-list(plotType = plotType,
                    description = description,
                    paramList = paramList,
                    default = default)
  # fix: use [[<- instead of eval(parse(text = ...)); the parsed-string
  # approach failed for plot names that are not syntactic R identifiers
  # (spaces, dashes, quotes) and risked code injection
  plotList[[plotName]] <- plotListSub
  #save updated plotList
  save(file = plotListFile,plotList)
}
|
/BG.library/R/createSavedPlot.R
|
no_license
|
rscmbc3/BG.library
|
R
| false
| false
| 2,109
|
r
|
#'@title createSavedPlot
#'@description Save a plot to BG.library/data/plotList. These plots can be executed with
#'either `executeSavePlot()` or `shinyPlot()`
#'@param libraryPath character string path to BG.library code
#'@param plotName character string to assign as name of saved plot
#'@param plotType character string indicating which plotting function to use,
#'options include 'plotLine_ly','summaryPlot_ly', and 'heatMap_ly'
#'@param description character string description of saved plot, will be added to plot as text
#'@param paramList list of all parameters needed to execute the plot
#'@param default TRUE/FALSE make this plot part of the default list of plots
#'allowing restoration of the original default plotList, by saving only default plots back
#'to the plotList
#'@examples
#'plotName<-"meanSGheat_hist"
#'plotType<-"heatMap_ly"
#'description<-"Heat map of mean hourly SG values per day with histogram of groups."
#'paramList<-list(brks = c(0,50,80,150,240,300,400,500),
#'                brewerPallete = "RdBu",
#'                revPallete = TRUE,
#'                textCol = "black",
#'                tcol = "time2",
#'                dcol = "Date2",
#'                valueVar = "Sensor.Glucose..mg.dL.",
#'                sumFunc = "mean",
#'                naRemove = TRUE,
#'                includeTotals = TRUE,
#'                filterCond = "")
#'
#'createSavedPlot(libraryPath, plotName,plotType,description, paramList)
createSavedPlot<-function(libraryPath, plotName,plotType,
                          description, paramList, default = TRUE){
  #file path to plotList
  plotListFile<-paste0(libraryPath,"/data/plotList")
  #load plotList (brings `plotList` into this function's environment)
  load(file = plotListFile)
  #create new plot entry for the list
  plotListSub<-list(plotType = plotType,
                    description = description,
                    paramList = paramList,
                    default = default)
  # fix: use [[<- instead of eval(parse(text = ...)); the parsed-string
  # approach failed for plot names that are not syntactic R identifiers
  # (spaces, dashes, quotes) and risked code injection
  plotList[[plotName]] <- plotListSub
  #save updated plotList
  save(file = plotListFile,plotList)
}
|
#' Generic function for extracting the right-hand side from a model
#'
#' Dispatches on the class of `model`; see the class-specific methods below.
#'
#' @keywords internal
#'
#' @param model A fitted model
#' @param \dots additional arguments passed to the specific extractor
#' @noRd
extract_rhs <- function(model, ...) {
  # UseMethod() dispatches on the first argument by default, so naming it
  # explicitly is redundant
  UseMethod("extract_rhs")
}
#' Extract right-hand side
#'
#' Extract a data frame with list columns for the primary terms and subscripts
#' from all terms in the model
#'
#' @keywords internal
#'
#' @param model A fitted model
#'
#' @return A list with one element per future equation term. Term components
#' like subscripts are nested inside each list element. List elements with two
#' or more terms are interactions.
#' @noRd
#' @export
#' @examples
#' \dontrun{
#' library(palmerpenguins)
#' mod1 <- lm(body_mass_g ~ bill_length_mm + species * flipper_length_mm, penguins)
#'
#' extract_rhs(mod1)
#' # > # A tibble: 7 x 8
#' # > term estimate ... primary subscripts
#' # > 1 (Intercept) -3341.615846 ...
#' # > 2 bill_length_mm 59.304539 ... bill_length_mm
#' # > 3 speciesChinstrap -27.292519 ... species Chinstrap
#' # > 4 speciesGentoo -2215.913323 ... species Gentoo
#' # > 5 flipper_length_mm 24.962788 ... flipper_length_mm
#' # > 6 speciesChinstrap:flipper_length_mm -3.484628 ... flipper_length_mm Chinstrap,
#' # > 7 speciesGentoo:flipper_length_mm 11.025972 ... flipper_length_mm Gentoo,
#'
#' str(extract_rhs(mod1))
#' # > Classes ‘lm’ and 'data.frame': 7 obs. of 8 variables:
#' # > $ term : chr "(Intercept)" "bill_length_mm" "speciesChinstrap" "speciesGentoo" ...
#' # > $ estimate : num -3341.6 59.3 -27.3 -2215.9 25 ...
#' # > $ std.error : num 810.14 7.25 1394.17 1328.58 4.34 ...
#' # > $ statistic : num -4.1247 8.1795 -0.0196 -1.6679 5.7534 ...
#' # > $ p.value : num 4.69e-05 5.98e-15 9.84e-01 9.63e-02 1.97e-08 ...
#' # > $ split :List of 7
#' # > ..$ : chr "(Intercept)"
#' # > ..$ : chr "bill_length_mm"
#' # > ..$ : chr "speciesChinstrap"
#' # > ..$ : chr "speciesGentoo"
#' # > ..$ : chr "flipper_length_mm"
#' # > ..$ : chr "speciesChinstrap" "flipper_length_mm"
#' # > ..$ : chr "speciesGentoo" "flipper_length_mm"
#' # > $ primary :List of 7
#' # > ..$ : chr
#' # > ..$ : chr "bill_length_mm"
#' # > ..$ : chr "species"
#' # > ..$ : chr "species"
#' # > ..$ : chr "flipper_length_mm"
#' # > ..$ : chr "species" "flipper_length_mm"
#' # > ..$ : chr "species" "flipper_length_mm"
#' # > $ subscripts:List of 7
#' # > ..$ : chr ""
#' # > ..$ : chr ""
#' # > ..$ : chr "Chinstrap"
#' # > ..$ : chr "Gentoo"
#' # > ..$ : chr ""
#' # > ..$ : Named chr "Chinstrap" ""
#' # > .. ..- attr(*, "names")= chr [1:2] "species" "flipper_length_mm"
#' # > ..$ : Named chr "Gentoo" ""
#' # > .. ..- attr(*, "names")= chr [1:2] "species" "flipper_length_mm"
#' }
#'
# Default method: works for lm/glm-style models that broom::tidy understands.
# Returns the tidy coefficient table augmented with list columns `split`,
# `primary`, and `subscripts`, classed so downstream S3 methods can dispatch.
extract_rhs.default <- function(model, index_factors) {
  # Extract RHS term labels from the model formula
  formula_rhs <- labels(terms(formula(model)))
  # Keep only primary (non-interaction) terms
  formula_rhs_terms <- formula_rhs[!grepl(":", formula_rhs)]
  # Coefficient names and estimates from the fitted model
  full_rhs <- broom::tidy(model)
  # Split interaction coefficients ("a:b") into character vectors
  full_rhs$split <- strsplit(full_rhs$term, ":")
  # Map each coefficient back to the formula term(s) it came from
  full_rhs$primary <- extract_primary_term(
    formula_rhs_terms,
    full_rhs$term
  )
  # Factor-level suffixes become subscripts (e.g. "speciesGentoo" -> "Gentoo")
  full_rhs$subscripts <- extract_all_subscripts(
    full_rhs$primary,
    full_rhs$split
  )
  if (index_factors) {
    # NOTE(review): distinct(full_rhs, "primary") passes a string *constant*,
    # not the `primary` column; in current dplyr this collapses the frame to
    # a single row with one column. Possibly meant
    # distinct(full_rhs, primary, .keep_all = TRUE) -- confirm intent.
    full_rhs <- distinct(full_rhs, "primary")
    unique_ss <- unique(unlist(full_rhs$subscripts))
    # drop empty-string subscripts before assigning index letters
    unique_ss <- unique_ss[vapply(unique_ss, nchar, FUN.VALUE = integer(1)) > 0]
    # index letters start at "i" (the 9th letter)
    replacement_ss <- letters[seq(9, (length(unique_ss) + 8))]
    full_rhs$subscripts <- lapply(full_rhs$subscripts, function(x) {
      out <- replacement_ss[match(x, unique_ss)]
      ifelse(is.na(out), "", out)
    })
  }
  # tag with the model class so later S3 dispatch still works on the result
  class(full_rhs) <- c("data.frame", class(model))
  full_rhs
}
#' Extract the RHS for lme4 mixed models, separating fixed effects from
#' variance/correlation (ran_pars) rows and annotating each fixed term with
#' the model level it is measured at (for multilevel equation rendering).
#' @param model A fitted lmerMod
#' @param return_variances If TRUE, report variances/covariances relabelled
#'   with the sd__/cor__ prefixes so downstream code is unaffected
#' @noRd
#' @export
extract_rhs.lmerMod <- function(model, return_variances) {
  # Extract RHS from formula
  formula_rhs <- labels(terms(formula(model)))
  # Extract unique (primary) terms from formula (no interactions)
  formula_rhs_terms <- formula_rhs[!grepl(":", formula_rhs)]
  # strip backticks around non-syntactic names
  # NOTE(review): the trailing "$?" quantifies the anchor; pattern likely
  # intended as "^`?(.+?)`?$" -- confirm it behaves as expected
  formula_rhs_terms <- gsub("^`?(.+)`$?", "\\1", formula_rhs_terms)
  # Extract coefficient names and values from model
  if(return_variances) {
    full_rhs <- broom.mixed::tidy(model, scales = c("vcov", NA))
    # Make the names like they are sdcor, so it doesn't break other code
    full_rhs$term <- gsub("var__", "sd__", full_rhs$term)
    full_rhs$term <- gsub("cov__", "cor__", full_rhs$term)
  } else {
    full_rhs <- broom.mixed::tidy(model)
  }
  # canonical (sorted) ordering of interaction components
  full_rhs$term <- vapply(full_rhs$term, order_interaction,
    FUN.VALUE = character(1)
  )
  # collapse lme4's ".1"/".2" suffixed duplicate grouping factors
  full_rhs$group <- recode_groups(full_rhs)
  full_rhs$original_order <- seq_len(nrow(full_rhs))
  full_rhs$term <- gsub("^`?(.+)`$?", "\\1", full_rhs$term)
  # Split interactions split into character vectors
  full_rhs$split <- strsplit(full_rhs$term, ":")
  # primary terms are only meaningful for fixed-effect rows; others get ""
  full_rhs$primary <- lapply(full_rhs$term, function(x) "")
  full_rhs$primary[full_rhs$effect == "fixed"] <- extract_primary_term(
    formula_rhs_terms,
    full_rhs$term[full_rhs$effect == "fixed"]
  )
  # make sure split and primary are in the same order
  full_rhs$primary[full_rhs$effect == "fixed"] <- Map(
    function(prim, splt) {
      ord <- vapply(prim, function(x) grep(x, splt, fixed = TRUE), FUN.VALUE = integer(1))
      names(sort(ord))
    },
    full_rhs$primary[full_rhs$effect == "fixed"],
    full_rhs$split[full_rhs$effect == "fixed"]
  )
  # subscripts likewise only apply to fixed-effect rows
  full_rhs$subscripts <- lapply(full_rhs$term, function(x) "")
  full_rhs$subscripts[full_rhs$effect == "fixed"] <- extract_all_subscripts(
    full_rhs$primary[full_rhs$effect == "fixed"],
    full_rhs$split[full_rhs$effect == "fixed"]
  )
  # which predictors are measured at a grouping level (vs observation level)
  group_coefs <- detect_group_coef(model, full_rhs)
  all_terms <- unique(unlist(full_rhs$primary[full_rhs$effect == "fixed"]))
  l1_terms <- setdiff(all_terms, names(group_coefs))
  l1_terms <- setNames(rep("l1", length(l1_terms)), l1_terms)
  var_levs <- c(l1_terms, group_coefs)
  full_rhs$pred_level <- lapply(full_rhs$primary, function(x) {
    var_levs[names(var_levs) %in% x]
  })
  # keep pred_level aligned with the component order within each term
  full_rhs$pred_level[full_rhs$effect == "fixed"] <- Map(
    function(predlev, splt) {
      ord <- vapply(names(predlev),
        function(x) grep(x, splt, fixed = TRUE),
        FUN.VALUE = integer(1))
      ord <- names(sort(ord))
      predlev[ord]
    },
    full_rhs$pred_level[full_rhs$effect == "fixed"],
    full_rhs$split[full_rhs$effect == "fixed"]
  )
  # TRUE for terms composed entirely of observation-level (l1) predictors;
  # the intercept is always treated as l1
  full_rhs$l1 <- vapply(full_rhs$pred_level, function(x) {
    length(x) > 0 & all(x == "l1")
  }, FUN.VALUE = logical(1))
  full_rhs$l1 <- ifelse(full_rhs$term == "(Intercept)",
    TRUE,
    full_rhs$l1
  )
  # flag interactions whose components live at different model levels
  full_rhs$crosslevel <- detect_crosslevel(
    full_rhs$primary,
    full_rhs$pred_level
  )
  class(full_rhs) <- c("data.frame", class(model))
  full_rhs
}
#' Generalized linear mixed models share the lmerMod extraction logic, so
#' this method simply delegates (forwarding any extra arguments).
#' @noRd
#' @export
extract_rhs.glmerMod <- function(model, ...) {
  extract_rhs.lmerMod(model, ...)
}
#' Extract right-hand side of an forecast::Arima object
#'
#' Extract a dataframe of S/MA components
#'
#' @keywords internal
#'
#' @inheritParams extract_eq
#'
#' @return A dataframe with columns `primary` (the backshift operator "B"),
#'   `superscript` (its power, "" when 1), and empty `subscripts`.
#' @noRd
extract_rhs.forecast_ARIMA <- function(model, ...) {
  # RHS of ARIMA is the Moving Average side
  # Consists of a Non-Seasonal MA (p), Seasonal MA (P), Seasonal Differencing.
  # This is more than needed, but we're being explicit for readability.
  # Orders structure in Arima model: c(p, q, P, Q, m, d, D)
  ords <- model$arma
  names(ords) <- c("p", "q", "P", "Q", "m", "d", "D")
  # Following the rest of the package.
  # Pull the full model with broom::tidy
  full_mdl <- broom::tidy(model)
  # Filter down to only the MA terms (ma1, ma2, ..., sma1, ...)
  full_rhs <- full_mdl[grepl("^s?ma", full_mdl$term), ]
  # Add a Primary column and set it to the backshift operator.
  full_rhs$primary <- "B"
  # Get the superscript for the backshift operator.
  ## This is equal to the number on the term for MA
  ## and the number on the term * the seasonal frequency for SMA.
  ## Powers of 1 are replaced with an empty string.
  rhs_super <- as.numeric(gsub("^s?ma", "", full_rhs$term))
  rhs_super[grepl("^sma", full_rhs$term)] <- rhs_super[grepl("^sma", full_rhs$term)] * ords["m"]
  rhs_super <- as.character(rhs_super)
  full_rhs$superscript <- rhs_super
  # The RHS (MA side) has no differencing.
  # Previous versions of this function were erroneous
  # in that it included a seasonal difference on this side.
  # Reduce any "1" superscripts to not show the superscript
  full_rhs[full_rhs$superscript == "1", "superscript"] <- ""
  # Set subscripts so that create_term works later
  full_rhs$subscripts <- ""
  # Set the class
  class(full_rhs) <- c(class(model), "data.frame")
  # Explicit return
  return(full_rhs)
}
#' Put the components of an interaction term into a canonical (sorted) order
#'
#' Handles plain fixed-effect terms ("b:a" -> "a:b") as well as lme4
#' variance/correlation terms ("sd__b:a" -> "sd__a:b",
#' "cor__x.b:a" -> "cor__x.a:b"); an interaction inside the random part is
#' sorted too.
#'
#' @param interaction_term A single term name (character scalar)
#' @return The term with its components alphabetically ordered
#' @noRd
order_interaction <- function(interaction_term) {
  if (grepl("^cor__", interaction_term)) {
    # "cor__<ran>.<fixed>": peel off everything up to the last "."
    ran_part <- gsub("(.+\\.).+", "\\1", interaction_term)
    interaction_term <- gsub(ran_part, "", interaction_term, fixed = TRUE)
  } else if (grepl("^sd__", interaction_term)) {
    ran_part <- "sd__"
    interaction_term <- gsub(paste0("^", ran_part), "", interaction_term)
  }
  terms <- strsplit(interaction_term, ":")[[1]]
  terms_ordered <- sort(terms)
  out <- paste0(terms_ordered, collapse = ":")
  # fix: restrict the lookup to this function's own frame. A bare
  # exists("ran_part") also searches enclosing environments, so an unrelated
  # `ran_part` defined at package/global scope would wrongly trigger the
  # random-part reconstruction for plain fixed-effect terms.
  if (exists("ran_part", inherits = FALSE)) {
    # check/handle if there's an interaction in the random part
    # sd or cor
    type <- gsub("(^.+__).+", "\\1", ran_part)
    # remove type and period at end
    ran <- gsub(type, "", ran_part)
    ran <- gsub("\\.$", "", ran)
    # handle interaction (if present)
    ran <- strsplit(ran, ":")[[1]]
    ran <- paste0(sort(ran), collapse = ":")
    # paste it all back together
    if (grepl("^cor", ran_part)) {
      out <- paste0(type, ran, ".", out)
    } else {
      out <- paste0(type, ran, out)
    }
  }
  out
}
# Collapse lme4's numeric-suffixed duplicate grouping-factor names
# (e.g. "id.1", "id.2") back to a single base name where appropriate.
# Returns a recoded version of rhs$group.
recode_groups <- function(rhs) {
  # one data frame per grouping factor; drop the residual pseudo-group
  rhs_splt <- split(rhs, rhs$group)
  rhs_splt <- rhs_splt[!grepl("Residual", names(rhs_splt))]
  # group names with the trailing ".N" suffix stripped
  names_collapsed <- collapse_groups(names(rhs_splt))
  # does each (suffixed) group carry a varying intercept?
  intercept_vary <- vapply(rhs_splt, function(x) {
    any(grepl("sd__(Intercept)", x$term, fixed = TRUE))
  }, FUN.VALUE = logical(1))
  check <- split(intercept_vary, names_collapsed)
  # collapse these groups
  collapse <- vapply(check, all, FUN.VALUE = logical(1))
  collapse_term <- function(term, v) {
    ifelse(grepl(term, v), collapse_groups(v), v)
  }
  out <- rhs$group
  # NOTE(review): the loop iterates over collapse[!collapse], i.e. it
  # collapses the base names where NOT every variant has a varying
  # intercept -- confirm this inversion is intended
  for (i in seq_along(collapse[!collapse])) {
    out <- collapse_term(names(collapse[!collapse])[i], out)
  }
  out
}
# Strip a trailing ".N" / ".NN" suffix from grouping-factor names
# ("id.12" -> "id"); names without such a suffix pass through unchanged.
collapse_groups <- function(group) {
  # the pattern is anchored at the end, so at most one match exists and
  # sub() is equivalent to gsub() here
  sub("(.+)\\.\\d\\d?$", "\\1", group)
}
# Reorder `split` (the components of one term) to follow the order of the
# predictors named in `pred_level`. Exact matches take precedence over
# substring matches.
order_split <- function(split, pred_level) {
  # nothing to reorder when there are no leveled predictors
  if (length(pred_level) == 0) {
    return(pred_level)
  }
  positions <- vapply(names(pred_level), function(nm) {
    hit_exact <- split %in% nm
    hit_partial <- grepl(nm, split)
    # prefer the exact hit; fall back to the substring hit
    hits <- if (any(hit_exact)) hit_exact else hit_partial
    which(hits)
  }, FUN.VALUE = integer(1))
  split[positions]
}
#' Pull just the random variables
#'
#' For every non-residual grouping factor (ordered by first appearance in the
#' tidy output), return the names of the terms whose effects vary at that
#' level, stripped of their "sd__" prefix.
#'
#' @param rhs output from \code{extract_rhs}
#' @keywords internal
#' @noRd
extract_random_vars <- function(rhs) {
  non_resid <- rhs[rhs$group != "Residual", ]
  # order grouping factors by where they first appear in the output
  grp_order <- sort(tapply(non_resid$original_order, non_resid$group, min))
  ran_pars <- non_resid[non_resid$effect == "ran_pars", ]
  by_group <- split(ran_pars, ran_pars$group)[names(grp_order)]
  lapply(by_group, function(g) {
    # keep standard deviations only; correlations duplicate the term names
    sd_rows <- g[!grepl("cor__", g$term), ]
    gsub("sd__(.+)", "\\1", sd_rows$term)
  })
}
# Flag interaction terms whose components do not all live at the same model
# level (cross-level interactions). Single-component terms are never
# cross-level.
detect_crosslevel <- function(primary, pred_level) {
  mapply_lgl(function(prim, predlev) {
    # cross-level iff: more than one component AND either a component lacks
    # a detected level or the detected levels disagree
    length(prim) > 1 &&
      (length(prim) != length(predlev) || length(unique(predlev)) != 1)
  },
  prim = primary,
  predlev = pred_level
  )
}
#### Consider refactoring the below too
# Return the grouping-variable name if `predictor` is constant within each
# group (i.e. measured at that grouping level); otherwise return NULL.
detect_covar_level <- function(predictor, group) {
  nm <- names(group)
  # pair every predictor value with its group id, using an unlikely separator
  pairs <- unique(paste(predictor, group[, 1], sep = " _|_ "))
  group_ids <- gsub(".+\\s\\_\\|\\_\\s(.+)", "\\1", pairs)
  # each group id appearing at most once means one predictor value per group
  if (!anyDuplicated(group_ids)) {
    return(nm)
  }
  # implicit NULL: predictor varies within groups
}
# Apply detect_covar_level() to every column of X: for each predictor,
# report the grouping level it is constant within (or NULL).
detect_X_level <- function(X, group) {
  lapply(X, function(covar) detect_covar_level(covar, group))
}
# Merge two parallel lists of level assignments, preferring `x` wherever it
# holds a value; `y` fills the gaps. NULL/NA elements count as "no value".
# Returns a named character vector (empty slots become NA).
collapse_list <- function(x, y) {
  # TRUE where a list element carries no information (NULL or NA)
  is_empty <- function(lst) {
    vapply(lst, function(el) {
      if (is.null(el)) TRUE else is.na(el)
    }, FUN.VALUE = logical(1))
  }
  null_x <- is_empty(x)
  null_y <- is_empty(y)
  # fix: the original spelled this as three masked assignments, one of which
  # (y[null_x & !null_y] <- y[null_x & !null_y]) was a dead no-op self
  # assignment; the whole merge reduces to "take x wherever x is non-empty"
  y[!null_x] <- x[!null_x]
  unlist(lapply(y, function(el) ifelse(is.null(el), NA_character_, el)))
}
# Determine which fixed-effect predictors are measured at a grouping level
# rather than at the observation level. Returns a named character vector
# mapping predictor name -> grouping factor name.
detect_group_coef <- function(model, rhs) {
  # outcome is the first variable of the model formula
  outcome <- all.vars(formula(model))[1]
  # lme4 model frame: outcome, predictors, and grouping-factor id columns
  d <- model@frame
  random_lev_names <- names(extract_random_vars(rhs))
  # grouping factors may be interactions ("a:b"); split into components
  random_levs <- unlist(strsplit(random_lev_names, ":"))
  random_levs <- gsub("^\\(|\\)$", "", random_levs)
  random_levs <- unique(collapse_groups(random_levs))
  random_lev_ids <- d[random_levs]
  # order grouping factors from most to fewest unique levels (innermost first)
  ranef_order <- vapply(random_lev_ids, function(x) {
    length(unique(x))
  }, FUN.VALUE = numeric(1))
  ranef_order <- rev(sort(ranef_order))
  random_lev_ids <- random_lev_ids[, names(ranef_order), drop = FALSE]
  # Make sure there are explicit ids
  random_lev_ids <- make_explicit_id(random_lev_ids)
  # candidate predictors: everything that is neither an id nor the outcome
  X <- d[!(names(d) %in% c(random_levs, outcome))]
  lev_assign <- vector("list", length(random_levs))
  # test every predictor against every grouping level
  for (i in seq_along(random_lev_ids)) {
    lev_assign[[i]] <- detect_X_level(X, random_lev_ids[, i, drop = FALSE])
  }
  # merge level assignments, outermost level winning via collapse_list()
  levs <- Reduce(collapse_list, rev(lev_assign))
  # reassign actual names (in cases where ranef contains ":")
  out <- random_lev_names[match(levs, random_levs)]
  names(out) <- names(levs)
  unlist(out[!is.na(out)])
}
# Concatenate each row of `d` into a single "-"-separated id string.
row_paste <- function(d) {
  apply(d, 1, function(row) paste(row, collapse = "-"))
}
#' Makes the grouping variables explicit, which is necessary for
#' detecting group-level predictors
#'
#' Each id column (except the last, outermost one) is replaced by the
#' concatenation of itself with all columns to its right, so nesting is
#' encoded directly in the id values.
#'
#' @param ranef_df A data frame that includes only the random
#' effect ID variables (i.e., random_lev_ids)
#' @noRd
make_explicit_id <- function(ranef_df) {
  n_cols <- length(ranef_df)
  for (col in seq_len(max(n_cols - 1, 0))) {
    ranef_df[[col]] <- row_paste(ranef_df[, col:n_cols])
  }
  ranef_df
}
#' Extract the primary terms from all terms
#'
#' @inheritParams detect_primary
#'
#' @keywords internal
#'
#' @param all_terms A list of all the equation terms on the right hand side,
#' usually the result of \code{broom::tidy(model, quick = TRUE)$term}.
#' @examples
#' \dontrun{
#' primaries <- c("partyid", "age", "race")
#' full_terms <- c("partyidDon't know", "age", "raceBlack")
#' extract_primary_term(primaries, full_terms)
#' }
#' @noRd
extract_primary_term <- function(primary_term_v, all_terms) {
  # for each coefficient name, keep the formula terms it matches
  lapply(all_terms, function(trm) {
    primary_term_v[detect_primary(trm, primary_term_v)]
  })
}
#' Detect if a given term is part of a vector of full terms
#'
#' @keywords internal
#'
#' @param full_term The full name of a single term, e.g.,
#' \code{"partyidOther party"}
#' @param primary_term_v A vector of primary terms, e.g., \code{"partyid"}.
#' Usually the result of \code{formula_rhs[!grepl(":", formula_rhs)]}
#'
#' @return A logical vector the same length as \code{primary_term_v}
#' flagging which primary term(s) the \code{full_term} belongs to
#'
#' @examples
#' \dontrun{
#' detect_primary("partyidStrong republican", c("partyid", "age", "race"))
#' detect_primary("age", c("partyid", "age", "race"))
#' }
#' @noRd
detect_primary <- function(full_term, primary_term_v) {
  # exact match (e.g. a numeric predictor like "age"): flag just that term
  if (full_term %in% primary_term_v) {
    return(primary_term_v %in% full_term)
  }
  # otherwise (e.g. "raceBlack"): flag the terms embedded in the full name
  vapply(
    primary_term_v,
    function(candidate) grepl(candidate, full_term, fixed = TRUE),
    logical(1)
  )
}
#' Extract all subscripts
#'
#' @keywords internal
#'
#' @param primary_list A list of primary terms
#' @param full_term_list A list of full terms
#'
#' @return A list with the subscripts. If a full term has no subscript,
#' the element is \code{""}.
#'
#' @examples
#' \dontrun{
#' p_list <- list("partyid", c("age", "race"))
#' ft_list <- list("partyidNot str republican", c("age", "raceBlack"))
#' extract_all_subscripts(p_list, ft_list)
#' }
#' @noRd
extract_all_subscripts <- function(primary_list, full_term_list) {
  # Map() is exactly mapply(..., SIMPLIFY = FALSE)
  mapply(extract_subscripts, primary_list, full_term_list, SIMPLIFY = FALSE)
}
#' Extract the subscripts from a given term
#'
#' @keywords internal
#'
#' @param primary A single primary term, e.g., \code{"partyid"}
#' @param full_term_v A vector of full terms, e.g.,
#' \code{c("partyidDon't know", "partyidOther party")}. Can be of length 1.
#' @examples
#' \dontrun{
#' extract_subscripts("partyid", "partyidDon't know")
#' }
#' @noRd
extract_subscripts <- function(primary, full_term_v) {
  n_primary <- length(primary)
  # no primary term at all (e.g. the intercept): no subscript
  if (n_primary == 0) {
    return("")
  }
  # single primary: strip it, leaving the factor level (or "")
  if (n_primary == 1) {
    return(gsub(primary, "", full_term_v, fixed = TRUE))
  }
  # interaction: strip each primary from its own component
  mapply_chr(function(x, y) gsub(x, "", y, fixed = TRUE),
    x = primary,
    y = full_term_v
  )
}
#' Generic function for wrapping the RHS of a model equation in something, like
#' how the RHS of probit is wrapped in φ()
#'
#' @keywords internal
#'
#' @param model A fitted model
#' @param tex The TeX version of the RHS of the model (as character), built as
#' \code{rhs_combined} or \code{eq_raw$rhs} in \code{extract_eq()}
#' @param \dots additional arguments passed to the specific extractor
#' @noRd
wrap_rhs <- function(model, tex, ...) {
  # dispatch on the first argument (the model); naming it is redundant
  UseMethod("wrap_rhs")
}
#' @export
#' @keywords internal
#' @noRd
wrap_rhs.default <- function(model, tex, ...) {
  # most models need no wrapping; hand the TeX back untouched
  tex
}
#' @export
#' @keywords internal
#' @noRd
wrap_rhs.glm <- function(model, tex, ...) {
  # probit links wrap the linear predictor in Phi(); other links pass through
  if (model$family$link == "probit") probitify(tex) else tex
}
#' @export
#' @keywords internal
#' @noRd
wrap_rhs.polr <- function(model, tex, ...) {
  # ordered probit wraps the linear predictor in Phi(); other methods pass through
  if (model$method == "probit") probitify(tex) else tex
}
#' @export
#' @keywords internal
#' @noRd
wrap_rhs.clm <- function(model, tex, ...) {
  # cumulative-link probit wraps the linear predictor in Phi()
  if (model$info$link == "probit") probitify(tex) else tex
}
#' Wrap a TeX right-hand side in \Phi[...] for probit-link models
#' @keywords internal
#' @noRd
probitify <- function(tex) {
  # widen any leading \quad alignment space to \qquad to make room for \Phi
  adjusted <- gsub("&\\\\quad", "&\\\\qquad\\\\", tex)
  # \left[ / \right] would be nicer, but they break when the equation is
  # split across multiple lines (https://tex.stackexchange.com/q/21290/11851)
  paste0("\\Phi[", adjusted, "]")
}
|
/R/extract_rhs.R
|
permissive
|
shaoyoucheng/equatiomatic
|
R
| false
| false
| 19,684
|
r
|
#' Generic function for extracting the right-hand side from a model
#'
#' Dispatches on the class of `model`; see the class-specific methods below.
#'
#' @keywords internal
#'
#' @param model A fitted model
#' @param \dots additional arguments passed to the specific extractor
#' @noRd
extract_rhs <- function(model, ...) {
  # UseMethod() dispatches on the first argument by default, so naming it
  # explicitly is redundant
  UseMethod("extract_rhs")
}
#' Extract right-hand side
#'
#' Extract a data frame with list columns for the primary terms and subscripts
#' from all terms in the model
#'
#' @keywords internal
#'
#' @param model A fitted model
#'
#' @return A list with one element per future equation term. Term components
#' like subscripts are nested inside each list element. List elements with two
#' or more terms are interactions.
#' @noRd
#' @export
#' @examples
#' \dontrun{
#' library(palmerpenguins)
#' mod1 <- lm(body_mass_g ~ bill_length_mm + species * flipper_length_mm, penguins)
#'
#' extract_rhs(mod1)
#' # > # A tibble: 7 x 8
#' # > term estimate ... primary subscripts
#' # > 1 (Intercept) -3341.615846 ...
#' # > 2 bill_length_mm 59.304539 ... bill_length_mm
#' # > 3 speciesChinstrap -27.292519 ... species Chinstrap
#' # > 4 speciesGentoo -2215.913323 ... species Gentoo
#' # > 5 flipper_length_mm 24.962788 ... flipper_length_mm
#' # > 6 speciesChinstrap:flipper_length_mm -3.484628 ... flipper_length_mm Chinstrap,
#' # > 7 speciesGentoo:flipper_length_mm 11.025972 ... flipper_length_mm Gentoo,
#'
#' str(extract_rhs(mod1))
#' # > Classes ‘lm’ and 'data.frame': 7 obs. of 8 variables:
#' # > $ term : chr "(Intercept)" "bill_length_mm" "speciesChinstrap" "speciesGentoo" ...
#' # > $ estimate : num -3341.6 59.3 -27.3 -2215.9 25 ...
#' # > $ std.error : num 810.14 7.25 1394.17 1328.58 4.34 ...
#' # > $ statistic : num -4.1247 8.1795 -0.0196 -1.6679 5.7534 ...
#' # > $ p.value : num 4.69e-05 5.98e-15 9.84e-01 9.63e-02 1.97e-08 ...
#' # > $ split :List of 7
#' # > ..$ : chr "(Intercept)"
#' # > ..$ : chr "bill_length_mm"
#' # > ..$ : chr "speciesChinstrap"
#' # > ..$ : chr "speciesGentoo"
#' # > ..$ : chr "flipper_length_mm"
#' # > ..$ : chr "speciesChinstrap" "flipper_length_mm"
#' # > ..$ : chr "speciesGentoo" "flipper_length_mm"
#' # > $ primary :List of 7
#' # > ..$ : chr
#' # > ..$ : chr "bill_length_mm"
#' # > ..$ : chr "species"
#' # > ..$ : chr "species"
#' # > ..$ : chr "flipper_length_mm"
#' # > ..$ : chr "species" "flipper_length_mm"
#' # > ..$ : chr "species" "flipper_length_mm"
#' # > $ subscripts:List of 7
#' # > ..$ : chr ""
#' # > ..$ : chr ""
#' # > ..$ : chr "Chinstrap"
#' # > ..$ : chr "Gentoo"
#' # > ..$ : chr ""
#' # > ..$ : Named chr "Chinstrap" ""
#' # > .. ..- attr(*, "names")= chr [1:2] "species" "flipper_length_mm"
#' # > ..$ : Named chr "Gentoo" ""
#' # > .. ..- attr(*, "names")= chr [1:2] "species" "flipper_length_mm"
#' }
#'
extract_rhs.default <- function(model, index_factors) {
  # Default method: build a per-coefficient data frame describing the model's
  # right-hand side. Adds three list-columns to the broom::tidy() output:
  #   split      - each term split on ":" into its interaction components
  #   primary    - which formula-level variable(s) the term belongs to
  #   subscripts - factor-level suffixes (e.g. "Gentoo" in "speciesGentoo")
  # When index_factors is TRUE, level names are replaced by index letters
  # (i, j, k, ...). The returned class is c("data.frame", class(model)) so
  # later steps can dispatch on the model type.
  # Extract RHS from formula
  formula_rhs <- labels(terms(formula(model)))
  # Extract unique (primary) terms from formula (no interactions)
  formula_rhs_terms <- formula_rhs[!grepl(":", formula_rhs)]
  # Extract coefficient names and values from model
  full_rhs <- broom::tidy(model)
  # Split interactions split into character vectors
  full_rhs$split <- strsplit(full_rhs$term, ":")
  full_rhs$primary <- extract_primary_term(
    formula_rhs_terms,
    full_rhs$term
  )
  full_rhs$subscripts <- extract_all_subscripts(
    full_rhs$primary,
    full_rhs$split
  )
  if (index_factors) {
    # NOTE(review): dplyr::distinct() with a quoted name ("primary") dedups on
    # the literal string, not the column; verify this should not be
    # distinct(full_rhs, primary, .keep_all = TRUE).
    full_rhs <- distinct(full_rhs, "primary")
    # Collect every non-empty factor-level subscript seen in the model
    unique_ss <- unique(unlist(full_rhs$subscripts))
    unique_ss <- unique_ss[vapply(unique_ss, nchar, FUN.VALUE = integer(1)) > 0]
    # letters[9] is "i", so replacement indices run i, j, k, ...
    replacement_ss <- letters[seq(9, (length(unique_ss) + 8))]
    full_rhs$subscripts <- lapply(full_rhs$subscripts, function(x) {
      out <- replacement_ss[match(x, unique_ss)]
      ifelse(is.na(out), "", out)
    })
  }
  class(full_rhs) <- c("data.frame", class(model))
  full_rhs
}
#' @noRd
#' @export
extract_rhs.lmerMod <- function(model, return_variances) {
  # lme4 method: tidy the fixed and random effects, then annotate each row
  # with the term's interaction components (split), the formula variables it
  # came from (primary), factor subscripts, and which grouping level each
  # predictor varies at (pred_level / l1 / crosslevel).
  # Extract RHS from formula
  formula_rhs <- labels(terms(formula(model)))
  # Extract unique (primary) terms from formula (no interactions)
  formula_rhs_terms <- formula_rhs[!grepl(":", formula_rhs)]
  # Strip backticks from non-syntactic names.
  # NOTE(review): the trailing "$?" makes the end anchor optional -- this looks
  # like it was meant to be "^`?(.+?)`?$"; confirm on names containing "`".
  formula_rhs_terms <- gsub("^`?(.+)`$?", "\\1", formula_rhs_terms)
  # Extract coefficient names and values from model
  if(return_variances) {
    full_rhs <- broom.mixed::tidy(model, scales = c("vcov", NA))
    # Make the names like they are sdcor, so it doesn't break other code
    full_rhs$term <- gsub("var__", "sd__", full_rhs$term)
    full_rhs$term <- gsub("cov__", "cor__", full_rhs$term)
  } else {
    full_rhs <- broom.mixed::tidy(model)
  }
  # Alphabetize interaction components so fixed and random terms line up
  full_rhs$term <- vapply(full_rhs$term, order_interaction,
    FUN.VALUE = character(1)
  )
  full_rhs$group <- recode_groups(full_rhs)
  full_rhs$original_order <- seq_len(nrow(full_rhs))
  full_rhs$term <- gsub("^`?(.+)`$?", "\\1", full_rhs$term)
  # Split interactions split into character vectors
  full_rhs$split <- strsplit(full_rhs$term, ":")
  # primary/subscripts are only meaningful for fixed effects; other rows get ""
  full_rhs$primary <- lapply(full_rhs$term, function(x) "")
  full_rhs$primary[full_rhs$effect == "fixed"] <- extract_primary_term(
    formula_rhs_terms,
    full_rhs$term[full_rhs$effect == "fixed"]
  )
  # make sure split and primary are in the same order
  full_rhs$primary[full_rhs$effect == "fixed"] <- Map(
    function(prim, splt) {
      ord <- vapply(prim, function(x) grep(x, splt, fixed = TRUE), FUN.VALUE = integer(1))
      names(sort(ord))
    },
    full_rhs$primary[full_rhs$effect == "fixed"],
    full_rhs$split[full_rhs$effect == "fixed"]
  )
  full_rhs$subscripts <- lapply(full_rhs$term, function(x) "")
  full_rhs$subscripts[full_rhs$effect == "fixed"] <- extract_all_subscripts(
    full_rhs$primary[full_rhs$effect == "fixed"],
    full_rhs$split[full_rhs$effect == "fixed"]
  )
  # Map each predictor to the grouping level it varies at; anything not
  # detected as group-level is treated as level-1 ("l1")
  group_coefs <- detect_group_coef(model, full_rhs)
  all_terms <- unique(unlist(full_rhs$primary[full_rhs$effect == "fixed"]))
  l1_terms <- setdiff(all_terms, names(group_coefs))
  l1_terms <- setNames(rep("l1", length(l1_terms)), l1_terms)
  var_levs <- c(l1_terms, group_coefs)
  full_rhs$pred_level <- lapply(full_rhs$primary, function(x) {
    var_levs[names(var_levs) %in% x]
  })
  # Re-order pred_level to follow the order of the term's split components
  full_rhs$pred_level[full_rhs$effect == "fixed"] <- Map(
    function(predlev, splt) {
      ord <- vapply(names(predlev),
                    function(x) grep(x, splt, fixed = TRUE),
                    FUN.VALUE = integer(1))
      ord <- names(sort(ord))
      predlev[ord]
    },
    full_rhs$pred_level[full_rhs$effect == "fixed"],
    full_rhs$split[full_rhs$effect == "fixed"]
  )
  # l1 flags terms composed entirely of level-1 predictors; the intercept is
  # always treated as level-1
  full_rhs$l1 <- vapply(full_rhs$pred_level, function(x) {
    length(x) > 0 & all(x == "l1")
  }, FUN.VALUE = logical(1))
  full_rhs$l1 <- ifelse(full_rhs$term == "(Intercept)",
    TRUE,
    full_rhs$l1
  )
  # Cross-level interactions mix predictors from different grouping levels
  full_rhs$crosslevel <- detect_crosslevel(
    full_rhs$primary,
    full_rhs$pred_level
  )
  class(full_rhs) <- c("data.frame", class(model))
  full_rhs
}
#' @noRd
#' @export
extract_rhs.glmerMod <- function(model, ...) {
  # glmer fits share the lmer RHS structure, so reuse the lmerMod method
  extract_rhs.lmerMod(model, ...)
}
#' Extract right-hand side of an forecast::Arima object
#'
#' Extract a dataframe of S/MA components
#'
#' @keywords internal
#'
#' @inheritParams extract_eq
#'
#' @return A dataframe
#' @noRd
extract_rhs.forecast_ARIMA <- function(model, ...) {
  # RHS of ARIMA is the Moving Average side.
  # Consists of the Non-Seasonal MA order (q) and the Seasonal MA order (Q).
  # This is more than needed, but we're being explicit for readability.
  # Orders structure in Arima model: c(p, q, P, Q, m, d, D)
  ords <- model$arma
  names(ords) <- c("p", "q", "P", "Q", "m", "d", "D")
  # Following the rest of the package.
  # Pull the full model with broom::tidy
  full_mdl <- broom::tidy(model)
  # Filter down to only the MA terms and seasonal drift
  full_rhs <- full_mdl[grepl("^s?ma", full_mdl$term), ]
  # Add a Primary column and set it to the backshift operator.
  full_rhs$primary <- "B"
  # Get the superscript for the backshift operator.
  ## This is equal to the number on the term for MA
  ## and the number on the term * the seasonal frequency for SMA.
  ## Powers of 1 are replaced with an empty string.
  rhs_super <- as.numeric(gsub("^s?ma", "", full_rhs$term))
  rhs_super[grepl("^sma", full_rhs$term)] <- rhs_super[grepl("^sma", full_rhs$term)] * ords["m"]
  rhs_super <- as.character(rhs_super)
  full_rhs$superscript <- rhs_super
  # The RHS (MA side) has no differencing.
  # Previous versions of this function were erroneous
  # in that it included a seasonal difference on this side.
  # Reduce any "1" superscripts to not show the superscript
  full_rhs[full_rhs$superscript == "1", "superscript"] <- ""
  # Set subscripts so that create_term works later
  full_rhs$subscripts <- ""
  # Set the class
  class(full_rhs) <- c(class(model), "data.frame")
  # Explicit return
  return(full_rhs)
}
#' Canonicalize an interaction term by alphabetically sorting its components
#'
#' Handles plain fixed-effect terms ("b:a" -> "a:b") as well as random-effect
#' variance terms ("sd__b:a" -> "sd__a:b") and correlation terms
#' ("cor__grp.b:a" -> "cor__grp.a:b"), re-attaching the sd__/cor__ prefix
#' (and, for correlations, the grouping part before the dot) after sorting.
#'
#' Fix: the original used `exists("ran_part")`, which by default also searches
#' enclosing/global environments, so a stray variable named `ran_part`
#' anywhere up the search path would corrupt plain terms. A local sentinel is
#' used instead.
#'
#' @param interaction_term A single term label (length-1 character).
#' @return The term with its interaction components sorted alphabetically.
#' @noRd
order_interaction <- function(interaction_term) {
  ran_part <- NULL
  if (grepl("^cor__", interaction_term)) {
    # Keep everything up to (and including) the final "." as the random part
    ran_part <- gsub("(.+\\.).+", "\\1", interaction_term)
    interaction_term <- gsub(ran_part, "", interaction_term, fixed = TRUE)
  } else if (grepl("^sd__", interaction_term)) {
    ran_part <- "sd__"
    interaction_term <- gsub(paste0("^", ran_part), "", interaction_term)
  }
  terms <- strsplit(interaction_term, ":")[[1]]
  out <- paste0(sort(terms), collapse = ":")
  if (!is.null(ran_part)) {
    # check/handle if there's an interaction in the random part
    # sd or cor
    type <- gsub("(^.+__).+", "\\1", ran_part)
    # remove type and period at end
    ran <- gsub(type, "", ran_part)
    ran <- gsub("\\.$", "", ran)
    # handle interaction (if present)
    ran <- strsplit(ran, ":")[[1]]
    ran <- paste0(sort(ran), collapse = ":")
    # paste it all back together
    if (grepl("^cor", ran_part)) {
      out <- paste0(type, ran, ".", out)
    } else {
      out <- paste0(type, ran, out)
    }
  }
  out
}
recode_groups <- function(rhs) {
  # Recode the `group` column of the tidied RHS, merging numbered duplicates
  # of a grouping factor (e.g. "school.1", "school.2") back into one label
  # where appropriate.
  rhs_splt <- split(rhs, rhs$group)
  rhs_splt <- rhs_splt[!grepl("Residual", names(rhs_splt))]
  names_collapsed <- collapse_groups(names(rhs_splt))
  # Does each numbered group carry a varying intercept?
  intercept_vary <- vapply(rhs_splt, function(x) {
    any(grepl("sd__(Intercept)", x$term, fixed = TRUE))
  }, FUN.VALUE = logical(1))
  check <- split(intercept_vary, names_collapsed)
  # collapse these groups
  # NOTE(review): only groups where NOT all numbered copies have a varying
  # intercept are rewritten below -- confirm the direction is intentional.
  collapse <- vapply(check, all, FUN.VALUE = logical(1))
  # NOTE(review): `term` is used as a regex here; group names containing
  # regex metacharacters (e.g. ".") may over-match.
  collapse_term <- function(term, v) {
    ifelse(grepl(term, v), collapse_groups(v), v)
  }
  out <- rhs$group
  for (i in seq_along(collapse[!collapse])) {
    out <- collapse_term(names(collapse[!collapse])[i], out)
  }
  out
}
collapse_groups <- function(group) {
  # Drop a trailing ".<one-or-two-digit>" counter from duplicated group labels
  sub("(.+)\\.\\d{1,2}$", "\\1", group)
}
order_split <- function(split, pred_level) {
  # Reorder the components of a split interaction term so they follow the
  # order of `pred_level`'s names. An empty pred_level is returned as-is.
  if (length(pred_level) == 0) {
    return(pred_level)
  }
  var_order <- vapply(names(pred_level), function(x) {
    # Prefer an exact component match; fall back to substring detection
    exact <- split %in% x
    detect <- grepl(x, split)
    # take exact if it's there, if not take detect
    if (any(exact)) {
      out <- exact
    } else {
      out <- detect
    }
    # NOTE(review): vapply requires exactly one matching position per
    # variable; zero or multiple matches would error here -- confirm the
    # inputs guarantee a single hit.
    seq_along(out)[out]
  }, FUN.VALUE = integer(1))
  split[var_order]
}
#' Pull just the random variables
#' @param rhs output from \code{extract_rhs}
#' @keywords internal
#' @noRd
extract_random_vars <- function(rhs) {
  # Order the grouping factors by where they first appear in the tidy output,
  # then, per group, return the variables with a random effect (sd__ rows),
  # dropping the correlation (cor__) rows.
  non_resid <- rhs[rhs$group != "Residual", ]
  group_order <- sort(tapply(non_resid$original_order, non_resid$group, min))
  ran_pars <- non_resid[non_resid$effect == "ran_pars", ]
  by_group <- split(ran_pars, ran_pars$group)[names(group_order)]
  lapply(by_group, function(grp) {
    sd_rows <- grp[!grepl("cor__", grp$term), ]
    gsub("sd__(.+)", "\\1", sd_rows$term)
  })
}
detect_crosslevel <- function(primary, pred_level) {
  # A term is cross-level when it is an interaction (more than one primary
  # variable) whose components do not all live at the same grouping level.
  mapply_lgl(
    function(prim, predlev) {
      length(prim) > 1 &&
        (length(prim) != length(predlev) ||
           length(unique(predlev)) != 1)
    },
    prim = primary,
    predlev = pred_level
  )
}
#### Consider refactoring the below too
detect_covar_level <- function(predictor, group) {
  # If each distinct predictor value pairs with a distinct group id, the
  # predictor is constant within groups, i.e. a group-level covariate:
  # return the grouping variable's name. Otherwise fall through (NULL).
  combined <- unique(paste(predictor, group[, 1], sep = " _|_ "))
  group_ids <- gsub(".+\\s\\_\\|\\_\\s(.+)", "\\1", combined)
  if (!any(duplicated(group_ids))) {
    return(names(group))
  }
}
detect_X_level <- function(X, group) {
  # Run the per-covariate level check across every column of X
  lapply(X, function(covar) detect_covar_level(covar, group))
}
# Merge two per-variable level-assignment lists, preferring values from `x`.
#
# An element counts as missing when it is NULL or NA. For each position the
# result takes `x`'s value whenever `x` has one and falls back to `y`
# otherwise; remaining NULLs become NA_character_ in the unlisted output.
#
# Fix: the original contained a dead self-assignment
# (`y[null_x & !null_y] <- y[null_x & !null_y]`) and split "x wins" across
# two complementary masks; both collapse to a single assignment on !missing_x.
#
# @param x,y Lists of equal length (e.g. level assignments from successive
#   grouping factors in detect_group_coef()).
# @return A character vector, named after `y`'s elements where present.
collapse_list <- function(x, y) {
  # A slot is "missing" if it holds NULL or a scalar NA
  is_missing <- function(el) {
    if (is.null(el)) {
      return(TRUE)
    }
    is.na(el)
  }
  missing_x <- vapply(x, is_missing, FUN.VALUE = logical(1))
  # Wherever x has a real value it wins, regardless of y
  y[!missing_x] <- x[!missing_x]
  unlist(lapply(y, function(el) ifelse(is.null(el), NA_character_, el)))
}
detect_group_coef <- function(model, rhs) {
  # Determine which grouping level (if any) each predictor in the model frame
  # is constant within. Returns a named character vector mapping
  # predictor name -> grouping factor label; predictors that vary within all
  # groups are omitted (treated as level-1 by the caller).
  outcome <- all.vars(formula(model))[1]
  d <- model@frame
  # Grouping-factor names may contain ":" (crossed factors) and parentheses
  random_lev_names <- names(extract_random_vars(rhs))
  random_levs <- unlist(strsplit(random_lev_names, ":"))
  random_levs <- gsub("^\\(|\\)$", "", random_levs)
  random_levs <- unique(collapse_groups(random_levs))
  random_lev_ids <- d[random_levs]
  # Order grouping factors from most to fewest unique units (lowest level first)
  ranef_order <- vapply(random_lev_ids, function(x) {
    length(unique(x))
  }, FUN.VALUE = numeric(1))
  ranef_order <- rev(sort(ranef_order))
  random_lev_ids <- random_lev_ids[, names(ranef_order), drop = FALSE]
  # Make sure there are explicit ids
  random_lev_ids <- make_explicit_id(random_lev_ids)
  # Candidate predictors: everything except the outcome and the id columns
  X <- d[!(names(d) %in% c(random_levs, outcome))]
  lev_assign <- vector("list", length(random_levs))
  for (i in seq_along(random_lev_ids)) {
    lev_assign[[i]] <- detect_X_level(X, random_lev_ids[, i, drop = FALSE])
  }
  # Reduce from highest to lowest level so the lowest matching level wins
  levs <- Reduce(collapse_list, rev(lev_assign))
  # reassign actual names (in cases where ranef contains ":")
  out <- random_lev_names[match(levs, random_levs)]
  names(out) <- names(levs)
  unlist(out[!is.na(out)])
}
row_paste <- function(d) {
  # Join each row's fields with "-" to form one ID string per row
  apply(d, 1, function(fields) paste(fields, collapse = "-"))
}
#' Makes the grouping variables explicit, which is neccessary for
#' detecting group-level predictors
#' @param ranef_df A data frame that includes only the random
#' effect ID variables (i.e., random_lev_ids)
#' @noRd
make_explicit_id <- function(ranef_df) {
  # Replace each non-top-level id with the concatenation of itself and every
  # higher-level id, so implicitly nested labels become globally unique.
  # The last (highest-level) column is left untouched.
  n_levels <- length(ranef_df)
  for (idx in seq_len(max(n_levels - 1L, 0L))) {
    ranef_df[[idx]] <- row_paste(ranef_df[, idx:n_levels])
  }
  ranef_df
}
#' Extract the primary terms from all terms
#'
#' @inheritParams detect_primary
#'
#' @keywords internal
#'
#' @param all_terms A list of all the equation terms on the right hand side,
#' usually the result of \code{broom::tidy(model, quick = TRUE)$term}.
#' @examples
#' \dontrun{
#' primaries <- c("partyid", "age", "race")
#'
#' full_terms <- c(
#' "partyidDon't know", "partyidOther party", "age",
#' "partyidNot str democrat", "age", "raceBlack", "age", "raceBlack"
#' )
#'
#' extract_primary_term(primaries, full_terms)
#' }
#' @noRd
extract_primary_term <- function(primary_term_v, all_terms) {
  # For each full term, flag which primary variables it contains, then map
  # the flags back to the primary variable names
  hit_masks <- lapply(all_terms, detect_primary, primary_term_v)
  lapply(hit_masks, function(mask) primary_term_v[mask])
}
#' Detect if a given term is part of a vector of full terms
#'
#' @keywords internal
#'
#' @param full_term The full name of a single term, e.g.,
#' \code{"partyidOther party"}
#' @param primary_term_v A vector of primary terms, e.g., \code{"partyid"}.
#' Usually the result of \code{formula_rhs[!grepl(":", formula_rhs)]}
#'
#' @return A logical vector the same length of \code{primary_term_v} indicating
#' whether the \code{full_term} is part of the given \code{primary_term_v}
#' element
#'
#' @examples
#' \dontrun{
#' detect_primary("partyidStrong republican", c("partyid", "age", "race"))
#' detect_primary("age", c("partyid", "age", "race"))
#' detect_primary("raceBlack", c("partyid", "age", "race"))
#' }
#' @noRd
detect_primary <- function(full_term, primary_term_v) {
  # Exact hit (e.g. a numeric predictor named verbatim): mark only that entry
  if (full_term %in% primary_term_v) {
    return(primary_term_v %in% full_term)
  }
  # Otherwise look for each primary as a literal substring of the full term
  # (covers factor terms like "raceBlack" containing "race")
  vapply(
    primary_term_v,
    function(candidate) grepl(candidate, full_term, fixed = TRUE),
    FUN.VALUE = logical(1)
  )
}
#' Extract all subscripts
#'
#' @keywords internal
#'
#' @param primary_list A list of primary terms
#' @param full_term_list A list of full terms
#'
#' @return A list with the subscripts. If full term has no subscript,
#' returns \code{""}.
#'
#' @examples
#' \dontrun{
#' p_list <- list(
#' "partyid",
#' c("partyid", "age"),
#' c("age", "race"),
#' c("partyid", "age", "race")
#' )
#'
#' ft_list <- list(
#' "partyidNot str republican",
#' c("partyidInd,near dem", "age"),
#' c("age", "raceBlack"),
#' c("partyidInd,near dem", "age", "raceBlack")
#' )
#'
#' extract_all_subscripts(p_list, ft_list)
#' }
#' @noRd
extract_all_subscripts <- function(primary_list, full_term_list) {
  # Pairwise application over the two parallel lists, keeping list structure
  mapply(extract_subscripts, primary_list, full_term_list, SIMPLIFY = FALSE)
}
#' Extract the subscripts from a given term
#'
#' @keywords internal
#'
#' @param primary A single primary term, e.g., \code{"partyid"}
#' @param full_term_v A vector of full terms, e.g.,
#' \code{c("partyidDon't know", "partyidOther party"}. Can be of length 1.
#' @examples
#' \dontrun{
#' extract_subscripts("partyid", "partyidDon't know")
#' extract_subscripts(
#' "partyid",
#' c(
#' "partyidDon't know", "partyidOther party",
#' "partyidNot str democrat"
#' )
#' )
#' }
#' @noRd
extract_subscripts <- function(primary, full_term_v) {
  # Removing the primary name from a full term leaves the factor-level
  # subscript (e.g. "speciesGentoo" - "species" -> "Gentoo").
  n_primary <- length(primary)
  if (n_primary == 0) {
    return("")
  }
  if (n_primary == 1) {
    return(gsub(primary, "", full_term_v, fixed = TRUE))
  }
  # Interactions: strip each primary from its matching component
  mapply_chr(function(x, y) gsub(x, "", y, fixed = TRUE),
    x = primary,
    y = full_term_v
  )
}
#' Generic function for wrapping the RHS of a model equation in something, like
#' how the RHS of probit is wrapped in φ()
#'
#' @keywords internal
#'
#' @param model A fitted model
#' @param tex The TeX version of the RHS of the model (as character), built as
#' \code{rhs_combined} or \code{eq_raw$rhs} in \code{extract_eq()}
#' @param \dots additional arguments passed to the specific extractor
#' @noRd
wrap_rhs <- function(model, tex, ...) {
  # Generic: dispatch on the model class to decide whether the rendered RHS
  # TeX needs wrapping (e.g. \Phi[...] for probit links)
  UseMethod("wrap_rhs", model)
}
#' @export
#' @keywords internal
#' @noRd
wrap_rhs.default <- function(model, tex, ...) {
  # Models without a special link need no wrapping
  tex
}
#' @export
#' @keywords internal
#' @noRd
wrap_rhs.glm <- function(model, tex, ...) {
  # Probit-link GLMs render as \Phi[rhs]; all other links pass through
  if (model$family$link == "probit") {
    return(probitify(tex))
  }
  tex
}
#' @export
#' @keywords internal
#' @noRd
wrap_rhs.polr <- function(model, tex, ...) {
  # MASS::polr stores its link function in `method`
  if (model$method == "probit") {
    return(probitify(tex))
  }
  tex
}
#' @export
#' @keywords internal
#' @noRd
wrap_rhs.clm <- function(model, tex, ...) {
  # ordinal::clm keeps its link inside the `info` element
  if (model$info$link == "probit") {
    return(probitify(tex))
  }
  tex
}
#' @keywords internal
#' @noRd
probitify <- function(tex) {
  # A line-leading \quad must grow to \qquad\ so alignment survives the
  # extra width of the \Phi[ prefix
  spaced <- gsub("&\\\\quad", "&\\\\qquad\\\\", tex)
  # \left[ / \right] would be nicer but break when the equation is split
  # across multiple lines (https://tex.stackexchange.com/q/21290/11851)
  paste0("\\Phi[", spaced, "]")
}
|
#' Function to generate tools path object
#' @param config.file Path of tools configuration file (json, ini, yaml and toml be supported)
#' @param config.list List object of tools that all of tools path (exclude those without names).
#' @param config.vec Vector object of tools that all of tools path (exclude those without names).
#' @param eval.params Params pass to configr::eval.config
#'
#' @return
#' List object contain the tools path that can be used by other function in ngstk package
#' @export
#' @examples
#' config.file <- system.file('extdata', 'demo/tools_config.json', package = 'ngstk')
#' config.list <- list(gatk = '/path/gatk')
#' config.vec <- c('/path/samtools')
#' names(config.vec) <- 'samtools'
#' tools <- set_tools(config.file, config.list, config.vec,
#' eval.params = list(config = 'tools'))
set_tools <- function(config.file = "", config.list = list(), config.vec = c(), eval.params = list()) {
    # Build a named list of tool paths by merging three sources in order of
    # increasing precedence: config file < config.list < config.vec.
    # Unnamed entries are dropped from every source.
    config.list.1 <- NULL
    config.list.2 <- NULL
    config.list.3 <- NULL
    tools <- list()
    if (config.file != "") {
        # Read tool paths from the configuration file via configr
        params <- configr::config.list.merge(eval.params, list(file = config.file))
        config <- do.call(configr::eval.config, params)
        config.list.1 <- config[names(config) != ""]
        tools <- configr::config.list.merge(tools, config.list.1)
    }
    if (is.list(config.list) && length(config.list) > 0) {
        config.list.2 <- config.list[names(config.list) != ""]
        tools <- configr::config.list.merge(tools, config.list.2)
    }
    if (is.vector(config.vec) && length(config.vec) > 0) {
        # NOTE(review): if config.vec has no names at all, names() is NULL and
        # this subset silently drops every element -- confirm that is intended.
        config.vec <- config.vec[names(config.vec) != ""]
        config.list.3 <- as.list(config.vec)
        tools <- configr::config.list.merge(tools, config.list.3)
    }
    return(tools)
}
#' Function to get a series defined theme colors
#'
#' @param theme Colors theme, e.g. default, red_blue
#' @param theme_config_file Theme configuration file, default is
#' system.file('extdata', 'config/theme.toml', package = 'ngstk')
#' @param show_all_themes Wheather show all avaliable colors theme, default is FALSE
#' @export
#' @return
#' A character
#' @examples
#' red_blue <- set_colors('red_blue')
#' default <- set_colors('default')
#' colors <- set_colors(show_all_themes = TRUE)
set_colors <- function(theme = NULL, theme_config_file = NULL, show_all_themes = FALSE) {
    # Return the color vector for a named theme from the theme config file,
    # or the full theme configuration when show_all_themes = TRUE.
    # NOTE(review): read.config()/eval.config() are called unqualified here
    # (unlike set_tools, which uses configr::) -- assumes configr is attached
    # or imported.
    if (is.null(theme_config_file)) {
        # Fall back to the theme file shipped with the package
        theme_config_file <- system.file("extdata", "config/theme.toml", package = "ngstk")
    }
    if (show_all_themes) {
        config <- read.config(file = theme_config_file)
        return(config)
    }
    if (is.null(theme)) {
        theme <- "default"
    }
    colors <- eval.config(value = "colors", config = theme, file = theme_config_file)
    return(colors)
}
#' Process the input file a batch of one batch
#' @param filename Filename need to process
#' @param batch_lines Batch lines to process the data, default 10000000
#' @param handler The function to process the data
#' @param param_names Hander function required parameter names
#' @param extra_fread_params Extra fread parameters in read data step,
#' default is list(sep = '\\n', header = TRUE, return_1L = TRUE), return_1L to get x[[1L]]
#' @param extra_params Extra paramemters pass to handler function
#' @param start_index default is 1, control the skip rows, n = (i-1) * batch_lines
#' @export
#' @examples
#' dat <- data.frame(a=1:100, b=1:100)
#' filename <- tempfile()
#' write.table(dat, filename, sep = '\t', row.names = FALSE, quote = FALSE)
#' handler_fun <- function(x, i = 1) {
#' return(x[i])
#' }
#' batch_file(filename, 10, handler_fun)
batch_file <- function(filename = "", batch_lines = 1e+07, handler = NULL, param_names = c("x",
    "i"), extra_fread_params = list(sep = "\n", header = FALSE, return_1L = TRUE),
    extra_params = list(), start_index = 1) {
    # Process `filename` in chunks of `batch_lines` rows, calling `handler`
    # on each chunk, and return a list of per-chunk handler results keyed by
    # chunk index. Relies on data.table::fread and configr::config.list.merge
    # being attached (neither is namespaced here).
    old_op <- options()
    options(scipen = 200)  # keep the `skip` row count out of scientific notation
    i <- start_index
    pool <- "x"
    if (start_index != 1) {
        # Pre-fill placeholders for skipped chunks so result indices line up.
        # NOTE(review): this creates `start_index` NA entries but only names
        # the first `start_index - 1`; confirm the off-by-one is intentional.
        status <- lapply(1:start_index, function(x) {
            return(NA)
        })
        names(status)[1:(start_index - 1)] <- 1:(start_index - 1)
    } else {
        status <- NULL
    }
    # `return_1L` is this function's own flag, not an fread argument
    return_1L <- extra_fread_params$return_1L
    extra_fread_params$return_1L <- NULL
    while (TRUE) {
        skip <- as.numeric((i - 1) * batch_lines)
        if (i != 1) {
            # Only the first chunk may contain a header row
            extra_fread_params$header = FALSE
        }
        fread_params <- config.list.merge(list(input = filename, nrows = batch_lines,
            skip = skip), extra_fread_params)
        if (return_1L) {
            # Keep only the first column, as a plain vector
            assign(pool[1], value = do.call(fread, fread_params)[[1L]])
        } else {
            assign(pool[1], value = do.call(fread, fread_params))
        }
        x <- get(pool[1])
        # Hand the chunk (and its index) to the handler under `param_names`
        params <- list(x = x, i = i)
        names(params) <- param_names
        params <- config.list.merge(params, extra_params)
        status.tmp <- do.call(handler, params)
        if (is.null(status)) {
            status <- list(i = status.tmp)
            names(status) <- i
        } else {
            status <- config.list.merge(status, list(i = status.tmp))
            names(status)[i] <- i
        }
        # A short chunk means the end of the file was reached
        if (return_1L && length(get(pool[1])) < batch_lines) {
            break
        } else if (!return_1L && nrow(x) < batch_lines) {
            break
        } else {
            i <- i + 1
        }
    }
    options(old_op)  # restore the caller's options (scipen)
    # NOTE(review): this drops the final chunk's status entry -- confirm it
    # is intended and not an off-by-one.
    status[length(status)] <- NULL
    return(status)
}
# Fetch a value two levels deep from a nested configuration list
get_config_value <- function(config_input, level_1, level_2) {
    section <- config_input[[level_1]]
    section[[level_2]]
}
# Resolve the meta/format configuration and the handler/mhandler function
# tables for one converter section. Reads from `config_list` when supplied,
# otherwise from `config_file` via eval.config (unqualified; assumes configr).
initial_params <- function(config_file, config_list, input_type, this_section, meta_flag,
    format_flag, handler_funs = NULL, mhandler_funs = NULL, handler_confg_file = NULL,
    mhandler_confg_file = NULL) {
    if (is.null(config_list)) {
        config_meta <- eval.config(value = meta_flag, config = this_section, file = config_file)
        config_format <- eval.config(value = format_flag, config = this_section,
            file = config_file)
    } else {
        config_meta <- config_list[[this_section]][[meta_flag]]
        config_format <- config_list[[this_section]][[format_flag]]
    }
    defined_cols <- config_meta[["defined_cols"]][["colnames"]]
    if (is.null(handler_funs)) {
        # Fall back to the handler library named in the config (or the default)
        handler_lib <- config_meta[["defined_cols"]][["handler_lib"]]
        if (is.null(handler_lib)) {
            handler_lib <- "default_handlers"
        }
        handler_lib_data <- eval.config(value = handler_lib, config = "handler",
            file = handler_confg_file)
        handler_funs <- handler_lib_data$handler_funs
    }
    if (is.null(mhandler_funs)) {
        # Same fallback scheme for the multi-column (mhandler) functions
        mhandler_lib <- config_meta[["defined_cols"]][["mhandler_lib"]]
        if (is.null(mhandler_lib)) {
            mhandler_lib <- "default_mhandlers"
        }
        mhandler_lib_data <- eval.config(value = mhandler_lib, config = "mhandler",
            file = mhandler_confg_file)
        mhandler_funs <- mhandler_lib_data$mhandler_funs
    }
    config_input <- config_format[[input_type]]
    return(list(config_meta = config_meta, config_format = config_format, config_input = config_input,
        defined_cols = defined_cols, handler_funs = handler_funs, mhandler_funs = mhandler_funs))
}
# Format converter: assembles the output column-by-column via `handler_api`,
# then applies the multi-column handler via `mhandler_api`, optionally
# writing the result to `outfn` as TSV.
#
# Fixes: removed a duplicated `config_input <- params$config_input`
# assignment, and `mhandler_extra_params` is now actually forwarded to the
# mhandler call (previously `handler_extra_params` was reused there, leaving
# the mhandler-specific parameter silently ignored).
data_format_converter <- function(input_data, input_type = "", config_file = "",
    config_list = NULL, handler_confg_file = "", mhandler_confg_file = "", handler_funs = NULL,
    mhandler_funs = NULL, handler_extra_params = NULL, mhandler_extra_params = NULL,
    outfn = NULL, function_name = "", handler_api = "", mhandler_api = "", meta_flag = "meta",
    format_flag = "format") {
    # Resolve meta/format configuration and the handler function tables
    params <- initial_params(config_file, config_list, input_type, function_name,
        meta_flag, format_flag, handler_funs, mhandler_funs, handler_confg_file,
        mhandler_confg_file)
    config_input <- params$config_input
    defined_cols <- params$defined_cols
    handler_funs <- params$handler_funs
    mhandler_funs <- params$mhandler_funs
    handler_data <- NULL
    # Build each defined output column in order (seq_along handles the
    # zero-column case safely, unlike 1:length())
    for (i in seq_along(defined_cols)) {
        handler_data <- do.call(handler_api, list(handler_data = handler_data, config_input = config_input,
            defined_cols = defined_cols, input_data = input_data, index = i, handler_funs = handler_funs,
            extra_params = handler_extra_params))
    }
    # Apply multi-column handlers on the assembled data
    handler_data <- do.call(mhandler_api, list(handler_data = handler_data, config_input = config_input,
        mhandler_funs = mhandler_funs, extra_params = mhandler_extra_params))
    if (!is.null(outfn)) {
        write.table(handler_data, outfn, sep = "\t", row.names = FALSE, quote = FALSE,
            col.names = TRUE)
    }
    return(handler_data)
}
default_handler_api <- function(handler_data, config_input, defined_cols, input_data,
    index, handler_funs = NULL, extra_params = NULL) {
    # Default per-column dispatcher: forwards one column's worth of work to
    # `handler` (expected to be defined elsewhere in the package -- TODO
    # confirm) and returns the accumulated handler_data.
    handler_data <- handler(handler_data, config_input, defined_cols, input_data,
        index, handler_funs = handler_funs, extra_params = extra_params)
    return(handler_data)
}
default_mhandler_api <- function(handler_data, config_input, mhandler_funs = NULL,
    extra_params = NULL) {
    # Default multi-column dispatcher: forwards the assembled data to
    # `mhandler` (expected to be defined elsewhere in the package -- TODO
    # confirm) for whole-table post-processing.
    handler_data <- mhandler(handler_data, config_input, mhandler_funs, extra_params)
    return(handler_data)
}
|
/R/utils.R
|
permissive
|
JhuangLab/ngstk
|
R
| false
| false
| 8,881
|
r
|
#' Function to generate tools path object
#' @param config.file Path of tools configuration file (json, ini, yaml and toml be supported)
#' @param config.list List object of tools that all of tools path (exclude those without names).
#' @param config.vec Vector object of tools that all of tools path (exclude those without names).
#' @param eval.params Params pass to configr::eval.config
#'
#' @return
#' List object contain the tools path that can be used by other function in ngstk package
#' @export
#' @examples
#' config.file <- system.file('extdata', 'demo/tools_config.json', package = 'ngstk')
#' config.list <- list(gatk = '/path/gatk')
#' config.vec <- c('/path/samtools')
#' names(config.vec) <- 'samtools'
#' tools <- set_tools(config.file, config.list, config.vec,
#' eval.params = list(config = 'tools'))
set_tools <- function(config.file = "", config.list = list(), config.vec = c(), eval.params = list()) {
config.list.1 <- NULL
config.list.2 <- NULL
config.list.3 <- NULL
tools <- list()
if (config.file != "") {
params <- configr::config.list.merge(eval.params, list(file = config.file))
config <- do.call(configr::eval.config, params)
config.list.1 <- config[names(config) != ""]
tools <- configr::config.list.merge(tools, config.list.1)
}
if (is.list(config.list) && length(config.list) > 0) {
config.list.2 <- config.list[names(config.list) != ""]
tools <- configr::config.list.merge(tools, config.list.2)
}
if (is.vector(config.vec) && length(config.vec) > 0) {
config.vec <- config.vec[names(config.vec) != ""]
config.list.3 <- as.list(config.vec)
tools <- configr::config.list.merge(tools, config.list.3)
}
return(tools)
}
#' Function to get a series defined theme colors
#'
#' @param theme Colors theme, e.g. default, red_blue
#' @param theme_config_file Theme configuration file, default is
#' system.file('extdata', 'config/theme.toml', package = 'ngstk')
#' @param show_all_themes Wheather show all avaliable colors theme, default is FALSE
#' @export
#' @return
#' A character
#' @examples
#' red_blue <- set_colors('red_blue')
#' default <- set_colors('default')
#' colors <- set_colors(show_all_themes = TRUE)
set_colors <- function(theme = NULL, theme_config_file = NULL, show_all_themes = FALSE) {
if (is.null(theme_config_file)) {
theme_config_file <- system.file("extdata", "config/theme.toml", package = "ngstk")
}
if (show_all_themes) {
config <- read.config(file = theme_config_file)
return(config)
}
if (is.null(theme)) {
theme <- "default"
}
colors <- eval.config(value = "colors", config = theme, file = theme_config_file)
return(colors)
}
#' Process the input file a batch of one batch
#' @param filename Filename need to process
#' @param batch_lines Batch lines to process the data, default 10000000
#' @param handler The function to process the data
#' @param param_names Hander function required parameter names
#' @param extra_fread_params Extra fread parameters in read data step,
#' default is list(sep = '\\n', header = TRUE, return_1L = TRUE), return_1L to get x[[1L]]
#' @param extra_params Extra paramemters pass to handler function
#' @param start_index default is 1, control the skip rows, n = (i-1) * batch_lines
#' @export
#' @examples
#' dat <- data.frame(a=1:100, b=1:100)
#' filename <- tempfile()
#' write.table(dat, filename, sep = '\t', row.names = FALSE, quote = FALSE)
#' handler_fun <- function(x, i = 1) {
#' return(x[i])
#' }
#' batch_file(filename, 10, handler_fun)
batch_file <- function(filename = "", batch_lines = 1e+07, handler = NULL, param_names = c("x",
"i"), extra_fread_params = list(sep = "\n", header = FALSE, return_1L = TRUE),
extra_params = list(), start_index = 1) {
old_op <- options()
options(scipen = 200)
i <- start_index
pool <- "x"
if (start_index != 1) {
status <- lapply(1:start_index, function(x) {
return(NA)
})
names(status)[1:(start_index - 1)] <- 1:(start_index - 1)
} else {
status <- NULL
}
return_1L <- extra_fread_params$return_1L
extra_fread_params$return_1L <- NULL
while (TRUE) {
skip <- as.numeric((i - 1) * batch_lines)
if (i != 1) {
extra_fread_params$header = FALSE
}
fread_params <- config.list.merge(list(input = filename, nrows = batch_lines,
skip = skip), extra_fread_params)
if (return_1L) {
assign(pool[1], value = do.call(fread, fread_params)[[1L]])
} else {
assign(pool[1], value = do.call(fread, fread_params))
}
x <- get(pool[1])
params <- list(x = x, i = i)
names(params) <- param_names
params <- config.list.merge(params, extra_params)
status.tmp <- do.call(handler, params)
if (is.null(status)) {
status <- list(i = status.tmp)
names(status) <- i
} else {
status <- config.list.merge(status, list(i = status.tmp))
names(status)[i] <- i
}
if (return_1L && length(get(pool[1])) < batch_lines) {
break
} else if (!return_1L && nrow(x) < batch_lines) {
break
} else {
i <- i + 1
}
}
options(old_op)
status[length(status)] <- NULL
return(status)
}
# Get config value (2 depth)
get_config_value <- function(config_input, level_1, level_2) {
config_input[[level_1]][[level_2]]
}
# initial config_meta_format
initial_params <- function(config_file, config_list, input_type, this_section, meta_flag,
format_flag, handler_funs = NULL, mhandler_funs = NULL, handler_confg_file = NULL,
mhandler_confg_file = NULL) {
if (is.null(config_list)) {
config_meta <- eval.config(value = meta_flag, config = this_section, file = config_file)
config_format <- eval.config(value = format_flag, config = this_section,
file = config_file)
} else {
config_meta <- config_list[[this_section]][[meta_flag]]
config_format <- config_list[[this_section]][[format_flag]]
}
defined_cols <- config_meta[["defined_cols"]][["colnames"]]
if (is.null(handler_funs)) {
handler_lib <- config_meta[["defined_cols"]][["handler_lib"]]
if (is.null(handler_lib)) {
handler_lib <- "default_handlers"
}
handler_lib_data <- eval.config(value = handler_lib, config = "handler",
file = handler_confg_file)
handler_funs <- handler_lib_data$handler_funs
}
if (is.null(mhandler_funs)) {
mhandler_lib <- config_meta[["defined_cols"]][["mhandler_lib"]]
if (is.null(mhandler_lib)) {
mhandler_lib <- "default_mhandlers"
}
mhandler_lib_data <- eval.config(value = mhandler_lib, config = "mhandler",
file = mhandler_confg_file)
mhandler_funs <- mhandler_lib_data$mhandler_funs
}
config_input <- config_format[[input_type]]
return(list(config_meta = config_meta, config_format = config_format, config_input = config_input,
defined_cols = defined_cols, handler_funs = handler_funs, mhandler_funs = mhandler_funs))
}
# format converter
data_format_converter <- function(input_data, input_type = "", config_file = "",
config_list = NULL, handler_confg_file = "", mhandler_confg_file = "", handler_funs = NULL,
mhandler_funs = NULL, handler_extra_params = NULL, mhandler_extra_params = NULL,
outfn = NULL, function_name = "", handler_api = "", mhandler_api = "", meta_flag = "meta",
format_flag = "format") {
params <- initial_params(config_file, config_list, input_type, function_name,
meta_flag, format_flag, handler_funs, mhandler_funs, handler_confg_file,
mhandler_confg_file)
config_input <- params$config_input
defined_cols <- params$defined_cols
config_input <- params$config_input
handler_funs <- params$handler_funs
mhandler_funs <- params$mhandler_funs
handler_data <- NULL
for (i in 1:length(defined_cols)) {
handler_data <- do.call(handler_api, list(handler_data = handler_data, config_input = config_input,
defined_cols = defined_cols, input_data = input_data, index = i, handler_funs = handler_funs,
extra_params = handler_extra_params))
}
handler_data <- do.call(mhandler_api, list(handler_data = handler_data, config_input = config_input,
mhandler_funs = mhandler_funs, extra_params = handler_extra_params))
if (!is.null(outfn)) {
write.table(handler_data, outfn, sep = "\t", row.names = F, quote = F, col.names = T)
}
return(handler_data)
}
default_handler_api <- function(handler_data, config_input, defined_cols, input_data,
index, handler_funs = NULL, extra_params = NULL) {
handler_data <- handler(handler_data, config_input, defined_cols, input_data,
index, handler_funs = handler_funs, extra_params = extra_params)
return(handler_data)
}
default_mhandler_api <- function(handler_data, config_input, mhandler_funs = NULL,
extra_params = NULL) {
handler_data <- mhandler(handler_data, config_input, mhandler_funs, extra_params)
return(handler_data)
}
|
#load in necessary packages
library(geiger)
#read in insect phylogeny (posterior sample of trees)
trees <- read.nexus("../data/post.nex")
#read in the microsatellite data; column 4 supplies the taxon row names
#used to match tree tip labels
dat.mic <- read.csv("../results/micRocounter_results_TII.csv",
                    as.is = TRUE, row.names = 4)
#drop any unmatching data or tree tips from each of the 100 posterior
#trees; preallocate the list instead of growing it from c()/NULL
trees.pruned <- vector("list", 100)
for (i in seq_len(100)) {
  trees.pruned[[i]] <- treedata(phy = trees[[i]], data = dat.mic)[[1]]
}
# run aov.phylo with phylogenetic correction: for each trait, fit
# trait ~ order once per posterior tree (100 trees, nsim = 100 each)
# make named vector for bpMbp content
bp.Mbp <- dat.mic$bp.Mbp
names(bp.Mbp) <- row.names(dat.mic)
# make named vector for orders
order <- as.factor(dat.mic$order)
names(order) <- row.names(dat.mic)
#create results data frame and indicate proper column names
#col 1 "wophylo": ordinary ANOVA p-value; col 2 "wphylo": p-value from
#the phylogenetic simulation (one row per posterior tree)
results.Mbp <- matrix(NA, 100, 2)
colnames(results.Mbp) <- c("wophylo","wphylo")
#run phyloANOVA for bpMbp and orders
bp2 <- bp.Mbp
ord2 <- order
for(i in 1:100){
#reorder the trait and grouping vectors to this tree's tip-label order
for(j in 1:length(bp2)){
hit <- which(names(bp.Mbp) == trees.pruned[[i]]$tip.label[j])
bp2[j] <- bp.Mbp[hit]
ord2[j] <- order[hit]
}
names(bp2) <- names(ord2) <- trees.pruned[[i]]$tip.label
fit <- aov.phylo(bp2~ord2,
phy = trees.pruned[[i]],
nsim = 100)
#aov.phylo attaches its ANOVA table as the "summary" attribute
aov.sum <- attributes(fit)$summary
results.Mbp[i, 1] <- aov.sum$`Pr(>F)`[1]
results.Mbp[i, 2] <- aov.sum$`Pr(>F) given phy`[1]
}
#save p-value data frame into csv
write.csv(results.Mbp, "../results/Mbp.csv")
#make named vector for twomers
twomers <- dat.mic$twomers
names(twomers) <- row.names(dat.mic)
#create results data frame and indicate proper column names
results.twomers <- matrix(NA, 100, 2)
colnames(results.twomers) <- c("wophylo","wphylo")
#run phyloANOVA for twomers and orders (same scheme as bp.Mbp above)
twomers.2 <- twomers
ord2 <- order
for(i in 1:100){
for(j in 1:length(twomers.2)){
hit <- which(names(twomers) == trees.pruned[[i]]$tip.label[j])
twomers.2[j] <- twomers[hit]
ord2[j] <- order[hit]
}
names(twomers.2) <- names(ord2) <- trees.pruned[[i]]$tip.label
fit <- aov.phylo(twomers.2~ord2,
phy = trees.pruned[[i]],
nsim = 100)
aov.sum <- attributes(fit)$summary
results.twomers[i, 1] <- aov.sum$`Pr(>F)`[1]
results.twomers[i, 2] <- aov.sum$`Pr(>F) given phy`[1]
}
#save p-value data frame into csv
write.csv(results.twomers, "../results/twomers.csv")
#make named vector for threemers
threemers <- dat.mic$threemers
names(threemers) <- row.names(dat.mic)
#create results data frame and indicate proper column names
results.threemers <- matrix(NA, 100, 2)
colnames(results.threemers) <- c("wophylo","wphylo")
#run phyloANOVA for threemers and orders
threemers.2 <- threemers
ord2 <- order
for(i in 1:100){
for(j in 1:length(threemers.2)){
hit <- which(names(threemers) == trees.pruned[[i]]$tip.label[j])
threemers.2[j] <- threemers[hit]
ord2[j] <- order[hit]
}
names(threemers.2) <- names(ord2) <- trees.pruned[[i]]$tip.label
fit <- aov.phylo(threemers.2~ord2,
phy = trees.pruned[[i]],
nsim = 100)
aov.sum <- attributes(fit)$summary
results.threemers[i, 1] <- aov.sum$`Pr(>F)`[1]
results.threemers[i, 2] <- aov.sum$`Pr(>F) given phy`[1]
}
#save p-value data frame into csv
write.csv(results.threemers, "../results/threemers.csv")
#make named vector for fourmers
fourmers <- dat.mic$fourmers
names(fourmers) <- row.names(dat.mic)
#create results data frame and indicate proper column names
results.fourmers <- matrix(NA, 100, 2)
colnames(results.fourmers) <- c("wophylo","wphylo")
#run phyloANOVA for fourmers and orders
fourmers.2 <- fourmers
ord2 <- order
for(i in 1:100){
for(j in 1:length(fourmers.2)){
hit <- which(names(fourmers) == trees.pruned[[i]]$tip.label[j])
fourmers.2[j] <- fourmers[hit]
ord2[j] <- order[hit]
}
names(fourmers.2) <- names(ord2) <- trees.pruned[[i]]$tip.label
fit <- aov.phylo(fourmers.2~ord2,
phy = trees.pruned[[i]],
nsim = 100)
aov.sum <- attributes(fit)$summary
results.fourmers[i, 1] <- aov.sum$`Pr(>F)`[1]
results.fourmers[i, 2] <- aov.sum$`Pr(>F) given phy`[1]
}
#save p-value data frame into csv
write.csv(results.fourmers, "../results/fourmers.csv")
#make named vector for fivemers
fivemers <- dat.mic$fivemers
names(fivemers) <- row.names(dat.mic)
#create results data frame and indicate proper column names
results.fivemers <- matrix(NA, 100, 2)
colnames(results.fivemers) <- c("wophylo","wphylo")
#run phyloANOVA for fivemers and orders
fivemers.2 <- fivemers
ord2 <- order
for(i in 1:100){
for(j in 1:length(fivemers.2)){
hit <- which(names(fivemers) == trees.pruned[[i]]$tip.label[j])
fivemers.2[j] <- fivemers[hit]
ord2[j] <- order[hit]
}
names(fivemers.2) <- names(ord2) <- trees.pruned[[i]]$tip.label
fit <- aov.phylo(fivemers.2~ord2,
phy = trees.pruned[[i]],
nsim = 100)
aov.sum <- attributes(fit)$summary
results.fivemers[i, 1] <- aov.sum$`Pr(>F)`[1]
results.fivemers[i, 2] <- aov.sum$`Pr(>F) given phy`[1]
}
#save p-value data frame into csv
write.csv(results.fivemers, "../results/fivemers.csv")
#make named vector for sixmers
sixmers <- dat.mic$sixmers
names(sixmers) <- row.names(dat.mic)
#create results matrix: col 1 ordinary ANOVA p, col 2 phylogenetic p
results.sixmers <- matrix(NA, 100, 2)
colnames(results.sixmers) <- c("wophylo","wphylo")
#run phyloANOVA for sixmers and orders, once per posterior tree
sixmers.2 <- sixmers
ord2 <- order
for(i in 1:100){
  #reorder trait/order vectors to match this tree's tip labels
  for(j in 1:length(sixmers.2)){
    hit <- which(names(sixmers) == trees.pruned[[i]]$tip.label[j])
    #BUG FIX: this block was copied from the fourmers analysis and still
    #read fourmers[hit], so the "sixmers" results were actually computed
    #on fourmer counts
    sixmers.2[j] <- sixmers[hit]
    ord2[j] <- order[hit]
  }
  names(sixmers.2) <- names(ord2) <- trees.pruned[[i]]$tip.label
  fit <- aov.phylo(sixmers.2~ord2,
                   phy = trees.pruned[[i]],
                   nsim = 100)
  #aov.phylo stores its ANOVA table in the "summary" attribute
  aov.sum <- attributes(fit)$summary
  results.sixmers[i, 1] <- aov.sum$`Pr(>F)`[1]
  results.sixmers[i, 2] <- aov.sum$`Pr(>F) given phy`[1]
}
#save p-value matrix into csv
write.csv(results.sixmers, "../results/sixmers.csv")
#make named vector for all (total microsatellite content column)
all <- dat.mic$all
names(all) <- row.names(dat.mic)
#create results data frame and indicate proper column names
#col 1 "wophylo": ordinary ANOVA p; col 2 "wphylo": phylogenetic p
results.all <- matrix(NA, 100, 2)
colnames(results.all) <- c("wophylo","wphylo")
#run phyloANOVA for allmers and orders, once per posterior tree
all.2 <- all
ord2 <- order
for(i in 1:100){
#reorder the trait/order vectors to match this tree's tip label order
for(j in 1:length(all.2)){
hit <- which(names(all) == trees.pruned[[i]]$tip.label[j])
all.2[j] <- all[hit]
ord2[j] <- order[hit]
}
names(all.2) <- names(ord2) <- trees.pruned[[i]]$tip.label
fit <- aov.phylo(all.2~ord2,
phy = trees.pruned[[i]],
nsim = 100)
#aov.phylo stores its ANOVA table in the "summary" attribute
aov.sum <- attributes(fit)$summary
results.all[i, 1] <- aov.sum$`Pr(>F)`[1]
results.all[i, 2] <- aov.sum$`Pr(>F) given phy`[1]
}
#save p-value data frame into csv
write.csv(results.all, "../results/all.csv")
|
/analyses/order.content.R
|
no_license
|
coleoguy/microsat
|
R
| false
| false
| 6,864
|
r
|
#load in necessary packages
library(geiger)
#read in insect phylogeny (posterior sample of trees)
trees <- read.nexus("../data/post.nex")
#read in the microsatellite data; column 4 supplies the taxon row names
#used to match tree tip labels
dat.mic <- read.csv("../results/micRocounter_results_TII.csv",
                    as.is = TRUE, row.names = 4)
#drop any unmatching data or tree tips from each of the 100 posterior
#trees; preallocate the list instead of growing it from c()/NULL
trees.pruned <- vector("list", 100)
for (i in seq_len(100)) {
  trees.pruned[[i]] <- treedata(phy = trees[[i]], data = dat.mic)[[1]]
}
# run aov.phylo with phylogenetic correction: for each trait, fit
# trait ~ order once per posterior tree (100 trees, nsim = 100 each)
# make named vector for bpMbp content
bp.Mbp <- dat.mic$bp.Mbp
names(bp.Mbp) <- row.names(dat.mic)
# make named vector for orders
order <- as.factor(dat.mic$order)
names(order) <- row.names(dat.mic)
#create results data frame and indicate proper column names
#col 1 "wophylo": ordinary ANOVA p-value; col 2 "wphylo": p-value from
#the phylogenetic simulation (one row per posterior tree)
results.Mbp <- matrix(NA, 100, 2)
colnames(results.Mbp) <- c("wophylo","wphylo")
#run phyloANOVA for bpMbp and orders
bp2 <- bp.Mbp
ord2 <- order
for(i in 1:100){
#reorder the trait and grouping vectors to this tree's tip-label order
for(j in 1:length(bp2)){
hit <- which(names(bp.Mbp) == trees.pruned[[i]]$tip.label[j])
bp2[j] <- bp.Mbp[hit]
ord2[j] <- order[hit]
}
names(bp2) <- names(ord2) <- trees.pruned[[i]]$tip.label
fit <- aov.phylo(bp2~ord2,
phy = trees.pruned[[i]],
nsim = 100)
#aov.phylo attaches its ANOVA table as the "summary" attribute
aov.sum <- attributes(fit)$summary
results.Mbp[i, 1] <- aov.sum$`Pr(>F)`[1]
results.Mbp[i, 2] <- aov.sum$`Pr(>F) given phy`[1]
}
#save p-value data frame into csv
write.csv(results.Mbp, "../results/Mbp.csv")
#make named vector for twomers
twomers <- dat.mic$twomers
names(twomers) <- row.names(dat.mic)
#create results data frame and indicate proper column names
results.twomers <- matrix(NA, 100, 2)
colnames(results.twomers) <- c("wophylo","wphylo")
#run phyloANOVA for twomers and orders (same scheme as bp.Mbp above)
twomers.2 <- twomers
ord2 <- order
for(i in 1:100){
for(j in 1:length(twomers.2)){
hit <- which(names(twomers) == trees.pruned[[i]]$tip.label[j])
twomers.2[j] <- twomers[hit]
ord2[j] <- order[hit]
}
names(twomers.2) <- names(ord2) <- trees.pruned[[i]]$tip.label
fit <- aov.phylo(twomers.2~ord2,
phy = trees.pruned[[i]],
nsim = 100)
aov.sum <- attributes(fit)$summary
results.twomers[i, 1] <- aov.sum$`Pr(>F)`[1]
results.twomers[i, 2] <- aov.sum$`Pr(>F) given phy`[1]
}
#save p-value data frame into csv
write.csv(results.twomers, "../results/twomers.csv")
#make named vector for threemers
threemers <- dat.mic$threemers
names(threemers) <- row.names(dat.mic)
#create results data frame and indicate proper column names
results.threemers <- matrix(NA, 100, 2)
colnames(results.threemers) <- c("wophylo","wphylo")
#run phyloANOVA for threemers and orders
threemers.2 <- threemers
ord2 <- order
for(i in 1:100){
for(j in 1:length(threemers.2)){
hit <- which(names(threemers) == trees.pruned[[i]]$tip.label[j])
threemers.2[j] <- threemers[hit]
ord2[j] <- order[hit]
}
names(threemers.2) <- names(ord2) <- trees.pruned[[i]]$tip.label
fit <- aov.phylo(threemers.2~ord2,
phy = trees.pruned[[i]],
nsim = 100)
aov.sum <- attributes(fit)$summary
results.threemers[i, 1] <- aov.sum$`Pr(>F)`[1]
results.threemers[i, 2] <- aov.sum$`Pr(>F) given phy`[1]
}
#save p-value data frame into csv
write.csv(results.threemers, "../results/threemers.csv")
#make named vector for fourmers
fourmers <- dat.mic$fourmers
names(fourmers) <- row.names(dat.mic)
#create results data frame and indicate proper column names
results.fourmers <- matrix(NA, 100, 2)
colnames(results.fourmers) <- c("wophylo","wphylo")
#run phyloANOVA for fourmers and orders
fourmers.2 <- fourmers
ord2 <- order
for(i in 1:100){
for(j in 1:length(fourmers.2)){
hit <- which(names(fourmers) == trees.pruned[[i]]$tip.label[j])
fourmers.2[j] <- fourmers[hit]
ord2[j] <- order[hit]
}
names(fourmers.2) <- names(ord2) <- trees.pruned[[i]]$tip.label
fit <- aov.phylo(fourmers.2~ord2,
phy = trees.pruned[[i]],
nsim = 100)
aov.sum <- attributes(fit)$summary
results.fourmers[i, 1] <- aov.sum$`Pr(>F)`[1]
results.fourmers[i, 2] <- aov.sum$`Pr(>F) given phy`[1]
}
#save p-value data frame into csv
write.csv(results.fourmers, "../results/fourmers.csv")
#make named vector for fivemers
fivemers <- dat.mic$fivemers
names(fivemers) <- row.names(dat.mic)
#create results data frame and indicate proper column names
results.fivemers <- matrix(NA, 100, 2)
colnames(results.fivemers) <- c("wophylo","wphylo")
#run phyloANOVA for fivemers and orders
fivemers.2 <- fivemers
ord2 <- order
for(i in 1:100){
for(j in 1:length(fivemers.2)){
hit <- which(names(fivemers) == trees.pruned[[i]]$tip.label[j])
fivemers.2[j] <- fivemers[hit]
ord2[j] <- order[hit]
}
names(fivemers.2) <- names(ord2) <- trees.pruned[[i]]$tip.label
fit <- aov.phylo(fivemers.2~ord2,
phy = trees.pruned[[i]],
nsim = 100)
aov.sum <- attributes(fit)$summary
results.fivemers[i, 1] <- aov.sum$`Pr(>F)`[1]
results.fivemers[i, 2] <- aov.sum$`Pr(>F) given phy`[1]
}
#save p-value data frame into csv
write.csv(results.fivemers, "../results/fivemers.csv")
#make named vector for sixmers
sixmers <- dat.mic$sixmers
names(sixmers) <- row.names(dat.mic)
#create results matrix: col 1 ordinary ANOVA p, col 2 phylogenetic p
results.sixmers <- matrix(NA, 100, 2)
colnames(results.sixmers) <- c("wophylo","wphylo")
#run phyloANOVA for sixmers and orders, once per posterior tree
sixmers.2 <- sixmers
ord2 <- order
for(i in 1:100){
  #reorder trait/order vectors to match this tree's tip labels
  for(j in 1:length(sixmers.2)){
    hit <- which(names(sixmers) == trees.pruned[[i]]$tip.label[j])
    #BUG FIX: this block was copied from the fourmers analysis and still
    #read fourmers[hit], so the "sixmers" results were actually computed
    #on fourmer counts
    sixmers.2[j] <- sixmers[hit]
    ord2[j] <- order[hit]
  }
  names(sixmers.2) <- names(ord2) <- trees.pruned[[i]]$tip.label
  fit <- aov.phylo(sixmers.2~ord2,
                   phy = trees.pruned[[i]],
                   nsim = 100)
  #aov.phylo stores its ANOVA table in the "summary" attribute
  aov.sum <- attributes(fit)$summary
  results.sixmers[i, 1] <- aov.sum$`Pr(>F)`[1]
  results.sixmers[i, 2] <- aov.sum$`Pr(>F) given phy`[1]
}
#save p-value matrix into csv
write.csv(results.sixmers, "../results/sixmers.csv")
#make named vector for all (total microsatellite content column)
all <- dat.mic$all
names(all) <- row.names(dat.mic)
#create results data frame and indicate proper column names
#col 1 "wophylo": ordinary ANOVA p; col 2 "wphylo": phylogenetic p
results.all <- matrix(NA, 100, 2)
colnames(results.all) <- c("wophylo","wphylo")
#run phyloANOVA for allmers and orders, once per posterior tree
all.2 <- all
ord2 <- order
for(i in 1:100){
#reorder the trait/order vectors to match this tree's tip label order
for(j in 1:length(all.2)){
hit <- which(names(all) == trees.pruned[[i]]$tip.label[j])
all.2[j] <- all[hit]
ord2[j] <- order[hit]
}
names(all.2) <- names(ord2) <- trees.pruned[[i]]$tip.label
fit <- aov.phylo(all.2~ord2,
phy = trees.pruned[[i]],
nsim = 100)
#aov.phylo stores its ANOVA table in the "summary" attribute
aov.sum <- attributes(fit)$summary
results.all[i, 1] <- aov.sum$`Pr(>F)`[1]
results.all[i, 2] <- aov.sum$`Pr(>F) given phy`[1]
}
#save p-value data frame into csv
write.csv(results.all, "../results/all.csv")
|
#BL BARNHART 2018-04-04
#This is for running SWAT and optimizing fertilizer reductions at the
#subbasin scale. In particular, below consists of two functions.
#1. getSwatInputandProfit(): this calculates the fertilizer inputs
#into swat given certain tax and q levels.
#2. runSWAT() calculates no3 outputs from SWAT given fertilizer inputs.
################################################
########## SETTING UP INPUT PARAMETERS #########
################################################
#Input Taxes
#x = runif(112,min,max)
#lookupfile = read.csv('/home/blb/swat/swat_sweden/lookup.csv')
# getSwatInputandProfit(): given per-subbasin fertilizer taxes (elements
# 1:112) and tillage decision variables (elements 113:224), compute the
# profit-maximizing N fertilizer rate for every CORN/SOYB HRU, the
# tax-revenue objective, and the basin-wide change in farm profit
# relative to the no-policy (mulch-till) baseline.
#
# Args:
#   input_taxes_tills: numeric length 224; [1:112] are tax multipliers,
#     [113:224] are tillage signals (< 0.5 -> mulch till, else no-till).
#   lookupfile: data.frame with columns V1 (HRU id), V2 (subbasin 1:112),
#     V7 (till code, unused here), V8 (crop), V9 (yield factor q),
#     V10 (area in km2).
#
# Returns: data.frame with 7280 rows: swat_input (first 3640 entries are
#   fertilizer rates in kg/ha or NA, last 3640 the tillage signal per
#   HRU), plus taxObj and basinprofit_diffs (scalars, recycled).
getSwatInputandProfit <- function(input_taxes_tills, lookupfile) {
  #example: input_taxes_tills = c(runif(112,1,2), runif(112,0,1))
  lookup = lookupfile
  q = lookup$V9
  area = lookup$V10*247.105 #convert from km2 to acres
  #Constants (corn response / price parameters)
  row = 50.98 #lb/ac
  dc = 0 #if previous crop is corn, this is equal to 1; 0 if otherwise.
  wN = 0.25 #$/lb from Duff and Smith (2004)
  a13 = -0.0028545 #bu.ac./(lb)^2
  p1 = 2.18 #$/bu Johanns (2012) year 2003
  b16 = 0.21 #$/bu
  a12 = 0.74044 #bu/lb
  #Initialize arrays: 3640 HRU fertilizer slots + 3640 tillage slots
  swat_input = array(NA,c(7280))
  qvaluesused = array(NA,c(3640,1))
  areaused = array(NA,c(3640,1))
  inputtaxesused = array(NA,c(3640,1))
  subbasinnumber = array(NA,c(3640,1))
  #current config; all CORN and SOYB set to mulch till.
  s2nopolicy = 1 #1 if mulch till; 0 if otherwise
  s3nopolicy = 0 #1 if no-till; 0 if otherwise
  s2policy = array(NA,c(3640))
  s3policy = array(NA,c(3640))
  #Calculate fertilizer application @ max profit. This fertilizer is only
  #applied in the CORN year of the CORN/SOY rotation, so dc = dcc = 0.
  counter = 1
  for (j in 1:112) {
    sub = lookup[lookup$V2==j,]
    #seq_len() guards against subbasins with no rows (1:0 would iterate
    #over c(1, 0) and crash on a zero-length %in% condition)
    for (k in seq_len(length(sub$V1))) {
      if (sub$V8[k] %in% c('CORN','SOYB')) {
        swat_input[counter] = as.double(
          row*dc + ((1.04 * input_taxes_tills[j] * wN)/(2*a13*q[counter]*(p1-b16))) +
          -(a12/(2*a13))
        )
        #tillage decision: < 0.5 -> mulch till, otherwise no-till
        #(previously two strict-inequality ifs left the policy flags NA
        #when the signal was exactly 0.5)
        if (input_taxes_tills[j+112] < 0.5) {
          s2policy[counter] = 1
          s3policy[counter] = 0
        } else {
          s2policy[counter] = 0
          s3policy[counter] = 1
        }
        swat_input[counter+3640] = input_taxes_tills[j+112]
        qvaluesused[counter,1] = q[counter]
        areaused[counter,1] = area[counter]
        inputtaxesused[counter,1] = input_taxes_tills[j]
        subbasinnumber[counter,1] = j
        counter = counter + 1
      } else {
        #non CORN/SOYB HRU: no fertilizer, no tillage decision
        swat_input[counter] = NA
        swat_input[counter+3640] = NA
        counter = counter + 1
      }
    }
  }
  #Convert fertilizer from lb/ac to kg/ha (1 lb/ac = 1.12085 kg/ha);
  #negative profit-maximizing rates are treated as "no application" (NA)
  for (i in 1:3640) {
    if (!is.na(swat_input[i])) {
      swat_input[i] = swat_input[i]*1.12085
      if (swat_input[i] < 0) { swat_input[i] = NA }
    }
  }
  ###################################################
  ############## CALCULATE TOTPROFIT
  #corn yield response parameters
  a10 = 177.0309 #bu/ac
  a11 = -28.4758 #bu/ac
  a12 = 0.74044 #bu/lb
  a13 = -0.0028545 #bu.ac./(lb^2)
  row = 50.98 #lb/ac
  gam11 = 0.978
  gam12 = 0.932
  gam13 = 0.984
  gam14 = 0.970
  #dc is set below.
  dcc = 0 #if last 2 crops were corn, then 1; 0 otherwise.
  b10 = 183.62 #$/ac
  b11 = -6.37 #$/ac
  b12 = -6.55 #$/ac
  b13 = 20.05 #$/ac
  b14 = 12.66 #$/ac
  b15 = 3.06 #$/ac
  b16 = 0.21 #$/bu
  wN = 0.25 #$/lb
  a20 = 47.3876 #bu/ac
  a21 = 11.78437 #bu/ac
  a22 = 19.6716 #bu/ac
  gam21 = 0.974
  gam22 = 0.951
  b20 = 143.80 #$/ac
  b21 = -1.33 #$/ac
  b22 = -5.80 #$/ac
  b23 = 0.19 #$/bu
  p1 = 2.18 #$/bu
  p2 = 6.08 #$/bu
  ycorn_np = array(NA,c(3640))
  costcorn_np = array(NA,c(3640))
  ysoy_np = array(NA,c(3640))
  costsoy_np = array(NA,c(3640))
  totprofit_np = array(NA,c(3640))
  ycorn_wp = array(NA,c(3640))
  costcorn_wp = array(NA,c(3640))
  ysoy_wp = array(NA,c(3640))
  costsoy_wp = array(NA,c(3640))
  totprofit_wp = array(NA,c(3640))
  indivtaxobj = array(NA,c(3640))
  #NOTE(review): the yield equations below consume swat_input AFTER the
  #kg/ha conversion while the response coefficients are stated in lb/ac
  #units -- confirm the intended units with the model authors.
  #baseline: no tillage policy (mulch till everywhere), tax as given
  for (j in 1:3640) {
    dc = 0; #corn year follows SOYB
    ycorn_np[j] = qvaluesused[j]*((a10*gam11^(dc*s2nopolicy)*gam12^(dc*s3nopolicy)*gam14^((1-dc)*s3nopolicy)) +
      a11*dc + a12*(swat_input[j]-(row*dc)) + a13*((swat_input[j]-row*dc)^2))
    costcorn_np[j] = b10 + s2nopolicy*b11 + s3nopolicy*b12 + dc*(b13+s2nopolicy*b14+s3nopolicy*b15) +
      b16*ycorn_np[j] + 1.04*inputtaxesused[j]*wN*swat_input[j]
    dc = 1; #soy year follows CORN
    ysoy_np[j] = qvaluesused[j]*((a20*(gam21^s2nopolicy)*(gam22^s3nopolicy)) + (a21*dc) + (a22*dcc))
    costsoy_np[j] = b20 + (s2nopolicy*b21) + (s3nopolicy*b22) + (b23*ysoy_np[j])
    totprofit_np[j] = (p1*ycorn_np[j] + p2*ysoy_np[j]) - (costcorn_np[j] + costsoy_np[j])
    #tax revenue above the baseline multiplier of 1
    indivtaxobj[j] = (inputtaxesused[j]-1)*wN*swat_input[j]
  }
  #with tax and tillage policy applied
  for (j in 1:3640) {
    dc = 0; #corn year follows SOYB
    ycorn_wp[j] = qvaluesused[j]*((a10*gam11^(dc*s2policy[j])*gam12^(dc*s3policy[j])*gam14^((1-dc)*s3policy[j])) +
      a11*dc + a12*(swat_input[j]-(row*dc)) + a13*((swat_input[j]-row*dc)^2))
    costcorn_wp[j] = b10 + s2policy[j]*b11 + s3policy[j]*b12 + dc*(b13+s2policy[j]*b14+s3policy[j]*b15) +
      b16*ycorn_wp[j] + 1.04*inputtaxesused[j]*wN*swat_input[j]
    dc = 1; #soy year follows CORN
    ysoy_wp[j] = qvaluesused[j]*((a20*(gam21^s2policy[j])*(gam22^s3policy[j])) + (a21*dc) + (a22*dcc))
    costsoy_wp[j] = b20 + (s2policy[j]*b21) + (s3policy[j]*b22) + (b23*ysoy_wp[j])
    totprofit_wp[j] = (p1*ycorn_wp[j] + p2*ysoy_wp[j]) - (costcorn_wp[j] + costsoy_wp[j])
  }
  #per-HRU profit change (policy minus baseline), weighted by acreage
  profit_diffs = array(NA,c(3640))
  for (i in 1:3640) {
    profit_diffs[i] = totprofit_wp[i]*areaused[i,1] - totprofit_np[i]*areaused[i,1]
  }
  basinprofit_diffs = abs(sum(profit_diffs,na.rm=TRUE))
  taxObj = sum(indivtaxobj*areaused[,1],na.rm=TRUE)
  dfout <- data.frame(swat_input,taxObj,basinprofit_diffs)
  colnames(dfout) <- c("swat_input","taxObj","basinprofit_diffs")
  return(dfout)
}
# getNo3Outputs(): run the compiled SWAT model on the assembled input
# vector and return a scalar NO3 output.
#
# Args:
#   swat_input: numeric length 7280 (fertilizer rates + tillage flags)
#     as produced by getSwatInputandProfit().
# Returns: scalar -- sum of the 731 daily reach outputs divided by 2
#   (presumably a per-year figure for a two-year run -- confirm).
#
# NOTE(review): loads a machine-specific shared object and calls setwd()
# without restoring the previous directory, so both side effects persist
# after the function returns (consider on.exit(setwd(old))).
getNo3Outputs <- function(swat_input) {
###################################################
########## RUN SWAT ###############################
###################################################
#INPUTS ARE input_swat FROM PREVIOUS PORTION.
#The .so was compiled with Intel Fortran x64. I have to invoke the
#following system code to allow the "dyn.load" command to work.
#system('source /opt/intel/bin/compilervars.sh intel64')
#Load SWAT as a standard object file (.so)
dyn.load('/home/blb/swat/bilevel_targeting_swat/Raccoon/src_swat/swat2009_i64_calibrate.so')
#Set directory Path to the SWAT directory
setwd("/home/blb/swat/bilevel_targeting_swat/Raccoon/swat_inputs_Raccoon/")
#call the Fortran entry point; rchdy2_Rga receives 731 daily values
output <- .Fortran("swat2009",
vars_Rga = swat_input,
nvars = as.integer(7280),
rchdy2_Rga = double(731)
)
###################################################
no3outputs = sum(output$rchdy2_Rga)/2
return(no3outputs)
}
|
/Raccoon/rcode_taxonly/taxonly_sub_optim.R
|
no_license
|
fqx9904/bilevel_targeting_swat
|
R
| false
| false
| 7,909
|
r
|
#BL BARNHART 2018-04-04
#This is for running SWAT and optimizing fertilizer reductions at the
#subbasin scale. In particular, below consists of two functions.
#1. getSwatInputandProfit(): this calculates the fertilizer inputs
#into swat given certain tax and q levels.
#2. runSWAT() calculates no3 outputs from SWAT given fertilizer inputs.
################################################
########## SETTING UP INPUT PARAMETERS #########
################################################
#Input Taxes
#x = runif(112,min,max)
#lookupfile = read.csv('/home/blb/swat/swat_sweden/lookup.csv')
# getSwatInputandProfit(): given per-subbasin fertilizer taxes (elements
# 1:112) and tillage decision variables (elements 113:224), compute the
# profit-maximizing N fertilizer rate for every CORN/SOYB HRU, the
# tax-revenue objective, and the basin-wide change in farm profit
# relative to the no-policy (mulch-till) baseline.
#
# Args:
#   input_taxes_tills: numeric length 224; [1:112] are tax multipliers,
#     [113:224] are tillage signals (< 0.5 -> mulch till, else no-till).
#   lookupfile: data.frame with columns V1 (HRU id), V2 (subbasin 1:112),
#     V7 (till code, unused here), V8 (crop), V9 (yield factor q),
#     V10 (area in km2).
#
# Returns: data.frame with 7280 rows: swat_input (first 3640 entries are
#   fertilizer rates in kg/ha or NA, last 3640 the tillage signal per
#   HRU), plus taxObj and basinprofit_diffs (scalars, recycled).
getSwatInputandProfit <- function(input_taxes_tills, lookupfile) {
  #example: input_taxes_tills = c(runif(112,1,2), runif(112,0,1))
  lookup = lookupfile
  q = lookup$V9
  area = lookup$V10*247.105 #convert from km2 to acres
  #Constants (corn response / price parameters)
  row = 50.98 #lb/ac
  dc = 0 #if previous crop is corn, this is equal to 1; 0 if otherwise.
  wN = 0.25 #$/lb from Duff and Smith (2004)
  a13 = -0.0028545 #bu.ac./(lb)^2
  p1 = 2.18 #$/bu Johanns (2012) year 2003
  b16 = 0.21 #$/bu
  a12 = 0.74044 #bu/lb
  #Initialize arrays: 3640 HRU fertilizer slots + 3640 tillage slots
  swat_input = array(NA,c(7280))
  qvaluesused = array(NA,c(3640,1))
  areaused = array(NA,c(3640,1))
  inputtaxesused = array(NA,c(3640,1))
  subbasinnumber = array(NA,c(3640,1))
  #current config; all CORN and SOYB set to mulch till.
  s2nopolicy = 1 #1 if mulch till; 0 if otherwise
  s3nopolicy = 0 #1 if no-till; 0 if otherwise
  s2policy = array(NA,c(3640))
  s3policy = array(NA,c(3640))
  #Calculate fertilizer application @ max profit. This fertilizer is only
  #applied in the CORN year of the CORN/SOY rotation, so dc = dcc = 0.
  counter = 1
  for (j in 1:112) {
    sub = lookup[lookup$V2==j,]
    #seq_len() guards against subbasins with no rows (1:0 would iterate
    #over c(1, 0) and crash on a zero-length %in% condition)
    for (k in seq_len(length(sub$V1))) {
      if (sub$V8[k] %in% c('CORN','SOYB')) {
        swat_input[counter] = as.double(
          row*dc + ((1.04 * input_taxes_tills[j] * wN)/(2*a13*q[counter]*(p1-b16))) +
          -(a12/(2*a13))
        )
        #tillage decision: < 0.5 -> mulch till, otherwise no-till
        #(previously two strict-inequality ifs left the policy flags NA
        #when the signal was exactly 0.5)
        if (input_taxes_tills[j+112] < 0.5) {
          s2policy[counter] = 1
          s3policy[counter] = 0
        } else {
          s2policy[counter] = 0
          s3policy[counter] = 1
        }
        swat_input[counter+3640] = input_taxes_tills[j+112]
        qvaluesused[counter,1] = q[counter]
        areaused[counter,1] = area[counter]
        inputtaxesused[counter,1] = input_taxes_tills[j]
        subbasinnumber[counter,1] = j
        counter = counter + 1
      } else {
        #non CORN/SOYB HRU: no fertilizer, no tillage decision
        swat_input[counter] = NA
        swat_input[counter+3640] = NA
        counter = counter + 1
      }
    }
  }
  #Convert fertilizer from lb/ac to kg/ha (1 lb/ac = 1.12085 kg/ha);
  #negative profit-maximizing rates are treated as "no application" (NA)
  for (i in 1:3640) {
    if (!is.na(swat_input[i])) {
      swat_input[i] = swat_input[i]*1.12085
      if (swat_input[i] < 0) { swat_input[i] = NA }
    }
  }
  ###################################################
  ############## CALCULATE TOTPROFIT
  #corn yield response parameters
  a10 = 177.0309 #bu/ac
  a11 = -28.4758 #bu/ac
  a12 = 0.74044 #bu/lb
  a13 = -0.0028545 #bu.ac./(lb^2)
  row = 50.98 #lb/ac
  gam11 = 0.978
  gam12 = 0.932
  gam13 = 0.984
  gam14 = 0.970
  #dc is set below.
  dcc = 0 #if last 2 crops were corn, then 1; 0 otherwise.
  b10 = 183.62 #$/ac
  b11 = -6.37 #$/ac
  b12 = -6.55 #$/ac
  b13 = 20.05 #$/ac
  b14 = 12.66 #$/ac
  b15 = 3.06 #$/ac
  b16 = 0.21 #$/bu
  wN = 0.25 #$/lb
  a20 = 47.3876 #bu/ac
  a21 = 11.78437 #bu/ac
  a22 = 19.6716 #bu/ac
  gam21 = 0.974
  gam22 = 0.951
  b20 = 143.80 #$/ac
  b21 = -1.33 #$/ac
  b22 = -5.80 #$/ac
  b23 = 0.19 #$/bu
  p1 = 2.18 #$/bu
  p2 = 6.08 #$/bu
  ycorn_np = array(NA,c(3640))
  costcorn_np = array(NA,c(3640))
  ysoy_np = array(NA,c(3640))
  costsoy_np = array(NA,c(3640))
  totprofit_np = array(NA,c(3640))
  ycorn_wp = array(NA,c(3640))
  costcorn_wp = array(NA,c(3640))
  ysoy_wp = array(NA,c(3640))
  costsoy_wp = array(NA,c(3640))
  totprofit_wp = array(NA,c(3640))
  indivtaxobj = array(NA,c(3640))
  #NOTE(review): the yield equations below consume swat_input AFTER the
  #kg/ha conversion while the response coefficients are stated in lb/ac
  #units -- confirm the intended units with the model authors.
  #baseline: no tillage policy (mulch till everywhere), tax as given
  for (j in 1:3640) {
    dc = 0; #corn year follows SOYB
    ycorn_np[j] = qvaluesused[j]*((a10*gam11^(dc*s2nopolicy)*gam12^(dc*s3nopolicy)*gam14^((1-dc)*s3nopolicy)) +
      a11*dc + a12*(swat_input[j]-(row*dc)) + a13*((swat_input[j]-row*dc)^2))
    costcorn_np[j] = b10 + s2nopolicy*b11 + s3nopolicy*b12 + dc*(b13+s2nopolicy*b14+s3nopolicy*b15) +
      b16*ycorn_np[j] + 1.04*inputtaxesused[j]*wN*swat_input[j]
    dc = 1; #soy year follows CORN
    ysoy_np[j] = qvaluesused[j]*((a20*(gam21^s2nopolicy)*(gam22^s3nopolicy)) + (a21*dc) + (a22*dcc))
    costsoy_np[j] = b20 + (s2nopolicy*b21) + (s3nopolicy*b22) + (b23*ysoy_np[j])
    totprofit_np[j] = (p1*ycorn_np[j] + p2*ysoy_np[j]) - (costcorn_np[j] + costsoy_np[j])
    #tax revenue above the baseline multiplier of 1
    indivtaxobj[j] = (inputtaxesused[j]-1)*wN*swat_input[j]
  }
  #with tax and tillage policy applied
  for (j in 1:3640) {
    dc = 0; #corn year follows SOYB
    ycorn_wp[j] = qvaluesused[j]*((a10*gam11^(dc*s2policy[j])*gam12^(dc*s3policy[j])*gam14^((1-dc)*s3policy[j])) +
      a11*dc + a12*(swat_input[j]-(row*dc)) + a13*((swat_input[j]-row*dc)^2))
    costcorn_wp[j] = b10 + s2policy[j]*b11 + s3policy[j]*b12 + dc*(b13+s2policy[j]*b14+s3policy[j]*b15) +
      b16*ycorn_wp[j] + 1.04*inputtaxesused[j]*wN*swat_input[j]
    dc = 1; #soy year follows CORN
    ysoy_wp[j] = qvaluesused[j]*((a20*(gam21^s2policy[j])*(gam22^s3policy[j])) + (a21*dc) + (a22*dcc))
    costsoy_wp[j] = b20 + (s2policy[j]*b21) + (s3policy[j]*b22) + (b23*ysoy_wp[j])
    totprofit_wp[j] = (p1*ycorn_wp[j] + p2*ysoy_wp[j]) - (costcorn_wp[j] + costsoy_wp[j])
  }
  #per-HRU profit change (policy minus baseline), weighted by acreage
  profit_diffs = array(NA,c(3640))
  for (i in 1:3640) {
    profit_diffs[i] = totprofit_wp[i]*areaused[i,1] - totprofit_np[i]*areaused[i,1]
  }
  basinprofit_diffs = abs(sum(profit_diffs,na.rm=TRUE))
  taxObj = sum(indivtaxobj*areaused[,1],na.rm=TRUE)
  dfout <- data.frame(swat_input,taxObj,basinprofit_diffs)
  colnames(dfout) <- c("swat_input","taxObj","basinprofit_diffs")
  return(dfout)
}
# getNo3Outputs(): run the compiled SWAT model on the assembled input
# vector and return a scalar NO3 output.
#
# Args:
#   swat_input: numeric length 7280 (fertilizer rates + tillage flags)
#     as produced by getSwatInputandProfit().
# Returns: scalar -- sum of the 731 daily reach outputs divided by 2
#   (presumably a per-year figure for a two-year run -- confirm).
#
# NOTE(review): loads a machine-specific shared object and calls setwd()
# without restoring the previous directory, so both side effects persist
# after the function returns (consider on.exit(setwd(old))).
getNo3Outputs <- function(swat_input) {
###################################################
########## RUN SWAT ###############################
###################################################
#INPUTS ARE input_swat FROM PREVIOUS PORTION.
#The .so was compiled with Intel Fortran x64. I have to invoke the
#following system code to allow the "dyn.load" command to work.
#system('source /opt/intel/bin/compilervars.sh intel64')
#Load SWAT as a standard object file (.so)
dyn.load('/home/blb/swat/bilevel_targeting_swat/Raccoon/src_swat/swat2009_i64_calibrate.so')
#Set directory Path to the SWAT directory
setwd("/home/blb/swat/bilevel_targeting_swat/Raccoon/swat_inputs_Raccoon/")
#call the Fortran entry point; rchdy2_Rga receives 731 daily values
output <- .Fortran("swat2009",
vars_Rga = swat_input,
nvars = as.integer(7280),
rchdy2_Rga = double(731)
)
###################################################
no3outputs = sum(output$rchdy2_Rga)/2
return(no3outputs)
}
|
# package globals
# Private mutable state for the package, kept in an environment rather
# than module-level variables; parent = emptyenv() so name lookups in it
# never fall through to the search path.
.globals <- new.env(parent = emptyenv())
.globals$overlay <- list()
.globals$job_registry <- new.env(parent = emptyenv())
|
/R/cloudml-package.R
|
no_license
|
MitchellAkeba/cloudml
|
R
| false
| false
| 142
|
r
|
# package globals
# Private mutable state for the package, kept in an environment rather
# than module-level variables; parent = emptyenv() so name lookups in it
# never fall through to the search path.
.globals <- new.env(parent = emptyenv())
.globals$overlay <- list()
.globals$job_registry <- new.env(parent = emptyenv())
|
library(dnar)
library(xlsx)
library(lubridate)
# Convert "UK"-style sample IDs to their "MM" equivalents.
#
# IDs look like "<patient>.<visit>[.<extra>...]"; the patient part may
# contain stray spaces and use the prefix "UK", which is renamed to "EJ"
# and then mapped through mmLookup.
#
# Args:
#   xx: character vector of IDs.
#   mmLookup: named character vector mapping "EJ..." ids to "MM..." ids.
#   errorOnNA: if TRUE, stop when an EJ id is missing from mmLookup;
#     otherwise the NA is kept in the output.
#
# Returns: character vector of converted IDs (empty ids stay "").
convertUKMM <- function(xx, mmLookup, errorOnNA = TRUE) {
  splits <- strsplit(xx, '\\.')
  splits <- lapply(splits, function(parts) {
    orig <- parts
    if (length(parts) == 0) return('')
    # strip spaces and rename the UK prefix to EJ before the lookup
    parts[1] <- sub('UK', 'EJ', gsub(' ', '', parts[1]))
    if (grepl('EJ', parts[1])) parts[1] <- mmLookup[parts[1]]
    # && (scalar short-circuit) instead of & for the scalar condition
    if (is.na(parts[1]) && errorOnNA)
      stop('Problem converting ID "', paste(orig, collapse = '.'), '"')
    parts
  })
  # vapply guarantees a character result even for empty input
  vapply(splits, paste, character(1), collapse = '.')
}
# Repair visit-number typos in "<patient>.<visit>[.<extra>]" IDs.
#
# Visits within a patient are expected to be increasing; this fixes
# common data-entry errors: a letter o/O typed for zero, an extra
# trailing digit, a spurious trailing zero (e.g. 20 entered for 2), and
# a dropped trailing zero (e.g. 2 entered for 20).
#
# Args:
#   xx: character vector of IDs, in visit order within each patient.
#   maxVisit: visits above this with a nonzero last digit are assumed to
#     carry a spurious digit and are divided by 10.
#   checkRank: if TRUE, stop when the repaired visits are still not
#     strictly increasing within a patient.
#
# Returns: IDs with the visit part normalized to two digits ("%02d"),
#   preserving any trailing ".suffix" components.
fixDecimals<-function(xx,maxVisit=29,checkRank=TRUE){
splits<-strsplit(xx,'\\.')
# letter o/O typed in place of zero in the visit field
visits<-as.numeric(sub('[oO]','0',sapply(splits,'[',2)))
pats<-sapply(splits,'[',1)
# ave() applies the repair function to each patient's visits as a group
fixVisit<-ave(visits,pats,FUN=function(xx){
out<-xx
#catch crazy visits e.g. 70 (note: out%%10 is used as a logical mask,
#so only values with a nonzero last digit are divided)
out[out>maxVisit&out%%10]<-out[out>maxVisit&out%%10]/10
#catch too high values
potentialProbs<-which(xx%%10==0)
for(ii in rev(potentialProbs)){
if(ii==length(xx))break
#fix too high e.g. 20 instead of 2 (detected when a later visit is
#smaller than this one)
if(any(out[(ii+1):length(out)]<out[ii]))out[ii]<-out[ii]/10
}
#catch too low value (visit below the running maximum -> multiply by 10)
probs<-out<cummax(out)
if(any(probs))out[probs]<-out[probs]*10
#sanity check: repaired visits must be strictly increasing
if(checkRank&&any(rank(out)!=1:length(out)))stop('Problem fixing visits: ',paste(xx,collapse=', '))
out
})
# reassemble: patient, zero-padded visit, and any trailing components
return(sprintf('%s.%s%s',pats,sprintf('%02d',fixVisit),sapply(splits,function(xx)ifelse(length(xx)>2,sprintf('.%s',paste(xx[-2:-1],collapse='.')),''))))
}
##MASTER META LIST##
# Build the combined per-visit metadata table from the two cohort CSVs;
# keeps the first six columns after dropping the master table's first two
meta1<-read.csv('data/For Scot, Complete Master table AUG.2017_meta.csv',stringsAsFactors=FALSE)[,-1:-2]
meta1<-meta1[,1:6]
#meta1$id<-fillDown(meta1$ID)
# patient id = the part of "Time.Points" before the first dot
meta1$id<-sapply(strsplit(meta1$Time.Points,'\\.'),'[',1)
meta1<-meta1[meta1$Time.Points!='Total number of sequences',]
rownames(meta1)<-meta1$Time.Points
meta2<-read.csv('data/New MM cohort patients.csv',stringsAsFactors=FALSE)
#meta2<-meta2[meta2$Date!=''&!is.na(meta2$Date)&meta2$Date!='Date',]
meta2<-meta2[meta2[,2]!='',]
colnames(meta2)[1:2]<-c('ID','Time.Points')
colnames(meta2)[colnames(meta2)=='Viral.load']<-'VL'
colnames(meta2)[colnames(meta2)=='CD4.count']<-'CD4'
meta2<-meta2[,1:6]
# fillDown() (dnar): carry the last non-empty ID down over blank rows
meta2$id<-fillDown(meta2$ID)
meta2$Time.Points<-sprintf('MM%s',meta2$Time.Points)
# replace the MM-number prefix with the patient id and zero-pad a
# single trailing visit digit (e.g. "MM1.3" -> "MM1.03")
tmp<-sub('\\.([0-9])$','.0\\1',mapply(function(xx,yy)sub('^MM[0-9]+',xx,yy),meta2$id,meta2$Time.Points))
meta2$Time.Points<-tmp
rownames(meta2)<-meta2$Time.Points
meta<-rbind(meta1,meta2)
meta$mm<-meta$id
# manual corrections for known date typos in the source sheets
meta$Date[meta$Date==' 12/07/2001'&meta$id=='MM14']<-'7/12/2001'
meta$Date[meta$Date=='07/0806']<-'07/08/2006'
meta$Date[meta$Date==' 08/21/08']<-'08/21/2008'
# dates with a 4-digit year are parsed month/day/year, otherwise d/m/y
meta$rDate<-as_date(sapply(meta$Date,function(xx)ifelse(grepl('/[0-9]{4}',xx),mdy(xx),dmy(xx))))
# strip ">", "<" and thousands separators before converting viral load
meta$vl<-as.numeric(gsub('[><,]','',meta$VL))
meta$cd4<-as.numeric(meta$CD4)
# visit = the part of the rowname after the first dot
meta$visit<-sapply(strsplit(rownames(meta),'\\.'),'[',2)
##CORRECTIONS and IFN###
#per-patient sheets of plasma cytokine data; each sheet is parsed into a data
#frame of sample/date/VL/CD4/IFN measurements (xlsx package workbook API)
wb <- loadWorkbook("meta/EJ MM plasma cytokine data CORRECTED updated VL CD4 Jan2018.xlsx")
rawMetas<-lapply(getSheets(wb),function(sheet){
  rows<-getCells(getRows(sheet),simplify=FALSE)
  vals<-lapply(rows,function(row){
    tmp<-sapply(row,function(xx)ifelse(is.null(xx),NA,getCellValue(xx)))
    #50 is an arbitrary width to make all rows the same length
    out<-rep(NA,50)
    names(out)<-1:50
    out[names(tmp)[names(tmp)!='']]<-tmp[names(tmp)!='']
    return(out)
  })
  #dates are either Excel numeric serials or dd.mm.yy-style strings
  dates<-sapply(vals,'[',2)
  goodDates<-!is.na(suppressWarnings(as.numeric(dates)))
  isStringDate<-all(!goodDates)
  if(isStringDate)goodDates<-grepl('[0-9]{2}[./][0-9]{2}[./][12]?[90]?[0-9]{2}',dates)
  #drop the two header rows and any row without a parseable date
  dat<-do.call(rbind,vals[1:length(vals)>2&goodDates])
  #NOTE(review): browser() left in for interactive debugging of empty sheets
  if(is.null(dat))browser()
  cols<-c('sample','date','dfosx','oldViralLoad','viralLoad','cd4','diluted','XXX','ifna1','ifna2','XXX','ifnb1','ifnb2','XXX','ifno1','ifno2','XXX','ifng1','ifng2')
  #sheets without a corrected-VL column lack oldViralLoad; sheets without a
  #third header cell carry a corrected date column instead of dfosx first
  if(!vals[[1]][[5]] %in% c('Corrected','corrected','Viral load (copies/ml)'))cols<-cols[-4]
  if(is.na(vals[[1]][[3]]))cols[3:4]<-c('newDate','dfosx')
  colnames(dat)[1:length(cols)]<-cols
  dat<-dat[,cols[cols!='XXX']]
  #BDL = below detection limit
  dat[dat=='BDL']<- 1
  dat<-as.data.frame(dat,stringsAsFactors=FALSE)
  dat[,grepl('^ifn',colnames(dat))]<- apply(dat[,grepl('^ifn',colnames(dat))],2,as.numeric)
  dat$dfosx<-as.numeric(dat$dfosx)
  if(any(colnames(dat)=='newDate')){
    #sanity check corrected dates against the originals before overwriting
    #(fixed: previously compared against nonexistent dat$oldDate, collapsing
    #to any(logical(0)) so the check could never trigger)
    if(any(!is.na(dat$newDate)&(abs(as.numeric(dat$newDate)-as.numeric(dat$date))>10)))stop('Big difference in old and new date')
    dat[!is.na(dat$newDate),'date']<-dat[!is.na(dat$newDate),'newDate']
  }
  if(isStringDate)dat$rDate<-dmy(dat$date)
  else dat$rDate<-as.Date(as.numeric(dat$date),origin='1899-12-30')
  dat$cd4<-as.numeric(ifelse(dat$cd4 %in% c('not done','no data','NA'),NA,dat$cd4))
  dat$vl<-as.numeric(ifelse(dat$viralLoad %in% c('not done','no data'),NA,gsub('[><,]','',dat$viralLoad)))
  if(any(colnames(dat)=='oldViralLoad')){
    #fall back to the uncorrected VL where no corrected value exists, after
    #checking old and new agree within a factor of two
    oldVl<-as.numeric(gsub('[<>,]','',dat$oldViralLoad))
    if(any(!is.na(dat$vl)&(abs(log2(dat$vl/oldVl))>1)))stop('Big difference in old and new vl')
    dat[is.na(dat$vl),'vl']<-oldVl[is.na(dat$vl)]
  }
  return(dat)
})
#name each parsed sheet by its sheet name and keep only the columns present in
#every sheet, ordered as in the first sheet (orderIn from dnar)
names(rawMetas)<-names(getSheets(wb))
colCounts<-table(unlist(sapply(rawMetas,colnames)))
targetCols<-names(colCounts)[colCounts==length(rawMetas)]
ifnMeta<-do.call(rbind,lapply(rawMetas,'[',targetCols[orderIn(targetCols,colnames(rawMetas[[1]]))]))
#rownames look like "EJxx MMyy.visit": split out the EJ and MM ids
ifnMeta$ej<-sapply(strsplit(rownames(ifnMeta),' '),'[',1)
ifnMeta$mm<-sapply(strsplit(rownames(ifnMeta),'[ .]'),'[',2)
#letter o/O occasionally typed for zero in sample ids
ifnMeta$sample<-sub('[oO]','0',ifnMeta$sample)
isVisitSample<-!is.na(ifnMeta$sample)&grepl('\\.',ifnMeta$sample)&!grepl('[a-zA-Z]$',ifnMeta$sample)
ifnMeta$oldSample<-ifnMeta$sample
ifnMeta$sample[isVisitSample]<-fixDecimals(ifnMeta$sample[isVisitSample])
ifnMeta$visit<-sapply(strsplit(ifnMeta$sample,'[.]'),'[',2)
ifnMeta<-ifnMeta[ifnMeta$mm %in% meta$mm,]
#fix inconsistent date formatting
ifnMeta[ifnMeta$ej=='EJ52'&ifnMeta$rDate=='2001-06-09','rDate']<-ymd('2001-09-06')
#lookup tables between MM and EJ ids
tmp<-unique(ifnMeta[,c('mm','ej')])
ejLookup<-structure(tmp$ej,.Names=tmp$mm)
#fixed: was tmp$e, which only resolved to the ej column via $ partial matching
mmLookup<-structure(tmp$mm,.Names=tmp$ej)
## cell sorting ##
sorts<-read.csv('meta/AFM MM data summary Jan2018.csv',stringsAsFactors=FALSE)
colnames(sorts)[1]<-'patient'
sorts$patient<-fillDown(sorts$patient)
#collapse the multi-row spreadsheet header into single column names and
#disambiguate repeated names with a __N suffix
newColnames<-sub('\\.\\.3\\.replicate\\.values\\.|\\.\\.\\..+cells\\.','',fillDown(ifelse(grepl('^X\\.[0-9]+$',colnames(sorts)),NA,colnames(sorts))))
newColnames<-sprintf('%s%s',newColnames,ave(newColnames,newColnames,FUN=function(xx)if(length(xx)==1)'' else sprintf('__%d',1:length(xx))))
colnames(sorts)<-newColnames
sorts$ej<-sub(' ','',sapply(strsplit(sorts$Donor,'[.]'),'[',1))
#NOTE(review): result of this comparison is discarded, and meta$ej is not
#assigned until later in the script -- looks like leftover debug code; verify
paste(sorts$ej,sorts$DFOSx) %in% paste(meta$ej,meta$dfosx)
#dates containing '/' are month/day/year, otherwise day/month/year
sorts$rDate<-as_date(sapply(sorts$Visit.Date,function(xx)ifelse(grepl('/',xx),mdy(xx),dmy(xx))))
## Inflammation markers ##
trans<-read.csv('meta/London Cohort inflammation markers ELISA data for Scott 07172017.csv',stringsAsFactors=FALSE)
trans$sample<-fixDecimals(as.character(trans$Sample))
## messy all combined list
#readAllPats.R defines ejs; only source it once per session
if(!exists('ejs'))source('readAllPats.R')
## Post ART data
pbmc<-read.csv('meta/EJ post ART PBMC available.csv',header=FALSE,stringsAsFactors=FALSE)
pbmc[,1]<-trimws(pbmc[,1])
#keep only rows whose first column looks like an EJ sample id, e.g. EJ12.03
pbmc<-pbmc[grepl('^E?J?[0-9]+\\.[0-9]+$',pbmc[,1]),]
colnames(pbmc)<-c('sample','date','DFOSx','viralLoad','CD4','vials','postArt')
pbmc$ej<-sprintf('EJ%s',sub('EJ','',sub('\\.[0-9]+$','',pbmc$sample)))
pbmc$visit<-sub('.*\\.([0-9]+)$','\\1',pbmc$sample)
pbmc$vl<-as.numeric(sub('<','',pbmc$viralLoad))
pbmc$cd4<-as.numeric(sub('N/A','',pbmc$CD4))
## Additional data
more<-read.csv('meta/moreMetaData.csv',stringsAsFactors=FALSE)
more$vl<-as.numeric(sub('<','',more$VL))
more$cd4<-as.numeric(more$CD4)
#ART start dates and last follow-up dates per patient (withAs from dnar)
art<-read.csv('data/artDates.csv',stringsAsFactors=FALSE)
artDates<-withAs(xx=art[!is.na(art$date)&art$mm %in% meta$mm,],structure(dmy(xx$date),.Names=xx$mm))
#art$lastDate<-ymd(apply(art[,c('lastClinic','lastSample')],1,function(xx)if(all(is.na(xx)))return(NA)else as.character(max(dmy(xx),na.rm=TRUE))))
art$lastDate<-dmy(art$lastSample)
lastDates<-withAs(xx=art[!is.na(art$lastDate)&art$mm %in% meta$mm,],structure(xx$lastDate,.Names=xx$mm))
## Joining ##
##combine ifnMeta and meta
#newMeta<-!paste(ifnMeta$rDate,ifnMeta$mm) %in% paste(meta$rDate,meta$mm)
#minDiff<-apply(ifnMeta[newMeta,c('mm','rDate')],1,function(xx)min(c(Inf,abs(meta[meta$mm==xx[1],'rDate']-ymd(xx[2])))))
#checked manually look all distinct
#ifnMeta[newMeta,][minDiff<10,]
#meta rows whose patient/date pair is not already present in ifnMeta
newMeta<-!paste(meta$rDate,meta$mm) %in% paste(ifnMeta$rDate,ifnMeta$mm)
#distance in days from each new row to that patient's nearest ifnMeta visit
minDiff<-apply(meta[newMeta,c('mm','rDate')],1,function(xx)min(c(Inf,abs(ifnMeta[ifnMeta$mm==xx[1],'rDate']-ymd(xx[2])))))
#checked manually look distinct
meta[newMeta,][minDiff<10,]
meta$ej<-ejLookup[meta$mm]
#pad meta with ifnMeta-only columns so the two frames can be row-bound
metaMerge<-meta
metaMerge[,colnames(ifnMeta)[!colnames(ifnMeta) %in% colnames(meta)]]<-NA
metaMerge[,c('sample','date','dfosx','viralLoad')]<-metaMerge[,c('Time.Points','Date','DFOSx','VL')]
metaMerge$source<-'meta'
ifnMeta$source<-'ifn'
comboMeta<-rbind(ifnMeta,metaMerge[newMeta,colnames(ifnMeta)])
#combine ejs (drop rows with non-date text, e.g. notes, in the date column)
thisEjs<-ejs[ejs$ej %in% comboMeta$ej&!grepl('[a-z]',ejs$date),]
thisEjs$rDate<-as_date(sapply(thisEjs$date,function(xx)ifelse(grepl('/[0-9]{4}',xx),mdy(xx),dmy(xx))))
newEj<-!paste(thisEjs$ej,thisEjs$rDate) %in% paste(comboMeta$ej,comboMeta$rDate)&!grepl('HAART',thisEjs$notes)
thisEjs<-thisEjs[newEj,]
thisEjs[,c('viralLoad','sample')]<-thisEjs[,c('vl','id')]
thisEjs$visit<-trimws(sapply(strsplit(thisEjs$sample,'\\.'),'[',2))
thisEjs$mm<-sapply(thisEjs$ej,function(xx)names(ejLookup)[ejLookup==xx])
#manually excluded samples
thisEjs<-thisEjs[!thisEjs$id %in% c('EJ 85.14','EJ85.11'),]
#guard against near-duplicate visits sneaking in from the ejs list
minDiff<-apply(thisEjs[,c('mm','rDate')],1,function(xx)min(c(Inf,abs(comboMeta[comboMeta$mm==xx[1],'rDate']-ymd(xx[2])))))
if(any(minDiff<10))stop('Close date in ejs')
thisEjs[,colnames(comboMeta)[!colnames(comboMeta) %in% colnames(thisEjs)]]<-NA
thisEjs$source<-'ej'
comboMeta<-rbind(comboMeta,thisEjs[,colnames(comboMeta)])
#combine pbmc (only visits not already captured)
thisPbmc<-pbmc[pbmc$ej %in% comboMeta$ej & !paste(pbmc$ej,pbmc$visit) %in% paste(comboMeta$ej,comboMeta$visit),]
thisPbmc$rDate<-dmy(thisPbmc$date)
thisPbmc$mm<-sapply(thisPbmc$ej,function(xx)names(ejLookup)[ejLookup==xx])
thisPbmc[,colnames(comboMeta)[!colnames(comboMeta) %in% colnames(thisPbmc)]]<-NA
thisPbmc$source<-'pbmc'
comboMeta<-rbind(comboMeta,thisPbmc[,colnames(comboMeta)])
##combine additional data
thisMore<-more
thisMore$ej<-ejLookup[thisMore$mm]
thisMore$rDate<-dmy(thisMore$date)
thisMore[,colnames(comboMeta)[!colnames(comboMeta) %in% colnames(thisMore)]]<-NA
thisMore$source<-'additional'
comboMeta<-rbind(comboMeta,thisMore[,colnames(comboMeta)])
##combine trans
if(any(!trans$sample %in% sprintf('%s.%s',sub('EJ','',comboMeta$ej),comboMeta$visit)))stop('Found unknown sample in trans data')
rownames(trans)<-trans$sample
transCols<-colnames(trans)[!colnames(trans) %in% c('sample','Sample')]
if(any(transCols %in% colnames(comboMeta)))stop('Duplicate column in trans')
#attach inflammation measures to comboMeta rows by EJ sample id
comboMeta[,transCols]<-trans[sprintf('%s.%s',sub('EJ','',comboMeta$ej),comboMeta$visit),transCols]
##combine sort
#first two samples are controls
if(any(!paste(sorts$ej,sorts$rDate)[-1:-2] %in% paste(comboMeta$ej,comboMeta$rDate)))stop('Unknown sample in sorts')
sortCols<-colnames(sorts)[grepl('BST|HLA|CD38|__',colnames(sorts))]
#NOTE(review): error message says 'trans' but this checks the sort columns
if(any(sortCols %in% colnames(comboMeta)))stop('Duplicate column in trans')
rownames(sorts)<-paste(sorts$ej,sorts$rDate)
comboMeta[,sortCols]<-sorts[paste(comboMeta$ej,comboMeta$rDate),sortCols]
comboMeta<-comboMeta[order(comboMeta$mm,comboMeta$rDate),]
comboMeta$dfosx<-as.numeric(comboMeta$dfosx)
#TRUE for samples dated after the patient's ART start; patients without an
#ART date never qualify (Inf cutoff)
comboMeta$qvoa<-comboMeta$rDate>as_date(ifelse(comboMeta$mm %in% names(artDates),artDates[comboMeta$mm],Inf))
#spread of implied baseline dates per patient (printed for manual review)
sapply(by(comboMeta[,c('dfosx','rDate')],comboMeta$mm,function(xx){zz<-table(xx$rDate-xx$dfosx)}),function(xx)diff(range(ymd(names(xx)))))
#baseline date per patient = most common value of rDate minus dfosx
baseDate<-by(comboMeta[,c('dfosx','rDate')],comboMeta$mm,function(xx){zz<-table(xx$rDate-xx$dfosx);names(zz)[which.max(zz)]})
comboMeta$time<-comboMeta$rDate-ymd(baseDate[comboMeta$mm])
#manual visit correction
comboMeta[comboMeta$visit=='12 MW'&comboMeta$mm=='MM39','visit']<-'13'
#comboMeta[comboMeta$vl==37611600&!is.na(comboMeta$vl),'vl']<-NA
if(any(apply(table(comboMeta$visit,comboMeta$mm)>1,2,any)))stop('Duplicate visit found')
write.csv(comboMeta,'out/combinedMeta.csv')
tmp<-comboMeta[,c('mm','ej','date','rDate','vl','cd4','source')]
tmp$dfosx<-comboMeta$time
write.csv(tmp,'out/combinedMetadata.csv',row.names=FALSE)
#days from baseline to ART start and to last follow-up, per patient
artDfosx<-sapply(names(artDates),function(xx)artDates[xx]-ymd(baseDate[xx]))
names(artDfosx)<-names(artDates)
lastDfosx<-sapply(names(lastDates),function(xx)lastDates[xx]-ymd(baseDate[xx]))
names(lastDfosx)<-names(lastDates)
#extend last follow-up to at least the last observed sample time
for(ii in names(lastDfosx))lastDfosx[ii]<-max(as.numeric(comboMeta[comboMeta$mm==ii,'time']),lastDfosx[ii])
#custom per-sample plotting colors
customCols<-read.csv('data/Hex color no. for MM cohort colorcode.csv',stringsAsFactors=FALSE,header=FALSE)[,1:2]
customCols<-customCols[customCols[,1]!='',]
colnames(customCols)<-c('sample','color')
customCols$name<-fixDecimals(sub(' ?\\(.*$','',customCols$sample))
rownames(customCols)<-customCols$name
#compiled pre/post ART CD4 and VL workbook, one sheet per patient
wb <- loadWorkbook("meta/EJ MM CD4 VL pre and post ART 08June2018_sasm.xlsx")
vals<-lapply(getSheets(wb),function(sheet){
rows<-getCells(getRows(sheet),simplify=FALSE)
vals<-lapply(rows,function(row){
#pull the first 8 cells of each row, NA where a cell is missing
tmp<-lapply(as.character(1:8),function(xx)ifelse(any(names(row)==xx),getCellValue(row[[xx]]),NA))
#skip blank and header rows
if(is.na(tmp[[2]])&is.na(tmp[[3]]))return(NULL)
if((grepl('Date',tmp[[2]])|grepl('Date',tmp[[3]])))return(NULL)
#zero-row prototype frame fixes the column names and types
out<-data.frame('id'='999.99','origDate'='99.99.99','date'=99999,'DFOSx'=99999,'VL'=999999999,'CD4'=9999999,'ART'='','Notes'='',stringsAsFactors=FALSE)[0,]
out[1,]<-rep(NA,8)
for(ii in 1:8)if(length(tmp)>=ii)out[1,ii]<-tmp[[ii]] else out[1,ii]<-NA
return(out)
})
return(do.call(rbind,vals))
})
#stack all sheets, tagging each row with its sheet (patient) name
compiledMeta<-do.call(rbind,mapply(function(xx,yy){xx$pat<-yy;xx},vals,names(vals),SIMPLIFY=FALSE))
#manual id corrections
compiledMeta[compiledMeta$origDate=='05.01.12'&compiledMeta$id=='108.1','id']<-'108.10'
compiledMeta[compiledMeta$id=='85.12MW'&!is.na(compiledMeta$id),'id']<-'85.13'
#sheet names look like "EJxx MMyy": split into the two id systems
compiledMeta$mm<-sub('.* ','',sub('MM ','MM',compiledMeta$pat))
compiledMeta$ej<-sub(' .*','',sub('EJ ','EJ',compiledMeta$pat))
compiledMeta<-compiledMeta[compiledMeta$mm %in% mmLookup,]
#dates are Excel serial numbers
compiledMeta$rDate<-as.Date(as.numeric(compiledMeta$date),origin='1899-12-30')
compiledMeta$vl<-as.numeric(gsub(' ','',sub('<','',compiledMeta$VL)))
compiledMeta$cd4<-as.numeric(compiledMeta$CD4)
#zero-pad visit numbers in the rownames, e.g. 108.1 -> 108.01
rownames(compiledMeta)<-sapply(strsplit(sub('^[^ ]+ ','',rownames(compiledMeta)),'\\.'),function(xx)sprintf('%s.%02d',xx[1],as.numeric(xx[2])))
#sanity checks on parsed dates
if(any(is.na(compiledMeta$rDate)))stop('Problem interpreting date')
if(year(min(compiledMeta$rDate))<2000)stop('Year <2000 detected')
if(year(min(compiledMeta$rDate))>2015)stop('Year >2015 detected')
#symptom-onset date per patient = most common rDate-DFOSx (mostAbundant from dnar)
startDates<-tapply(compiledMeta$rDate-compiledMeta$DFOSx,compiledMeta$mm,mostAbundant)
compiledMeta$time<-compiledMeta$rDate-as.Date(startDates[compiledMeta$mm])
if(any(abs(compiledMeta$time-compiledMeta$DFOSx)>1))warning('Disagreement in dfosx')
#2nd column likely gives day from exposure
##WEAU patient: separate files, times anchored at symptom onset##
weauSymptomDate<-ymd('1990-06-04')
weauMeta<-read.csv('meta/weau.csv',stringsAsFactors=FALSE)
weauMeta$origDate<-weauMeta$date<-weauMeta$Date
weauMeta$rDate<-dmy(weauMeta$Date)
weauMeta$ID<-weauMeta$id<-weauMeta$visit<-1:nrow(weauMeta)
weauMeta$time<-weauMeta$rDate-weauSymptomDate
weauAdditional<-read.csv('meta/additionalWEAUMeta.csv')
weauAdditional$origDate<-weauAdditional$date<-weauAdditional$Date
weauAdditional$rDate<-mdy(weauAdditional$Date)
#continue visit numbering after the primary WEAU table
weauAdditional$ID<-weauAdditional$id<-weauAdditional$visit<-nrow(weauMeta)+1:nrow(weauAdditional)
weauAdditional$VL<-NA
weauAdditional$Time<-NA
weauAdditional$Available<-NA
weauAdditional$time<-weauAdditional$rDate-weauSymptomDate
#only keep additional samples after day 100
weauAdditional<-weauAdditional[weauAdditional$time>100,]
weauMeta<-rbind(weauMeta,weauAdditional[,colnames(weauMeta)])
#'nd' = not done
weauMeta$cd4<-as.numeric(ifelse(weauMeta$CD4=='nd',NA,weauMeta$CD4))
weauMeta$vl<-as.numeric(ifelse(weauMeta$VL=='nd',NA,weauMeta$VL))
weauMeta$DFOSx<-weauMeta$time
weauMeta$ART<-weauMeta$Notes<-NA
weauMeta$pat<-weauMeta$mm<-weauMeta$ej<-'WEAU'
rownames(weauMeta)<-weauMeta$Time.Points<-sprintf('WEAU.%02d',weauMeta$id)
weauMeta<-weauMeta[order(weauMeta$time),]
#AZT treatment window for WEAU, in days from symptom onset
aztDfosx<-list('WEAU'=ymd(c('start'='1992/01/23','end'='1994/06/01'))-weauSymptomDate)
#append WEAU to the compiled tables
compiledMeta<-rbind(compiledMeta,weauMeta[,colnames(compiledMeta)])
meta<-rbind(meta,weauMeta[,colnames(meta)])
#visit = id after the patient prefix, zero-padded when purely numeric
compiledMeta$visit<-sub('[^.]+\\.','',compiledMeta$id)
compiledMeta$visit<-ifelse(grepl('^[0-9]+$',compiledMeta$visit),sprintf('%02d',suppressWarnings(as.integer(compiledMeta$visit))),compiledMeta$visit)
#placeholder XXn sample names where no usable id exists
compiledMeta$sample<-ifelse(compiledMeta$visit==''|is.na(compiledMeta$id),sprintf('XX%s',1:nrow(compiledMeta)),paste(compiledMeta$mm,compiledMeta$visit,sep='.'))
rownames(compiledMeta)<-compiledMeta$sample
#WEAU no ART but calling first record of low CD4 as when would have initiated (day 391 original, day 371 after adjustment for symptoms)
meta$artDay<-c(artDfosx,'WEAU'=371)[meta$mm]
meta$daysBeforeArt<-meta$artDay-as.numeric(meta$DFOSx)
compiledMeta$artDay<-c(artDfosx,'WEAU'=371)[compiledMeta$mm]
compiledMeta$daysBeforeArt<-compiledMeta$artDay-as.numeric(compiledMeta$time)
#founder info; superDate presumably marks superinfection -- TODO confirm
founders<-read.csv('founder.csv',stringsAsFactors=FALSE,row.names=1)
superDate<-ymd(founders$superDate)
founders$superTime<-superDate-ymd(startDates[rownames(founders)])
write.csv(founders,'out/founders.csv')
#first time each patient's CD4 drops below 350 on two consecutive measures,
#at least 180 days after superTime; falls back to ART start when never seen
less350Time<-by(compiledMeta[!is.na(compiledMeta$cd4),],compiledMeta[!is.na(compiledMeta$cd4),'mm'],function(xx){
lastInfect<-ifelse(is.na(founders[xx$mm[1],'superTime']),1,founders[xx$mm[1],'superTime'])
#was the previous measurement also below 350?
xx$previousLess<-c(Inf,xx$cd4[-nrow(xx)])<350
out<-min(c(xx[xx$time>lastInfect+180&xx$cd4<350&xx$previousLess,'time'],Inf))
if(out==Inf)out<-NA
if(is.na(out)&!is.na(artDfosx[xx$mm[1]]))out<-artDfosx[xx$mm[1]]
return(out)
})
compiledMeta$day350<-less350Time[compiledMeta$mm]
compiledMeta$daysBefore350<-compiledMeta$day350-as.numeric(compiledMeta$time)
write.csv(compiledMeta,'out/allLongitudinalMeta.csv')
#manual consistency checks between comboMeta and compiledMeta (never run)
if(FALSE){
comboMeta[which(!paste(comboMeta$mm,comboMeta$rDate) %in% paste(compiledMeta$mm,compiledMeta$rDate) & !is.na(comboMeta$mm)&(!is.na(comboMeta$vl)|!is.na(comboMeta$cd4))),c('mm','date','rDate','time','vl','cd4','source')]
tmp<-comboMeta$vl
names(tmp)<-paste(comboMeta$mm,comboMeta$rDate)
tmp<-tmp[paste(compiledMeta$mm,compiledMeta$rDate)]
probs<-tmp!=sub('<','',compiledMeta$VL)&!is.na(tmp)
cbind(compiledMeta[probs,],tmp[probs])
tmp<-comboMeta$cd4
names(tmp)<-paste(comboMeta$mm,comboMeta$rDate)
tmp<-tmp[paste(compiledMeta$mm,compiledMeta$rDate)]
probs<-tmp!=compiledMeta$CD4&!is.na(tmp)
cbind(compiledMeta[probs,],tmp[probs])
}
|
/readMeta.R
|
no_license
|
sherrillmix/IFNDynamics
|
R
| false
| false
| 18,573
|
r
|
library(dnar)
library(xlsx)
library(lubridate)
#' Convert UK/EJ patient identifiers to MM identifiers
#'
#' Splits each id on '.', normalizes the patient prefix (strips spaces,
#' rewrites UK to EJ), translates EJ prefixes through mmLookup and
#' reassembles the id with its visit/suffix parts unchanged.
#'
#' @param xx character vector of ids, e.g. "UK12.03" or "EJ12.03"
#' @param mmLookup named character vector mapping EJ ids to MM ids
#' @param errorOnNA if TRUE (default), stop when a prefix cannot be translated
#' @return character vector of converted ids; empty input strings stay empty
convertUKMM<-function(xx,mmLookup,errorOnNA=TRUE){
  splits<-strsplit(xx,'\\.')
  splits<-lapply(splits,function(xx){
    orig<-xx
    #strsplit of an empty string can yield character(0); keep it as ''
    if(length(xx)==0)return('')
    xx[1]<-sub('UK','EJ',gsub(' ','',xx[1]))
    if(grepl('EJ',xx[1]))xx[1]<-mmLookup[xx[1]]
    #&&: scalar short-circuit condition (was the vectorized & operator)
    if(is.na(xx[1])&&errorOnNA)stop('Problem converting ID "',paste(orig,collapse='.'),'"')
    return(xx)
  })
  return(sapply(splits,paste,collapse='.'))
}
#' Normalize sample ids of the form PAT.VISIT[.extra]
#'
#' Zero-pads the visit to two digits and repairs visits recorded with a
#' spurious or missing trailing zero (e.g. 20 typed for 2, or 1 for 10),
#' using the order of visits within each patient.
#'
#' @param xx character vector of sample ids split by '.'
#' @param maxVisit visits above this are considered implausible
#' @param checkRank if TRUE, stop when repaired visits are not strictly ordered
#' @return character vector of normalized ids
fixDecimals<-function(xx,maxVisit=29,checkRank=TRUE){
  parts<-strsplit(xx,'\\.')
  patIds<-sapply(parts,'[',1)
  #letter o/O is occasionally typed in place of zero
  rawVisits<-as.numeric(sub('[oO]','0',sapply(parts,'[',2)))
  fixedVisits<-ave(rawVisits,patIds,FUN=function(vv){
    adj<-vv
    #implausibly large visit with a nonzero last digit: drop a factor of ten
    tooBig<-adj>maxVisit&adj%%10!=0
    adj[tooBig]<-adj[tooBig]/10
    #visits ending in zero may be a tenfold typo; walk backwards and shrink
    #any that exceed a later visit (stops outright at the final visit)
    zeroEnders<-which(vv%%10==0)
    for(pos in rev(zeroEnders)){
      if(pos==length(vv))break
      later<-adj[(pos+1):length(adj)]
      if(any(later<adj[pos]))adj[pos]<-adj[pos]/10
    }
    #any visit now below the running maximum probably lost a trailing zero
    tooSmall<-adj<cummax(adj)
    if(any(tooSmall))adj[tooSmall]<-adj[tooSmall]*10
    if(checkRank&&any(rank(adj)!=1:length(adj)))stop('Problem fixing visits: ',paste(vv,collapse=', '))
    adj
  })
  #reattach any extra id components after the visit, unchanged
  suffixes<-sapply(parts,function(pp)ifelse(length(pp)>2,sprintf('.%s',paste(pp[-2:-1],collapse='.')),''))
  return(sprintf('%s.%s%s',patIds,sprintf('%02d',fixedVisits),suffixes))
}
##MASTER META LIST##
#master table of cohort time points; drop first two columns, keep first six
meta1<-read.csv('data/For Scot, Complete Master table AUG.2017_meta.csv',stringsAsFactors=FALSE)[,-1:-2]
meta1<-meta1[,1:6]
#meta1$id<-fillDown(meta1$ID)
#patient id is the portion of the time point before the first '.'
meta1$id<-sapply(strsplit(meta1$Time.Points,'\\.'),'[',1)
meta1<-meta1[meta1$Time.Points!='Total number of sequences',]
rownames(meta1)<-meta1$Time.Points
#newer cohort patients; column 2 holds the time point
meta2<-read.csv('data/New MM cohort patients.csv',stringsAsFactors=FALSE)
#meta2<-meta2[meta2$Date!=''&!is.na(meta2$Date)&meta2$Date!='Date',]
meta2<-meta2[meta2[,2]!='',]
colnames(meta2)[1:2]<-c('ID','Time.Points')
colnames(meta2)[colnames(meta2)=='Viral.load']<-'VL'
colnames(meta2)[colnames(meta2)=='CD4.count']<-'CD4'
meta2<-meta2[,1:6]
#fillDown (dnar): presumably carries the last non-empty ID down over blanks -- TODO confirm
meta2$id<-fillDown(meta2$ID)
meta2$Time.Points<-sprintf('MM%s',meta2$Time.Points)
#prefix each time point with its patient id and zero-pad single-digit visits
tmp<-sub('\\.([0-9])$','.0\\1',mapply(function(xx,yy)sub('^MM[0-9]+',xx,yy),meta2$id,meta2$Time.Points))
meta2$Time.Points<-tmp
rownames(meta2)<-meta2$Time.Points
meta<-rbind(meta1,meta2)
meta$mm<-meta$id
#manual fixes for known date typos
meta$Date[meta$Date==' 12/07/2001'&meta$id=='MM14']<-'7/12/2001'
meta$Date[meta$Date=='07/0806']<-'07/08/2006'
meta$Date[meta$Date==' 08/21/08']<-'08/21/2008'
#dates with a 4-digit year are month/day/year, otherwise day/month/year
meta$rDate<-as_date(sapply(meta$Date,function(xx)ifelse(grepl('/[0-9]{4}',xx),mdy(xx),dmy(xx))))
#strip <, > and thousands commas from viral loads before conversion
meta$vl<-as.numeric(gsub('[><,]','',meta$VL))
meta$cd4<-as.numeric(meta$CD4)
meta$visit<-sapply(strsplit(rownames(meta),'\\.'),'[',2)
##CORRECTIONS and IFN###
#per-patient sheets of plasma cytokine data; each sheet is parsed into a data
#frame of sample/date/VL/CD4/IFN measurements (xlsx package workbook API)
wb <- loadWorkbook("meta/EJ MM plasma cytokine data CORRECTED updated VL CD4 Jan2018.xlsx")
rawMetas<-lapply(getSheets(wb),function(sheet){
  rows<-getCells(getRows(sheet),simplify=FALSE)
  vals<-lapply(rows,function(row){
    tmp<-sapply(row,function(xx)ifelse(is.null(xx),NA,getCellValue(xx)))
    #50 is an arbitrary width to make all rows the same length
    out<-rep(NA,50)
    names(out)<-1:50
    out[names(tmp)[names(tmp)!='']]<-tmp[names(tmp)!='']
    return(out)
  })
  #dates are either Excel numeric serials or dd.mm.yy-style strings
  dates<-sapply(vals,'[',2)
  goodDates<-!is.na(suppressWarnings(as.numeric(dates)))
  isStringDate<-all(!goodDates)
  if(isStringDate)goodDates<-grepl('[0-9]{2}[./][0-9]{2}[./][12]?[90]?[0-9]{2}',dates)
  #drop the two header rows and any row without a parseable date
  dat<-do.call(rbind,vals[1:length(vals)>2&goodDates])
  #NOTE(review): browser() left in for interactive debugging of empty sheets
  if(is.null(dat))browser()
  cols<-c('sample','date','dfosx','oldViralLoad','viralLoad','cd4','diluted','XXX','ifna1','ifna2','XXX','ifnb1','ifnb2','XXX','ifno1','ifno2','XXX','ifng1','ifng2')
  #sheets without a corrected-VL column lack oldViralLoad; sheets without a
  #third header cell carry a corrected date column instead of dfosx first
  if(!vals[[1]][[5]] %in% c('Corrected','corrected','Viral load (copies/ml)'))cols<-cols[-4]
  if(is.na(vals[[1]][[3]]))cols[3:4]<-c('newDate','dfosx')
  colnames(dat)[1:length(cols)]<-cols
  dat<-dat[,cols[cols!='XXX']]
  #BDL = below detection limit
  dat[dat=='BDL']<- 1
  dat<-as.data.frame(dat,stringsAsFactors=FALSE)
  dat[,grepl('^ifn',colnames(dat))]<- apply(dat[,grepl('^ifn',colnames(dat))],2,as.numeric)
  dat$dfosx<-as.numeric(dat$dfosx)
  if(any(colnames(dat)=='newDate')){
    #sanity check corrected dates against the originals before overwriting
    #(fixed: previously compared against nonexistent dat$oldDate, collapsing
    #to any(logical(0)) so the check could never trigger)
    if(any(!is.na(dat$newDate)&(abs(as.numeric(dat$newDate)-as.numeric(dat$date))>10)))stop('Big difference in old and new date')
    dat[!is.na(dat$newDate),'date']<-dat[!is.na(dat$newDate),'newDate']
  }
  if(isStringDate)dat$rDate<-dmy(dat$date)
  else dat$rDate<-as.Date(as.numeric(dat$date),origin='1899-12-30')
  dat$cd4<-as.numeric(ifelse(dat$cd4 %in% c('not done','no data','NA'),NA,dat$cd4))
  dat$vl<-as.numeric(ifelse(dat$viralLoad %in% c('not done','no data'),NA,gsub('[><,]','',dat$viralLoad)))
  if(any(colnames(dat)=='oldViralLoad')){
    #fall back to the uncorrected VL where no corrected value exists, after
    #checking old and new agree within a factor of two
    oldVl<-as.numeric(gsub('[<>,]','',dat$oldViralLoad))
    if(any(!is.na(dat$vl)&(abs(log2(dat$vl/oldVl))>1)))stop('Big difference in old and new vl')
    dat[is.na(dat$vl),'vl']<-oldVl[is.na(dat$vl)]
  }
  return(dat)
})
#name each parsed sheet by its sheet name and keep only the columns present in
#every sheet, ordered as in the first sheet (orderIn from dnar)
names(rawMetas)<-names(getSheets(wb))
colCounts<-table(unlist(sapply(rawMetas,colnames)))
targetCols<-names(colCounts)[colCounts==length(rawMetas)]
ifnMeta<-do.call(rbind,lapply(rawMetas,'[',targetCols[orderIn(targetCols,colnames(rawMetas[[1]]))]))
#rownames look like "EJxx MMyy.visit": split out the EJ and MM ids
ifnMeta$ej<-sapply(strsplit(rownames(ifnMeta),' '),'[',1)
ifnMeta$mm<-sapply(strsplit(rownames(ifnMeta),'[ .]'),'[',2)
#letter o/O occasionally typed for zero in sample ids
ifnMeta$sample<-sub('[oO]','0',ifnMeta$sample)
isVisitSample<-!is.na(ifnMeta$sample)&grepl('\\.',ifnMeta$sample)&!grepl('[a-zA-Z]$',ifnMeta$sample)
ifnMeta$oldSample<-ifnMeta$sample
ifnMeta$sample[isVisitSample]<-fixDecimals(ifnMeta$sample[isVisitSample])
ifnMeta$visit<-sapply(strsplit(ifnMeta$sample,'[.]'),'[',2)
ifnMeta<-ifnMeta[ifnMeta$mm %in% meta$mm,]
#fix inconsistent date formatting
ifnMeta[ifnMeta$ej=='EJ52'&ifnMeta$rDate=='2001-06-09','rDate']<-ymd('2001-09-06')
#lookup tables between MM and EJ ids
tmp<-unique(ifnMeta[,c('mm','ej')])
ejLookup<-structure(tmp$ej,.Names=tmp$mm)
#fixed: was tmp$e, which only resolved to the ej column via $ partial matching
mmLookup<-structure(tmp$mm,.Names=tmp$ej)
## cell sorting ##
sorts<-read.csv('meta/AFM MM data summary Jan2018.csv',stringsAsFactors=FALSE)
colnames(sorts)[1]<-'patient'
sorts$patient<-fillDown(sorts$patient)
#collapse the multi-row spreadsheet header into single column names and
#disambiguate repeated names with a __N suffix
newColnames<-sub('\\.\\.3\\.replicate\\.values\\.|\\.\\.\\..+cells\\.','',fillDown(ifelse(grepl('^X\\.[0-9]+$',colnames(sorts)),NA,colnames(sorts))))
newColnames<-sprintf('%s%s',newColnames,ave(newColnames,newColnames,FUN=function(xx)if(length(xx)==1)'' else sprintf('__%d',1:length(xx))))
colnames(sorts)<-newColnames
sorts$ej<-sub(' ','',sapply(strsplit(sorts$Donor,'[.]'),'[',1))
#NOTE(review): result of this comparison is discarded, and meta$ej is not
#assigned until later in the script -- looks like leftover debug code; verify
paste(sorts$ej,sorts$DFOSx) %in% paste(meta$ej,meta$dfosx)
#dates containing '/' are month/day/year, otherwise day/month/year
sorts$rDate<-as_date(sapply(sorts$Visit.Date,function(xx)ifelse(grepl('/',xx),mdy(xx),dmy(xx))))
## Inflammation markers ##
trans<-read.csv('meta/London Cohort inflammation markers ELISA data for Scott 07172017.csv',stringsAsFactors=FALSE)
trans$sample<-fixDecimals(as.character(trans$Sample))
## messy all combined list
#readAllPats.R defines ejs; only source it once per session
if(!exists('ejs'))source('readAllPats.R')
## Post ART data
pbmc<-read.csv('meta/EJ post ART PBMC available.csv',header=FALSE,stringsAsFactors=FALSE)
pbmc[,1]<-trimws(pbmc[,1])
#keep only rows whose first column looks like an EJ sample id, e.g. EJ12.03
pbmc<-pbmc[grepl('^E?J?[0-9]+\\.[0-9]+$',pbmc[,1]),]
colnames(pbmc)<-c('sample','date','DFOSx','viralLoad','CD4','vials','postArt')
pbmc$ej<-sprintf('EJ%s',sub('EJ','',sub('\\.[0-9]+$','',pbmc$sample)))
pbmc$visit<-sub('.*\\.([0-9]+)$','\\1',pbmc$sample)
pbmc$vl<-as.numeric(sub('<','',pbmc$viralLoad))
pbmc$cd4<-as.numeric(sub('N/A','',pbmc$CD4))
## Additional data
more<-read.csv('meta/moreMetaData.csv',stringsAsFactors=FALSE)
more$vl<-as.numeric(sub('<','',more$VL))
more$cd4<-as.numeric(more$CD4)
#ART start dates and last follow-up dates per patient (withAs from dnar)
art<-read.csv('data/artDates.csv',stringsAsFactors=FALSE)
artDates<-withAs(xx=art[!is.na(art$date)&art$mm %in% meta$mm,],structure(dmy(xx$date),.Names=xx$mm))
#art$lastDate<-ymd(apply(art[,c('lastClinic','lastSample')],1,function(xx)if(all(is.na(xx)))return(NA)else as.character(max(dmy(xx),na.rm=TRUE))))
art$lastDate<-dmy(art$lastSample)
lastDates<-withAs(xx=art[!is.na(art$lastDate)&art$mm %in% meta$mm,],structure(xx$lastDate,.Names=xx$mm))
## Joining ##
##combine ifnMeta and meta
#newMeta<-!paste(ifnMeta$rDate,ifnMeta$mm) %in% paste(meta$rDate,meta$mm)
#minDiff<-apply(ifnMeta[newMeta,c('mm','rDate')],1,function(xx)min(c(Inf,abs(meta[meta$mm==xx[1],'rDate']-ymd(xx[2])))))
#checked manually look all distinct
#ifnMeta[newMeta,][minDiff<10,]
#meta rows whose patient/date pair is not already present in ifnMeta
newMeta<-!paste(meta$rDate,meta$mm) %in% paste(ifnMeta$rDate,ifnMeta$mm)
#distance in days from each new row to that patient's nearest ifnMeta visit
minDiff<-apply(meta[newMeta,c('mm','rDate')],1,function(xx)min(c(Inf,abs(ifnMeta[ifnMeta$mm==xx[1],'rDate']-ymd(xx[2])))))
#checked manually look distinct
meta[newMeta,][minDiff<10,]
meta$ej<-ejLookup[meta$mm]
#pad meta with ifnMeta-only columns so the two frames can be row-bound
metaMerge<-meta
metaMerge[,colnames(ifnMeta)[!colnames(ifnMeta) %in% colnames(meta)]]<-NA
metaMerge[,c('sample','date','dfosx','viralLoad')]<-metaMerge[,c('Time.Points','Date','DFOSx','VL')]
metaMerge$source<-'meta'
ifnMeta$source<-'ifn'
comboMeta<-rbind(ifnMeta,metaMerge[newMeta,colnames(ifnMeta)])
#combine ejs (drop rows with non-date text, e.g. notes, in the date column)
thisEjs<-ejs[ejs$ej %in% comboMeta$ej&!grepl('[a-z]',ejs$date),]
thisEjs$rDate<-as_date(sapply(thisEjs$date,function(xx)ifelse(grepl('/[0-9]{4}',xx),mdy(xx),dmy(xx))))
newEj<-!paste(thisEjs$ej,thisEjs$rDate) %in% paste(comboMeta$ej,comboMeta$rDate)&!grepl('HAART',thisEjs$notes)
thisEjs<-thisEjs[newEj,]
thisEjs[,c('viralLoad','sample')]<-thisEjs[,c('vl','id')]
thisEjs$visit<-trimws(sapply(strsplit(thisEjs$sample,'\\.'),'[',2))
thisEjs$mm<-sapply(thisEjs$ej,function(xx)names(ejLookup)[ejLookup==xx])
#manually excluded samples
thisEjs<-thisEjs[!thisEjs$id %in% c('EJ 85.14','EJ85.11'),]
#guard against near-duplicate visits sneaking in from the ejs list
minDiff<-apply(thisEjs[,c('mm','rDate')],1,function(xx)min(c(Inf,abs(comboMeta[comboMeta$mm==xx[1],'rDate']-ymd(xx[2])))))
if(any(minDiff<10))stop('Close date in ejs')
thisEjs[,colnames(comboMeta)[!colnames(comboMeta) %in% colnames(thisEjs)]]<-NA
thisEjs$source<-'ej'
comboMeta<-rbind(comboMeta,thisEjs[,colnames(comboMeta)])
#combine pbmc (only visits not already captured)
thisPbmc<-pbmc[pbmc$ej %in% comboMeta$ej & !paste(pbmc$ej,pbmc$visit) %in% paste(comboMeta$ej,comboMeta$visit),]
thisPbmc$rDate<-dmy(thisPbmc$date)
thisPbmc$mm<-sapply(thisPbmc$ej,function(xx)names(ejLookup)[ejLookup==xx])
thisPbmc[,colnames(comboMeta)[!colnames(comboMeta) %in% colnames(thisPbmc)]]<-NA
thisPbmc$source<-'pbmc'
comboMeta<-rbind(comboMeta,thisPbmc[,colnames(comboMeta)])
##combine additional data
thisMore<-more
thisMore$ej<-ejLookup[thisMore$mm]
thisMore$rDate<-dmy(thisMore$date)
thisMore[,colnames(comboMeta)[!colnames(comboMeta) %in% colnames(thisMore)]]<-NA
thisMore$source<-'additional'
comboMeta<-rbind(comboMeta,thisMore[,colnames(comboMeta)])
##combine trans
if(any(!trans$sample %in% sprintf('%s.%s',sub('EJ','',comboMeta$ej),comboMeta$visit)))stop('Found unknown sample in trans data')
rownames(trans)<-trans$sample
transCols<-colnames(trans)[!colnames(trans) %in% c('sample','Sample')]
if(any(transCols %in% colnames(comboMeta)))stop('Duplicate column in trans')
#attach inflammation measures to comboMeta rows by EJ sample id
comboMeta[,transCols]<-trans[sprintf('%s.%s',sub('EJ','',comboMeta$ej),comboMeta$visit),transCols]
##combine sort
#first two samples are controls
if(any(!paste(sorts$ej,sorts$rDate)[-1:-2] %in% paste(comboMeta$ej,comboMeta$rDate)))stop('Unknown sample in sorts')
sortCols<-colnames(sorts)[grepl('BST|HLA|CD38|__',colnames(sorts))]
#NOTE(review): error message says 'trans' but this checks the sort columns
if(any(sortCols %in% colnames(comboMeta)))stop('Duplicate column in trans')
rownames(sorts)<-paste(sorts$ej,sorts$rDate)
comboMeta[,sortCols]<-sorts[paste(comboMeta$ej,comboMeta$rDate),sortCols]
comboMeta<-comboMeta[order(comboMeta$mm,comboMeta$rDate),]
comboMeta$dfosx<-as.numeric(comboMeta$dfosx)
#TRUE for samples dated after the patient's ART start; patients without an
#ART date never qualify (Inf cutoff)
comboMeta$qvoa<-comboMeta$rDate>as_date(ifelse(comboMeta$mm %in% names(artDates),artDates[comboMeta$mm],Inf))
#spread of implied baseline dates per patient (printed for manual review)
sapply(by(comboMeta[,c('dfosx','rDate')],comboMeta$mm,function(xx){zz<-table(xx$rDate-xx$dfosx)}),function(xx)diff(range(ymd(names(xx)))))
#baseline date per patient = most common value of rDate minus dfosx
baseDate<-by(comboMeta[,c('dfosx','rDate')],comboMeta$mm,function(xx){zz<-table(xx$rDate-xx$dfosx);names(zz)[which.max(zz)]})
comboMeta$time<-comboMeta$rDate-ymd(baseDate[comboMeta$mm])
#manual visit correction
comboMeta[comboMeta$visit=='12 MW'&comboMeta$mm=='MM39','visit']<-'13'
#comboMeta[comboMeta$vl==37611600&!is.na(comboMeta$vl),'vl']<-NA
if(any(apply(table(comboMeta$visit,comboMeta$mm)>1,2,any)))stop('Duplicate visit found')
write.csv(comboMeta,'out/combinedMeta.csv')
tmp<-comboMeta[,c('mm','ej','date','rDate','vl','cd4','source')]
tmp$dfosx<-comboMeta$time
write.csv(tmp,'out/combinedMetadata.csv',row.names=FALSE)
#days from baseline to ART start and to last follow-up, per patient
artDfosx<-sapply(names(artDates),function(xx)artDates[xx]-ymd(baseDate[xx]))
names(artDfosx)<-names(artDates)
lastDfosx<-sapply(names(lastDates),function(xx)lastDates[xx]-ymd(baseDate[xx]))
names(lastDfosx)<-names(lastDates)
#extend last follow-up to at least the last observed sample time
for(ii in names(lastDfosx))lastDfosx[ii]<-max(as.numeric(comboMeta[comboMeta$mm==ii,'time']),lastDfosx[ii])
#custom per-sample plotting colors
customCols<-read.csv('data/Hex color no. for MM cohort colorcode.csv',stringsAsFactors=FALSE,header=FALSE)[,1:2]
customCols<-customCols[customCols[,1]!='',]
colnames(customCols)<-c('sample','color')
customCols$name<-fixDecimals(sub(' ?\\(.*$','',customCols$sample))
rownames(customCols)<-customCols$name
#compiled pre/post ART CD4 and VL workbook, one sheet per patient
wb <- loadWorkbook("meta/EJ MM CD4 VL pre and post ART 08June2018_sasm.xlsx")
vals<-lapply(getSheets(wb),function(sheet){
rows<-getCells(getRows(sheet),simplify=FALSE)
vals<-lapply(rows,function(row){
#pull the first 8 cells of each row, NA where a cell is missing
tmp<-lapply(as.character(1:8),function(xx)ifelse(any(names(row)==xx),getCellValue(row[[xx]]),NA))
#skip blank and header rows
if(is.na(tmp[[2]])&is.na(tmp[[3]]))return(NULL)
if((grepl('Date',tmp[[2]])|grepl('Date',tmp[[3]])))return(NULL)
#zero-row prototype frame fixes the column names and types
out<-data.frame('id'='999.99','origDate'='99.99.99','date'=99999,'DFOSx'=99999,'VL'=999999999,'CD4'=9999999,'ART'='','Notes'='',stringsAsFactors=FALSE)[0,]
out[1,]<-rep(NA,8)
for(ii in 1:8)if(length(tmp)>=ii)out[1,ii]<-tmp[[ii]] else out[1,ii]<-NA
return(out)
})
return(do.call(rbind,vals))
})
#stack all sheets, tagging each row with its sheet (patient) name
compiledMeta<-do.call(rbind,mapply(function(xx,yy){xx$pat<-yy;xx},vals,names(vals),SIMPLIFY=FALSE))
#manual id corrections
compiledMeta[compiledMeta$origDate=='05.01.12'&compiledMeta$id=='108.1','id']<-'108.10'
compiledMeta[compiledMeta$id=='85.12MW'&!is.na(compiledMeta$id),'id']<-'85.13'
#sheet names look like "EJxx MMyy": split into the two id systems
compiledMeta$mm<-sub('.* ','',sub('MM ','MM',compiledMeta$pat))
compiledMeta$ej<-sub(' .*','',sub('EJ ','EJ',compiledMeta$pat))
compiledMeta<-compiledMeta[compiledMeta$mm %in% mmLookup,]
#dates are Excel serial numbers
compiledMeta$rDate<-as.Date(as.numeric(compiledMeta$date),origin='1899-12-30')
compiledMeta$vl<-as.numeric(gsub(' ','',sub('<','',compiledMeta$VL)))
compiledMeta$cd4<-as.numeric(compiledMeta$CD4)
#zero-pad visit numbers in the rownames, e.g. 108.1 -> 108.01
rownames(compiledMeta)<-sapply(strsplit(sub('^[^ ]+ ','',rownames(compiledMeta)),'\\.'),function(xx)sprintf('%s.%02d',xx[1],as.numeric(xx[2])))
#sanity checks on parsed dates
if(any(is.na(compiledMeta$rDate)))stop('Problem interpreting date')
if(year(min(compiledMeta$rDate))<2000)stop('Year <2000 detected')
if(year(min(compiledMeta$rDate))>2015)stop('Year >2015 detected')
#symptom-onset date per patient = most common rDate-DFOSx (mostAbundant from dnar)
startDates<-tapply(compiledMeta$rDate-compiledMeta$DFOSx,compiledMeta$mm,mostAbundant)
compiledMeta$time<-compiledMeta$rDate-as.Date(startDates[compiledMeta$mm])
if(any(abs(compiledMeta$time-compiledMeta$DFOSx)>1))warning('Disagreement in dfosx')
#2nd column likely gives day from exposure
##WEAU patient: separate files, times anchored at symptom onset##
weauSymptomDate<-ymd('1990-06-04')
weauMeta<-read.csv('meta/weau.csv',stringsAsFactors=FALSE)
weauMeta$origDate<-weauMeta$date<-weauMeta$Date
weauMeta$rDate<-dmy(weauMeta$Date)
weauMeta$ID<-weauMeta$id<-weauMeta$visit<-1:nrow(weauMeta)
weauMeta$time<-weauMeta$rDate-weauSymptomDate
weauAdditional<-read.csv('meta/additionalWEAUMeta.csv')
weauAdditional$origDate<-weauAdditional$date<-weauAdditional$Date
weauAdditional$rDate<-mdy(weauAdditional$Date)
#continue visit numbering after the primary WEAU table
weauAdditional$ID<-weauAdditional$id<-weauAdditional$visit<-nrow(weauMeta)+1:nrow(weauAdditional)
weauAdditional$VL<-NA
weauAdditional$Time<-NA
weauAdditional$Available<-NA
weauAdditional$time<-weauAdditional$rDate-weauSymptomDate
#only keep additional samples after day 100
weauAdditional<-weauAdditional[weauAdditional$time>100,]
weauMeta<-rbind(weauMeta,weauAdditional[,colnames(weauMeta)])
#'nd' = not done
weauMeta$cd4<-as.numeric(ifelse(weauMeta$CD4=='nd',NA,weauMeta$CD4))
weauMeta$vl<-as.numeric(ifelse(weauMeta$VL=='nd',NA,weauMeta$VL))
weauMeta$DFOSx<-weauMeta$time
weauMeta$ART<-weauMeta$Notes<-NA
weauMeta$pat<-weauMeta$mm<-weauMeta$ej<-'WEAU'
rownames(weauMeta)<-weauMeta$Time.Points<-sprintf('WEAU.%02d',weauMeta$id)
weauMeta<-weauMeta[order(weauMeta$time),]
#AZT treatment window for WEAU, in days from symptom onset
aztDfosx<-list('WEAU'=ymd(c('start'='1992/01/23','end'='1994/06/01'))-weauSymptomDate)
#append WEAU to the compiled tables
compiledMeta<-rbind(compiledMeta,weauMeta[,colnames(compiledMeta)])
meta<-rbind(meta,weauMeta[,colnames(meta)])
#visit = id after the patient prefix, zero-padded when purely numeric
compiledMeta$visit<-sub('[^.]+\\.','',compiledMeta$id)
compiledMeta$visit<-ifelse(grepl('^[0-9]+$',compiledMeta$visit),sprintf('%02d',suppressWarnings(as.integer(compiledMeta$visit))),compiledMeta$visit)
#placeholder XXn sample names where no usable id exists
compiledMeta$sample<-ifelse(compiledMeta$visit==''|is.na(compiledMeta$id),sprintf('XX%s',1:nrow(compiledMeta)),paste(compiledMeta$mm,compiledMeta$visit,sep='.'))
rownames(compiledMeta)<-compiledMeta$sample
#WEAU no ART but calling first record of low CD4 as when would have initiated (day 391 original, day 371 after adjustment for symptoms)
meta$artDay<-c(artDfosx,'WEAU'=371)[meta$mm]
meta$daysBeforeArt<-meta$artDay-as.numeric(meta$DFOSx)
compiledMeta$artDay<-c(artDfosx,'WEAU'=371)[compiledMeta$mm]
compiledMeta$daysBeforeArt<-compiledMeta$artDay-as.numeric(compiledMeta$time)
#founder info; superDate presumably marks superinfection -- TODO confirm
founders<-read.csv('founder.csv',stringsAsFactors=FALSE,row.names=1)
superDate<-ymd(founders$superDate)
founders$superTime<-superDate-ymd(startDates[rownames(founders)])
write.csv(founders,'out/founders.csv')
#first time each patient's CD4 drops below 350 on two consecutive measures,
#at least 180 days after superTime; falls back to ART start when never seen
less350Time<-by(compiledMeta[!is.na(compiledMeta$cd4),],compiledMeta[!is.na(compiledMeta$cd4),'mm'],function(xx){
lastInfect<-ifelse(is.na(founders[xx$mm[1],'superTime']),1,founders[xx$mm[1],'superTime'])
#was the previous measurement also below 350?
xx$previousLess<-c(Inf,xx$cd4[-nrow(xx)])<350
out<-min(c(xx[xx$time>lastInfect+180&xx$cd4<350&xx$previousLess,'time'],Inf))
if(out==Inf)out<-NA
if(is.na(out)&!is.na(artDfosx[xx$mm[1]]))out<-artDfosx[xx$mm[1]]
return(out)
})
compiledMeta$day350<-less350Time[compiledMeta$mm]
compiledMeta$daysBefore350<-compiledMeta$day350-as.numeric(compiledMeta$time)
write.csv(compiledMeta,'out/allLongitudinalMeta.csv')
#manual consistency checks between comboMeta and compiledMeta (never run)
if(FALSE){
comboMeta[which(!paste(comboMeta$mm,comboMeta$rDate) %in% paste(compiledMeta$mm,compiledMeta$rDate) & !is.na(comboMeta$mm)&(!is.na(comboMeta$vl)|!is.na(comboMeta$cd4))),c('mm','date','rDate','time','vl','cd4','source')]
tmp<-comboMeta$vl
names(tmp)<-paste(comboMeta$mm,comboMeta$rDate)
tmp<-tmp[paste(compiledMeta$mm,compiledMeta$rDate)]
probs<-tmp!=sub('<','',compiledMeta$VL)&!is.na(tmp)
cbind(compiledMeta[probs,],tmp[probs])
tmp<-comboMeta$cd4
names(tmp)<-paste(comboMeta$mm,comboMeta$rDate)
tmp<-tmp[paste(compiledMeta$mm,compiledMeta$rDate)]
probs<-tmp!=compiledMeta$CD4&!is.na(tmp)
cbind(compiledMeta[probs,],tmp[probs])
}
|
#' Human annotation GRCh37
#'
#' Human annotation GRCh37 from Ensembl release 75.
#'
#' @examples
#' head(grch37)
#'
#' @source \url{http://grch37.ensembl.org/}
"grch37"
|
/R/grch37.R
|
no_license
|
timknut/annotables
|
R
| false
| false
| 170
|
r
|
#' Human annotation GRCh37
#'
#' Human annotation GRCh37 from Ensembl release 75.
#'
#' @examples
#' head(grch37)
#'
#' @source \url{http://grch37.ensembl.org/}
"grch37"
|
s3 = import('s3')
test = s3$test
|
/tests/testthat/modules/s3_b.r
|
permissive
|
flying-sheep/modules
|
R
| false
| false
| 33
|
r
|
s3 = import('s3')
test = s3$test
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dim_plot.R
\name{pivot_Plot1d}
\alias{pivot_Plot1d}
\title{a wrapper module for plot in 1D}
\usage{
pivot_Plot1d(input, output, session, type = NULL, obj = NULL, proj = NULL,
minfo = NULL)
}
\description{
This is the server part of the module.
}
|
/man/pivot_Plot1d.Rd
|
no_license
|
jeevanyue/PIVOT
|
R
| false
| true
| 327
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dim_plot.R
\name{pivot_Plot1d}
\alias{pivot_Plot1d}
\title{a wrapper module for plot in 1D}
\usage{
pivot_Plot1d(input, output, session, type = NULL, obj = NULL, proj = NULL,
minfo = NULL)
}
\description{
This is the server part of the module.
}
|
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(NA, -1.121210344879e+131, NaN), Linf = -4.83594859887756e+25, MK = 2.81991272491703e-308, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(-3.0623435805879e-27, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126, 1.17820552589861e+39, -7.12442022816983e-235, -4.12197860834498e-174, 3.41570901807186e+175, -1.83850758797779e-303, 9.00286239024321e+218, -5.85373311417744e-255, -3.64455385022046e+148, -3.51797524435303e-192, 3.54728311818697e+148, -1.08070601034782e-237, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615830698-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 2,191
|
r
|
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(NA, -1.121210344879e+131, NaN), Linf = -4.83594859887756e+25, MK = 2.81991272491703e-308, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(-3.0623435805879e-27, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126, 1.17820552589861e+39, -7.12442022816983e-235, -4.12197860834498e-174, 3.41570901807186e+175, -1.83850758797779e-303, 9.00286239024321e+218, -5.85373311417744e-255, -3.64455385022046e+148, -3.51797524435303e-192, 3.54728311818697e+148, -1.08070601034782e-237, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
library(UCSCXenaTools)
### Name: downloadTCGA
### Title: Easily Download TCGA Data by Several Options
### Aliases: downloadTCGA
### ** Examples
## No test:
# download RNASeq data (use UVM as example)
downloadTCGA(project = "UVM",
data_type = "Gene Expression RNASeq",
file_type = "IlluminaHiSeq RNASeqV2")
## End(No test)
|
/data/genthat_extracted_code/UCSCXenaTools/examples/downloadTCGA.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 364
|
r
|
library(UCSCXenaTools)
### Name: downloadTCGA
### Title: Easily Download TCGA Data by Several Options
### Aliases: downloadTCGA
### ** Examples
## No test:
# download RNASeq data (use UVM as example)
downloadTCGA(project = "UVM",
data_type = "Gene Expression RNASeq",
file_type = "IlluminaHiSeq RNASeqV2")
## End(No test)
|
# Load required libraries
library(dplyr)
library(ggplot2)
library(stringr)
# Read the NEI emissions summary and the source classification table
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Subset Baltimore city (fips 24510) and Los Angeles county (fips 06037)
NEI_BAL_LA <- subset(NEI, fips == "24510"| fips == "06037")
# Keep only motor-vehicle sources: SCC.Level.Two mentions "vehicle" (case-insensitive)
SCC_MOTOR <- SCC %>% filter(str_detect(SCC.Level.Two,regex('vehicle', ignore_case = T)))
# Restrict the emissions records to motor-vehicle SCC codes via an inner merge on SCC
NEI_BAL_LA_MOTOR <- merge(NEI_BAL_LA, SCC_MOTOR, by = "SCC")
# Total emissions per year and county, with a readable county label
TOT_EMS <- group_by(NEI_BAL_LA_MOTOR, year, fips) %>% summarize(EMS = sum(Emissions)) %>%
  mutate(US_County = case_when(fips == "24510" ~ "Baltimore city", fips == "06037" ~ "Los Angeles county"))
# Plot yearly totals as points connected by lines, one colour per county
g <- qplot(year,EMS, data = TOT_EMS, color = US_County, geom=c("point","line"),
           ylab = expression("Total PM"[2.5]*" Emission in Tons"))
print(g)
dev.copy(png, file = "plot6.png") ## Copy the plot to a PNG file
dev.off() ## Close the PNG device
|
/plot6.R
|
no_license
|
BobdeTheije/ExpData-Project-2
|
R
| false
| false
| 1,062
|
r
|
# load library's needed
library(dplyr)
library(ggplot2)
library(stringr)
# read data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# subset Baltimore city and Los Angelos county data
NEI_BAL_LA <- subset(NEI, fips == "24510"| fips == "06037")
# subset motor vehicle source
SCC_MOTOR <- SCC %>% filter(str_detect(SCC.Level.Two,regex('vehicle', ignore_case = T)))
# merge data to subset NEI_BALTIMORE
NEI_BAL_LA_MOTOR <- merge(NEI_BAL_LA, SCC_MOTOR, by = "SCC")
# calculate sums of Emissions per year
TOT_EMS <- group_by(NEI_BAL_LA_MOTOR, year, fips) %>% summarize(EMS = sum(Emissions)) %>%
mutate(US_County = case_when(fips == "24510" ~ "Baltimore city", fips == "06037" ~ "Los Angeles county"))
#plot the data
g <- qplot(year,EMS, data = TOT_EMS, color = US_County, geom=c("point","line"),
ylab = expression("Total PM"[2.5]*" Emission in Tons"))
print(g)
dev.copy(png, file = "plot6.png") ## Copy my plot to a PNG file
dev.off() ## Don't forget to close the PNG device!
|
library(readxl)
library(dplyr)
library(magrittr)
library(tidyr)
library(stringr)
library(readr)
library(purrr)
# This is a test file
excel_to_longDF <- function(path, tab){
  # Read one sheet (`tab`) of a measurement workbook at `path` and reshape
  # it into a long data frame with Parameter / Well / Measurement columns
  # plus metadata (source file, concentration, experiment ID).
  # get basic info about this experiment, based on the path
  filename <- str_split(path,"/") %>% unlist()
  filename <- filename[length(filename)]
  experiment_ID <- str_split(filename,"_") %>% unlist()
  experiment_ID <- experiment_ID[3]  # third "_"-separated token of the file name
  # get the concentration based on the sheet name (strip the "uM" unit)
  conc <- str_remove(tab,"uM")
  # try reading this, if the tab does not exist, exit
  df <- read_excel(
    path=path,
    sheet=tab,
    skip=1)
  # select the relevant data: columns from `rel. to Baseline` up to (but
  # excluding) the "Normalized" columns in the standard layout
  tryCatch({
    df <- df %>% select(...1,`rel. to Baseline`:starts_with("Normalized")) %>%
      select(-starts_with("Normalized"))
  }, error = function(e){
    print(e)
  }
  )
  # hacky way to ensure that in an alternative scenario, namely a control, only the right columns are selected
  # somehow I do not manage to get this coded into tryCatch
  # NOTE(review): exactly one of the two select() calls is expected to
  # succeed per sheet layout; the failing one only prints its error.
  tryCatch({
    df <- df %>% select(...1,`rel. to Baseline`:rel) %>%
      select(-rel)
  }, error = function(e){
    print(e)
  }
  )
  # get the right column names from the first data row
  colns <- df[1,] %>% as.vector() %>% as.character()
  colns[is.na(colns)] <- "Parameter"
  colnames(df) <- colns
  # drop the first row (it held the column names)
  df <- df[2:nrow(df),]
  # pivot to a long df: one row per Parameter/Well measurement
  df <- df %>% pivot_longer(-Parameter, names_to="Well",values_to="Measurement", values_drop_na = TRUE)
  # add source information
  df <- df %>% mutate(Source_file = filename, Concentration=conc, Experiment=experiment_ID)
  return(df)
}
read_all_sheets <- function(path, conclist=concentrations){
  # Read every sheet of one workbook whose name is a recognised
  # concentration/control label, and stack them into one long data frame.
  relevant_tabs <- excel_sheets(path)
  relevant_tabs <- relevant_tabs[relevant_tabs %in% conclist]
  relevant_tabs %>%
    map_df(function(tab) excel_to_longDF(path = path, tab = tab))
}
# relevant possible sheet names include controls (EtOH, DMSO),
# and concentration levels with and without units (uM)
concentrations <- c("0.01","0.03","0.1","0.3","1","3","10","30")
concentrations <- c("EtOH","DMSO",concentrations,paste0(concentrations,"uM"))
# collect all the files in the experiments folder
exp_folder <- "data/experiment_results/"
path <- exp_folder %>% dir(pattern =".xlsx")
path <- paste0(exp_folder,path)
# map the read function over all files
df <- path %>%
map_df(~ read_all_sheets(path=.x))
# save the resulting dataframe
write_csv(df,"data/clean_data/parameter_measurements_all.csv")
|
/readdata.R
|
no_license
|
aart1/wezel
|
R
| false
| false
| 2,441
|
r
|
library(readxl)
library(dplyr)
library(magrittr)
library(tidyr)
library(stringr)
library(readr)
library(purrr)
# dit is een test file
excel_to_longDF <- function(path, tab){
# get basic info about this experiment, based on the path
filename <- str_split(path,"/") %>% unlist()
filename <- filename[length(filename)]
experiment_ID <- str_split(filename,"_") %>% unlist()
experiment_ID <- experiment_ID[3]
# get the concentration based on the sheet name
conc <- str_remove(tab,"uM")
# try reading this, if the tab does not exist, exit
df <- read_excel(
path=path,
sheet=tab,
skip=1)
# select the relevant data
tryCatch({
df <- df %>% select(...1,`rel. to Baseline`:starts_with("Normalized")) %>%
select(-starts_with("Normalized"))
}, error = function(e){
print(e)
}
)
# hacky way to ensure that in an alternative scenario, namely a control, only the right columns are selected
# somehow I do not manage to get this coded into trCatch
tryCatch({
df <- df %>% select(...1,`rel. to Baseline`:rel) %>%
select(-rel)
}, error = function(e){
print(e)
}
)
# get the right column names
colns <- df[1,] %>% as.vector() %>% as.character()
colns[is.na(colns)] <- "Parameter"
colnames(df) <- colns
# drop the first row
df <- df[2:nrow(df),]
# pivot to a long df
df <- df %>% pivot_longer(-Parameter, names_to="Well",values_to="Measurement", values_drop_na = TRUE)
# add source information
df <- df %>% mutate(Source_file = filename, Concentration=conc, Experiment=experiment_ID)
return(df)
}
read_all_sheets <- function(path, conclist=concentrations){
sheets <- excel_sheets(path)
sheets <- sheets[sheets%in%conclist]
df <- sheets %>%
map_df(~ excel_to_longDF(path = path, tab = .x))
return(df)
}
# relevant possible sheet names include controls (EtOH, DMSO),
# and concentration levels with and without units (uM)
concentrations <- c("0.01","0.03","0.1","0.3","1","3","10","30")
concentrations <- c("EtOH","DMSO",concentrations,paste0(concentrations,"uM"))
# collect all the files in the experiments folder
exp_folder <- "data/experiment_results/"
path <- exp_folder %>% dir(pattern =".xlsx")
path <- paste0(exp_folder,path)
# map the read function over all files
df <- path %>%
map_df(~ read_all_sheets(path=.x))
# save the resulting dataframe
write_csv(df,"data/clean_data/parameter_measurements_all.csv")
|
#
#
#
# Example invocation left by the author: x = snapShots[[i]]
tMax = 1
# hcols=hyCols
# Draw every hyphal segment in x (data frame with columns x0, y0, x, y, t),
# coloured by its time stamp t mapped onto the hcols colour ramp. `...` is
# forwarded to plot() for axes/limits/titles.
# NOTE(review): brewer.pal comes from RColorBrewer, loaded elsewhere; also,
# colorRamp's `alpha` argument is documented as logical, so alpha=0.5 is
# treated as TRUE -- confirm intent.
plot_hyphae = function(x, tMax=100, hcols=rev(brewer.pal(11, "Spectral")), ...) {
  segCol = data.frame(colorRamp(hcols, alpha=0.5)(x[,"t"]/max(tMax,1)))
  colnames(segCol) = c("red", "green", "blue", "alpha")
  segCols = rgb(red=segCol$red, green=segCol$green, blue=segCol$blue, maxColorValue = 255)
  plot(NA, xlab="", ylab="", ...)
  # One segments() call per row: (x0, y0) -> (x, y)
  for(i in 1:dim(x)[1])
    segments(x$x0[i], x$y0[i], x$x[i], x$y[i], col=segCols[i])
  #points(x$x0, x$y0, col = segCols, pch = 20, cex=0.3)
}
#
# Helper function for hyphal length
#
hyphal_length <- function(x) {
  # x carries named coordinates: start ("x0", "y0") and end ("x", "y").
  dx <- x["x"] - x["x0"]
  dy <- x["y"] - x["y0"]
  sqrt(dx^2 + dy^2)
}
seg_length <- function(x) {
  # Euclidean length of a segment stored positionally as (x0, y0, x1, y1).
  dx <- x[3] - x[1]
  dy <- x[4] - x[2]
  sqrt(dx^2 + dy^2)
}
corners <- function(x, w) {
  # 2x2 matrix whose rows are the lower-left (row 1) and upper-right
  # (row 2) corners of the axis-aligned square of half-width w around x.
  matrix(c(x[1] - w, x[2] - w, x[1] + w, x[2] + w),
         nrow = 2, ncol = 2, byrow = TRUE)
}
insideRAE <- function(x, r) {
  # TRUE when point x = (px, py) lies within rectangle r, given as a 2x2
  # matrix with r[1, ] the lower-left and r[2, ] the upper-right corner.
  withinX <- r[1, 1] <= x[1] & x[1] <= r[2, 1]
  withinY <- r[1, 2] <= x[2] & x[2] <= r[2, 2]
  withinX & withinY
}
insideBoundingBox <- function(x, bb) {
  # TRUE when point x = (px, py) falls inside bounding box bb = (l, b, r, t).
  # Coercion to numeric strips names/attributes from e.g. matrix rows.
  p <- as.numeric(x)
  box <- as.numeric(bb)
  box[1] <= p[1] & p[1] <= box[3] & box[2] <= p[2] & p[2] <= box[4]
}
# Does segment x = (x0, y0, x1, y1) touch bounding box bb = (l, b, r, t)?
# Relies on doSegmentsIntersect(), defined elsewhere in the project.
# With full=TRUE returns the named flag vector (b, l, t, r, s, e); with
# full=FALSE collapses the flags to a single TRUE/FALSE hit test.
hitsBB = function(bb, x, full=FALSE) {
  # Endpoint-inside tests (start, end).
  stIn = insideBoundingBox(x=x[1:2], bb=bb)
  enIn = insideBoundingBox(x=x[3:4], bb=bb)
  # The four sides of the box as segments: bottom, left, top, right.
  s1 = c(bb[c(3,2)], bb[c(1,2)])
  s2 = c(bb[c(1,2)], bb[c(1,4)])
  s3 = c(bb[c(1,4)], bb[c(3,4)])
  s4 = c(bb[c(3,4)], bb[c(3,2)])
  botX = doSegmentsIntersect(segment1=s1, segment2=x)
  lefX = doSegmentsIntersect(segment1=s2, segment2=x)
  topX = doSegmentsIntersect(segment1=s3, segment2=x)
  rigX = doSegmentsIntersect(segment1=s4, segment2=x)
  # Named flags: side crossings plus start/end containment.
  r = c(b=botX, l=lefX, t=topX, r=rigX, s=stIn, e=enIn)
  if(!full)
    r = sum(r) > 0
  return(r)
}
##############################################
# Map the RAE hit by each hyphae.
# hl:  list of segments, each at least c(x0, y0, x1, y1)
# bbs: matrix of boxes, one row = (left, bottom, right, top)
# Returns a 0/1 incidence matrix of dim length(hl) x nrow(bbs).
# Depends on getBoundingBox() and hitsBB(), defined elsewhere.
hyphae_hits = function(hl, bbs) {
  m = length(hl)
  h2b = matrix(0, m, dim(bbs)[1])
  # NOTE(review): `1:m` iterates c(1, 0) when hl is empty and would fail on
  # hl[[1]]; seq_len(m) would be safer -- confirm callers never pass empty hl.
  for(j in 1:m) {
    if(j %% 1000 == 0) print(j)  # progress indicator
    hi = hl[[j]][1:4]
    hiBB = getBoundingBox(P0=hi[1:2], P1=hi[3:4])
    # Cheap prefilter: boxes whose x/y extents overlap the segment's own
    # bounding box.
    xSAT = (hiBB[1] <= bbs[,3]) & (bbs[,1] <= hiBB[3])
    ySAT = (hiBB[2] <= bbs[,4]) & (bbs[,2] <= hiBB[4])
    bbInds = which(xSAT & ySAT)
    # Exact intersection test only on the prefiltered candidates.
    bbIndsHits = bbInds[apply(bbs[bbInds,,drop=FALSE], 1, hitsBB, x=hi)]
    h2b[j, bbIndsHits] = 1
  }
  return(h2b)
}
##############################################
#
grid_bounding_boxes <- function(w=10, xrng=c(-50,50), yrng=c(-50,50)) {
  # Regular grid of square axis-aligned boxes of width w covering the
  # given x/y ranges. One row per box: (left, bottom, right, top).
  xs <- seq(xrng[1], xrng[2], w)
  ys <- seq(yrng[1], yrng[2], w)
  ctr <- cbind(rep(xs, times = length(ys)), rep(ys, each = length(xs)))
  half <- w / 2
  cbind(ctr - half, ctr + half)
}
RAEintersection <- function(m, b, side, bb){
  # Intersection point of the line y = m*x + b with one side of the
  # rectangle bb (named vector with elements "l", "b", "r", "t" =
  # left/bottom/right/top edge positions).
  #
  # m    slope of the line; may be non-finite for a vertical segment, in
  #      which case b holds the x coordinate instead of the intercept
  # b    intercept of the line (or x position when m is non-finite)
  # side a single side name, one of "l", "b", "r", "t"
  # bb   named vector of the rectangle's four edges
  #
  # Returns c(x, y) of the intersection.
  #
  # Fix: the previous version left `p` undefined when `side` was not a
  # single valid side name, failing with an opaque "object 'p' not found"
  # error; it now stops with an informative message instead (still an
  # error, so caller-visible behaviour is compatible).
  if(length(side) != 1 || !side %in% c("l", "b", "r", "t"))
    stop("'side' must be a single value among \"l\", \"b\", \"r\", \"t\"")
  if(side %in% c("r", "l")){
    # Vertical box side: x is fixed, solve y from the line equation.
    x = bb[side]
    p = c(x, m*x+b)
  }
  else {
    # Horizontal box side: y is fixed, solve x from the line equation.
    y = bb[side]
    if(is.finite(m)) {
      p = c((y-b)/m, y)
    }
    else {
      # Vertical segment: slope undefined, b carries the x coordinate.
      p = c(b, y)
    }
  }
  return(p)
}
###########################################################
# Calculate the density in each RAE: total hyphal length inside each box.
# hl:    list of segments c(x0, y0, x1, y1, ...) with a named element "l"
#        holding the precomputed full segment length
# h2bbs: 0/1 incidence matrix from hyphae_hits() (hyphae x boxes)
# bbs:   box matrix, one row = (left, bottom, right, top)
# Segments only partly inside a box are clipped against the box sides via
# RAEintersection() before their length is accumulated.
hyphal_length_by_RAE = function(hl, h2bbs, bbs, plotting=FALSE) {
  hPerBox = colSums(h2bbs)
  d = array(0, dim(bbs)[1]) # density for each RAE
  for(j in which(hPerBox>0) ) { # All RAE that have one or more hyphae
    if(j %% 10 ==0) print(j)
    hInds = which(h2bbs[,j] > 0)
    if(plotting) polygon(x=bbs[j, c(1,1,3,3)], y=bbs[j,c(2,4,4,2)])
    if(length(hInds) > 0) { ## should be
      #segments(x0=hi[1], y0=hi[2], x1=hi[3], y1=hi[4], col=rgb(0.6,0.6,0.6,0.3))
      # bbj = getBoundingBox(P0=bbs[j,1:2], P1=bbs[j,3:4]) # same as next line
      bbj = bbs[j, ]
      names(bbj) = c("l", "b", "r", "t")
      for(i in hInds) {
        hi = hl[[i]][1:4]
        fl = hitsBB(bb=bbs[j,], x=hi, full=TRUE)
        sel = sum(fl[5:6])  # number of segment endpoints inside this box
        #print(sel)
        if(sel == 2) { # both points in RAE: use the precomputed length
          d[j] = d[j] + hl[[i]]["l"]
          if(plotting) segments(x0=hi[1], y0=hi[2], x1=hi[3], y1=hi[4], lty=1)
        }
        else {
          # linear fit to hyphae line: bm = c(intercept, slope)
          bm = c(NA, NA)
          if(abs(hi[3]-hi[1]) > 0) { ## If not a vertical segment
            bm[2] = (hi[4] - hi[2]) / (hi[3] - hi[1])
            bm[1] = hi[4] - hi[3]*bm[2]
          }
          else
            bm[1] = hi[1] # slope undefined (vertical segment). Just set b to x
          # Reset either hi(x0,y0) or hi(x,y) to the coordinate where
          # the hyphal segment and the border(s) intersect
          if(sel == 1) { # Solve one intersection
            side = names(which(fl[1:4]))[1]
            xy = RAEintersection(m=bm[2], b=bm[1], side = side, bb = bbj)
            if(fl["s"])
              hi[3:4] = xy
            else
              hi[1:2] = xy
            if(plotting) segments(x0=hi[1], y0=hi[2], x1=hi[3], y1=hi[4], lty=2)
          }
          else { # Solve 2 intersections (whole hyphae crosses this RAE)
            sides = names(which(fl[1:4]))
            # if line hits a corner/vertex, then 2 sides must be non-adjacent
            if(length(sides)==3) {
              print(paste("Warning: hyphae", i, "hits a corner"))
              if(sum(fl[c(1,3)]) == 2) sides = c("b","t")
              else sides = c("l","r")
            }
            # reset hi to be the coordinates where the hyphae line and the borders intersects
            hi[1:2] = RAEintersection(m=bm[2], b=bm[1], side = sides[1], bb = bbj)
            hi[3:4] = RAEintersection(m=bm[2], b=bm[1], side = sides[2], bb = bbj)
          }
          if(sum(!is.finite(hi))>0) { print(paste("Non-finite hi", i, hi[3]-hi[1], hi[4]-hi[2])) }
          d[j] = d[j] + seg_length(hi)
          if(plotting) segments(x0=hi[1], y0=hi[2], x1=hi[3], y1=hi[4], lty=3)
        }
      }
    }
  }
  return(d)
}
##############################################
tipExtension <- function(ktip1, ktip2, Kt, l) {
  # Saturating tip-extension rate: baseline ktip1 plus ktip2 scaled by a
  # saturation term in hyphal length l with half-saturation constant Kt.
  # Source: Lejeune et al 1995, Morphology of Trichoderma reesei QM 9414
  # in Submerged Cultures.
  saturation <- l / (l + Kt)
  ktip1 + ktip2 * (saturation)
}
tipExtensionMonod <- function(ktip1, ktip2, Kt, l, S, Ks) {
  # Tip-extension rate further limited by substrate concentration S via
  # Monod kinetics with half-saturation constant Ks.
  # Source: Lejeune et al 1995, Morphology of Trichoderma reesei QM 9414
  # in Submerged Cultures.
  baseRate <- ktip1 + ktip2 * (l / (l + Kt))
  baseRate * S / (S + Ks)
}
perpendicularDistance <- function(x, xc, yc, R){
  # Indices of segments in x (matrix with columns x0, y0, x, y) whose
  # supporting line passes within distance R of the point (xc, yc).
  # Indices with distance strictly below R are listed first, then ties at R.
  startX <- x[, "x0"]
  startY <- x[, "y0"]
  endX <- x[, "x"]
  endY <- x[, "y"]
  # Point-to-line distance formula:
  # |(y2-y1)*xc - (x2-x1)*yc + x2*y1 - y2*x1| / segment length
  num <- abs((endY - startY) * xc - (endX - startX) * yc + endX * startY - endY * startX)
  den <- sqrt((endY - startY)^2 + (endX - startX)^2)
  dist <- num / den
  unique(c(which(dist < R), which(dist == R)))
}
hyphae_hits_substrate <- function(hl, bbs){
  # Incidence matrix (hyphae x boxes): entry [j, k] is 1 when the tip of
  # hypha j (elements 3:4 of hl[[j]]) lies inside bounding box k.
  # hl:  list of segments, each at least c(x0, y0, x1, y1)
  # bbs: matrix of boxes, one row = (left, bottom, right, top)
  #
  # Fix: the previous `for (j in 1:m)` iterated c(1, 0) for an empty hl
  # and crashed on hl[[1]]; seq_len() handles the empty case correctly.
  m = length(hl)
  h2b = matrix(0, m, dim(bbs)[1])
  for (j in seq_len(m)) {
    hi = hl[[j]][c(3, 4)]
    # Tip inside a box iff left <= x <= right and bottom <= y <= top.
    xSAT = (hi[1] <= bbs[,3]) & (bbs[,1] <= hi[1])
    ySAT = (hi[2] <= bbs[,4]) & (bbs[,2] <= hi[2])
    bbInds = which(xSAT & ySAT)
    h2b[j, bbInds] = 1
  }
  return(h2b)
}
|
/functions_v1.R
|
no_license
|
BioBuilders2018/mycelium-simulations
|
R
| false
| false
| 6,958
|
r
|
#
#
#
# x = snapShots[[i]]
tMax = 1
# hcols=hyCols
plot_hyphae = function(x, tMax=100, hcols=rev(brewer.pal(11, "Spectral")), ...) {
segCol = data.frame(colorRamp(hcols, alpha=0.5)(x[,"t"]/max(tMax,1)))
colnames(segCol) = c("red", "green", "blue", "alpha")
segCols = rgb(red=segCol$red, green=segCol$green, blue=segCol$blue, maxColorValue = 255)
plot(NA, xlab="", ylab="", ...)
for(i in 1:dim(x)[1])
segments(x$x0[i], x$y0[i], x$x[i], x$y[i], col=segCols[i])
#points(x$x0, x$y0, col = segCols, pch = 20, cex=0.3)
}
#
# Helper function for hyphal length
#
hyphal_length = function(x) {
return( sqrt( (x["x"]-x["x0"])^2 + (x["y"]-x["y0"])^2 ) )
}
seg_length = function(x) {
return( sqrt( (x[3]-x[1])^2 + (x[4]-x[2])^2 ) )
}
corners = function(x, w) {
u = c(x[1]-w, x[2]-w)
v = c(x[1]+w, x[2]+w)
return(t(matrix(c(u,v),2,2)))
}
insideRAE = function(x, r) {
xSat = r[1,1] <= x[1] & x[1] <= r[2,1]
ySat = r[1,2] <= x[2] & x[2] <= r[2,2]
return(xSat & ySat)
}
insideBoundingBox = function(x, bb) {
x = as.numeric(x)
bb = as.numeric(bb)
xSat = bb[1] <= x[1] & x[1] <= bb[3]
ySat = bb[2] <= x[2] & x[2] <= bb[4]
return(xSat & ySat)
}
hitsBB = function(bb, x, full=FALSE) {
stIn = insideBoundingBox(x=x[1:2], bb=bb)
enIn = insideBoundingBox(x=x[3:4], bb=bb)
s1 = c(bb[c(3,2)], bb[c(1,2)])
s2 = c(bb[c(1,2)], bb[c(1,4)])
s3 = c(bb[c(1,4)], bb[c(3,4)])
s4 = c(bb[c(3,4)], bb[c(3,2)])
botX = doSegmentsIntersect(segment1=s1, segment2=x)
lefX = doSegmentsIntersect(segment1=s2, segment2=x)
topX = doSegmentsIntersect(segment1=s3, segment2=x)
rigX = doSegmentsIntersect(segment1=s4, segment2=x)
r = c(b=botX, l=lefX, t=topX, r=rigX, s=stIn, e=enIn)
if(!full)
r = sum(r) > 0
return(r)
}
##############################################
# Map the RAE hit by each hyphae
hyphae_hits = function(hl, bbs) {
m = length(hl)
h2b = matrix(0, m, dim(bbs)[1])
for(j in 1:m) {
if(j %% 1000 == 0) print(j)
hi = hl[[j]][1:4]
hiBB = getBoundingBox(P0=hi[1:2], P1=hi[3:4])
xSAT = (hiBB[1] <= bbs[,3]) & (bbs[,1] <= hiBB[3])
ySAT = (hiBB[2] <= bbs[,4]) & (bbs[,2] <= hiBB[4])
bbInds = which(xSAT & ySAT)
bbIndsHits = bbInds[apply(bbs[bbInds,,drop=FALSE], 1, hitsBB, x=hi)]
h2b[j, bbIndsHits] = 1
}
return(h2b)
}
##############################################
#
grid_bounding_boxes = function(w=10, xrng=c(-50,50), yrng=c(-50,50)) {
x0 = seq(xrng[1], xrng[2], w)
y0 = seq(yrng[1], yrng[2], w)
centers = cbind(rep(x0, length(y0)), rep(y0, each=length(x0)))
bbs = cbind(centers-(w/2), centers+(w/2))
return(bbs)
}
RAEintersection <- function(m, b, side, bb){
if(length(side) == 1){
if(side %in% c("r", "l")){
x = bb[side]
p = c(x, m*x+b)
}
else {
y = bb[side]
if(is.finite(m)) {
p = c((y-b)/m, y)
}
else {
p = c(b, y)
}
}
}
return(p)
}
###########################################################
# Calculate the density in each RAE
hyphal_length_by_RAE = function(hl, h2bbs, bbs, plotting=FALSE) {
hPerBox = colSums(h2bbs)
d = array(0, dim(bbs)[1]) # density for each RAE
for(j in which(hPerBox>0) ) { # All RAE that have one or more hyphae
if(j %% 10 ==0) print(j)
hInds = which(h2bbs[,j] > 0)
if(plotting) polygon(x=bbs[j, c(1,1,3,3)], y=bbs[j,c(2,4,4,2)])
if(length(hInds) > 0) { ## should be
#segments(x0=hi[1], y0=hi[2], x1=hi[3], y1=hi[4], col=rgb(0.6,0.6,0.6,0.3))
# bbj = getBoundingBox(P0=bbs[j,1:2], P1=bbs[j,3:4]) # same as next line
bbj = bbs[j, ]
names(bbj) = c("l", "b", "r", "t")
for(i in hInds) {
hi = hl[[i]][1:4]
fl = hitsBB(bb=bbs[j,], x=hi, full=TRUE)
sel = sum(fl[5:6])
#print(sel)
if(sel == 2) { # both points in RAE
d[j] = d[j] + hl[[i]]["l"]
if(plotting) segments(x0=hi[1], y0=hi[2], x1=hi[3], y1=hi[4], lty=1)
}
else {
# linear fit to hyphae line
bm = c(NA, NA)
if(abs(hi[3]-hi[1]) > 0) { ## If not a vertical segment
bm[2] = (hi[4] - hi[2]) / (hi[3] - hi[1])
bm[1] = hi[4] - hi[3]*bm[2]
}
else
bm[1] = hi[1] # slope undefined (vertical segment). Just set b to x
# Reset either hi(x0,y0) or hi(x,y) to the coordinate where
# the hyphal segment and the border(s) intersect
if(sel == 1) { # Solve one intersection
side = names(which(fl[1:4]))[1]
xy = RAEintersection(m=bm[2], b=bm[1], side = side, bb = bbj)
if(fl["s"])
hi[3:4] = xy
else
hi[1:2] = xy
if(plotting) segments(x0=hi[1], y0=hi[2], x1=hi[3], y1=hi[4], lty=2)
}
else { # Solve 2 intersections (whole hyphae crosses this RAE)
sides = names(which(fl[1:4]))
# if line hits a corner/vertex, then 2 sides must be non-adjacent
if(length(sides)==3) {
print(paste("Warning: hyphae", i, "hits a corner"))
if(sum(fl[c(1,3)]) == 2) sides = c("b","t")
else sides = c("l","r")
}
# reset hi to be the coordinates where the hyphae line and the borders intersects
hi[1:2] = RAEintersection(m=bm[2], b=bm[1], side = sides[1], bb = bbj)
hi[3:4] = RAEintersection(m=bm[2], b=bm[1], side = sides[2], bb = bbj)
}
if(sum(!is.finite(hi))>0) { print(paste("Non-finite hi", i, hi[3]-hi[1], hi[4]-hi[2])) }
d[j] = d[j] + seg_length(hi)
if(plotting) segments(x0=hi[1], y0=hi[2], x1=hi[3], y1=hi[4], lty=3)
}
}
}
}
return(d)
}
##############################################
tipExtension <- function(ktip1, ktip2, Kt, l) {
# source: Lejeune et al 1995, Morphology of Trichoderma reesei QM 9414 in Submerged Cultures
return(ktip1+ktip2*(l/(l+Kt)))
}
tipExtensionMonod <- function(ktip1, ktip2, Kt, l, S, Ks) {
# source: Lejeune et al 1995, Morphology of Trichoderma reesei QM 9414 in Submerged Cultures
return( ( ktip1+ktip2*(l/(l+Kt)) ) * S/(S+Ks) )
}
perpendicularDistance <- function(x, xc, yc, R){
x1 = x[, "x0"]
x2 = x[, "x"]
y1 = x[, "y0"]
y2 = x[, "y"]
d = (abs((y2-y1)*xc-(x2-x1)*yc+x2*y1-y2*x1))/(sqrt((y2-y1)**2+(x2-x1)**2))
return(unique(c(which(d < R), which(d==R))))
# return(d <= R)
}
hyphae_hits_substrate <- function(hl, bbs){
m = length(hl)
h2b = matrix(0, m, dim(bbs)[1])
for (j in 1:m) {
hi = hl[[j]][c(3, 4)]
xSAT = (hi[1] <= bbs[,3]) & (bbs[,1] <= hi[1])
ySAT = (hi[2] <= bbs[,4]) & (bbs[,2] <= hi[2])
bbInds = which(xSAT & ySAT)
h2b[j, bbInds] = 1
}
return(h2b)
}
|
# Fix: the test description was a leftover "multiplication works" template
# string that misdescribed what is being verified.
test_that("bed_group aggregates x and y by group", {
  ex_data <- tibble(group = rep(c("a", "b"), times = c(5, 10)),
                    x = 1:15,
                    y = 16:30)
  res <- bed_group(data = ex_data, vars = c("x", "y"),
                   funs = list(length, nrow, mean),
                   group = group)
  # One output row per group level.
  expect_equal(
    res$group,
    c("a", "b")
  )
  # `length` counts all selected values (x and y pooled) per group.
  expect_equal(
    res$length,
    c(10, 20)
  )
  # `nrow` counts input rows per group.
  expect_equal(
    res$nrow,
    c(5, 10)
  )
  # `mean` averages the pooled x and y values per group.
  expect_equal(
    res$mean,
    c(mean(c(1:5, 16:20)), mean(c(6:15, 21:30)))
  )
})
|
/tests/testthat/test-bed_group.R
|
permissive
|
USCbiostats/bedslider
|
R
| false
| false
| 542
|
r
|
test_that("multiplication works", {
ex_data <- tibble(group = rep(c("a", "b"), times = c(5, 10)),
x = 1:15,
y = 16:30)
res <- bed_group(data = ex_data, vars = c("x", "y"),
funs = list(length, nrow, mean),
group = group)
expect_equal(
res$group,
c("a", "b")
)
expect_equal(
res$length,
c(10, 20)
)
expect_equal(
res$nrow,
c(5, 10)
)
expect_equal(
res$mean,
c(mean(c(1:5, 16:20)), mean(c(6:15, 21:30)))
)
})
|
#!/usr/bin/Rscript
# Bhishan Poudel
# Jan 18, 2016
library(plotly)
# Box plots of percollege by state from ggplot2's `midwest` data set.
# NOTE(review): bare column names (x = percollege) rely on plotly's old
# non-standard evaluation; current plotly expects the formula interface
# (x = ~percollege, color = ~state) -- confirm against the installed version.
p <- plot_ly(midwest, x = percollege, color = state, type = "box")
# plotly_POST publishes the figure to your plotly account on the web
# NOTE(review): plotly_POST is defunct in recent plotly releases (replaced
# by api_create) -- verify before running.
plotly_POST(p, filename = "r-docs/midwest-boxplots", world_readable=TRUE)
/R/rprograms/plotting/plotlyExamples/plotly3.r
|
permissive
|
bhishanpdl/Programming
|
R
| false
| false
| 280
|
r
|
#!/usr/bin/Rscript
# Bhishan Poudel
# Jan 18, 2016
library(plotly)
p <- plot_ly(midwest, x = percollege, color = state, type = "box")
# plotly_POST publishes the figure to your plotly account on the web
plotly_POST(p, filename = "r-docs/midwest-boxplots", world_readable=TRUE)
|
# Poisson regressions with different offsets on the InsectSprays data.
# NOTE(review): sample(1, 72) errors at runtime ("cannot take a sample
# larger than the population when 'replace = FALSE'") -- presumably
# sample(1:72) or rep(1, 72) was intended; confirm against the quiz text.
t <- sample(1, 72)
two <- log(10) + t
fit1 <- glm(count ~ spray + offset(t), family="poisson", InsectSprays)
fit2 <- glm(count ~ spray + offset(two * t), family="poisson", InsectSprays)
summary(fit1)
summary(fit2)
/quizzes/quiz4qu5.R
|
no_license
|
BananuhBeatDown/Regression_Models
|
R
| false
| false
| 218
|
r
|
t <- sample(1, 72)
two <- log(10) + t
fit1 <- glm(count ~ spray + offset(t), family="poisson", InsectSprays)
fit2 <- glm(count ~ spray + offset(two * t), family="poisson", InsectSprays)
summary(fit1)
summary(fit2)
|
# Library setup for the bivariate COVID/poverty county maps.
# NOTE(review): install.packages() calls inside an analysis script re-install
# on every run and require network access -- better handled once outside the
# script. Several libraries (cowplot, viridis) are also loaded twice below.
library(usmap)
library(ggplot2)
library(readr)
library(lubridate)
library(maps)
library(dplyr)
library(dslabs)
library(stringr)
library(rstudioapi)
library(tidyverse) # ggplot2, dplyr, tidyr, readr, purrr, tibble
library(magrittr) # pipes
install.packages("lintr")
library(lintr) # code linting
install.packages("sf")
library(sf) # spatial data handling
library(raster) # raster handling (needed for relief)
install.packages("viridis")
library(viridis) # viridis color scale
install.packages("cowplot")
library(cowplot) # stack ggplots
library(rmarkdown)
library(ggthemes)
install.packages("ggalt")
library(ggalt)
install.packages("biscale")
library(biscale)
library(cowplot)
library(reshape2)
library(viridis)
library(RColorBrewer)
# New variable: % of population over 65 years old.
# NOTE(review): the mutate() references clean_df_updated$... columns
# directly rather than bare column names; it works but bypasses dplyr's
# data masking -- confirm this is intentional.
clean_df_updated_quantiles <- clean_df_updated %>%
  mutate(percent_over_65 = (clean_df_updated$total_over_65 / clean_df_updated$total_population)*100)
# Tercile cut points (33%/66%/100%) for each variable used in the
# bivariate maps.
summary(clean_df_updated_quantiles$cases_per_1000ppl)
case_rate_quantile<- quantile(clean_df_updated_quantiles$cases_per_1000ppl,c(0.33,0.66,1), na.rm = TRUE)
poverty_rate_quantile<- quantile(clean_df_updated_quantiles$percent_below_poverty_level,c(0.33,0.66,1), na.rm = TRUE)
high_mask_usage_quantile <-quantile(clean_df_updated_quantiles$high_mask_usage_sum,c(0.33,0.66,1), na.rm = TRUE)
household_size_quantile <-quantile(clean_df_updated_quantiles$avg_household_size,c(0.33,0.66,1), na.rm = TRUE)
percent_over_65_quantile <-quantile(clean_df_updated_quantiles$percent_over_65,c(0.33,0.66,1), na.rm = TRUE)
pop_density_quantile <-quantile(clean_df_updated_quantiles$pop_density_sq_km,c(0.33,0.66,1), na.rm = TRUE)
worked_home_quantile<-quantile(clean_df_updated_quantiles$homeoffice_per_1000ppl,c(0.33,0.66,1), na.rm = TRUE)
non_white_quantile <- quantile(clean_df_updated_quantiles$non_white_proportion ,c(0.33,0.66,1), na.rm = TRUE)
# Categorical indicators 1-3: which tercile each county falls into for
# each variable (1 = below 33rd percentile, 2 = 33rd-66th, 3 = above).
clean_df_updated_quantiles<- clean_df_updated_quantiles %>% mutate(
y= ifelse(percent_below_poverty_level<poverty_rate_quantile[1],1,ifelse(percent_below_poverty_level<poverty_rate_quantile[2],2,3)) ,
x= ifelse(cases_per_1000ppl<case_rate_quantile[1],1,ifelse(cases_per_1000ppl<case_rate_quantile[2],2,3)),
z= ifelse(high_mask_usage_sum<high_mask_usage_quantile[1],1,ifelse(high_mask_usage_sum<high_mask_usage_quantile[2],2,3)),
a= ifelse(avg_household_size<household_size_quantile[1],1,ifelse(avg_household_size<household_size_quantile[2],2,3)),
b= ifelse(percent_over_65<percent_over_65_quantile[1],1,ifelse(percent_over_65<percent_over_65_quantile[2],2,3)),
c= ifelse(pop_density_sq_km<pop_density_quantile[1],1,ifelse(pop_density_sq_km<pop_density_quantile[2],2,3)),
d= ifelse(homeoffice_per_1000ppl<worked_home_quantile[1],1,ifelse(homeoffice_per_1000ppl<worked_home_quantile[2],2,3)),
e= ifelse(non_white_proportion<non_white_quantile[1],1,ifelse(non_white_proportion<non_white_quantile[2],2,3))
)
# Transform the tercile indicator columns to plain numeric vectors
# (the ifelse codes are already numeric; this is a defensive cast).
for (code_col in c("x", "y", "z", "a", "b", "c", "d", "e")) {
  clean_df_updated_quantiles[[code_col]] <- as.numeric(clean_df_updated_quantiles[[code_col]])
}
# Collapse each pair of tercile codes (COVID case rate x one covariate)
# into a single 1..9 cell code for bivariate choropleth colouring.
# The original nested ifelse ladders implement exactly
#   code = x + 3 * (other - 1)
# (the case-rate tercile moves across columns, the covariate tercile
# across rows of the 3x3 legend); a missing tercile on either side
# yields NA, matching the ifelse behaviour.
bivariate_code <- function(case_terc, other_terc) {
  case_terc + 3 * (other_terc - 1)
}
clean_df_updated_quantiles <- clean_df_updated_quantiles %>% mutate(
  bivariate             = bivariate_code(x, y),  # cases x poverty
  bivariate_mask        = bivariate_code(x, z),  # cases x mask use
  bivariate_household   = bivariate_code(x, a),  # cases x household size
  bivariate_over_65     = bivariate_code(x, b),  # cases x percent over 65
  bivariate_pop_density = bivariate_code(x, c),  # cases x population density
  bivariate_worked_home = bivariate_code(x, d),  # cases x worked from home
  bivariate_non_white   = bivariate_code(x, e)   # cases x non-white share
)
# Load US county polygons (from the maps package via map_data) and draw
# an outline-only base map as a quick geometry sanity check.
AllCounty <- map_data("county")
AllCounty %>% ggplot(aes(x = long, y = lat, group = group)) +
geom_polygon(color = "red", fill = NA, size = .1 )
# Lower-case the county names so they match map_data()'s lower-case
# "subregion" values, then rename that column for the join.
clean_df_updated_quantiles$county = tolower(clean_df_updated_quantiles$county)
AllCounty <- AllCounty %>% rename("county" = "subregion")
# Attach the county-level statistics to every polygon vertex row.
# NOTE(review): joining on county name alone is ambiguous -- many US
# county names repeat across states (e.g. "washington"), so this join
# can duplicate or mismatch rows. Joining on state + county would be
# safer; confirm against the columns of clean_df_updated_quantiles.
AllCounty = left_join(AllCounty, clean_df_updated_quantiles, by= "county")
# ggplot needs discrete fill levels, so turn every 1..9 bivariate code
# column into a factor.
for (biv_col in c("bivariate", "bivariate_mask", "bivariate_household",
                  "bivariate_over_65", "bivariate_pop_density",
                  "bivariate_worked_home", "bivariate_non_white")) {
  AllCounty[[biv_col]] <- as.factor(AllCounty[[biv_col]])
}
# Smoke test: a univariate choropleth of the raw case rate with all axis
# decoration stripped.
ggplot(AllCounty, aes(x = long, y = lat, group = group, fill = cases_per_1000ppl)) +
  geom_polygon(color = "NA") +
  theme(panel.grid.major = element_blank(),
        panel.background = element_blank(),
        axis.title = element_blank(),
        axis.text = element_blank(),
        axis.ticks = element_blank()) +
  coord_fixed(1.3)
# ---- Bivariate map: COVID case rate x poverty ------------------------
AllCounty$bivariate <- as.factor(AllCounty$bivariate)  # no-op if already a factor
# 3x3 bivariate palette: columns follow the case rate, rows the covariate.
cbp1 <- c("#E8E8E8", "#ACE4E4", "#5AC8C8", "#DFB0D6",
          "#A5ADD3", "#5698B9", "#BE64AC", "#8C62AA", "#3B4994")
map_poverty <- ggplot(AllCounty, aes(x = long, y = lat, group = group, fill = bivariate)) +
  geom_polygon(color = "NA") +
  scale_fill_manual(values = cbp1) +
  theme(legend.position = "None",
        panel.grid.major = element_blank(),
        panel.background = element_blank(),
        axis.title = element_blank(),
        axis.text = element_blank(),
        axis.ticks = element_blank()) +
  coord_fixed(1.3) +
  labs(title = "Covid 19 rates and poverty in the US",
       subtitle = "bivariate choropleth map")
map_poverty
# Hand-built 3x3 legend: tile the 1..9 code layout with the same palette.
melt(matrix(1:9, nrow = 3))  # preview of the code layout
legendGoal <- melt(matrix(1:9, nrow = 3))
lg_poverty <- ggplot(legendGoal, aes(Var2, Var1, fill = as.factor(value))) +
  geom_tile() +
  scale_fill_manual(name = "", values = cbp1) +
  theme(legend.position = "none", axis.text = element_blank(), line = element_blank()) +
  xlab("Increasing COVID rates -->") +
  ylab("Increasing Poverty-->")
lg_poverty
# Compose: full-size map with the legend inset in the lower-left corner.
ggdraw() +
  draw_plot(map_poverty, 0, 0, 1, 1) +
  draw_plot(lg_poverty, 0.05, 0.075, 0.25, 0.25)
#################################
# ---- Bivariate map: COVID case rate x mask use -----------------------
cbp1 <- c("#E8E8E8", "#ACE4E4", "#5AC8C8", "#DFB0D6",
          "#A5ADD3", "#5698B9", "#BE64AC", "#8C62AA", "#3B4994")
map_mask <- ggplot(AllCounty, aes(x = long, y = lat, group = group, fill = bivariate_mask)) +
  geom_polygon(color = "NA") +
  scale_fill_manual(values = cbp1) +
  theme(legend.position = "None",
        panel.grid.major = element_blank(),
        panel.background = element_blank(),
        axis.title = element_blank(),
        axis.text = element_blank(),
        axis.ticks = element_blank()) +
  coord_fixed(1.3) +
  labs(title = "Covid 19 rates and mask use in the US",
       subtitle = "bivariate choropleth map")
# map_mask  # left unprinted, as in the original
# Hand-built 3x3 legend for the mask-use map.
melt(matrix(1:9, nrow = 3))  # preview of the code layout
legendGoal <- melt(matrix(1:9, nrow = 3))
lg_mask <- ggplot(legendGoal, aes(Var2, Var1, fill = as.factor(value))) +
  geom_tile() +
  scale_fill_manual(name = "", values = cbp1) +
  theme(legend.position = "none", axis.text = element_blank(), line = element_blank()) +
  xlab("Increasing COVID rates -->") +
  ylab("Increasing mask use-->")
lg_mask
# Compose: full-size map with the legend inset in the lower-left corner.
ggdraw() +
  draw_plot(map_mask, 0, 0, 1, 1) +
  draw_plot(lg_mask, 0.05, 0.075, 0.25, 0.25)
#################################
# ---- Bivariate map: COVID case rate x household size -----------------
cbp1 <- c("#E8E8E8", "#ACE4E4", "#5AC8C8", "#DFB0D6",
          "#A5ADD3", "#5698B9", "#BE64AC", "#8C62AA", "#3B4994")
map_household <- ggplot(AllCounty, aes(x = long, y = lat, group = group, fill = bivariate_household)) +
  geom_polygon(color = "NA") +
  scale_fill_manual(values = cbp1) +
  theme(legend.position = "None",
        panel.grid.major = element_blank(),
        panel.background = element_blank(),
        axis.title = element_blank(),
        axis.text = element_blank(),
        axis.ticks = element_blank()) +
  coord_fixed(1.3) +
  labs(title = "Covid 19 rates and household size in the US",
       subtitle = "bivariate choropleth map")
map_household
# (Re)attach reshape2 for melt(); harmless if it is already loaded.
library(reshape2)
# Hand-built 3x3 legend for the household-size map.
melt(matrix(1:9, nrow = 3))  # preview of the code layout
legendGoal <- melt(matrix(1:9, nrow = 3))
lg_household <- ggplot(legendGoal, aes(Var2, Var1, fill = as.factor(value))) +
  geom_tile() +
  scale_fill_manual(name = "", values = cbp1) +
  theme(legend.position = "none", axis.text = element_blank(), line = element_blank()) +
  xlab("Increasing COVID rates -->") +
  ylab("Increasing household size-->")
lg_household
# Compose: full-size map with the legend inset in the lower-left corner.
ggdraw() +
  draw_plot(map_household, 0, 0, 1, 1) +
  draw_plot(lg_household, 0.05, 0.075, 0.25, 0.25)
#################################
# ---- Bivariate map: COVID case rate x percent over 65 ----------------
cbp1 <- c("#E8E8E8", "#ACE4E4", "#5AC8C8", "#DFB0D6",
          "#A5ADD3", "#5698B9", "#BE64AC", "#8C62AA", "#3B4994")
map_65 <- ggplot(AllCounty, aes(x = long, y = lat, group = group, fill = bivariate_over_65)) +
  geom_polygon(color = "NA") +
  scale_fill_manual(values = cbp1) +
  theme(legend.position = "None",
        panel.grid.major = element_blank(),
        panel.background = element_blank(),
        axis.title = element_blank(),
        axis.text = element_blank(),
        axis.ticks = element_blank()) +
  coord_fixed(1.3) +
  labs(title = "Covid 19 rates and % over 65 in the US",
       subtitle = "bivariate choropleth map")
map_65
# Hand-built 3x3 legend for the over-65 map.
melt(matrix(1:9, nrow = 3))  # preview of the code layout
legendGoal <- melt(matrix(1:9, nrow = 3))
lg_65 <- ggplot(legendGoal, aes(Var2, Var1, fill = as.factor(value))) +
  geom_tile() +
  scale_fill_manual(name = "", values = cbp1) +
  theme(legend.position = "none", axis.text = element_blank(), line = element_blank()) +
  xlab("Increasing COVID rates -->") +
  ylab("Increasing percent over 65-->")
lg_65
# Compose: full-size map with the legend inset in the lower-left corner.
ggdraw() +
  draw_plot(map_65, 0, 0, 1, 1) +
  draw_plot(lg_65, 0.05, 0.075, 0.25, 0.25)
#################################
# ---- Bivariate map: COVID case rate x population density -------------
cbp1 <- c("#E8E8E8", "#ACE4E4", "#5AC8C8", "#DFB0D6",
          "#A5ADD3", "#5698B9", "#BE64AC", "#8C62AA", "#3B4994")
map_density <- ggplot(AllCounty, aes(x = long, y = lat, group = group, fill = bivariate_pop_density)) +
  geom_polygon(color = "NA") +
  scale_fill_manual(values = cbp1) +
  theme(legend.position = "None",
        panel.grid.major = element_blank(),
        panel.background = element_blank(),
        axis.title = element_blank(),
        axis.text = element_blank(),
        axis.ticks = element_blank()) +
  coord_fixed(1.3) +
  labs(title = "Covid 19 rates and population density the US",
       subtitle = "bivariate choropleth map")
map_density
# Hand-built 3x3 legend for the population-density map.
melt(matrix(1:9, nrow = 3))  # preview of the code layout
legendGoal <- melt(matrix(1:9, nrow = 3))
lg_density <- ggplot(legendGoal, aes(Var2, Var1, fill = as.factor(value))) +
  geom_tile() +
  scale_fill_manual(name = "", values = cbp1) +
  theme(legend.position = "none", axis.text = element_blank(), line = element_blank()) +
  xlab("Increasing COVID rates -->") +
  ylab("Increasing population density-->")
lg_density
# Compose: full-size map with the legend inset in the lower-left corner.
ggdraw() +
  draw_plot(map_density, 0, 0, 1, 1) +
  draw_plot(lg_density, 0.05, 0.075, 0.25, 0.25)
#################################
# ---- Bivariate map: COVID case rate x working from home --------------
cbp1 <- c("#E8E8E8", "#ACE4E4", "#5AC8C8", "#DFB0D6",
          "#A5ADD3", "#5698B9", "#BE64AC", "#8C62AA", "#3B4994")
map_home <- ggplot(AllCounty, aes(x = long, y = lat, group = group, fill = bivariate_worked_home)) +
  geom_polygon(color = "NA") +
  scale_fill_manual(values = cbp1) +
  theme(legend.position = "None",
        panel.grid.major = element_blank(),
        panel.background = element_blank(),
        axis.title = element_blank(),
        axis.text = element_blank(),
        axis.ticks = element_blank()) +
  coord_fixed(1.3) +
  labs(title = "Covid 19 rates and % working from home (WFH) the US",
       subtitle = "bivariate choropleth map")
map_home
# Hand-built 3x3 legend for the work-from-home map.
melt(matrix(1:9, nrow = 3))  # preview of the code layout
legendGoal <- melt(matrix(1:9, nrow = 3))
lg_home <- ggplot(legendGoal, aes(Var2, Var1, fill = as.factor(value))) +
  geom_tile() +
  scale_fill_manual(name = "", values = cbp1) +
  theme(legend.position = "none", axis.text = element_blank(), line = element_blank()) +
  xlab("Increasing COVID rates -->") +
  ylab("Increasing WFH-->")
lg_home
# Compose: full-size map with the legend inset in the lower-left corner.
ggdraw() +
  draw_plot(map_home, 0, 0, 1, 1) +
  draw_plot(lg_home, 0.05, 0.075, 0.25, 0.25)
# ---- Bivariate map: COVID case rate x share of non-white residents ---
map_race <- AllCounty %>% ggplot(aes(x = long, y = lat, group = group, fill = bivariate_non_white)) +
  geom_polygon(color = "NA") +
  theme(legend.position = "None",
        panel.grid.major = element_blank(),
        panel.background = element_blank(),
        axis.title = element_blank(),
        axis.text = element_blank(),
        axis.ticks = element_blank()) +
  coord_fixed(1.3) +
  labs(
    title = "Covid 19 rates and race in the US",
    subtitle = "bivariate choropleth map")
cbp1 <- c("#E8E8E8", "#ACE4E4", "#5AC8C8", "#DFB0D6",
          "#A5ADD3", "#5698B9", "#BE64AC", "#8C62AA", "#3B4994")
# BUG FIX: the original added the palette to map_poverty (copy-paste
# slip: `map_race <- map_poverty + ...`), so the race map was never
# styled and the poverty map was displayed instead. Apply the palette
# to map_race itself.
map_race <- map_race + scale_fill_manual(values = cbp1)
map_race
# Hand-built 3x3 legend for the race map.
melt(matrix(1:9, nrow = 3))  # preview of the code layout
legendGoal <- melt(matrix(1:9, nrow = 3))
lg_race <- ggplot(legendGoal, aes(Var2, Var1, fill = as.factor(value))) +
  geom_tile() +
  scale_fill_manual(name = "", values = cbp1) +
  theme(legend.position = "none", axis.text = element_blank(), line = element_blank()) +
  xlab("Increasing COVID rates -->") +
  ylab("Increasing Prop non-White-->")
lg_race
# Compose: full-size map with the legend inset in the lower-left corner.
ggdraw() +
  draw_plot(map_race, 0, 0, 1, 1) +
  draw_plot(lg_race, 0.05, 0.075, 0.25, 0.25)
|
/all_variables_bivariate_maps.R
|
no_license
|
kbhangdia/BST260GroupProject_COVID
|
R
| false
| false
| 25,453
|
r
|
# Package setup --------------------------------------------------------
library(usmap)
library(ggplot2)
library(readr)
library(lubridate)
library(maps)
library(dplyr)
library(dslabs)
library(stringr)
library(rstudioapi)
library(tidyverse) # ggplot2, dplyr, tidyr, readr, purrr, tibble
library(magrittr)  # pipes
# Install on demand instead of unconditionally reinstalling on every run
# (the original called install.packages() each time the script ran).
for (pkg in c("lintr", "sf", "viridis", "cowplot", "ggalt", "biscale")) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
}
library(lintr)    # code linting
library(sf)       # spatial data handling
library(raster)   # raster handling (needed for relief)
library(viridis)  # viridis color scale
library(cowplot)  # stack ggplots
library(rmarkdown)
library(ggthemes)
library(ggalt)
library(biscale)
library(reshape2)
library(RColorBrewer)
# NOTE: duplicate library(cowplot)/library(viridis) calls removed --
# library() is idempotent, so behaviour is unchanged.
# New variable: share of each county's population aged over 65.
# Use bare column names inside mutate() -- indexing the source frame with
# `$` inside mutate defeats dplyr's data masking and silently breaks if
# the pipeline ever filters or reorders rows first. Result is identical.
clean_df_updated_quantiles <- clean_df_updated %>%
  mutate(percent_over_65 = total_over_65 / total_population * 100)
# Tercile cut points (33rd / 66th / 100th percentiles) for every variable
# that feeds a bivariate map; NA values are dropped when computing cuts.
summary(clean_df_updated_quantiles$cases_per_1000ppl)
terc_probs <- c(0.33, 0.66, 1)
case_rate_quantile <- quantile(clean_df_updated_quantiles$cases_per_1000ppl, terc_probs, na.rm = TRUE)
poverty_rate_quantile <- quantile(clean_df_updated_quantiles$percent_below_poverty_level, terc_probs, na.rm = TRUE)
high_mask_usage_quantile <- quantile(clean_df_updated_quantiles$high_mask_usage_sum, terc_probs, na.rm = TRUE)
household_size_quantile <- quantile(clean_df_updated_quantiles$avg_household_size, terc_probs, na.rm = TRUE)
percent_over_65_quantile <- quantile(clean_df_updated_quantiles$percent_over_65, terc_probs, na.rm = TRUE)
pop_density_quantile <- quantile(clean_df_updated_quantiles$pop_density_sq_km, terc_probs, na.rm = TRUE)
worked_home_quantile <- quantile(clean_df_updated_quantiles$homeoffice_per_1000ppl, terc_probs, na.rm = TRUE)
non_white_quantile <- quantile(clean_df_updated_quantiles$non_white_proportion, terc_probs, na.rm = TRUE)
# Categorical variable 1-3 to represent the three terciles of each mapped
# variable.
# tercile_code: 1 = strictly below the first cut, 2 = strictly below the
# second cut, 3 = otherwise. findInterval(v, cuts) counts the cut points
# that are <= v, so adding 1 reproduces the original nested ifelse()
# exactly (including at the boundaries); NA inputs stay NA.
tercile_code <- function(v, cuts) findInterval(v, cuts[1:2]) + 1
clean_df_updated_quantiles <- clean_df_updated_quantiles %>% mutate(
  y = tercile_code(percent_below_poverty_level, poverty_rate_quantile),
  x = tercile_code(cases_per_1000ppl, case_rate_quantile),
  z = tercile_code(high_mask_usage_sum, high_mask_usage_quantile),
  a = tercile_code(avg_household_size, household_size_quantile),
  b = tercile_code(percent_over_65, percent_over_65_quantile),
  c = tercile_code(pop_density_sq_km, pop_density_quantile),
  d = tercile_code(homeoffice_per_1000ppl, worked_home_quantile),
  e = tercile_code(non_white_proportion, non_white_quantile)
)
# Transform the tercile indicator columns to plain numeric vectors
# (the ifelse codes are already numeric; this is a defensive cast).
for (code_col in c("x", "y", "z", "a", "b", "c", "d", "e")) {
  clean_df_updated_quantiles[[code_col]] <- as.numeric(clean_df_updated_quantiles[[code_col]])
}
# Collapse each pair of tercile codes (COVID case rate x one covariate)
# into a single 1..9 cell code for bivariate choropleth colouring.
# The original nested ifelse ladders implement exactly
#   code = x + 3 * (other - 1)
# (the case-rate tercile moves across columns, the covariate tercile
# across rows of the 3x3 legend); a missing tercile on either side
# yields NA, matching the ifelse behaviour.
bivariate_code <- function(case_terc, other_terc) {
  case_terc + 3 * (other_terc - 1)
}
clean_df_updated_quantiles <- clean_df_updated_quantiles %>% mutate(
  bivariate             = bivariate_code(x, y),  # cases x poverty
  bivariate_mask        = bivariate_code(x, z),  # cases x mask use
  bivariate_household   = bivariate_code(x, a),  # cases x household size
  bivariate_over_65     = bivariate_code(x, b),  # cases x percent over 65
  bivariate_pop_density = bivariate_code(x, c),  # cases x population density
  bivariate_worked_home = bivariate_code(x, d),  # cases x worked from home
  bivariate_non_white   = bivariate_code(x, e)   # cases x non-white share
)
# Load US county polygons (from the maps package via map_data) and draw
# an outline-only base map as a quick geometry sanity check.
AllCounty <- map_data("county")
AllCounty %>% ggplot(aes(x = long, y = lat, group = group)) +
geom_polygon(color = "red", fill = NA, size = .1 )
# Lower-case the county names so they match map_data()'s lower-case
# "subregion" values, then rename that column for the join.
clean_df_updated_quantiles$county = tolower(clean_df_updated_quantiles$county)
AllCounty <- AllCounty %>% rename("county" = "subregion")
# Attach the county-level statistics to every polygon vertex row.
# NOTE(review): joining on county name alone is ambiguous -- many US
# county names repeat across states (e.g. "washington"), so this join
# can duplicate or mismatch rows. Joining on state + county would be
# safer; confirm against the columns of clean_df_updated_quantiles.
AllCounty = left_join(AllCounty, clean_df_updated_quantiles, by= "county")
# ggplot needs discrete fill levels, so turn every 1..9 bivariate code
# column into a factor.
for (biv_col in c("bivariate", "bivariate_mask", "bivariate_household",
                  "bivariate_over_65", "bivariate_pop_density",
                  "bivariate_worked_home", "bivariate_non_white")) {
  AllCounty[[biv_col]] <- as.factor(AllCounty[[biv_col]])
}
# Smoke test: a univariate choropleth of the raw case rate with all axis
# decoration stripped.
ggplot(AllCounty, aes(x = long, y = lat, group = group, fill = cases_per_1000ppl)) +
  geom_polygon(color = "NA") +
  theme(panel.grid.major = element_blank(),
        panel.background = element_blank(),
        axis.title = element_blank(),
        axis.text = element_blank(),
        axis.ticks = element_blank()) +
  coord_fixed(1.3)
# ---- Bivariate map: COVID case rate x poverty ------------------------
AllCounty$bivariate <- as.factor(AllCounty$bivariate)  # no-op if already a factor
# 3x3 bivariate palette: columns follow the case rate, rows the covariate.
cbp1 <- c("#E8E8E8", "#ACE4E4", "#5AC8C8", "#DFB0D6",
          "#A5ADD3", "#5698B9", "#BE64AC", "#8C62AA", "#3B4994")
map_poverty <- ggplot(AllCounty, aes(x = long, y = lat, group = group, fill = bivariate)) +
  geom_polygon(color = "NA") +
  scale_fill_manual(values = cbp1) +
  theme(legend.position = "None",
        panel.grid.major = element_blank(),
        panel.background = element_blank(),
        axis.title = element_blank(),
        axis.text = element_blank(),
        axis.ticks = element_blank()) +
  coord_fixed(1.3) +
  labs(title = "Covid 19 rates and poverty in the US",
       subtitle = "bivariate choropleth map")
map_poverty
# Hand-built 3x3 legend: tile the 1..9 code layout with the same palette.
melt(matrix(1:9, nrow = 3))  # preview of the code layout
legendGoal <- melt(matrix(1:9, nrow = 3))
lg_poverty <- ggplot(legendGoal, aes(Var2, Var1, fill = as.factor(value))) +
  geom_tile() +
  scale_fill_manual(name = "", values = cbp1) +
  theme(legend.position = "none", axis.text = element_blank(), line = element_blank()) +
  xlab("Increasing COVID rates -->") +
  ylab("Increasing Poverty-->")
lg_poverty
# Compose: full-size map with the legend inset in the lower-left corner.
ggdraw() +
  draw_plot(map_poverty, 0, 0, 1, 1) +
  draw_plot(lg_poverty, 0.05, 0.075, 0.25, 0.25)
#################################
# ---- Bivariate map: COVID case rate x mask use -----------------------
cbp1 <- c("#E8E8E8", "#ACE4E4", "#5AC8C8", "#DFB0D6",
          "#A5ADD3", "#5698B9", "#BE64AC", "#8C62AA", "#3B4994")
map_mask <- ggplot(AllCounty, aes(x = long, y = lat, group = group, fill = bivariate_mask)) +
  geom_polygon(color = "NA") +
  scale_fill_manual(values = cbp1) +
  theme(legend.position = "None",
        panel.grid.major = element_blank(),
        panel.background = element_blank(),
        axis.title = element_blank(),
        axis.text = element_blank(),
        axis.ticks = element_blank()) +
  coord_fixed(1.3) +
  labs(title = "Covid 19 rates and mask use in the US",
       subtitle = "bivariate choropleth map")
# map_mask  # left unprinted, as in the original
# Hand-built 3x3 legend for the mask-use map.
melt(matrix(1:9, nrow = 3))  # preview of the code layout
legendGoal <- melt(matrix(1:9, nrow = 3))
lg_mask <- ggplot(legendGoal, aes(Var2, Var1, fill = as.factor(value))) +
  geom_tile() +
  scale_fill_manual(name = "", values = cbp1) +
  theme(legend.position = "none", axis.text = element_blank(), line = element_blank()) +
  xlab("Increasing COVID rates -->") +
  ylab("Increasing mask use-->")
lg_mask
# Compose: full-size map with the legend inset in the lower-left corner.
ggdraw() +
  draw_plot(map_mask, 0, 0, 1, 1) +
  draw_plot(lg_mask, 0.05, 0.075, 0.25, 0.25)
#################################
# ---- Bivariate map: COVID case rate x household size -----------------
cbp1 <- c("#E8E8E8", "#ACE4E4", "#5AC8C8", "#DFB0D6",
          "#A5ADD3", "#5698B9", "#BE64AC", "#8C62AA", "#3B4994")
map_household <- ggplot(AllCounty, aes(x = long, y = lat, group = group, fill = bivariate_household)) +
  geom_polygon(color = "NA") +
  scale_fill_manual(values = cbp1) +
  theme(legend.position = "None",
        panel.grid.major = element_blank(),
        panel.background = element_blank(),
        axis.title = element_blank(),
        axis.text = element_blank(),
        axis.ticks = element_blank()) +
  coord_fixed(1.3) +
  labs(title = "Covid 19 rates and household size in the US",
       subtitle = "bivariate choropleth map")
map_household
# (Re)attach reshape2 for melt(); harmless if it is already loaded.
library(reshape2)
# Hand-built 3x3 legend for the household-size map.
melt(matrix(1:9, nrow = 3))  # preview of the code layout
legendGoal <- melt(matrix(1:9, nrow = 3))
lg_household <- ggplot(legendGoal, aes(Var2, Var1, fill = as.factor(value))) +
  geom_tile() +
  scale_fill_manual(name = "", values = cbp1) +
  theme(legend.position = "none", axis.text = element_blank(), line = element_blank()) +
  xlab("Increasing COVID rates -->") +
  ylab("Increasing household size-->")
lg_household
# Compose: full-size map with the legend inset in the lower-left corner.
ggdraw() +
  draw_plot(map_household, 0, 0, 1, 1) +
  draw_plot(lg_household, 0.05, 0.075, 0.25, 0.25)
#################################
# ---- Bivariate map: COVID case rate x percent over 65 ----------------
cbp1 <- c("#E8E8E8", "#ACE4E4", "#5AC8C8", "#DFB0D6",
          "#A5ADD3", "#5698B9", "#BE64AC", "#8C62AA", "#3B4994")
map_65 <- ggplot(AllCounty, aes(x = long, y = lat, group = group, fill = bivariate_over_65)) +
  geom_polygon(color = "NA") +
  scale_fill_manual(values = cbp1) +
  theme(legend.position = "None",
        panel.grid.major = element_blank(),
        panel.background = element_blank(),
        axis.title = element_blank(),
        axis.text = element_blank(),
        axis.ticks = element_blank()) +
  coord_fixed(1.3) +
  labs(title = "Covid 19 rates and % over 65 in the US",
       subtitle = "bivariate choropleth map")
map_65
# Hand-built 3x3 legend for the over-65 map.
melt(matrix(1:9, nrow = 3))  # preview of the code layout
legendGoal <- melt(matrix(1:9, nrow = 3))
lg_65 <- ggplot(legendGoal, aes(Var2, Var1, fill = as.factor(value))) +
  geom_tile() +
  scale_fill_manual(name = "", values = cbp1) +
  theme(legend.position = "none", axis.text = element_blank(), line = element_blank()) +
  xlab("Increasing COVID rates -->") +
  ylab("Increasing percent over 65-->")
lg_65
# Compose: full-size map with the legend inset in the lower-left corner.
ggdraw() +
  draw_plot(map_65, 0, 0, 1, 1) +
  draw_plot(lg_65, 0.05, 0.075, 0.25, 0.25)
#################################
# ---- Bivariate map: COVID case rate x population density -------------
cbp1 <- c("#E8E8E8", "#ACE4E4", "#5AC8C8", "#DFB0D6",
          "#A5ADD3", "#5698B9", "#BE64AC", "#8C62AA", "#3B4994")
map_density <- ggplot(AllCounty, aes(x = long, y = lat, group = group, fill = bivariate_pop_density)) +
  geom_polygon(color = "NA") +
  scale_fill_manual(values = cbp1) +
  theme(legend.position = "None",
        panel.grid.major = element_blank(),
        panel.background = element_blank(),
        axis.title = element_blank(),
        axis.text = element_blank(),
        axis.ticks = element_blank()) +
  coord_fixed(1.3) +
  labs(title = "Covid 19 rates and population density the US",
       subtitle = "bivariate choropleth map")
map_density
# Hand-built 3x3 legend for the population-density map.
melt(matrix(1:9, nrow = 3))  # preview of the code layout
legendGoal <- melt(matrix(1:9, nrow = 3))
lg_density <- ggplot(legendGoal, aes(Var2, Var1, fill = as.factor(value))) +
  geom_tile() +
  scale_fill_manual(name = "", values = cbp1) +
  theme(legend.position = "none", axis.text = element_blank(), line = element_blank()) +
  xlab("Increasing COVID rates -->") +
  ylab("Increasing population density-->")
lg_density
# Compose: full-size map with the legend inset in the lower-left corner.
ggdraw() +
  draw_plot(map_density, 0, 0, 1, 1) +
  draw_plot(lg_density, 0.05, 0.075, 0.25, 0.25)
#################################
# Bivariate choropleth: COVID-19 case rates vs. share working from home.
map_home <- AllCounty %>%
  ggplot(aes(x = long, y = lat, group = group, fill = bivariate_worked_home)) +
  geom_polygon(color = "NA") +
  theme(legend.position = "None",
        panel.grid.major = element_blank(),
        panel.background = element_blank(),
        axis.title = element_blank(),
        axis.text = element_blank(),
        axis.ticks = element_blank()) +
  coord_fixed(1.3) +
  labs(title = "Covid 19 rates and % working from home (WFH) the US",
       subtitle = "bivariate choropleth map")
cbp1 <- c("#E8E8E8", "#ACE4E4", "#5AC8C8", "#DFB0D6",
          "#A5ADD3", "#5698B9", "#BE64AC", "#8C62AA", "#3B4994")
map_home <- map_home + scale_fill_manual(values = cbp1)
map_home
# Build the square 3x3 legend for the cases x work-from-home map.
melt(matrix(1:9, nrow = 3))
legendGoal <- melt(matrix(1:9, nrow = 3))
test <- ggplot(legendGoal, aes(Var2, Var1, fill = as.factor(value))) +
  geom_tile() +
  scale_fill_manual(name = "", values = cbp1)
lg_home <- test +
  theme(legend.position = "none",
        axis.text = element_blank(),
        line = element_blank()) +
  labs(x = "Increasing COVID rates -->",
       y = "Increasing WFH-->")
lg_home
# Compose the map with its legend inset in the lower-left corner.
ggdraw() +
  draw_plot(map_home, 0, 0, 1, 1) +
  draw_plot(lg_home, 0.05, 0.075, 0.25, 0.25)
# Bivariate choropleth: COVID-19 case rates vs. race (share non-White).
# (Original comments said "poverty"; the fill variable is bivariate_non_white.)
map_race <- AllCounty %>% ggplot(aes(x = long, y = lat, group = group, fill = bivariate_non_white)) +
  geom_polygon(color = "NA") +
  theme(legend.position = "None",
        panel.grid.major = element_blank(),
        panel.background = element_blank(),
        axis.title = element_blank(),
        axis.text = element_blank(),
        axis.ticks = element_blank()) +
  coord_fixed(1.3) +
  labs(
    title = "Covid 19 rates and race in the US",
    subtitle = "bivariate choropleth map")
cbp1 <- c("#E8E8E8", "#ACE4E4", "#5AC8C8", "#DFB0D6",
          "#A5ADD3", "#5698B9", "#BE64AC", "#8C62AA", "#3B4994")
# BUG FIX: this previously did `map_race <- map_poverty + scale_fill_manual(...)`,
# applying the palette to a plot from a different section, so `map_race`
# silently became the poverty map instead of the race map.
map_race <- map_race + scale_fill_manual(values = cbp1)
map_race
# Build the square 3x3 legend for the cases x race map.
melt(matrix(1:9, nrow = 3))
legendGoal = melt(matrix(1:9, nrow = 3))
test <- ggplot(legendGoal, aes(Var2, Var1, fill = as.factor(value))) + geom_tile()
test <- test + scale_fill_manual(name = "", values = cbp1)
lg_race <- test + theme(legend.position = "none", axis.text = element_blank(), line = element_blank()) + xlab("Increasing COVID rates -->") + ylab("Increasing Prop non-White-->")
lg_race
# Compose the map with its legend inset in the lower-left corner.
ggdraw() +
  draw_plot(map_race, 0, 0, 1, 1) +
  draw_plot(lg_race, 0.05, 0.075, 0.25, 0.25)
|
# Pedigree check (TOPMed pipeline): read a pedigree file, normalize its
# columns, iteratively repair structural problems reported by
# GWASTools::pedigreeCheck(), and save expected relative-pair categories.
library(argparser)
library(TopmedPipeline)
library(GWASTools)
library(dplyr)
library(tidyr)
sessionInfo()
# Single positional command-line argument: path to the pipeline config file.
argp <- arg_parser("Pedigree check")
argp <- add_argument(argp, "config", help="path to config file")
argv <- parse_args(argp)
config <- readConfig(argv$config)
required <- c("pedigree_file")
optional <- c("concat_family_individ"=FALSE,
              "out_file"="exp_rel.RData",
              "err_file"="ped_errs.RData")
config <- setConfigDefaults(config, required, optional)
print(config)
pedfile <- config["pedigree_file"]
# Pick a field separator from the file extension; "" lets read.table split on
# arbitrary whitespace for anything that is not .csv/.txt.
sep <- switch(tools::file_ext(pedfile),
              csv = ",",
              txt = "\t",
              "")
# PLINK-style .fam files carry no header row; other formats are assumed to.
hdr <- !(tools::file_ext(pedfile) == "fam")
ped <- read.table(pedfile, sep=sep, header=hdr, comment.char="#", na.strings=c("", "NA"), as.is=TRUE, fill=TRUE, blank.lines.skip=TRUE)
head(ped)
# if this is a dbGaP file, strip out dbGaP subject ID so we only have one ID column
if (any(grepl("^dbGaP", names(ped)))) {
  ped <- ped %>%
    select(-starts_with("dbGaP"))
}
# Check column names: locate each expected field by substring match on the
# lower-cased header; NA when the match is missing or ambiguous.
names(ped) <- tolower(names(ped))
cols <- lapply(c("fam", "subj", "father", "mother", "sex"),
               function(x) {
                 i <- which(grepl(x, names(ped)))
                 if (length(i) == 1) return(i) else return(NA)
               })
names(cols) <- c("family", "individ", "father", "mother", "sex")
cols <- unlist(cols)
# Fall back to the conventional positional order for any unmatched column.
if (is.na(cols["family"])) cols["family"] <- 1
if (is.na(cols["individ"])) cols["individ"] <- 2
if (is.na(cols["father"])) cols["father"] <- 3
if (is.na(cols["mother"])) cols["mother"] <- 4
if (is.na(cols["sex"])) cols["sex"] <- 5
if (!setequal(cols, 1:5)) {
  stop("Cannot parse pedigree file. Columns should be FAMILY_ID, SUBJECT_ID, FATHER, MOTHER, SEX.")
}
ped <- ped[,unname(cols)]
names(ped) <- names(cols)
# set mother and father ID for founders to 0
ped <- ped %>%
  mutate(father=ifelse(is.na(father), 0, father),
         mother=ifelse(is.na(mother), 0, mother))
# Standardize sex to "M"/"F": numeric coding indexes c("M","F"), i.e. 1=M,
# 2=F; "male"/"female" strings are truncated to their upper-cased initial.
if (is.numeric(ped$sex)) {
  ped <- ped %>%
    mutate(sex=c("M", "F")[sex])
} else {
  if (any(tolower(ped$sex) %in% c("male", "female"))) {
    ped <- ped %>%
      mutate(sex=toupper(substr(sex,1,1)))
  }
}
head(ped)
# Make sure individ is unique across families by prefixing IDs with the
# family ID when the config requests it (founder "0" IDs are kept as 0).
if (as.logical(config["concat_family_individ"])) {
  ped <- ped %>%
    mutate(individ=paste(family, individ, sep="_"),
           father=ifelse(father == 0, 0, paste(family, father, sep="_")),
           mother=ifelse(mother == 0, 0, paste(family, mother, sep="_")))
}
# Check for pedigree errors; pedigreeCheck returns NULL when clean, otherwise
# a named list of per-problem tables which is saved for later inspection.
chk <- pedigreeCheck(ped)
names(chk)
if (!is.null(chk)) {
  save(chk, file=config["err_file"])
}
# Exact duplicate rows are dropped; conflicting duplicates are fatal.
if ("duplicates" %in% names(chk)) {
  if (!all(chk$duplicates$match)) {
    stop("Pedigree has duplicate subjects with conflicting data.")
  }
  ped <- distinct(ped)
}
# Rows with exactly one known parent get a synthetic ID for the missing one,
# unique per row via row_number().
if ("unknown.parent.rows" %in% names(chk)) {
  ped <- ped %>%
    mutate(father=ifelse(father == 0 & mother != 0,
                         paste(family, "father", row_number(), sep="_"),
                         father),
           mother=ifelse(father != 0 & mother == 0,
                         paste(family, "mother", row_number(), sep="_"),
                         mother))
}
## repeat check after correcting unknown parent rows to get new
## dummy parents in "no.individ.entry"
chk <- pedigreeCheck(ped)
# Add a founder row for every parent ID with no individual entry; "both"
# rows carry the two missing parents in parentID separated by ";" (mother
# first, per the `into=` order below).
if ("parent.no.individ.entry" %in% names(chk)) {
  both <- chk$parent.no.individ.entry %>%
    filter(no_individ_entry == "both") %>%
    separate(parentID, into=c("mother", "father"), sep=";") %>%
    select(family, father, mother) %>%
    pivot_longer(-family, names_to="no_individ_entry", values_to="parentID")
  parents <- chk$parent.no.individ.entry %>%
    filter(no_individ_entry != "both") %>%
    bind_rows(both) %>%
    mutate(sex=ifelse(no_individ_entry == "father", "M", "F"),
           father="0", mother="0") %>%
    select(family, individ=parentID, father, mother, sex)
  ped <- bind_rows(ped, parents)
}
## repeat check after adding dummy parents
chk <- pedigreeCheck(ped)
# Single-person families carry no relative information -- drop them.
if ("one.person.fams" %in% names(chk)) {
  ped <- ped %>%
    filter(!family %in% chk$one.person.fams$family)
}
# Disconnected subfamilies are renamed family_subfamily so each connected
# component becomes its own family.
if ("subfamilies.ident" %in% names(chk)) {
  ped <- ped %>%
    left_join(chk$subfamilies.ident, by=c("family", "individ")) %>%
    mutate(family=ifelse(is.na(subfamily), family, paste(family, subfamily, sep="_"))) %>%
    select(-subfamily)
}
chk <- pedigreeCheck(ped)
## sometimes we need to do this again after assigning subfamilies
if ("one.person.fams" %in% names(chk)) {
  ped <- ped %>%
    filter(!family %in% chk$one.person.fams$family)
}
# Final check: anything still flagged at this point cannot be auto-repaired.
chk <- pedigreeCheck(ped)
names(chk)
if (!is.null(chk)) {
  stop("pedigree had unresolvable errors")
}
# define relative categories
# NOTE(review): sources code from a remote URL at run time -- confirm network
# access and unpinned master-branch content are acceptable for reproducibility.
source("https://raw.githubusercontent.com/UW-GAC/QCpipeline/master/QCpipeline/R/expRelsCategory.R")
rel <- expRelsCategory(ped)
rel <- rel$relprs.all
save(rel, file=config["out_file"])
table(rel$relation, rel$exp.rel)
|
/R/pedigree_format.R
|
no_license
|
UW-GAC/analysis_pipeline
|
R
| false
| false
| 5,086
|
r
|
# Pedigree check (TOPMed pipeline): read a pedigree file, normalize its
# columns, iteratively repair structural problems reported by
# GWASTools::pedigreeCheck(), and save expected relative-pair categories.
library(argparser)
library(TopmedPipeline)
library(GWASTools)
library(dplyr)
library(tidyr)
sessionInfo()
# Single positional command-line argument: path to the pipeline config file.
argp <- arg_parser("Pedigree check")
argp <- add_argument(argp, "config", help="path to config file")
argv <- parse_args(argp)
config <- readConfig(argv$config)
required <- c("pedigree_file")
optional <- c("concat_family_individ"=FALSE,
              "out_file"="exp_rel.RData",
              "err_file"="ped_errs.RData")
config <- setConfigDefaults(config, required, optional)
print(config)
pedfile <- config["pedigree_file"]
# Pick a field separator from the file extension; "" lets read.table split on
# arbitrary whitespace for anything that is not .csv/.txt.
sep <- switch(tools::file_ext(pedfile),
              csv = ",",
              txt = "\t",
              "")
# PLINK-style .fam files carry no header row; other formats are assumed to.
hdr <- !(tools::file_ext(pedfile) == "fam")
ped <- read.table(pedfile, sep=sep, header=hdr, comment.char="#", na.strings=c("", "NA"), as.is=TRUE, fill=TRUE, blank.lines.skip=TRUE)
head(ped)
# if this is a dbGaP file, strip out dbGaP subject ID so we only have one ID column
if (any(grepl("^dbGaP", names(ped)))) {
  ped <- ped %>%
    select(-starts_with("dbGaP"))
}
# Check column names: locate each expected field by substring match on the
# lower-cased header; NA when the match is missing or ambiguous.
names(ped) <- tolower(names(ped))
cols <- lapply(c("fam", "subj", "father", "mother", "sex"),
               function(x) {
                 i <- which(grepl(x, names(ped)))
                 if (length(i) == 1) return(i) else return(NA)
               })
names(cols) <- c("family", "individ", "father", "mother", "sex")
cols <- unlist(cols)
# Fall back to the conventional positional order for any unmatched column.
if (is.na(cols["family"])) cols["family"] <- 1
if (is.na(cols["individ"])) cols["individ"] <- 2
if (is.na(cols["father"])) cols["father"] <- 3
if (is.na(cols["mother"])) cols["mother"] <- 4
if (is.na(cols["sex"])) cols["sex"] <- 5
if (!setequal(cols, 1:5)) {
  stop("Cannot parse pedigree file. Columns should be FAMILY_ID, SUBJECT_ID, FATHER, MOTHER, SEX.")
}
ped <- ped[,unname(cols)]
names(ped) <- names(cols)
# set mother and father ID for founders to 0
ped <- ped %>%
  mutate(father=ifelse(is.na(father), 0, father),
         mother=ifelse(is.na(mother), 0, mother))
# Standardize sex to "M"/"F": numeric coding indexes c("M","F"), i.e. 1=M,
# 2=F; "male"/"female" strings are truncated to their upper-cased initial.
if (is.numeric(ped$sex)) {
  ped <- ped %>%
    mutate(sex=c("M", "F")[sex])
} else {
  if (any(tolower(ped$sex) %in% c("male", "female"))) {
    ped <- ped %>%
      mutate(sex=toupper(substr(sex,1,1)))
  }
}
head(ped)
# Make sure individ is unique across families by prefixing IDs with the
# family ID when the config requests it (founder "0" IDs are kept as 0).
if (as.logical(config["concat_family_individ"])) {
  ped <- ped %>%
    mutate(individ=paste(family, individ, sep="_"),
           father=ifelse(father == 0, 0, paste(family, father, sep="_")),
           mother=ifelse(mother == 0, 0, paste(family, mother, sep="_")))
}
# Check for pedigree errors; pedigreeCheck returns NULL when clean, otherwise
# a named list of per-problem tables which is saved for later inspection.
chk <- pedigreeCheck(ped)
names(chk)
if (!is.null(chk)) {
  save(chk, file=config["err_file"])
}
# Exact duplicate rows are dropped; conflicting duplicates are fatal.
if ("duplicates" %in% names(chk)) {
  if (!all(chk$duplicates$match)) {
    stop("Pedigree has duplicate subjects with conflicting data.")
  }
  ped <- distinct(ped)
}
# Rows with exactly one known parent get a synthetic ID for the missing one,
# unique per row via row_number().
if ("unknown.parent.rows" %in% names(chk)) {
  ped <- ped %>%
    mutate(father=ifelse(father == 0 & mother != 0,
                         paste(family, "father", row_number(), sep="_"),
                         father),
           mother=ifelse(father != 0 & mother == 0,
                         paste(family, "mother", row_number(), sep="_"),
                         mother))
}
## repeat check after correcting unknown parent rows to get new
## dummy parents in "no.individ.entry"
chk <- pedigreeCheck(ped)
# Add a founder row for every parent ID with no individual entry; "both"
# rows carry the two missing parents in parentID separated by ";" (mother
# first, per the `into=` order below).
if ("parent.no.individ.entry" %in% names(chk)) {
  both <- chk$parent.no.individ.entry %>%
    filter(no_individ_entry == "both") %>%
    separate(parentID, into=c("mother", "father"), sep=";") %>%
    select(family, father, mother) %>%
    pivot_longer(-family, names_to="no_individ_entry", values_to="parentID")
  parents <- chk$parent.no.individ.entry %>%
    filter(no_individ_entry != "both") %>%
    bind_rows(both) %>%
    mutate(sex=ifelse(no_individ_entry == "father", "M", "F"),
           father="0", mother="0") %>%
    select(family, individ=parentID, father, mother, sex)
  ped <- bind_rows(ped, parents)
}
## repeat check after adding dummy parents
chk <- pedigreeCheck(ped)
# Single-person families carry no relative information -- drop them.
if ("one.person.fams" %in% names(chk)) {
  ped <- ped %>%
    filter(!family %in% chk$one.person.fams$family)
}
# Disconnected subfamilies are renamed family_subfamily so each connected
# component becomes its own family.
if ("subfamilies.ident" %in% names(chk)) {
  ped <- ped %>%
    left_join(chk$subfamilies.ident, by=c("family", "individ")) %>%
    mutate(family=ifelse(is.na(subfamily), family, paste(family, subfamily, sep="_"))) %>%
    select(-subfamily)
}
chk <- pedigreeCheck(ped)
## sometimes we need to do this again after assigning subfamilies
if ("one.person.fams" %in% names(chk)) {
  ped <- ped %>%
    filter(!family %in% chk$one.person.fams$family)
}
# Final check: anything still flagged at this point cannot be auto-repaired.
chk <- pedigreeCheck(ped)
names(chk)
if (!is.null(chk)) {
  stop("pedigree had unresolvable errors")
}
# define relative categories
# NOTE(review): sources code from a remote URL at run time -- confirm network
# access and unpinned master-branch content are acceptable for reproducibility.
source("https://raw.githubusercontent.com/UW-GAC/QCpipeline/master/QCpipeline/R/expRelsCategory.R")
rel <- expRelsCategory(ped)
rel <- rel$relprs.all
save(rel, file=config["out_file"])
table(rel$relation, rel$exp.rel)
|
library(tsibble)
### Name: fill_gaps
### Title: Turn implicit missing values into explicit missing values
### Aliases: fill_gaps
### ** Examples
# Toy tsibble keyed by fruit and indexed by year; the years within each
# fruit are not consecutive, so the series contains implicit gaps.
# NOTE(review): sample() is unseeded, so `kilo` differs between runs.
harvest <- tsibble(
  year = c(2010, 2011, 2013, 2011, 2012, 2014),
  fruit = rep(c("kiwi", "cherry"), each = 3),
  kilo = sample(1:10, size = 6),
  key = id(fruit), index = year
)
# gaps as default `NA` ----
# .full = TRUE pads every key over the full index range of the whole data;
# .full = FALSE fills only within each key's own observed range.
fill_gaps(harvest, .full = TRUE)
full_harvest <- fill_gaps(harvest, .full = FALSE)
full_harvest
# use fill() to fill `NA` by previous/next entry
full_harvest %>%
  group_by(fruit) %>%
  fill(kilo, .direction = "down")
# replace gaps with a specific value ----
harvest %>%
  fill_gaps(kilo = 0L)
# replace gaps using a function by variable ----
harvest %>%
  fill_gaps(kilo = sum(kilo))
# replace gaps using a function for each group ----
harvest %>%
  group_by(fruit) %>%
  fill_gaps(kilo = sum(kilo))
# leaves existing `NA` untouched ----
harvest[2, 3] <- NA
harvest %>%
  group_by(fruit) %>%
  fill_gaps(kilo = sum(kilo, na.rm = TRUE))
# replace NA ----
# pedestrian is a tsibble dataset shipped with the package
pedestrian %>%
  group_by(Sensor) %>%
  fill_gaps(Count = as.integer(median(Count)))
|
/data/genthat_extracted_code/tsibble/examples/fill_gaps.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 1,111
|
r
|
library(tsibble)
### Name: fill_gaps
### Title: Turn implicit missing values into explicit missing values
### Aliases: fill_gaps
### ** Examples
# Toy tsibble keyed by fruit and indexed by year; the years within each
# fruit are not consecutive, so the series contains implicit gaps.
# NOTE(review): sample() is unseeded, so `kilo` differs between runs.
harvest <- tsibble(
  year = c(2010, 2011, 2013, 2011, 2012, 2014),
  fruit = rep(c("kiwi", "cherry"), each = 3),
  kilo = sample(1:10, size = 6),
  key = id(fruit), index = year
)
# gaps as default `NA` ----
# .full = TRUE pads every key over the full index range of the whole data;
# .full = FALSE fills only within each key's own observed range.
fill_gaps(harvest, .full = TRUE)
full_harvest <- fill_gaps(harvest, .full = FALSE)
full_harvest
# use fill() to fill `NA` by previous/next entry
full_harvest %>%
  group_by(fruit) %>%
  fill(kilo, .direction = "down")
# replace gaps with a specific value ----
harvest %>%
  fill_gaps(kilo = 0L)
# replace gaps using a function by variable ----
harvest %>%
  fill_gaps(kilo = sum(kilo))
# replace gaps using a function for each group ----
harvest %>%
  group_by(fruit) %>%
  fill_gaps(kilo = sum(kilo))
# leaves existing `NA` untouched ----
harvest[2, 3] <- NA
harvest %>%
  group_by(fruit) %>%
  fill_gaps(kilo = sum(kilo, na.rm = TRUE))
# replace NA ----
# pedestrian is a tsibble dataset shipped with the package
pedestrian %>%
  group_by(Sensor) %>%
  fill_gaps(Count = as.integer(median(Count)))
|
# BEMTOOL - Bio-Economic Model TOOLs - version 2.5
# Authors: G. Lembo, I. Bitetto, M.T. Facchini, M.T. Spedicato 2018
# COISPA Tecnologia & Ricerca, Via dei Trulli 18/20 - (Bari), Italy
# In case of use of the model, the Authors should be cited.
# If you have any comments or suggestions please contact the following e-mail address: facchini@coispa.it
# BEMTOOL is believed to be reliable. However, we disclaim any implied warranty or representation about its accuracy,
# completeness or appropriateness for any particular purpose.
#
#
#
#
#
#
#
#
# ------------------------------------------------------------------------------
# Add the columns to to be rendered in the tree
# ------------------------------------------------------------------------------
#
# Build the columns of the price import-weight table: a fixed, read-only
# " Species " text column followed by one text column per forecast year in
# the global BMT_YEARS_FORECAST.
#
# treeview: a GtkTreeView (RGtk2) whose model supplies the cell text.
# Side effects only; no return value of interest.
bmt_price_importweight.add_columns <- function(treeview) {
  # print("Adding column to the model...")
  bmt_price_importweight.model <- treeview$getModel()
  # First column: species label, bound to model column 0, never editable.
  renderer <- gtkCellRendererTextNew()
  # gSignalConnect(renderer, "edited", cell.edited, model)
  year_frame <- data.frame(c(0))
  colnames(year_frame) <- c(" Species ")
  # Tag the renderer with its column descriptor for later lookup.
  renderer$setData("column", year_frame)
  treeview$insertColumnWithAttributes(-1, " Species " , renderer, text = 0, editable = FALSE)
  # One column per forecast year, each bound to model column e.
  for (e in 1:length(BMT_YEARS_FORECAST)) {
    # number column
    renderer <- gtkCellRendererTextNew()
    # Edits are committed back to the model by the shared "edited" handler.
    gSignalConnect(renderer, "edited", bmt_price_importweight.cell_edited, bmt_price_importweight.model)
    month_frame <- data.frame(c(e))
    colnames(month_frame) <- paste(" ", BMT_YEARS_FORECAST[e], " ", sep="")
    renderer$setData("column", month_frame)
    # NOTE(review): `editable` is given a model column index
    # (length(BMT_YEARS_FORECAST)+1) rather than a logical; RGtk2 maps
    # attribute values to model columns -- confirm the model really carries
    # an editability flag in that column.
    treeview$insertColumnWithAttributes(-1, as.character(paste(" ", BMT_YEARS_FORECAST[e], " ", sep="")), renderer, text = e, editable = (length(BMT_YEARS_FORECAST)+1))
  }
}
|
/BEMTOOL-ver2.5-2018_0901/bmtgui/economic_params/price/price_importweight/price_importweight.add_columns.r
|
no_license
|
gresci/BEMTOOL2.5
|
R
| false
| false
| 1,798
|
r
|
# BEMTOOL - Bio-Economic Model TOOLs - version 2.5
# Authors: G. Lembo, I. Bitetto, M.T. Facchini, M.T. Spedicato 2018
# COISPA Tecnologia & Ricerca, Via dei Trulli 18/20 - (Bari), Italy
# In case of use of the model, the Authors should be cited.
# If you have any comments or suggestions please contact the following e-mail address: facchini@coispa.it
# BEMTOOL is believed to be reliable. However, we disclaim any implied warranty or representation about its accuracy,
# completeness or appropriateness for any particular purpose.
#
#
#
#
#
#
#
#
# ------------------------------------------------------------------------------
# Add the columns to to be rendered in the tree
# ------------------------------------------------------------------------------
#
# Build the columns of the price import-weight table: a fixed, read-only
# " Species " text column followed by one text column per forecast year in
# the global BMT_YEARS_FORECAST.
#
# treeview: a GtkTreeView (RGtk2) whose model supplies the cell text.
# Side effects only; no return value of interest.
bmt_price_importweight.add_columns <- function(treeview) {
  # print("Adding column to the model...")
  bmt_price_importweight.model <- treeview$getModel()
  # First column: species label, bound to model column 0, never editable.
  renderer <- gtkCellRendererTextNew()
  # gSignalConnect(renderer, "edited", cell.edited, model)
  year_frame <- data.frame(c(0))
  colnames(year_frame) <- c(" Species ")
  # Tag the renderer with its column descriptor for later lookup.
  renderer$setData("column", year_frame)
  treeview$insertColumnWithAttributes(-1, " Species " , renderer, text = 0, editable = FALSE)
  # One column per forecast year, each bound to model column e.
  for (e in 1:length(BMT_YEARS_FORECAST)) {
    # number column
    renderer <- gtkCellRendererTextNew()
    # Edits are committed back to the model by the shared "edited" handler.
    gSignalConnect(renderer, "edited", bmt_price_importweight.cell_edited, bmt_price_importweight.model)
    month_frame <- data.frame(c(e))
    colnames(month_frame) <- paste(" ", BMT_YEARS_FORECAST[e], " ", sep="")
    renderer$setData("column", month_frame)
    # NOTE(review): `editable` is given a model column index
    # (length(BMT_YEARS_FORECAST)+1) rather than a logical; RGtk2 maps
    # attribute values to model columns -- confirm the model really carries
    # an editability flag in that column.
    treeview$insertColumnWithAttributes(-1, as.character(paste(" ", BMT_YEARS_FORECAST[e], " ", sep="")), renderer, text = e, editable = (length(BMT_YEARS_FORECAST)+1))
  }
}
|
##### Santiago Lacouture & Lancelot Henry de Frahan
#' Sales Taxes
#' Replication File. Updated on 04/18/2023
#' Step 5b: Reduced form evidence of non-linearities.
#' Export IV estimates by quantiles of lagged price distribution,
#' and distribution of current prices, using relevant estimation weights.
#'
library(data.table)
library(futile.logger)
library(lfe)
library(Matrix)
library(zoo)
library(tidyverse)
library(stringr)
setwd("/project/igaarder")
rm(list = ls())
## input filepath ----------------------------------------------
all_pi <- fread("Data/Replication_v4/all_pi_DLL.csv")
# When TRUE, also export the empirical weighted price distributions by quantile.
pricedist <- T
## output filepath ----------------------------------------------
iv.output.results.file <- "Data/Replication_v4/IV_subsamples_initprice_DLL.csv"
output.emp.price.dist <- "Data/Replication_v4/Emp_price_subsamples_initprice_DLL.csv"
## We only want to use the "true" tax variation
all_pi <- all_pi[non_imp_tax_strong == 1] ## all_pi should already only include this sample
# Create demeaned current prices (within module-by-time cell)
all_pi[, n.ln_cpricei2 := ln_cpricei2 - mean(ln_cpricei2, na.rm = T), by = .(module_by_time)]
# Create treatment groups: treated = nonzero change in the sales tax
all_pi[, treated := DL.ln_sales_tax != 0]
# Two alternative fixed-effects specifications
FE_opts <- c("region_by_module_by_time", "division_by_module_by_time")
### Estimation ----
LRdiff_res <- data.table(NULL)
empirical_price_dist <- data.table(NULL)
## Run within
flog.info("Iteration 0")
# Outer loop: split the sample into n.g quantile groups of the lagged
# demeaned price, then estimate IV / first-stage / reduced-form per group.
for (n.g in 1:5) {
  # Create groups of initial values of tax rate
  # We use the full weighted distribution
  # NOTE(review): base quantile() has no `weight` argument -- `weight =` is
  # silently absorbed by `...`, so these breaks are UNWEIGHTED. Confirm
  # whether a weighted quantile was intended here and for quantlab below.
  all_pi <- all_pi[, quantile := cut(dm.L.ln_cpricei2,
                                     breaks = quantile(dm.L.ln_cpricei2, probs = seq(0, 1, by = 1/n.g), na.rm = T, weight = base.sales),
                                     labels = 1:n.g, right = FALSE)]
  quantlab <- round(quantile(all_pi$dm.L.ln_cpricei2,
                             probs = seq(0, 1, by = 1/n.g), na.rm = T,
                             weight = all_pi$base.sales), digits = 4)
  # Saturate fixed effects: interact each FE with the quantile group
  all_pi[, group_region_by_module_by_time := .GRP, by = .(region_by_module_by_time, quantile)]
  all_pi[, group_division_by_module_by_time := .GRP, by = .(division_by_module_by_time, quantile)]
  ## Estimate RF and FS
  for (FE in FE_opts) {
    ## Produce appropiate weights implied by regression:
    ## wVAR = within-(FE x quantile)-cell weighted variance of the tax change.
    grouped_FE <- paste0("group_", FE)
    all_pi[, wVAR := weighted.mean((DL.ln_sales_tax -
                                      weighted.mean(DL.ln_sales_tax,
                                                    w = base.sales, na.rm = T))^2,
                                   w = base.sales, na.rm = T), by = grouped_FE]
    all_pi[, wVAR := ifelse(is.na(wVAR), 0, wVAR)]
    # Weight normalized within quantile
    all_pi[, base.sales.q := (wVAR*base.sales)/sum(wVAR*base.sales), by = .(quantile)]
    all_pi[, base.sales.qor := base.sales/sum(base.sales), by = .(quantile)]
    if (pricedist) {
      # capture prices by bins: 1500 equal-width bins over the observed range
      step.log.p <- (max(all_pi$ln_cpricei2, na.rm = T) - min(all_pi$ln_cpricei2, na.rm = T) )/1500
      step.n.log.p <- (max(all_pi$n.ln_cpricei2, na.rm = T) - min(all_pi$n.ln_cpricei2, na.rm = T)) /1500
      min.log.p <- min(all_pi$ln_cpricei2, na.rm = T)
      min.n.log.p <- min(all_pi$n.ln_cpricei2, na.rm = T)
      all_pi[, d.lp := floor((ln_cpricei2 - min.log.p)/step.log.p)]
      all_pi[, d.n.lp := floor((n.ln_cpricei2 - min.n.log.p)/step.n.log.p)]
      ### Version 1: using bases.sales
      # Produce empirical weighted distribution of (de-meaned) current prices
      d1 <- all_pi[, .(dens.log.p = sum(base.sales.qor)), by = .(quantile, d.lp)]
      d1[, dens.log.p := dens.log.p/sum(dens.log.p), by =.(quantile)]
      d1[, log.p := d.lp*step.log.p + min.log.p + step.log.p/2]
      # Produce empirical weighted distribution of log (de-meaned) current prices
      d2 <- all_pi[, .(dens.n.log.p = sum(base.sales.qor)), by = .(quantile, d.n.lp)]
      d2[, dens.n.log.p := dens.n.log.p/sum(dens.n.log.p), by =.(quantile)]
      d2[, log.n.p := d.n.lp*step.n.log.p + min.n.log.p + step.n.log.p/2]
      prices_densities <- merge(d1, d2, by.x = c("d.lp", "quantile"), by.y = c("d.n.lp", "quantile"))
      prices_densities[, n.groups := n.g]
      prices_densities[, controls := FE]
      prices_densities[, treated := NA]
      prices_densities[, w := "base.sales"]
      empirical_price_dist <- rbind(empirical_price_dist, prices_densities)
      # Intermediate output is flushed to disk after every append.
      fwrite(empirical_price_dist, output.emp.price.dist)
      ## Repeat by treatment group
      # Produce empirical weighted distribution of log (de-meaned) current prices
      d1 <- all_pi[, .(dens.log.p = sum(base.sales.qor)), by = .(quantile, d.lp, treated)]
      d1[, dens.log.p := dens.log.p/sum(dens.log.p), by =.(quantile, treated)]
      d1[, log.p := d.lp*step.log.p + min.log.p + step.log.p/2]
      d2 <- all_pi[, .(dens.n.log.p = sum(base.sales.qor)), by = .(quantile, d.n.lp, treated)]
      d2[, dens.n.log.p := dens.n.log.p/sum(dens.n.log.p), by =.(quantile, treated)]
      d2[, log.n.p := d.n.lp*step.n.log.p + min.n.log.p + step.n.log.p/2]
      prices_densities <- merge(d1, d2, by.x = c("d.lp", "quantile", "treated"), by.y = c("d.n.lp", "quantile", "treated"))
      prices_densities[, n.groups := n.g]
      prices_densities[, controls := FE]
      prices_densities[, w := "base.sales"]
      empirical_price_dist <- rbind(empirical_price_dist, prices_densities)
      fwrite(empirical_price_dist, output.emp.price.dist)
      ### Version 2: using ``cohort-corrected'' weights
      # Produce empirical weighted distribution of (de-meaned) current prices
      d1 <- all_pi[, .(dens.log.p = sum(base.sales.q)), by = .(quantile, d.lp)]
      d1[, dens.log.p := dens.log.p/sum(dens.log.p), by =.(quantile)]
      d1[, log.p := d.lp*step.log.p + min.log.p + step.log.p/2]
      # Produce empirical weighted distribution of log (de-meaned) current prices
      d2 <- all_pi[, .(dens.n.log.p = sum(base.sales.q)), by = .(quantile, d.n.lp)]
      d2[, dens.n.log.p := dens.n.log.p/sum(dens.n.log.p), by =.(quantile)]
      d2[, log.n.p := d.n.lp*step.n.log.p + min.n.log.p + step.n.log.p/2]
      prices_densities <- merge(d1, d2, by.x = c("d.lp", "quantile"), by.y = c("d.n.lp", "quantile"))
      prices_densities[, n.groups := n.g]
      prices_densities[, controls := FE]
      prices_densities[, treated := NA]
      prices_densities[, w := "base.sales.q"]
      empirical_price_dist <- rbind(empirical_price_dist, prices_densities)
      fwrite(empirical_price_dist, output.emp.price.dist)
      ## Repeat by treatment group
      # Produce empirical weighted distribution of log (de-meaned) current prices
      d1 <- all_pi[, .(dens.log.p = sum(base.sales.q)), by = .(quantile, d.lp, treated)]
      d1[, dens.log.p := dens.log.p/sum(dens.log.p), by =.(quantile, treated)]
      d1[, log.p := d.lp*step.log.p + min.log.p + step.log.p/2]
      d2 <- all_pi[, .(dens.n.log.p = sum(base.sales.q)), by = .(quantile, d.n.lp, treated)]
      d2[, dens.n.log.p := dens.n.log.p/sum(dens.n.log.p), by =.(quantile, treated)]
      d2[, log.n.p := d.n.lp*step.n.log.p + min.n.log.p + step.n.log.p/2]
      prices_densities <- merge(d1, d2, by.x = c("d.lp", "quantile", "treated"), by.y = c("d.n.lp", "quantile", "treated"))
      prices_densities[, n.groups := n.g]
      prices_densities[, controls := FE]
      prices_densities[, w := "base.sales.q"]
      empirical_price_dist <- rbind(empirical_price_dist, prices_densities)
      fwrite(empirical_price_dist, output.emp.price.dist)
    }
    ## Produce IVs: per lagged-price quantile, estimate IV, first-stage, and
    ## reduced-form with felm(); SEs clustered by module_by_state.
    for (q in unique(all_pi$quantile)) {
      if (nrow(all_pi[quantile == q]) > 0) {
        formula1 <- as.formula(paste0("DL.ln_quantity3 ~ 0 | ",
                                      FE,
                                      " | (DL.ln_cpricei2 ~ DL.ln_sales_tax) | module_by_state"))
        res1 <- felm(formula = formula1, data = all_pi[quantile == q],
                     weights = all_pi[quantile == q]$base.sales)
        ## attach results
        res1.dt <- data.table(coef(summary(res1)), keep.rownames=T)
        res1.dt[, outcome := "IV"]
        res1.dt[, controls := FE]
        res1.dt[, group := q]
        res1.dt[, n.groups := n.g]
        LRdiff_res <- rbind(LRdiff_res, res1.dt, fill = T)
        fwrite(LRdiff_res, iv.output.results.file)
        ## First-stage
        formula1 <- as.formula(paste0("DL.ln_cpricei2 ~ DL.ln_sales_tax | ", FE, " | 0 | module_by_state"))
        res1 <- felm(formula = formula1, data = all_pi[quantile == q],
                     weights = all_pi[quantile == q]$base.sales)
        ## attach results
        res1.dt <- data.table(coef(summary(res1)), keep.rownames=T)
        res1.dt[, outcome := "DL.ln_cpricei2"]
        res1.dt[, controls := FE]
        res1.dt[, group := q]
        res1.dt[, n.groups := n.g]
        LRdiff_res <- rbind(LRdiff_res, res1.dt, fill = T)
        fwrite(LRdiff_res, iv.output.results.file)
        ## Reduced-Form
        formula1 <- as.formula(paste0("DL.ln_quantity3 ~ DL.ln_sales_tax | ", FE, " | 0 | module_by_state"))
        res1 <- felm(formula = formula1, data = all_pi[quantile == q],
                     weights = all_pi[quantile == q]$base.sales)
        ## attach results
        res1.dt <- data.table(coef(summary(res1)), keep.rownames=T)
        res1.dt[, outcome := "DL.ln_quantity3"]
        res1.dt[, controls := FE]
        res1.dt[, group := q]
        res1.dt[, n.groups := n.g]
        LRdiff_res <- rbind(LRdiff_res, res1.dt, fill = T)
        fwrite(LRdiff_res, iv.output.results.file)
      }
    }
  }
}
|
/Replication/Replication_v4/DiD_nonlinearities_DLL_v4.R
|
no_license
|
lancelothdf/sales.taxes
|
R
| false
| false
| 9,777
|
r
|
##### Santiago Lacouture & Lancelot Henry de Frahan
#' Sales Taxes
#' Replication File. Updated on 04/18/2023
#' Step 5b: Reduced form evidence of non-linearities.
#' Export IV estimates by quantiles of lagged price distribution,
#' and distribution of current prices, using relevant estimation weights.
#'
library(data.table)
library(futile.logger)
library(lfe)
library(Matrix)
library(zoo)
library(tidyverse)
library(stringr)
setwd("/project/igaarder")
rm(list = ls())
## input filepath ----------------------------------------------
all_pi <- fread("Data/Replication_v4/all_pi_DLL.csv")
# When TRUE, also export the empirical weighted price distributions by quantile.
pricedist <- T
## output filepath ----------------------------------------------
iv.output.results.file <- "Data/Replication_v4/IV_subsamples_initprice_DLL.csv"
output.emp.price.dist <- "Data/Replication_v4/Emp_price_subsamples_initprice_DLL.csv"
## We only want to use the "true" tax variation
all_pi <- all_pi[non_imp_tax_strong == 1] ## all_pi should already only include this sample
# Create demeaned current prices (within module-by-time cell)
all_pi[, n.ln_cpricei2 := ln_cpricei2 - mean(ln_cpricei2, na.rm = T), by = .(module_by_time)]
# Create treatment groups: treated = nonzero change in the sales tax
all_pi[, treated := DL.ln_sales_tax != 0]
# Two alternative fixed-effects specifications
FE_opts <- c("region_by_module_by_time", "division_by_module_by_time")
### Estimation ----
LRdiff_res <- data.table(NULL)
empirical_price_dist <- data.table(NULL)
## Run within
flog.info("Iteration 0")
for (n.g in 1:5) {
# Create groups of initial values of tax rate
# We use the full weighted distribution
all_pi <- all_pi[, quantile := cut(dm.L.ln_cpricei2,
breaks = quantile(dm.L.ln_cpricei2, probs = seq(0, 1, by = 1/n.g), na.rm = T, weight = base.sales),
labels = 1:n.g, right = FALSE)]
quantlab <- round(quantile(all_pi$dm.L.ln_cpricei2,
probs = seq(0, 1, by = 1/n.g), na.rm = T,
weight = all_pi$base.sales), digits = 4)
# Saturate fixed effects
all_pi[, group_region_by_module_by_time := .GRP, by = .(region_by_module_by_time, quantile)]
all_pi[, group_division_by_module_by_time := .GRP, by = .(division_by_module_by_time, quantile)]
## Estimate RF and FS
for (FE in FE_opts) {
## Produce appropiate weights implied by regression
grouped_FE <- paste0("group_", FE)
all_pi[, wVAR := weighted.mean((DL.ln_sales_tax -
weighted.mean(DL.ln_sales_tax,
w = base.sales, na.rm = T))^2,
w = base.sales, na.rm = T), by = grouped_FE]
all_pi[, wVAR := ifelse(is.na(wVAR), 0, wVAR)]
# Weight normalized within quantile
all_pi[, base.sales.q := (wVAR*base.sales)/sum(wVAR*base.sales), by = .(quantile)]
all_pi[, base.sales.qor := base.sales/sum(base.sales), by = .(quantile)]
if (pricedist) {
# capture prices by bins
step.log.p <- (max(all_pi$ln_cpricei2, na.rm = T) - min(all_pi$ln_cpricei2, na.rm = T) )/1500
step.n.log.p <- (max(all_pi$n.ln_cpricei2, na.rm = T) - min(all_pi$n.ln_cpricei2, na.rm = T)) /1500
min.log.p <- min(all_pi$ln_cpricei2, na.rm = T)
# Discretize current prices into equal-width bins on the log scale so an
# empirical density can be tabulated. min.log.p, step.log.p and step.n.log.p
# are defined earlier in the script (outside this excerpt); n.ln_cpricei2 is
# presumably the normalized log price -- TODO confirm against its construction.
min.n.log.p <- min(all_pi$n.ln_cpricei2, na.rm = T)
all_pi[, d.lp := floor((ln_cpricei2 - min.log.p)/step.log.p)]
all_pi[, d.n.lp := floor((n.ln_cpricei2 - min.n.log.p)/step.n.log.p)]
### Version 1: using bases.sales
# Produce empirical weighted distribution of (de-meaned) current prices,
# weighting each observation by base.sales.qor
d1 <- all_pi[, .(dens.log.p = sum(base.sales.qor)), by = .(quantile, d.lp)]
# normalize weights so the density sums to 1 within each quantile
d1[, dens.log.p := dens.log.p/sum(dens.log.p), by =.(quantile)]
# recover the bin midpoint on the log price scale
d1[, log.p := d.lp*step.log.p + min.log.p + step.log.p/2]
# Produce empirical weighted distribution of normalized log (de-meaned) current prices
d2 <- all_pi[, .(dens.n.log.p = sum(base.sales.qor)), by = .(quantile, d.n.lp)]
d2[, dens.n.log.p := dens.n.log.p/sum(dens.n.log.p), by =.(quantile)]
d2[, log.n.p := d.n.lp*step.n.log.p + min.n.log.p + step.n.log.p/2]
# pair the two binnings by bin index (d.lp matched to d.n.lp) within quantile
prices_densities <- merge(d1, d2, by.x = c("d.lp", "quantile"), by.y = c("d.n.lp", "quantile"))
prices_densities[, n.groups := n.g]
prices_densities[, controls := FE]
# treated is NA here: this version pools across treatment groups
prices_densities[, treated := NA]
prices_densities[, w := "base.sales"]
empirical_price_dist<- rbind(empirical_price_dist, prices_densities)
# rewrite the output file after each append so partial results survive a crash
fwrite(empirical_price_dist, output.emp.price.dist)
## Repeat by treatment group
# Produce empirical weighted distribution of log (de-meaned) current prices
d1 <- all_pi[, .(dens.log.p = sum(base.sales.qor)), by = .(quantile, d.lp, treated)]
d1[, dens.log.p := dens.log.p/sum(dens.log.p), by =.(quantile, treated)]
d1[, log.p := d.lp*step.log.p + min.log.p + step.log.p/2]
d2 <- all_pi[, .(dens.n.log.p = sum(base.sales.qor)), by = .(quantile, d.n.lp, treated)]
d2[, dens.n.log.p := dens.n.log.p/sum(dens.n.log.p), by =.(quantile, treated)]
d2[, log.n.p := d.n.lp*step.n.log.p + min.n.log.p + step.n.log.p/2]
prices_densities <- merge(d1, d2, by.x = c("d.lp", "quantile", "treated"), by.y = c("d.n.lp", "quantile", "treated"))
prices_densities[, n.groups := n.g]
prices_densities[, controls := FE]
prices_densities[, w := "base.sales"]
empirical_price_dist<- rbind(empirical_price_dist, prices_densities)
fwrite(empirical_price_dist, output.emp.price.dist)
### Version 2: using ``cohort-corrected'' weights
# Same as version 1 but weighting by base.sales.q instead of base.sales.qor
# Produce empirical weighted distribution of (de-meaned) current prices
d1 <- all_pi[, .(dens.log.p = sum(base.sales.q)), by = .(quantile, d.lp)]
d1[, dens.log.p := dens.log.p/sum(dens.log.p), by =.(quantile)]
d1[, log.p := d.lp*step.log.p + min.log.p + step.log.p/2]
# Produce empirical weighted distribution of log (de-meaned) current prices
d2 <- all_pi[, .(dens.n.log.p = sum(base.sales.q)), by = .(quantile, d.n.lp)]
d2[, dens.n.log.p := dens.n.log.p/sum(dens.n.log.p), by =.(quantile)]
d2[, log.n.p := d.n.lp*step.n.log.p + min.n.log.p + step.n.log.p/2]
prices_densities <- merge(d1, d2, by.x = c("d.lp", "quantile"), by.y = c("d.n.lp", "quantile"))
prices_densities[, n.groups := n.g]
prices_densities[, controls := FE]
prices_densities[, treated := NA]
prices_densities[, w := "base.sales.q"]
empirical_price_dist<- rbind(empirical_price_dist, prices_densities)
fwrite(empirical_price_dist, output.emp.price.dist)
## Repeat by treatment group
# Produce empirical weighted distribution of log (de-meaned) current prices
d1 <- all_pi[, .(dens.log.p = sum(base.sales.q)), by = .(quantile, d.lp, treated)]
d1[, dens.log.p := dens.log.p/sum(dens.log.p), by =.(quantile, treated)]
d1[, log.p := d.lp*step.log.p + min.log.p + step.log.p/2]
d2 <- all_pi[, .(dens.n.log.p = sum(base.sales.q)), by = .(quantile, d.n.lp, treated)]
d2[, dens.n.log.p := dens.n.log.p/sum(dens.n.log.p), by =.(quantile, treated)]
d2[, log.n.p := d.n.lp*step.n.log.p + min.n.log.p + step.n.log.p/2]
prices_densities <- merge(d1, d2, by.x = c("d.lp", "quantile", "treated"), by.y = c("d.n.lp", "quantile", "treated"))
prices_densities[, n.groups := n.g]
prices_densities[, controls := FE]
prices_densities[, w := "base.sales.q"]
empirical_price_dist<- rbind(empirical_price_dist, prices_densities)
fwrite(empirical_price_dist, output.emp.price.dist)
}
## Produce IVs
# For each price quantile group, estimate with felm():
#   (1) the IV regression of quantity growth on price growth, instrumented by
#       the sales tax, with SEs clustered by module_by_state,
#   (2) the corresponding first stage (price on tax), and
#   (3) the reduced form (quantity on tax).
# Each coefficient table is appended to LRdiff_res and the results file is
# rewritten after every step so partial output survives an interruption.
for (q in unique(all_pi$quantile)) {
  if (nrow(all_pi[quantile == q]) > 0) {
    # felm formula: outcome ~ covariates | fixed effects | IV spec | cluster
    formula1 <- as.formula(paste0("DL.ln_quantity3 ~ 0 | ",
                                  FE,
                                  " | (DL.ln_cpricei2 ~ DL.ln_sales_tax) | module_by_state"))
    res1 <- felm(formula = formula1, data = all_pi[quantile == q],
                 weights = all_pi[quantile == q]$base.sales)
    ## attach results
    res1.dt <- data.table(coef(summary(res1)), keep.rownames=T)
    res1.dt[, outcome := "IV"]
    res1.dt[, controls := FE]
    res1.dt[, group := q]
    res1.dt[, n.groups := n.g]
    LRdiff_res <- rbind(LRdiff_res, res1.dt, fill = T)
    fwrite(LRdiff_res, iv.output.results.file)
    ## First-stage
    formula1 <- as.formula(paste0("DL.ln_cpricei2 ~ DL.ln_sales_tax | ", FE, " | 0 | module_by_state"))
    res1 <- felm(formula = formula1, data = all_pi[quantile == q],
                 weights = all_pi[quantile == q]$base.sales)
    ## attach results
    res1.dt <- data.table(coef(summary(res1)), keep.rownames=T)
    res1.dt[, outcome := "DL.ln_cpricei2"]
    res1.dt[, controls := FE]
    res1.dt[, group := q]
    res1.dt[, n.groups := n.g]
    LRdiff_res <- rbind(LRdiff_res, res1.dt, fill = T)
    fwrite(LRdiff_res, iv.output.results.file)
    ## Reduced-Form
    formula1 <- as.formula(paste0("DL.ln_quantity3 ~ DL.ln_sales_tax | ", FE, " | 0 | module_by_state"))
    res1 <- felm(formula = formula1, data = all_pi[quantile == q],
                 weights = all_pi[quantile == q]$base.sales)
    ## attach results
    res1.dt <- data.table(coef(summary(res1)), keep.rownames=T)
    res1.dt[, outcome := "DL.ln_quantity3"]
    res1.dt[, controls := FE]
    res1.dt[, group := q]
    res1.dt[, n.groups := n.g]
    LRdiff_res <- rbind(LRdiff_res, res1.dt, fill = T)
    fwrite(LRdiff_res, iv.output.results.file)
  }
}
}
}
|
#' Adds an uiOutput and renders an enhanced rhandsontable html widget
#'
#' @description dq_handsontable_output adds a fluidRow containing a column with
#' the given width, ready to support a dq_handsontable.
#'
#' @param id id of the element
#' @param width width of the table in bootstrap columns
#' @param offset optional offset of the column
#'
#' @return dq_handsontable_output: fluidRow containing the output fields
#' @rdname dq_render_handsontable
#' @export
dq_handsontable_output <- function(id, width = 12L, offset = 0L) {
  # Fail fast with a clear message if a required package is missing.
  # Previously the requireNamespace() results were silently discarded, so a
  # missing package only surfaced later as a cryptic error when the UI
  # elements below were constructed.
  if (!requireNamespace("rhandsontable", quietly = TRUE)) {
    stop("Package 'rhandsontable' is required for dq_handsontable_output()!",
         call. = FALSE)
  }
  if (!requireNamespace("shiny", quietly = TRUE)) {
    stop("Package 'shiny' is required for dq_handsontable_output()!",
         call. = FALSE)
  }
  # nothing to render without an id
  if (is.null(id)) return(NULL)
  # namespace for the sub elements (filters, paging) belonging to this table
  ns <- dq_NS(id)
  shiny::fluidRow(shiny::column(
    width, offset = offset,
    shiny::uiOutput(ns("filters")),          # row of filter inputs
    rhandsontable::rHandsontableOutput(id),  # the table widget itself
    shiny::uiOutput(ns("pages")),            # paging controls
    init()  # presumably dq's html dependencies (also appended in the renderer)
  ))
}
#' Adds an uiOutput and renders an enhanced rhandsontable html widget
#'
#' @description dq_render_handsontable renders a rhandsontable into the given
#' uiOutput id with the given data and parameters. Can also contain several
#' filters to filter the data and a feature to split the table into several
#' pages with a given page size. The function will also add all needed
#' observeEvents to establish the required functionalities. If table is not
#' readOnly, all user inputs will automatically be stored and updated
#' independently of any filters, sortings or pages.
#'
#' @param data data to show in the table, should be a data.frame'ish object, can
#' also be reactive(Val) or a reactiveValues object holding the data under the
#' given id (e.g. myReactiveValues[[id]] <- data). In case of reactiveVal(ues)
#' data will always be in sync with user inputs.
#' @param context the context used to specify all ui elements used for this
#' table, can be omitted which ends up in a randomly generated context
#' NOTE: this parameter is deprecated and will be removed soon
#' @param filters optional, adds filters for each column, types must be one of
#' "Text", "Select", "Range", "Date", "Auto" or "" (can be abbreviated) to add a
#' Text-, Select-, Range-, DateRange-, AutocompleteInput or none, vectors of
#' length one will add a filter of this type for each column and NA will try to
#' guess proper filters, can also contain nested lists specifying type and
#' initial value (e.g. list(list(type = "T", value = "init"), NA, "T", ...))
#' @param reset optional logical, specify whether to add a button to reset
#' filters and sort buttons to initial values or not
#' @param page_size optional integer, number of items per page, can be one of
#' 10, 25, 50, 100 or any other value(s) which will be added to this list, first
#' value will be used initially, NULL will disable paging at all
#' @param sorting optional, specify whether to add sort buttons for every column
#' or not, as normal rhandsontable sorting won't work properly when table is
#' paged, value can be logical of length one or a vector specifying the initial
#' sort "col"umn and "dir"ection e.g. c(dir="down", col="Colname")
#' @param columns optional, specify which columns to show in the table, useful
#' in combination with reactive values, which will still hold all the data
#' @param width_align optional boolean to align filter widths with hot columns,
#' should only be used with either horizontal_scroll, stretchH = "all" or a
#' table fitting in its output element
#' @param horizontal_scroll optional boolean to scroll the filter row according
#' to the hot table, especially useful for tables with many columns
#' @param table_param optional list, specify parameters to hand to rhandsontable
#' table element
#' @param cols_param optional list, specify parameters to hand to rhandsontable
#' cols elements
#' @param col_param optional list of lists to specify parameters to hand to
#' rhandsontable col elements
#' @param cell_param optional list of lists to specify parameters to hand to
#' rhandsontable cells
#' @param session shiny session object
#'
#' @return dq_render_handsontable: the given data
#' @author richard.kunze
#' @export
#' @seealso \code{\link[rhandsontable:rhandsontable]{rhandsontable}},
#' \code{\link[rhandsontable:hot_cols]{hot_cols}} and
#' \code{\link[rhandsontable:hot_col]{hot_col}}
#'
#' @examples ## Only run examples in interactive R sessions
#' if (interactive()) {
#'
#' library(shiny)
#' shinyApp(
#' ui = fluidPage(
#' dq_handsontable_output("randomTable", 9L)
#' ),
#' server = function(input, output, session) {
#' hw <- c("Hello", "my", "funny", "world!")
#' data <- data.frame(A = rep(hw, 500), B = hw[c(2,3,4,1)],
#' C = 1:500, D = Sys.Date() - 0:499, stringsAsFactors = FALSE)
#' dq_render_handsontable("randomTable", data,
#' filters = c("A", NA, NA, NA), sorting = c(dir = "up", col = "B"),
#' page_size = c(17L, 5L, 500L, 1000L), width_align = TRUE,
#' col_param = list(list(col = 1L, type = "dropdown", source = letters)),
#' cell_param = list(list(row = 2:9, col = 1:2, readOnly = TRUE))
#' )
#' }
#' )
#'
#' }
dq_render_handsontable <- function(
  id, data, context = NULL, filters = "T", page_size = 25L, reset = TRUE,
  sorting = NULL, columns = NULL, width_align = FALSE, horizontal_scroll = FALSE,
  table_param = NULL, cols_param = NULL, col_param = NULL, cell_param = NULL,
  session = shiny::getDefaultReactiveDomain()
) {
  requireNamespace("rhandsontable", quietly = TRUE)
  requireNamespace("shiny", quietly = TRUE)
  # initial settings
  if (is.null(id) || is.null(data) || is.null(session)) return()
  if (!missing(context)) {
    warning("Context parameter is deprecated and will be removed soon!")
  }
  # NULL/empty columns selects all columns (TRUE used as a column index)
  if (length(columns) == 0L) columns <- TRUE
  ns <- dq_NS(id)
  # keep app level input/output before scoping the session: the raw table
  # widget lives in the app scope, filters/paging live in the module scope
  app_input <- session$input
  app_output <- session$output
  session <- session$makeScope(id)
  input <- session$input
  output <- session$output
  table_data <- data
  dqv <- shiny::reactiveValues()
  paged <- length(page_size) > 0L && any(page_size > 0L)
  to_sort <- (length(sorting) > 0L && !identical(sorting, FALSE))
  # guards the sync observers below against re-reading data we just wrote back
  no_update <- FALSE
  filter_values <- shiny::reactive(get_filters(input))
  # reactive chain: dqv$full -> reduced (filtered) -> sorted -> hot (paged)
  reduced <- shiny::reactive({
    if (is.null(dqv$full)) return()
    if (is.null(filters)) {
      dqv$full[, columns, drop = FALSE]
    } else {
      f_vals <- filter_values()
      if (length(f_vals) == 0) return()
      # length-1 filter values are text filters, length-2 values are ranges
      l <- vapply(f_vals, length, 0L)
      df <- text_filter(dqv$full[, columns, drop = FALSE], f_vals[l == 1L])
      range_filter(df, f_vals[l == 2L])
    }
  })
  sorted <- shiny::reactive({
    if (to_sort && length(reduced())) sort_data(reduced(), dqv$sorting)
    else reduced()
  })
  hot <- shiny::reactive({
    if (paged && length(sorted())) {
      sel <- as.integer(input$pageSize)
      update_page(sorted(), input$pageNum, sel, session)
    } else {
      sorted()
    }
  })
  # keep dqv$full in sync with reactive data sources; plain data is copied once
  if (shiny::is.reactivevalues(table_data)) {
    shiny::observeEvent(table_data[[id]], {
      if (no_update) {
        # this invalidation was caused by our own write-back; skip one cycle
        no_update <<- FALSE
      } else {
        dqv$full <- as.data.frame(table_data[[id]])
        if (!is.null(filters)) {
          update_filters(dqv$full[, columns, drop = FALSE], filters, session)
        }
      }
    }, ignoreInit = TRUE)
    dqv$full <- as.data.frame(shiny::isolate(table_data[[id]]))
  } else if (shiny::is.reactive(table_data)) {
    shiny::observeEvent(table_data(), {
      if (no_update) {
        no_update <<- FALSE
      } else {
        dqv$full <- as.data.frame(table_data())
        if (!is.null(filters)) {
          update_filters(dqv$full[, columns, drop = FALSE], filters, session)
        }
      }
    }, ignoreInit = TRUE)
    dqv$full <- as.data.frame(shiny::isolate(table_data()))
  } else {
    dqv$full <- as.data.frame(table_data)
  }
  # define page_id which is needed for table rendering and reduce data to first page
  sorting <- check_sorting(sorting, to_sort, shiny::isolate(names(dqv$full)))
  # render filter row and add observer for filters
  output$filters <- shiny::renderUI({
    if (is.null(filters)) return()
    # add names(dq$full) dependency
    # NOTE: the condition is always TRUE; reading names(dqv$full) here only
    # serves to re-run this renderUI whenever the column names change
    if (TRUE || is.null(names(dqv$full))) {
      # correct filters according to (new?) dataset
      filters <<- correct_filters(filters, shiny::isolate(dqv$full[, columns, drop = FALSE]))
    }
    filter_row(ns, dqv, filters, columns, sorting, reset)
  })
  # merge default table/cols parameters with given ones
  # (user values win: defaults are appended after, duplicates then dropped)
  table_default <- list(readOnly = FALSE, stretchH = "all", contextMenu = FALSE)
  table_default <- append(table_param, table_default)
  table_default <- table_default[!duplicated(names(table_default))]
  cols_default <- list(colWidths = 1L, highlightCol = TRUE, dateFormat = "YYYY-MM-DD",
                       highlightRow = TRUE, manualColumnResize = TRUE)
  cols_default <- append(cols_param, cols_default)
  cols_default <- cols_default[!duplicated(names(cols_default))]
  params <- list(table_default, cols_default, col_param, cell_param)
  params[[1L]] <- add_scripts(params[[1L]], isTRUE(width_align),
                              isTRUE(horizontal_scroll))
  # render dq_handsontable
  app_output[[id]] <- rhandsontable::renderRHandsontable({
    if (is.null(hot())) return()
    params[[1L]]$data <- hot()
    params[[2L]]$hot <- do.call(rhandsontable::rhandsontable, params[[1L]])
    res <- do.call(rhandsontable::hot_cols, params[[2L]])
    for (x in params[[3L]]) {
      res <- do.call(rhandsontable::hot_col, append(list(res), x))
    }
    for (x in params[[4L]]) {
      # map requested row names onto the currently visible page, dropping
      # rows that are filtered/paged out
      x$row <- match(x$row, rownames(hot()))
      x$row <- x$row[!is.na(x$row)]
      res <- do.call(dq_hot_cell, append(list(res), x))
    }
    res$dependencies <- append(res$dependencies, init())
    res
  })
  # render paging row and add observer for inputs
  page_sizes <- sort(unique(c(page_size, 10L, 25L, 50L, 100L)))
  output$pages <- shiny::renderUI({
    if (paged) paging_row(ns, page_size[1L], page_sizes)
  })
  output$maxPages <- shiny::renderText({
    s <- as.integer(input$pageSize)
    paste("of ", ceiling(max(NROW(reduced()) / s, 1L)))
  })
  # add sort buttons
  if (to_sort) {
    sorts <- add_sorting_observer(
      input, session, dqv, page_size,
      shiny::isolate(names(dqv$full[, columns, drop = FALSE]))
    )
  }
  # add reset button
  if (reset) {
    shiny::observeEvent(input[["filter-reset"]], {
      # clear every filter input and reset sliders to their full range
      for (n in grep("^filter", names(input), value = TRUE)) {
        shiny::updateTextInput(session, n, value = "")
        reset_slider_input(n)
      }
      if (to_sort) {
        dqv$sorting <- list(dir = "", col = "")
        lapply(sorts, function(n) update_icon_state_button(session, n, value = 1L))
      }
    })
  }
  # add observer for table changes
  shiny::observeEvent(app_input[[id]], {
    if (!is.null(app_input[[id]]$changes$source)) {
      row_names <- as.character(rownames(rhandsontable::hot_to_r(app_input[[id]])))
      col_names <- names(hot())
      lapply(app_input[[id]]$changes$changes, function(ch) {
        # handsontable change indices are 0-based; shift to R's 1-based
        row <- ch[[1L]] + 1L
        col <- ch[[2L]] + 1L
        # ch[[4L]] is taken as the new cell value
        # (presumably a change is [row, col, old, new] -- TODO confirm)
        dqv$full[row_names[row], col_names[col]] <- ch[[4L]]
      })
      # write changes back to reactive sources; set no_update so the sync
      # observers above do not immediately re-read our own write
      if (shiny::is.reactivevalues(table_data)) {
        no_update <<- TRUE
        table_data[[id]] <- dqv$full
      } else if (inherits(table_data, "reactiveVal")) {
        no_update <<- TRUE
        table_data(dqv$full)
      }
      if (!is.null(filters)) {
        update_filters(dqv$full[, columns, drop = FALSE], filters, session)
      }
    }
  }, ignoreInit = TRUE)
  # return the current data, as documented in @return
  shiny::isolate(dqv$full)
}
#' @author richard.kunze
add_scripts <- function(params, width, scroll) {
  # Attach JS callbacks to the rhandsontable parameter list:
  # - afterRender keeps the filter row aligned with the rendered table and,
  #   when `width` is TRUE, also matches each filter input's width to its
  #   table column,
  # - afterScrollHorizontally keeps the filter row scrolled in sync with the
  #   table viewport when `scroll` is TRUE.
  if (width || scroll) {
    # assemble the callback line by line; the width-alignment part is only
    # included when requested (a NULL entry is dropped by c())
    js_lines <- c(
      "function() {",
      " var hider = $(this.rootElement).find('.wtHider');",
      " var $filter = $('#' + this.rootElement.id + '-filters');",
      " $filter.css('overflow', 'hidden');",
      " var row = $filter.find('.row');",
      " row.width(hider.width());",
      if (width) paste(
        " var els = $filter.find('.form-group');",
        " for (var i = 0; i < els.length; i++) {",
        "  $(els[i]).outerWidth($(this.getCell(0, i)).outerWidth());",
        " }", sep = "\n"
      ),
      "}"
    )
    params$afterRender <- htmlwidgets::JS(js_lines)
  }
  if (scroll) {
    params$afterScrollHorizontally <- htmlwidgets::JS(
      "function() {
      var $f = $('#' + this.rootElement.id + '-filters');
      $f.scrollLeft($(this.rootElement).find('.wtHolder').scrollLeft());
      }"
    )
  }
  params
}
|
/old_dependencies/dqshiny/R/dq_handsontable.R
|
no_license
|
bigliolimatteo/time_series_modeling_app
|
R
| false
| false
| 12,756
|
r
|
#' Adds an uiOutput and renders an enhanced rhandsontable html widget
#'
#' @description dq_handsontable_output adds a fluidRow containing a column with
#' the given width, ready to support a dq_handsontable.
#'
#' @param id id of the element
#' @param width width of the table in bootstrap columns
#' @param offset optional offset of the column
#'
#' @return dq_handsontable_output: fluidRow containing the output fields
#' @rdname dq_render_handsontable
#' @export
dq_handsontable_output <- function(id, width = 12L, offset = 0L) {
  # Fail fast with a clear message if a required package is missing.
  # Previously the requireNamespace() results were silently discarded, so a
  # missing package only surfaced later as a cryptic error when the UI
  # elements below were constructed.
  if (!requireNamespace("rhandsontable", quietly = TRUE)) {
    stop("Package 'rhandsontable' is required for dq_handsontable_output()!",
         call. = FALSE)
  }
  if (!requireNamespace("shiny", quietly = TRUE)) {
    stop("Package 'shiny' is required for dq_handsontable_output()!",
         call. = FALSE)
  }
  # nothing to render without an id
  if (is.null(id)) return(NULL)
  # namespace for the sub elements (filters, paging) belonging to this table
  ns <- dq_NS(id)
  shiny::fluidRow(shiny::column(
    width, offset = offset,
    shiny::uiOutput(ns("filters")),          # row of filter inputs
    rhandsontable::rHandsontableOutput(id),  # the table widget itself
    shiny::uiOutput(ns("pages")),            # paging controls
    init()  # presumably dq's html dependencies (also appended in the renderer)
  ))
}
#' Adds an uiOutput and renders an enhanced rhandsontable html widget
#'
#' @description dq_render_handsontable renders a rhandsontable into the given
#' uiOutput id with the given data and parameters. Can also contain several
#' filters to filter the data and a feature to split the table into several
#' pages with a given page size. The function will also add all needed
#' observeEvents to establish the required functionalities. If table is not
#' readOnly, all user inputs will automatically be stored and updated
#' independently of any filters, sortings or pages.
#'
#' @param data data to show in the table, should be a data.frame'ish object, can
#' also be reactive(Val) or a reactiveValues object holding the data under the
#' given id (e.g. myReactiveValues[[id]] <- data). In case of reactiveVal(ues)
#' data will always be in sync with user inputs.
#' @param context the context used to specify all ui elements used for this
#' table, can be omitted which ends up in a randomly generated context
#' NOTE: this parameter is deprecated and will be removed soon
#' @param filters optional, adds filters for each column, types must be one of
#' "Text", "Select", "Range", "Date", "Auto" or "" (can be abbreviated) to add a
#' Text-, Select-, Range-, DateRange-, AutocompleteInput or none, vectors of
#' length one will add a filter of this type for each column and NA will try to
#' guess proper filters, can also contain nested lists specifying type and
#' initial value (e.g. list(list(type = "T", value = "init"), NA, "T", ...))
#' @param reset optional logical, specify whether to add a button to reset
#' filters and sort buttons to initial values or not
#' @param page_size optional integer, number of items per page, can be one of
#' 10, 25, 50, 100 or any other value(s) which will be added to this list, first
#' value will be used initially, NULL will disable paging at all
#' @param sorting optional, specify whether to add sort buttons for every column
#' or not, as normal rhandsontable sorting won't work properly when table is
#' paged, value can be logical of length one or a vector specifying the initial
#' sort "col"umn and "dir"ection e.g. c(dir="down", col="Colname")
#' @param columns optional, specify which columns to show in the table, useful
#' in combination with reactive values, which will still hold all the data
#' @param width_align optional boolean to align filter widths with hot columns,
#' should only be used with either horizontal_scroll, stretchH = "all" or a
#' table fitting in its output element
#' @param horizontal_scroll optional boolean to scroll the filter row according
#' to the hot table, especially useful for tables with many columns
#' @param table_param optional list, specify parameters to hand to rhandsontable
#' table element
#' @param cols_param optional list, specify parameters to hand to rhandsontable
#' cols elements
#' @param col_param optional list of lists to specify parameters to hand to
#' rhandsontable col elements
#' @param cell_param optional list of lists to specify parameters to hand to
#' rhandsontable cells
#' @param session shiny session object
#'
#' @return dq_render_handsontable: the given data
#' @author richard.kunze
#' @export
#' @seealso \code{\link[rhandsontable:rhandsontable]{rhandsontable}},
#' \code{\link[rhandsontable:hot_cols]{hot_cols}} and
#' \code{\link[rhandsontable:hot_col]{hot_col}}
#'
#' @examples ## Only run examples in interactive R sessions
#' if (interactive()) {
#'
#' library(shiny)
#' shinyApp(
#' ui = fluidPage(
#' dq_handsontable_output("randomTable", 9L)
#' ),
#' server = function(input, output, session) {
#' hw <- c("Hello", "my", "funny", "world!")
#' data <- data.frame(A = rep(hw, 500), B = hw[c(2,3,4,1)],
#' C = 1:500, D = Sys.Date() - 0:499, stringsAsFactors = FALSE)
#' dq_render_handsontable("randomTable", data,
#' filters = c("A", NA, NA, NA), sorting = c(dir = "up", col = "B"),
#' page_size = c(17L, 5L, 500L, 1000L), width_align = TRUE,
#' col_param = list(list(col = 1L, type = "dropdown", source = letters)),
#' cell_param = list(list(row = 2:9, col = 1:2, readOnly = TRUE))
#' )
#' }
#' )
#'
#' }
dq_render_handsontable <- function(
  id, data, context = NULL, filters = "T", page_size = 25L, reset = TRUE,
  sorting = NULL, columns = NULL, width_align = FALSE, horizontal_scroll = FALSE,
  table_param = NULL, cols_param = NULL, col_param = NULL, cell_param = NULL,
  session = shiny::getDefaultReactiveDomain()
) {
  requireNamespace("rhandsontable", quietly = TRUE)
  requireNamespace("shiny", quietly = TRUE)
  # initial settings
  if (is.null(id) || is.null(data) || is.null(session)) return()
  if (!missing(context)) {
    warning("Context parameter is deprecated and will be removed soon!")
  }
  # NULL/empty columns selects all columns (TRUE used as a column index)
  if (length(columns) == 0L) columns <- TRUE
  ns <- dq_NS(id)
  # keep app level input/output before scoping the session: the raw table
  # widget lives in the app scope, filters/paging live in the module scope
  app_input <- session$input
  app_output <- session$output
  session <- session$makeScope(id)
  input <- session$input
  output <- session$output
  table_data <- data
  dqv <- shiny::reactiveValues()
  paged <- length(page_size) > 0L && any(page_size > 0L)
  to_sort <- (length(sorting) > 0L && !identical(sorting, FALSE))
  # guards the sync observers below against re-reading data we just wrote back
  no_update <- FALSE
  filter_values <- shiny::reactive(get_filters(input))
  # reactive chain: dqv$full -> reduced (filtered) -> sorted -> hot (paged)
  reduced <- shiny::reactive({
    if (is.null(dqv$full)) return()
    if (is.null(filters)) {
      dqv$full[, columns, drop = FALSE]
    } else {
      f_vals <- filter_values()
      if (length(f_vals) == 0) return()
      # length-1 filter values are text filters, length-2 values are ranges
      l <- vapply(f_vals, length, 0L)
      df <- text_filter(dqv$full[, columns, drop = FALSE], f_vals[l == 1L])
      range_filter(df, f_vals[l == 2L])
    }
  })
  sorted <- shiny::reactive({
    if (to_sort && length(reduced())) sort_data(reduced(), dqv$sorting)
    else reduced()
  })
  hot <- shiny::reactive({
    if (paged && length(sorted())) {
      sel <- as.integer(input$pageSize)
      update_page(sorted(), input$pageNum, sel, session)
    } else {
      sorted()
    }
  })
  # keep dqv$full in sync with reactive data sources; plain data is copied once
  if (shiny::is.reactivevalues(table_data)) {
    shiny::observeEvent(table_data[[id]], {
      if (no_update) {
        # this invalidation was caused by our own write-back; skip one cycle
        no_update <<- FALSE
      } else {
        dqv$full <- as.data.frame(table_data[[id]])
        if (!is.null(filters)) {
          update_filters(dqv$full[, columns, drop = FALSE], filters, session)
        }
      }
    }, ignoreInit = TRUE)
    dqv$full <- as.data.frame(shiny::isolate(table_data[[id]]))
  } else if (shiny::is.reactive(table_data)) {
    shiny::observeEvent(table_data(), {
      if (no_update) {
        no_update <<- FALSE
      } else {
        dqv$full <- as.data.frame(table_data())
        if (!is.null(filters)) {
          update_filters(dqv$full[, columns, drop = FALSE], filters, session)
        }
      }
    }, ignoreInit = TRUE)
    dqv$full <- as.data.frame(shiny::isolate(table_data()))
  } else {
    dqv$full <- as.data.frame(table_data)
  }
  # define page_id which is needed for table rendering and reduce data to first page
  sorting <- check_sorting(sorting, to_sort, shiny::isolate(names(dqv$full)))
  # render filter row and add observer for filters
  output$filters <- shiny::renderUI({
    if (is.null(filters)) return()
    # add names(dq$full) dependency
    # NOTE: the condition is always TRUE; reading names(dqv$full) here only
    # serves to re-run this renderUI whenever the column names change
    if (TRUE || is.null(names(dqv$full))) {
      # correct filters according to (new?) dataset
      filters <<- correct_filters(filters, shiny::isolate(dqv$full[, columns, drop = FALSE]))
    }
    filter_row(ns, dqv, filters, columns, sorting, reset)
  })
  # merge default table/cols parameters with given ones
  # (user values win: defaults are appended after, duplicates then dropped)
  table_default <- list(readOnly = FALSE, stretchH = "all", contextMenu = FALSE)
  table_default <- append(table_param, table_default)
  table_default <- table_default[!duplicated(names(table_default))]
  cols_default <- list(colWidths = 1L, highlightCol = TRUE, dateFormat = "YYYY-MM-DD",
                       highlightRow = TRUE, manualColumnResize = TRUE)
  cols_default <- append(cols_param, cols_default)
  cols_default <- cols_default[!duplicated(names(cols_default))]
  params <- list(table_default, cols_default, col_param, cell_param)
  params[[1L]] <- add_scripts(params[[1L]], isTRUE(width_align),
                              isTRUE(horizontal_scroll))
  # render dq_handsontable
  app_output[[id]] <- rhandsontable::renderRHandsontable({
    if (is.null(hot())) return()
    params[[1L]]$data <- hot()
    params[[2L]]$hot <- do.call(rhandsontable::rhandsontable, params[[1L]])
    res <- do.call(rhandsontable::hot_cols, params[[2L]])
    for (x in params[[3L]]) {
      res <- do.call(rhandsontable::hot_col, append(list(res), x))
    }
    for (x in params[[4L]]) {
      # map requested row names onto the currently visible page, dropping
      # rows that are filtered/paged out
      x$row <- match(x$row, rownames(hot()))
      x$row <- x$row[!is.na(x$row)]
      res <- do.call(dq_hot_cell, append(list(res), x))
    }
    res$dependencies <- append(res$dependencies, init())
    res
  })
  # render paging row and add observer for inputs
  page_sizes <- sort(unique(c(page_size, 10L, 25L, 50L, 100L)))
  output$pages <- shiny::renderUI({
    if (paged) paging_row(ns, page_size[1L], page_sizes)
  })
  output$maxPages <- shiny::renderText({
    s <- as.integer(input$pageSize)
    paste("of ", ceiling(max(NROW(reduced()) / s, 1L)))
  })
  # add sort buttons
  if (to_sort) {
    sorts <- add_sorting_observer(
      input, session, dqv, page_size,
      shiny::isolate(names(dqv$full[, columns, drop = FALSE]))
    )
  }
  # add reset button
  if (reset) {
    shiny::observeEvent(input[["filter-reset"]], {
      # clear every filter input and reset sliders to their full range
      for (n in grep("^filter", names(input), value = TRUE)) {
        shiny::updateTextInput(session, n, value = "")
        reset_slider_input(n)
      }
      if (to_sort) {
        dqv$sorting <- list(dir = "", col = "")
        lapply(sorts, function(n) update_icon_state_button(session, n, value = 1L))
      }
    })
  }
  # add observer for table changes
  shiny::observeEvent(app_input[[id]], {
    if (!is.null(app_input[[id]]$changes$source)) {
      row_names <- as.character(rownames(rhandsontable::hot_to_r(app_input[[id]])))
      col_names <- names(hot())
      lapply(app_input[[id]]$changes$changes, function(ch) {
        # handsontable change indices are 0-based; shift to R's 1-based
        row <- ch[[1L]] + 1L
        col <- ch[[2L]] + 1L
        # ch[[4L]] is taken as the new cell value
        # (presumably a change is [row, col, old, new] -- TODO confirm)
        dqv$full[row_names[row], col_names[col]] <- ch[[4L]]
      })
      # write changes back to reactive sources; set no_update so the sync
      # observers above do not immediately re-read our own write
      if (shiny::is.reactivevalues(table_data)) {
        no_update <<- TRUE
        table_data[[id]] <- dqv$full
      } else if (inherits(table_data, "reactiveVal")) {
        no_update <<- TRUE
        table_data(dqv$full)
      }
      if (!is.null(filters)) {
        update_filters(dqv$full[, columns, drop = FALSE], filters, session)
      }
    }
  }, ignoreInit = TRUE)
  # return the current data, as documented in @return
  shiny::isolate(dqv$full)
}
#' @author richard.kunze
add_scripts <- function(params, width, scroll) {
  # Attach JS callbacks to the rhandsontable parameter list:
  # - afterRender keeps the filter row aligned with the rendered table and,
  #   when `width` is TRUE, also matches each filter input's width to its
  #   table column,
  # - afterScrollHorizontally keeps the filter row scrolled in sync with the
  #   table viewport when `scroll` is TRUE.
  if (width || scroll) {
    # assemble the callback line by line; the width-alignment part is only
    # included when requested (a NULL entry is dropped by c())
    js_lines <- c(
      "function() {",
      " var hider = $(this.rootElement).find('.wtHider');",
      " var $filter = $('#' + this.rootElement.id + '-filters');",
      " $filter.css('overflow', 'hidden');",
      " var row = $filter.find('.row');",
      " row.width(hider.width());",
      if (width) paste(
        " var els = $filter.find('.form-group');",
        " for (var i = 0; i < els.length; i++) {",
        "  $(els[i]).outerWidth($(this.getCell(0, i)).outerWidth());",
        " }", sep = "\n"
      ),
      "}"
    )
    params$afterRender <- htmlwidgets::JS(js_lines)
  }
  if (scroll) {
    params$afterScrollHorizontally <- htmlwidgets::JS(
      "function() {
      var $f = $('#' + this.rootElement.id + '-filters');
      $f.scrollLeft($(this.rootElement).find('.wtHolder').scrollLeft());
      }"
    )
  }
  params
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/projectKNNs.R
\name{projectKNNs}
\alias{projectKNNs}
\title{Project a distance matrix into a lower-dimensional space.}
\usage{
projectKNNs(wij, dim = 2, sgd_batches = NULL, M = 5, gamma = 7,
alpha = 1, rho = 1, coords = NULL, useDegree = FALSE,
momentum = NULL, seed = NULL, threads = NULL,
verbose = getOption("verbose", TRUE))
}
\arguments{
\item{wij}{A symmetric sparse matrix of edge weights, in C-compressed format, as created with the \code{Matrix} package.}
\item{dim}{The number of dimensions for the projection space.}
\item{sgd_batches}{The number of edges to process during SGD. Defaults to a value set based on the size of the dataset. If the parameter given is
between \code{0} and \code{1}, the default value will be multiplied by the parameter.}
\item{M}{The number of negative edges to sample for each positive edge.}
\item{gamma}{The strength of the force pushing non-neighbor nodes apart.}
\item{alpha}{Hyperparameter used in the default distance function, \eqn{1 / (1 + \alpha \cdot ||y_i - y_j||^2)}. The function relates the distance
between points in the low-dimensional projection to the likelihood that the two points are nearest neighbors. Increasing \eqn{\alpha} tends
to push nodes and their neighbors closer together; decreasing \eqn{\alpha} produces a broader distribution. Setting \eqn{\alpha} to zero
enables the alternative distance function. \eqn{\alpha} below zero is meaningless.}
\item{rho}{Initial learning rate.}
\item{coords}{An initialized coordinate matrix.}
\item{useDegree}{Whether to use vertex degree to determine weights in negative sampling (if \code{TRUE}), or the sum of the vertex's edges (the default). See Notes.}
\item{momentum}{If not \code{NULL} (the default), SGD with momentum is used, with this multiplier, which must be between 0 and 1. Note that
momentum can drastically speed-up training time, at the cost of additional memory consumed.}
\item{seed}{Random seed to be passed to the C++ functions; sampled from hardware entropy pool if \code{NULL} (the default).
Note that if the seed is not \code{NULL} (the default), the maximum number of threads will be set to 1 in phases of the algorithm
that would otherwise be non-deterministic.}
\item{threads}{The maximum number of threads to spawn. Determined automatically if \code{NULL} (the default).}
\item{verbose}{Verbosity}
}
\value{
A dense [N,D] matrix of the coordinates projecting the w_ij matrix into the lower-dimensional space.
}
\description{
Takes as input a sparse matrix of the edge weights connecting each node to its nearest neighbors, and outputs
a matrix of coordinates embedding the inputs in a lower-dimensional space.
}
\details{
The algorithm attempts to estimate a \code{dim}-dimensional embedding using stochastic gradient descent and
negative sampling.
The objective function is: \deqn{ O = \sum_{(i,j)\in E} w_{ij} (\log f(||y_i - y_j||) + \sum_{k=1}^{M} E_{j_k \sim P_n(j)} \gamma \log(1 - f(||y_i - y_{j_k}||)))}
where \eqn{f()} is a probabilistic function relating the distance between two points in the low-dimensional projection space,
and the probability that they are nearest neighbors.
The default probabilistic function is \eqn{1 / (1 + \alpha \cdot ||x||^2)}. If \eqn{\alpha} is set to zero,
an alternative probabilistic function, \eqn{1 / (1 + \exp(x^2))} will be used instead.
Note that the input matrix should be symmetric. If any columns in the matrix are empty, the function will fail.
}
\note{
If specified, \code{seed} is passed to the C++ code and used to initialize the random number generator. This will not, however, be
sufficient to ensure reproducible results, because the initial coordinate matrix is generated using the \code{R} random number generator.
To ensure reproducibility, call \code{\link[base]{set.seed}} before calling this function, or pass it a pre-allocated coordinate matrix.
The original paper called for weights in negative sampling to be calculated according to the degree of each vertex, the number of edges
connecting to the vertex. The reference implementation, however, uses the sum of the weights of the edges to each vertex. In experiments, the
difference was imperceptible with small (MNIST-size) datasets, but the results seem aesthetically preferable using degree. The default
is to use the edge weights, consistent with the reference implementation.
}
\examples{
\dontrun{
data(CO2)
CO2$Plant <- as.integer(CO2$Plant)
CO2$Type <- as.integer(CO2$Type)
CO2$Treatment <- as.integer(CO2$Treatment)
co <- scale(as.matrix(CO2))
# Very small datasets often produce a warning regarding the alias table. This is safely ignored.
suppressWarnings(vis <- largeVis(t(co), K = 20, sgd_batches = 1, threads = 2))
suppressWarnings(coords <- projectKNNs(vis$wij, threads = 2))
plot(t(coords))
}
}
|
/man/projectKNNs.Rd
|
no_license
|
idroz/largeVis-R
|
R
| false
| true
| 4,865
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/projectKNNs.R
\name{projectKNNs}
\alias{projectKNNs}
\title{Project a distance matrix into a lower-dimensional space.}
\usage{
projectKNNs(wij, dim = 2, sgd_batches = NULL, M = 5, gamma = 7,
alpha = 1, rho = 1, coords = NULL, useDegree = FALSE,
momentum = NULL, seed = NULL, threads = NULL,
verbose = getOption("verbose", TRUE))
}
\arguments{
\item{wij}{A symmetric sparse matrix of edge weights, in C-compressed format, as created with the \code{Matrix} package.}
\item{dim}{The number of dimensions for the projection space.}
\item{sgd_batches}{The number of edges to process during SGD. Defaults to a value set based on the size of the dataset. If the parameter given is
between \code{0} and \code{1}, the default value will be multiplied by the parameter.}
\item{M}{The number of negative edges to sample for each positive edge.}
\item{gamma}{The strength of the force pushing non-neighbor nodes apart.}
\item{alpha}{Hyperparameter used in the default distance function, \eqn{1 / (1 + \alpha \cdot ||y_i - y_j||^2)}. The function relates the distance
between points in the low-dimensional projection to the likelihood that the two points are nearest neighbors. Increasing \eqn{\alpha} tends
to push nodes and their neighbors closer together; decreasing \eqn{\alpha} produces a broader distribution. Setting \eqn{\alpha} to zero
enables the alternative distance function. \eqn{\alpha} below zero is meaningless.}
\item{rho}{Initial learning rate.}
\item{coords}{An initialized coordinate matrix.}
\item{useDegree}{Whether to use vertex degree to determine weights in negative sampling (if \code{TRUE}), or the sum of the vertex's edges (the default). See Notes.}
\item{momentum}{If not \code{NULL} (the default), SGD with momentum is used, with this multiplier, which must be between 0 and 1. Note that
momentum can drastically speed-up training time, at the cost of additional memory consumed.}
\item{seed}{Random seed to be passed to the C++ functions; sampled from hardware entropy pool if \code{NULL} (the default).
Note that if the seed is not \code{NULL} (the default), the maximum number of threads will be set to 1 in phases of the algorithm
that would otherwise be non-deterministic.}
\item{threads}{The maximum number of threads to spawn. Determined automatically if \code{NULL} (the default).}
\item{verbose}{Verbosity}
}
\value{
A dense [N,D] matrix of the coordinates projecting the w_ij matrix into the lower-dimensional space.
}
\description{
Takes as input a sparse matrix of the edge weights connecting each node to its nearest neighbors, and outputs
a matrix of coordinates embedding the inputs in a lower-dimensional space.
}
\details{
The algorithm attempts to estimate a \code{dim}-dimensional embedding using stochastic gradient descent and
negative sampling.
The objective function is: \deqn{ O = \sum_{(i,j)\in E} w_{ij} (\log f(||y_i - y_j||) + \sum_{k=1}^{M} E_{j_k \sim P_n(j)} \gamma \log(1 - f(||y_i - y_{j_k}||)))}
where \eqn{f()} is a probabilistic function relating the distance between two points in the low-dimensional projection space,
and the probability that they are nearest neighbors.
The default probabilistic function is \eqn{1 / (1 + \alpha \cdot ||x||^2)}. If \eqn{\alpha} is set to zero,
an alternative probabilistic function, \eqn{1 / (1 + \exp(x^2))} will be used instead.
Note that the input matrix should be symmetric. If any columns in the matrix are empty, the function will fail.
}
\note{
If specified, \code{seed} is passed to the C++ code and used to initialize the random number generator. This will not, however, be
sufficient to ensure reproducible results, because the initial coordinate matrix is generated using the \code{R} random number generator.
To ensure reproducibility, call \code{\link[base]{set.seed}} before calling this function, or pass it a pre-allocated coordinate matrix.
The original paper called for weights in negative sampling to be calculated according to the degree of each vertex, the number of edges
connecting to the vertex. The reference implementation, however, uses the sum of the weights of the edges to each vertex. In experiments, the
difference was imperceptible with small (MNIST-size) datasets, but the results seem aesthetically preferable using degree. The default
is to use the edge weights, consistent with the reference implementation.
}
\examples{
\dontrun{
data(CO2)
CO2$Plant <- as.integer(CO2$Plant)
CO2$Type <- as.integer(CO2$Type)
CO2$Treatment <- as.integer(CO2$Treatment)
co <- scale(as.matrix(CO2))
# Very small datasets often produce a warning regarding the alias table. This is safely ignored.
suppressWarnings(vis <- largeVis(t(co), K = 20, sgd_batches = 1, threads = 2))
suppressWarnings(coords <- projectKNNs(vis$wij, threads = 2))
plot(t(coords))
}
}
|
###############################################
### Resultat Generale #######
###############################################
###############################################
### 1- HN_GARCH_ESS_Returns #######
###############################################
##a0=para_h[1]; a1=para_h[2]; gama=para_h[3]; b1= para_h[4] ; lamda0= para_h[5]
Sol_1=C(2.697046e-13, 2.250781e-05, 8.973734e+00, 8.793940e-01, 1.037815e+00)
se_1=C(1.404920e-02, 1.468525e-06, 1.405362e-02, 1.450208e-02, 1.423384e-02)
> RMSE1$in
[1] 0.05742586
> RMSE2$out
[1] 0.07281952
> RMSE1$we
[1] 0.06380394
> MPE
[1] 2.155738
> MAE
[1] 2.232735
> MAE2
[1] 2.232735
> Vrmse
[1] 51.445
###############################################
### 2- HN_GARCH_ESS_Returns_VIX #######
###############################################
### a0=para_h[1]; a1=para_h[2]; gama=para_h[3]; b1= para_h[4] ; lamda0= para_h[5] ; ro=para_h[6]
Sol_2=C(3.285081e-05, 3.600148e-04, 9.258200e+00, 2.510017e-01, 1.353767e-06, 9.718883e-01)
se_2=C(2.842440e-06, 5.193550e-05, 2.021788e-03, 2.230861e-03, 2.061679e-06, 2.021286e-03)
> RMSE$in
[1] 0.05629081
> RMSE$out
[1] 0.06630924
> RMSE$we
[1] 0.05866588
> MPE
[1] 1.768878
> MAE
[1] 1.919463
> MAE2
[1] 1.919463
> Vrmse
[1] 44.58743
###############################################
### 3- HN_GARCH_ESS_Returns_Option #######
###############################################
##a0=para_h[1]; a1=para_h[2]; gama=para_h[3]; b1= para_h[4] ; lamda0= para_h[5]
Sol_3=c(1.854299e-04, 3.345238e-04 ,0.142406e+01 ,1.124012e-03 ,6.573458e-01)
# RMSE
> RMSE1$in
[1] 0.05589917
> RMSE1$out
[1] 0.0651246
> RMSE1$we
[1] 0.0580351
> MPE
[1] 1.904807
> MAE
[1] 2.027195
> MAE2
[1] 2.027195
> Vrmse
[1] 46.89971
###############################################
### 4- HN_GARCH_Qua_Returns_VIX #######
###############################################
### a0=para_h[1]; a1=para_h[2]; gama=para_h[3]; b1= para_h[4] ; lamda0= para_h[5] ; Pi=para_h[6] ; ro=para_h[7]
Sol_4=c(1.587016e-06 ,2.219773e-04 ,7.262716e+00 ,5.113005e-01 ,1.810804e+00 ,1.001000e+00,8.940039e-01)
se_4=C(6.448236e-08, 6.448125e-04, 6.396021e-04, 6.396128e-04, 6.396022e-04, 6.396025e-04, 6.396021e-04)
# RMSE
> RMSE$in
[1] 0.05454388
> RMSE$out
[1] 0.06336831
> RMSE$we
[1] 0.05762633
> MPE
[1] 1.494109
> MAE
[1] 1.673732
> MAE2
[1] 1.673732
> Vrmse
[1] 34.85252
###############################################
### 5- HN_GARCH_Qua_Returns_OPtion #######
###############################################
##a0=para_h[1]; a1=para_h[2]; gama=para_h[3]; b1= para_h[4] ; lamda0= para_h[5] ; Pi=para_h[6]
Sol_4=c(2.278319e-05, 1.969895e-04, 9.964591e+00, 1.419832e-01, 1.723114e-03, 1.272333e+00)
# RMSE
> RMSE$in
[1] 0.05354098
> RMSE$out
[1] 0.06270406
> RMSE$we
[1] 0.05734967
> MPE
[1] 1.176335
> MAE
[1] 1.732023
> MAE2
[1] 1.732023
> Vrmse
[1] 39.29519
######################################
### 6- IG_GARCH_Returns ######
######################################
## w=para_h[1]; b=para_h[2]; a=para_h[3]; c= para_h[4]; neta=para_h[5] ; nu=para_h[6];
Sol_6=c(9.824101e-06, 1.215742e-03, 3.319843e+03, 4.543030e-05, -7.531343e-03, 1.258401e+02)
se_6=C(2.129407e-07, 2.572956e-04 ,2.196540e-04 ,1.167541e-07, 2.008657e-05, 2.196547e-04)
# RMSE
> RMSE1$in
[1] 0.03436143
> RMSE2$out
[1] 0.0454152
> RMSE2$we
[1] 0.04040568
> MPE
[1] 1.587222
> MAE
[1] 1.587713
> MAE2
[1] 1.587713
> Vrmse
[1] 38.32732
###########################################
### 7- IG_GARCH_Returns_VIX ######
###########################################
## w=para_h[1]; b=para_h[2]; a=para_h[3]; c= para_h[4]; neta=para_h[5] ; nu=para_h[6];
Sol_7=c(9.427303e-06, 2.051123e-03 , 3.317425e+03 , 4.725221e-05 ,-7.973112e-03 , 1.258399e+02 ,9.946611e-01)
se_7=C(9.356137e-05, 1.119981e-04, 9.356927e-05, 1.434307e-06, 9.359197e-05, 9.356968e-05, 9.357352e-05)
# RMSE
> RMSE1$in
[1] 0.03381406
> RMSE2$out
[1] 0.04287557
> RMSE2$we
[1] 0.03891758
> MPE
[1] 1.166707
> MAE
[1] 1.210328
> MAE2
[1] 1.210328
> Vrmse
[1] 28.88739
#####################################################
### 8- IG_GARCH_Returns_option #######
#####################################################
##w=para_h[1]; b=para_h[2]; a=para_h[3]; c= para_h[4]; neta=para_h[5] ; nu=para_h[6]
Sol_8=c(3.249840e-06 , 0.077376e-03 ,3.3013217e+03 , 5.04031e-05, -8.278782e-03 , 1.25631e+02 )
se_8=C(1.32858e-07, 7.404556e-04, 2.079540e-04 , 3.85941e-07, 3.28657e-05, 1.27947e-04)
# RMSE
> RMSE1$in
[1] 0.03335749
> RMSE2$out
[1] 0.04190233
> RMSE2$we
[1] 0.0383845
> MPE
[1] 1.308704
> MAE
[1] 1.326823
> MAE2
[1] 1.326823
> Vrmse
[1] 31.77595
##################################################
### 9- IG_GARCH_Returns_VIX #######
##################################################
# w=para_h[1]; b=para_h[2]; a=para_h[3]; c= para_h[4]; neta=para_h[5] ; nu=para_h[6] ; PI=para_h[7] ; ro=para_h[8]
Sol_9=c(1.003502e-05 , 2.417558e-03 , 3.317749e+03, 4.525727e-05, -7.519624e-03 , 1.258809e+02, 1.100000e+00, 9.959896e-01)
se_9=C(1.628877e-07, 9.720065e-05, 8.309831e-05, 3.770727e-07, 8.312097e-05, 8.309859e-05, 8.309834e-05, 8.763270e-05)
# RMSE
> RMSE1$in
[1] 0.03379284
> RMSE2$out
[1] 0.0412024
> RMSE2$we
[1] 0.0365899
> MPE
[1] 1.113527
> MAE
[1] 1.16912
> MAE2
[1] 1.16912
> Vrmse
[1] 27.8605
##################################################
### 10- IG_GARCH_Returns_option #######
##################################################
# w=para_h[1]; b=para_h[2]; a=para_h[3]; c= para_h[4]; neta=para_h[5] ; nu=para_h[6] ; PI=para_h[7]
Sol_10=c(1.010747e-05 ,2.282316e-03, 3.317425e+03 , 4.514664e-05 ,-7.499894e-03, 1.258394e+02 ,1.100010e+00)
se_10=C(4.68491490e-06, 1.6817655e-07, 4.6817651e-06, 4.81465164e-06, 9.681465416e-03, 7.99451e-05, 2.210526e-06)
# RMSE
> RMSE1$in
[1] 0.03327742
> RMSE2$out
[1] 0.04005787
> RMSE2$we
[1] 0.03640032
> MPE
[1] 1.231865
> MAE
[1] 1.268672
> MAE2
[1] 1.268672
> Vrmse
[1] 30.19202
#############################################
### 11- GJR_GARCH_Returns #######
#############################################
##a0=para_h[1]; a1=para_h[2]; a2=para_h[3]; b1= para_h[4] ; lamda0= para_h[5]
Sol_11=c(1.445949e-05, 3.107684e-01, 1.055816e-01, 6.311811e-01, 4.208730e-03)
se_11=C(1.698409e-06, 1.139433e-02, 9.096206e-03, 1.105324e-02, 8.979822e-03)
# RMSE
> RMSE1$in
[1] 0.0575311
> RMSE2$out
[1] 0.07227221
> RMSE2$we
[1] 0.06368361
> MPE
[1] 0.020954
> MAE
[1] 0.021782
> MAE2
[1] 0.021782
> Vrmse
[1] 18.45451
> MPE
[1] 0.4439605
> MAE
[1] 0.6298235
> MAE2
[1] 0.6298235
> Vrmse
[1] 18.45451
#############################################
### 12- GJR_GARCH_Returns_VIX #######
#############################################
##a0=para_h[1]; a1=para_h[2]; a2=para_h[3]; b1= para_h[4] ; lamda0= para_h[5] ; ro=para_h[6]
Sol_12=c(4.966114e-06, 1.240920e-01, 2.314276e-02, 8.504266e-01, 1.989254e-01, 8.924053e-01)
se_12=C(1.233970e-06, 3.752923e-03, 4.757924e-03, 3.692452e-03, 3.462100e-03, 3.419816e-03)
# RMSE
> RMSE1$in
[1] 0.055115937
> RMSE2$out
[1] 0.06619975
> RMSE2$we
[1] 0.0631625
> MPE
[1] -0.3462374
> MAE
[1] 0.4104046
> MAE2
[1] 0.4104046
> Vrmse
[1] 13.94554
#######################################
### 13- NGARCH_Returns #######
#######################################
Sol_13=c(2.027476e-05 , 9.992334e-01 , 7.237317e-09 , 1.025128e+00, -1.338783e-01, 6.037568e+00, -2.645889e+00)
se_13=C(4.932143e-06, 1.857258e-04, 1.533786e-08, 1.531803e-04, 1.531834e-04, 1.531803e-04, 1.531803e-04)
# RMSE
> RMSE1$in
[1] 0.05661986
> RMSE2$out
[1] 0.07257198
> RMSE2$we
[1] 0.06366201
> MPE
[1] 0.4439605
> MAE
[1] 0.6298235
> MAE2
[1] 0.6298235
> Vrmse
[1] 18.45451
#######################################
### 14- NGARCH_Ret-vix #######
#######################################
## a0=para_h[1]; b1=para_h[2]; a1=para_h[3]; gama= para_h[4]; lambda= para_h[5]; a=para_h[6]; b=para_h[7] ; ro=para_h[8]## ; c=para_h[5]; d=para_h[6] ; ro=para_h[8]
Sol_12=c(4.705257e-06, 7.957262e-01, 6.170762e-02, 1.394690e+00, 5.144851e-02, 1.795145e+00 ,-2.685911e-01, 9.541714e-01)
se_12=C(2.507650e-07, 6.415229e-04, 5.511385e-04, 5.550372e-04, 5.545759e-04, 5.367108e-04, 5.367107e-04, 5.386347e-04)
# RMSE
> RMSE1$in
[1] 0.05529999
> RMSE2$out
[1] 0.066078389
> RMSE2$we
[1] 0.06307517
> MPE
[1] -0.5589676
> MAE
[1] 0.566944
> MAE2
[1] 0.566944
> Vrmse
[1] 18.14527
#########################################
### 15-NIG_HN_GARCH_ret #######
#########################################
### a0=para_h[1]; a1=para_h[2]; gama=para_h[3]; b1= para_h[4] ; lamda0= para_h[5] ; ro=para_h[6]
Sol_15_Vol=c(1.862873e-12, 2.250036e-05, 4.804450e+01, 8.274026e-01 ,4.921045e+00)
se_15_Vol=C(2.091005e-12, 2.091005e-08, 2.091004e-08, 2.091004e-08, 2.091004e-08)
Sol_15_Dis=c(1.24017703, -0.03604831, 1.42421603, 1.78017616)
se_15_Dis=C(0.099042750, 0.008793694 , 0.00135326644, 0.0087537762021)
# RMSE
> RMSE1$in
[1] 0.05351607
> RMSE2$out
[1] 0.07095312
> RMSE2$we
[1] 0.06234915
> MPE
[1] -0.5589676
> MAE
[1] 0.566944
> MAE2
[1] 0.566944
> Vrmse
[1] 18.14527
#########################################
### 16-NIG_HN_GARCH_ret_vix #######
#########################################
Sol_16_Vol=c(3.788371e-13, 1.520721e-06, 4.651210e+02, 6.620008e-01, 4.400007e-01, 9.646967e-01)
se_16_Vol=C(3.626398e-13, 3.626398e-09, 3.626376e-09, 3.626376e-09, 3.626376e-09, 3.626376e-09)
Sol_16_Dis=c(1.337329911, -0.004432882 , 1.551758651 , 1.424519069)
se_16_Dis=C(0.001101934 , 0.007270608 ,0.00148702057, 0.002726753)
# RMSE
> RMSE1$in
[1] 0.05226903
> RMSE2$out
[1] 0.06419075
> RMSE2$we
[1] 0.06195388
> MPE
[1] -0.5589676
> MAE
[1] 0.566944
> MAE2
[1] 0.566944
> Vrmse
[1] 18.14527
#########################################
### 17-NIG_HN_GARCH_ret_vix #######
#########################################
Sol_17_Dis=c(0.541743068,-0.005313759,0.839760873,1.792759694)
se_17_Dis=C( 0.025217477, 0.00770, 0.00680 , 0.00355)
# RMSE
> RMSE1$in
[1] 0.05279462
> RMSE2$out
[1] 0.06430162
> RMSE2$we
[1] 0.06202299
> MPE
[1] -0.5589676
> MAE
[1] 0.566944
> MAE2
[1] 0.566944
> Vrmse
[1] 18.14527
#############################################
### 18- GJR_GARCH_Returns #######
#############################################
##a0=para_h[1]; a1=para_h[2]; a2=para_h[3]; b1= para_h[4] ; lamda0= para_h[5]
Sol_18_dis=c(1.270250581, -0.002520285 , 1.620497081 , 1.973447871)
se_18_dis=C(0.00084284, 0.00448010,0.00305425,0.00952546)
# RMSE
> RMSE1$in
[1] 0.05486513
> RMSE2$out
[1] 0.07194120
> RMSE2$we
[1] 0.06276413
> MPE
[1] 0.4439605
> MAE
[1] 0.6298235
> MAE2
[1] 0.6298235
> Vrmse
[1] 18.45451
#############################################
### 19- GJR_GARCH_Returns_VIX #######
#############################################
##a0=para_h[1]; a1=para_h[2]; a2=para_h[3]; b1= para_h[4] ; lamda0= para_h[5] ; ro=para_h[6]
Sol_19=c(1.269943539, -0.002488772, 1.620073612, 2.088112106)
se_19=C(0.0009133685822, 0.0000913399, 0.0008702860938, 0.0000870)
# RMSE
> RMSE1$in
[1] 0.0533685822
> RMSE2$out
[1] 0.0632860938
> RMSE2$we
[1] 0.0612541291
> MPE
[1] -0.3462374
> MAE
[1] 0.4104046
> MAE2
[1] 0.4104046
> Vrmse
[1] 13.94554
|
/Simulation_juin2018/Resultat_all.R
|
no_license
|
Fanirisoa/dynamic_pricing
|
R
| false
| false
| 11,831
|
r
|
###############################################
### Resultat Generale #######
###############################################
###############################################
### 1- HN_GARCH_ESS_Returns #######
###############################################
##a0=para_h[1]; a1=para_h[2]; gama=para_h[3]; b1= para_h[4] ; lamda0= para_h[5]
Sol_1=C(2.697046e-13, 2.250781e-05, 8.973734e+00, 8.793940e-01, 1.037815e+00)
se_1=C(1.404920e-02, 1.468525e-06, 1.405362e-02, 1.450208e-02, 1.423384e-02)
> RMSE1$in
[1] 0.05742586
> RMSE2$out
[1] 0.07281952
> RMSE1$we
[1] 0.06380394
> MPE
[1] 2.155738
> MAE
[1] 2.232735
> MAE2
[1] 2.232735
> Vrmse
[1] 51.445
###############################################
### 2- HN_GARCH_ESS_Returns_VIX #######
###############################################
### a0=para_h[1]; a1=para_h[2]; gama=para_h[3]; b1= para_h[4] ; lamda0= para_h[5] ; ro=para_h[6]
Sol_2=C(3.285081e-05, 3.600148e-04, 9.258200e+00, 2.510017e-01, 1.353767e-06, 9.718883e-01)
se_2=C(2.842440e-06, 5.193550e-05, 2.021788e-03, 2.230861e-03, 2.061679e-06, 2.021286e-03)
> RMSE$in
[1] 0.05629081
> RMSE$out
[1] 0.06630924
> RMSE$we
[1] 0.05866588
> MPE
[1] 1.768878
> MAE
[1] 1.919463
> MAE2
[1] 1.919463
> Vrmse
[1] 44.58743
###############################################
### 3- HN_GARCH_ESS_Returns_Option #######
###############################################
##a0=para_h[1]; a1=para_h[2]; gama=para_h[3]; b1= para_h[4] ; lamda0= para_h[5]
Sol_3=c(1.854299e-04, 3.345238e-04 ,0.142406e+01 ,1.124012e-03 ,6.573458e-01)
# RMSE
> RMSE1$in
[1] 0.05589917
> RMSE1$out
[1] 0.0651246
> RMSE1$we
[1] 0.0580351
> MPE
[1] 1.904807
> MAE
[1] 2.027195
> MAE2
[1] 2.027195
> Vrmse
[1] 46.89971
###############################################
### 4- HN_GARCH_Qua_Returns_VIX #######
###############################################
### a0=para_h[1]; a1=para_h[2]; gama=para_h[3]; b1= para_h[4] ; lamda0= para_h[5] ; Pi=para_h[6] ; ro=para_h[7]
Sol_4=c(1.587016e-06 ,2.219773e-04 ,7.262716e+00 ,5.113005e-01 ,1.810804e+00 ,1.001000e+00,8.940039e-01)
se_4=C(6.448236e-08, 6.448125e-04, 6.396021e-04, 6.396128e-04, 6.396022e-04, 6.396025e-04, 6.396021e-04)
# RMSE
> RMSE$in
[1] 0.05454388
> RMSE$out
[1] 0.06336831
> RMSE$we
[1] 0.05762633
> MPE
[1] 1.494109
> MAE
[1] 1.673732
> MAE2
[1] 1.673732
> Vrmse
[1] 34.85252
###############################################
### 5- HN_GARCH_Qua_Returns_OPtion #######
###############################################
##a0=para_h[1]; a1=para_h[2]; gama=para_h[3]; b1= para_h[4] ; lamda0= para_h[5] ; Pi=para_h[6]
Sol_4=c(2.278319e-05, 1.969895e-04, 9.964591e+00, 1.419832e-01, 1.723114e-03, 1.272333e+00)
# RMSE
> RMSE$in
[1] 0.05354098
> RMSE$out
[1] 0.06270406
> RMSE$we
[1] 0.05734967
> MPE
[1] 1.176335
> MAE
[1] 1.732023
> MAE2
[1] 1.732023
> Vrmse
[1] 39.29519
######################################
### 6- IG_GARCH_Returns ######
######################################
## w=para_h[1]; b=para_h[2]; a=para_h[3]; c= para_h[4]; neta=para_h[5] ; nu=para_h[6];
Sol_6=c(9.824101e-06, 1.215742e-03, 3.319843e+03, 4.543030e-05, -7.531343e-03, 1.258401e+02)
se_6=C(2.129407e-07, 2.572956e-04 ,2.196540e-04 ,1.167541e-07, 2.008657e-05, 2.196547e-04)
# RMSE
> RMSE1$in
[1] 0.03436143
> RMSE2$out
[1] 0.0454152
> RMSE2$we
[1] 0.04040568
> MPE
[1] 1.587222
> MAE
[1] 1.587713
> MAE2
[1] 1.587713
> Vrmse
[1] 38.32732
###########################################
### 7- IG_GARCH_Returns_VIX ######
###########################################
## w=para_h[1]; b=para_h[2]; a=para_h[3]; c= para_h[4]; neta=para_h[5] ; nu=para_h[6];
Sol_7=c(9.427303e-06, 2.051123e-03 , 3.317425e+03 , 4.725221e-05 ,-7.973112e-03 , 1.258399e+02 ,9.946611e-01)
se_7=C(9.356137e-05, 1.119981e-04, 9.356927e-05, 1.434307e-06, 9.359197e-05, 9.356968e-05, 9.357352e-05)
# RMSE
> RMSE1$in
[1] 0.03381406
> RMSE2$out
[1] 0.04287557
> RMSE2$we
[1] 0.03891758
> MPE
[1] 1.166707
> MAE
[1] 1.210328
> MAE2
[1] 1.210328
> Vrmse
[1] 28.88739
#####################################################
### 8- IG_GARCH_Returns_option #######
#####################################################
##w=para_h[1]; b=para_h[2]; a=para_h[3]; c= para_h[4]; neta=para_h[5] ; nu=para_h[6]
Sol_8=c(3.249840e-06 , 0.077376e-03 ,3.3013217e+03 , 5.04031e-05, -8.278782e-03 , 1.25631e+02 )
se_8=C(1.32858e-07, 7.404556e-04, 2.079540e-04 , 3.85941e-07, 3.28657e-05, 1.27947e-04)
# RMSE
> RMSE1$in
[1] 0.03335749
> RMSE2$out
[1] 0.04190233
> RMSE2$we
[1] 0.0383845
> MPE
[1] 1.308704
> MAE
[1] 1.326823
> MAE2
[1] 1.326823
> Vrmse
[1] 31.77595
##################################################
### 9- IG_GARCH_Returns_VIX #######
##################################################
# w=para_h[1]; b=para_h[2]; a=para_h[3]; c= para_h[4]; neta=para_h[5] ; nu=para_h[6] ; PI=para_h[7] ; ro=para_h[8]
Sol_9=c(1.003502e-05 , 2.417558e-03 , 3.317749e+03, 4.525727e-05, -7.519624e-03 , 1.258809e+02, 1.100000e+00, 9.959896e-01)
se_9=C(1.628877e-07, 9.720065e-05, 8.309831e-05, 3.770727e-07, 8.312097e-05, 8.309859e-05, 8.309834e-05, 8.763270e-05)
# RMSE
> RMSE1$in
[1] 0.03379284
> RMSE2$out
[1] 0.0412024
> RMSE2$we
[1] 0.0365899
> MPE
[1] 1.113527
> MAE
[1] 1.16912
> MAE2
[1] 1.16912
> Vrmse
[1] 27.8605
##################################################
### 10- IG_GARCH_Returns_option #######
##################################################
# w=para_h[1]; b=para_h[2]; a=para_h[3]; c= para_h[4]; neta=para_h[5] ; nu=para_h[6] ; PI=para_h[7]
Sol_10=c(1.010747e-05 ,2.282316e-03, 3.317425e+03 , 4.514664e-05 ,-7.499894e-03, 1.258394e+02 ,1.100010e+00)
se_10=C(4.68491490e-06, 1.6817655e-07, 4.6817651e-06, 4.81465164e-06, 9.681465416e-03, 7.99451e-05, 2.210526e-06)
# RMSE
> RMSE1$in
[1] 0.03327742
> RMSE2$out
[1] 0.04005787
> RMSE2$we
[1] 0.03640032
> MPE
[1] 1.231865
> MAE
[1] 1.268672
> MAE2
[1] 1.268672
> Vrmse
[1] 30.19202
#############################################
### 11- GJR_GARCH_Returns #######
#############################################
##a0=para_h[1]; a1=para_h[2]; a2=para_h[3]; b1= para_h[4] ; lamda0= para_h[5]
Sol_11=c(1.445949e-05, 3.107684e-01, 1.055816e-01, 6.311811e-01, 4.208730e-03)
se_11=C(1.698409e-06, 1.139433e-02, 9.096206e-03, 1.105324e-02, 8.979822e-03)
# RMSE
> RMSE1$in
[1] 0.0575311
> RMSE2$out
[1] 0.07227221
> RMSE2$we
[1] 0.06368361
> MPE
[1] 0.020954
> MAE
[1] 0.021782
> MAE2
[1] 0.021782
> Vrmse
[1] 18.45451
> MPE
[1] 0.4439605
> MAE
[1] 0.6298235
> MAE2
[1] 0.6298235
> Vrmse
[1] 18.45451
#############################################
### 12- GJR_GARCH_Returns_VIX #######
#############################################
##a0=para_h[1]; a1=para_h[2]; a2=para_h[3]; b1= para_h[4] ; lamda0= para_h[5] ; ro=para_h[6]
Sol_12=c(4.966114e-06, 1.240920e-01, 2.314276e-02, 8.504266e-01, 1.989254e-01, 8.924053e-01)
se_12=C(1.233970e-06, 3.752923e-03, 4.757924e-03, 3.692452e-03, 3.462100e-03, 3.419816e-03)
# RMSE
> RMSE1$in
[1] 0.055115937
> RMSE2$out
[1] 0.06619975
> RMSE2$we
[1] 0.0631625
> MPE
[1] -0.3462374
> MAE
[1] 0.4104046
> MAE2
[1] 0.4104046
> Vrmse
[1] 13.94554
#######################################
### 13- NGARCH_Returns #######
#######################################
Sol_13=c(2.027476e-05 , 9.992334e-01 , 7.237317e-09 , 1.025128e+00, -1.338783e-01, 6.037568e+00, -2.645889e+00)
se_13=C(4.932143e-06, 1.857258e-04, 1.533786e-08, 1.531803e-04, 1.531834e-04, 1.531803e-04, 1.531803e-04)
# RMSE
> RMSE1$in
[1] 0.05661986
> RMSE2$out
[1] 0.07257198
> RMSE2$we
[1] 0.06366201
> MPE
[1] 0.4439605
> MAE
[1] 0.6298235
> MAE2
[1] 0.6298235
> Vrmse
[1] 18.45451
#######################################
### 14- NGARCH_Ret-vix #######
#######################################
## a0=para_h[1]; b1=para_h[2]; a1=para_h[3]; gama= para_h[4]; lambda= para_h[5]; a=para_h[6]; b=para_h[7] ; ro=para_h[8]## ; c=para_h[5]; d=para_h[6] ; ro=para_h[8]
Sol_12=c(4.705257e-06, 7.957262e-01, 6.170762e-02, 1.394690e+00, 5.144851e-02, 1.795145e+00 ,-2.685911e-01, 9.541714e-01)
se_12=C(2.507650e-07, 6.415229e-04, 5.511385e-04, 5.550372e-04, 5.545759e-04, 5.367108e-04, 5.367107e-04, 5.386347e-04)
# RMSE
> RMSE1$in
[1] 0.05529999
> RMSE2$out
[1] 0.066078389
> RMSE2$we
[1] 0.06307517
> MPE
[1] -0.5589676
> MAE
[1] 0.566944
> MAE2
[1] 0.566944
> Vrmse
[1] 18.14527
#########################################
### 15-NIG_HN_GARCH_ret #######
#########################################
### a0=para_h[1]; a1=para_h[2]; gama=para_h[3]; b1= para_h[4] ; lamda0= para_h[5] ; ro=para_h[6]
Sol_15_Vol=c(1.862873e-12, 2.250036e-05, 4.804450e+01, 8.274026e-01 ,4.921045e+00)
se_15_Vol=C(2.091005e-12, 2.091005e-08, 2.091004e-08, 2.091004e-08, 2.091004e-08)
Sol_15_Dis=c(1.24017703, -0.03604831, 1.42421603, 1.78017616)
se_15_Dis=C(0.099042750, 0.008793694 , 0.00135326644, 0.0087537762021)
# RMSE
> RMSE1$in
[1] 0.05351607
> RMSE2$out
[1] 0.07095312
> RMSE2$we
[1] 0.06234915
> MPE
[1] -0.5589676
> MAE
[1] 0.566944
> MAE2
[1] 0.566944
> Vrmse
[1] 18.14527
#########################################
### 16-NIG_HN_GARCH_ret_vix #######
#########################################
Sol_16_Vol=c(3.788371e-13, 1.520721e-06, 4.651210e+02, 6.620008e-01, 4.400007e-01, 9.646967e-01)
se_16_Vol=C(3.626398e-13, 3.626398e-09, 3.626376e-09, 3.626376e-09, 3.626376e-09, 3.626376e-09)
Sol_16_Dis=c(1.337329911, -0.004432882 , 1.551758651 , 1.424519069)
se_16_Dis=C(0.001101934 , 0.007270608 ,0.00148702057, 0.002726753)
# RMSE
> RMSE1$in
[1] 0.05226903
> RMSE2$out
[1] 0.06419075
> RMSE2$we
[1] 0.06195388
> MPE
[1] -0.5589676
> MAE
[1] 0.566944
> MAE2
[1] 0.566944
> Vrmse
[1] 18.14527
#########################################
### 17-NIG_HN_GARCH_ret_vix #######
#########################################
Sol_17_Dis=c(0.541743068,-0.005313759,0.839760873,1.792759694)
se_17_Dis=C( 0.025217477, 0.00770, 0.00680 , 0.00355)
# RMSE
> RMSE1$in
[1] 0.05279462
> RMSE2$out
[1] 0.06430162
> RMSE2$we
[1] 0.06202299
> MPE
[1] -0.5589676
> MAE
[1] 0.566944
> MAE2
[1] 0.566944
> Vrmse
[1] 18.14527
#############################################
### 18- GJR_GARCH_Returns #######
#############################################
##a0=para_h[1]; a1=para_h[2]; a2=para_h[3]; b1= para_h[4] ; lamda0= para_h[5]
Sol_18_dis=c(1.270250581, -0.002520285 , 1.620497081 , 1.973447871)
se_18_dis=C(0.00084284, 0.00448010,0.00305425,0.00952546)
# RMSE
> RMSE1$in
[1] 0.05486513
> RMSE2$out
[1] 0.07194120
> RMSE2$we
[1] 0.06276413
> MPE
[1] 0.4439605
> MAE
[1] 0.6298235
> MAE2
[1] 0.6298235
> Vrmse
[1] 18.45451
#############################################
### 19- GJR_GARCH_Returns_VIX #######
#############################################
##a0=para_h[1]; a1=para_h[2]; a2=para_h[3]; b1= para_h[4] ; lamda0= para_h[5] ; ro=para_h[6]
Sol_19=c(1.269943539, -0.002488772, 1.620073612, 2.088112106)
se_19=C(0.0009133685822, 0.0000913399, 0.0008702860938, 0.0000870)
# RMSE
> RMSE1$in
[1] 0.0533685822
> RMSE2$out
[1] 0.0632860938
> RMSE2$we
[1] 0.0612541291
> MPE
[1] -0.3462374
> MAE
[1] 0.4104046
> MAE2
[1] 0.4104046
> Vrmse
[1] 13.94554
|
#########
#Coursera project 2 in Reproducible research
# Exploratory script for the NOAA StormData set (Reproducible Research,
# peer assessment 2): download the raw data, rebuild date fields, and count
# observations per day/year.
# NOTE(review): setwd() to an absolute user-specific path makes this script
# non-portable; prefer running from the project directory.
setwd("C:/Users/gissurj/R/Coursera/Reproducible research/RepData_PeerAssessment2")
############
#Download data and get it ready
# read.csv() decompresses the .bz2 archive transparently, so no manual
# extraction step is needed after the download.
download.file("https://d396qusza40orc.cloudfront.net/repdata%2Fdata%2FStormData.csv.bz2", "Storm.bz2")
Storm <- read.csv("Storm.bz2")
# NOTE(review): "Storm.zip" is never created anywhere in this script -- this
# unzip() call will fail; it looks like a leftover. Confirm and remove.
tmpdir=unzip("Storm.zip")
#install.packages("sqldf")
#install.packages("lubridate")
#install.packages("timeDate")
#install.packages("reshape2")
#library("sqldf")
#library("timeDate")
#library(lattice)
#library("lubridate")
#library(reshape2)
# NOTE(review): sqldf, lubridate (year/month) and reshape2 (colsplit) are all
# used below, but every library() call above is commented out -- the script
# will not run as-is.
#analyze the data little
str(Storm)
summary(Storm)
tail(Storm)
num_obs <- dim(Storm)[1]
num_variables <- dim(Storm)[2]
#analyze observations over year
names(Storm)
# Reconstruct the date field
# BGN_DATE is "m/d/Y H:M:S"; split on the space and keep only the date part.
a <-colsplit(Storm$BGN_DATE," ",c("Date","Time"))[1]
Storm[,"DATE"] <- a
Storm$YEAR <- year(Storm$DATE)
Storm$MONTH <- month(Storm$DATE)
#Lets minimize the dataset while we explore it to this century
# SQLite identifiers are case-insensitive, so "year" matches the YEAR column.
S <- sqldf("select * from Storm where year > 1999")
S_2011 <- sqldf("select * from Storm where year = 2011")
# The columns names are not self explanatory but with digging here are some discoveries
# DMG - Stands for damage
# Prop - Are properties
# PROPDMGEXP -> here are numbers stored in number e.g. M stands for million and T stands for thousand
# CropDMG stands for crop damage
# I believe that the MAG column stands for magnitude
# The fatalities and injuries columns are self explained.
######
#Extra analysis
# NOTE(review): the next two statements run BEFORE num_obs_on_day is created
# two lines below, and `a` here still holds the Storm-level split from above;
# these lines appear out of order (scratch-pad leftovers) -- reorder or delete.
num_obs_on_day[,"DAGS"] <- a
a <- as.Date(a$Date,format = "%m/%d/%Y")
# Count observations per begin date, then derive proper Date and year columns.
num_obs_on_day <- sqldf("select BGN_DATE, count(*) obs from Storm group by BGN_DATE")
a <-colsplit(num_obs_on_day$BGN_DATE," ",c("Date","Time"))[1]
num_obs_on_day[,"dags"] <- a
num_obs_on_day$BGN_DATE <- as.Date(a$Date,format = "%m/%d/%Y")
num_obs_on_day$dags<-as.Date(num_obs_on_day$dags,format = "%m/%d/%Y")
num_obs_on_day$year <- year(num_obs_on_day$dags)
summary(num_obs_on_day)
|
/CP2.R
|
no_license
|
Gissi/RepData_PeerAssessment2
|
R
| false
| false
| 1,965
|
r
|
#########
# Coursera project 2 in Reproducible Research
# NOTE(review): setwd() with an absolute user path is non-portable; kept for
# fidelity with the original workflow.
setwd("C:/Users/gissurj/R/Coursera/Reproducible research/RepData_PeerAssessment2")
############
# Download data and get it ready (read.csv reads the .bz2 archive directly).
download.file("https://d396qusza40orc.cloudfront.net/repdata%2Fdata%2FStormData.csv.bz2", "Storm.bz2")
Storm <- read.csv("Storm.bz2")
# FIX: removed `tmpdir=unzip("Storm.zip")` -- no "Storm.zip" is ever
# downloaded; the data set is the .bz2 file already read above.
#install.packages("sqldf")
#install.packages("lubridate")
#install.packages("timeDate")
#install.packages("reshape2")
#library("sqldf")
#library("timeDate")
#library(lattice)
#library("lubridate")
#library(reshape2)
# Take a first look at the data.
str(Storm)
summary(Storm)
tail(Storm)
num_obs <- dim(Storm)[1]
num_variables <- dim(Storm)[2]
# Analyze observations over the years.
names(Storm)
# Reconstruct the date field: BGN_DATE holds "m/d/Y H:M:S" as text.
a <- colsplit(Storm$BGN_DATE," ",c("Date","Time"))[1]
# FIX: convert to Date before deriving YEAR/MONTH; the original stored the
# raw character split, which lubridate::year()/month() cannot parse.
Storm$DATE <- as.Date(a$Date, format = "%m/%d/%Y")
Storm$YEAR <- year(Storm$DATE)
Storm$MONTH <- month(Storm$DATE)
# Restrict the working set to this century while exploring.
S <- sqldf("select * from Storm where year > 1999")
S_2011 <- sqldf("select * from Storm where year = 2011")
# The column names are not self-explanatory, but with digging here are some discoveries:
# DMG - stands for damage
# Prop - properties
# PROPDMGEXP -> magnitude codes, e.g. M stands for million and T for thousand
# CropDMG stands for crop damage
# The MAG column presumably stands for magnitude -- TODO confirm
# The fatalities and injuries columns are self-explanatory.
######
# Extra analysis: observations per day and year.
# FIX: reordered -- the original referenced num_obs_on_day before creating it
# and clobbered `a` with a premature as.Date() call.
num_obs_on_day <- sqldf("select BGN_DATE, count(*) obs from Storm group by BGN_DATE")
a <- colsplit(num_obs_on_day$BGN_DATE," ",c("Date","Time"))[1]
num_obs_on_day[,"dags"] <- a
num_obs_on_day$BGN_DATE <- as.Date(a$Date,format = "%m/%d/%Y")
num_obs_on_day$dags <- as.Date(num_obs_on_day$dags,format = "%m/%d/%Y")
num_obs_on_day$year <- year(num_obs_on_day$dags)
summary(num_obs_on_day)
|
# Compute starting values for the three-parameter logistic ("tpm" / 3PL) model.
#
# start.val  : NULL, the string "random", or a p x 3 matrix of user-supplied
#              starting values per item (guessing, intercept, discrimination).
# data       : n x p binary response matrix (NAs allowed).
# type       : "rasch" collapses the p discriminations to a single common
#              value (absolute mean).
# constraint : optional matrix with item index in column 1 and parameter
#              index in column 2; positions of fixed parameters are removed
#              from the returned vector.
# Returns a numeric vector of starting values on the internal scale
# (guessing parameters on the logit scale).
start.val.tpm <-
function (start.val, data, type, constraint) {
    n <- nrow(data)
    p <- ncol(data)
    # Compute fresh starting values unless the caller supplied a complete
    # p x 3 matrix; "random" also takes the computed branch but replaces the
    # ability proxy with N(0, 1) draws.
    cmptStrVal <- is.null(start.val) || (start.val == "random" || (is.matrix(start.val) && length(start.val) != 3*p))
    randStrVal <- length(start.val) == 1 && start.val == "random"
    if (cmptStrVal) {
        # Crude per-respondent ability proxy: map each distinct raw total
        # score onto an equally spaced grid over [-3, 3].
        rs <- as.vector(rowSums(data, na.rm = TRUE))
        len.uni <- length(unique(rs))
        rs <- factor(rs, labels = 1:len.uni)
        rs <- as.numeric(levels(rs))[as.integer(rs)]
        z <- cbind(1, seq(-3, 3, len = len.uni)[rs])
        if (randStrVal)
            z[, 2] <- rnorm(n)
        # Promote warnings to errors so problematic glm fits are caught by
        # try() below; restore the previous setting on exit.
        old <- options(warn = (2))
        on.exit(options(old))
        coefs <- matrix(0, p, 2)
        for (i in 1:p) {
            # Per-item logistic regression of responses (NAs dropped) on the
            # ability proxy; fall back to c(0, 1) when the fit fails.
            y <- data[, i]
            na.ind <- !is.na(y)
            y. <- y[na.ind]
            z. <- z[na.ind, ]
            fm <- try(glm.fit(z., y., family = binomial()), silent = TRUE)
            coefs[i, ] <- if (!inherits(fm, "try-error")) {
                fm$coef
            } else {
                c(0, 1)
            }
        }
        # Prepend guessing starts (logit of values between 0.05 and 0.15),
        # assigned by rank of the item intercepts (order(order(...)) gives
        # the rank permutation).
        coefs <- cbind(qlogis(seq(0.05, 0.15, length = p))[order(order(coefs[, 1], decreasing = TRUE))], coefs)
        coefs <- if (type == "rasch") c(coefs[, 1:2], abs(mean(coefs[, 3]))) else as.vector(coefs)
    } else {
        # User-supplied matrix: move the guessing column to the logit scale;
        # under "rasch" collapse discrimination to one common value.
        coefs <- start.val
        coefs[, 1] <- qlogis(coefs[, 1])
        coefs <- if (type == "rasch") c(coefs[, 1:2], abs(mean(coefs[, 3]))) else as.vector(coefs)
    }
    # Remove positions of constrained (fixed) parameters from the vectorized
    # starting values; under "rasch" a constraint on parameter 3 removes the
    # final (common) discrimination element instead.
    if (!is.null(constraint)) {
        if (type == "rasch" && any(ind <- constraint[, 2] == 3))
            coefs[-c((constraint[!ind, 2] - 1) * p + constraint[!ind, 1], length(coefs))]
        else
            coefs[-((constraint[, 2] - 1) * p + constraint[, 1])]
    } else
        coefs
}
|
/R/start.val.tpm.R
|
no_license
|
state-o-flux/ltm
|
R
| false
| false
| 1,809
|
r
|
# Compute starting values for the three-parameter logistic ("tpm" / 3PL) model.
#
# start.val  : NULL, the string "random", or a p x 3 matrix of user-supplied
#              starting values per item (guessing, intercept, discrimination).
# data       : n x p binary response matrix (NAs allowed).
# type       : "rasch" collapses the p discriminations to a single common
#              value (absolute mean).
# constraint : optional matrix with item index in column 1 and parameter
#              index in column 2; positions of fixed parameters are removed
#              from the returned vector.
# Returns a numeric vector of starting values on the internal scale
# (guessing parameters on the logit scale).
start.val.tpm <-
function (start.val, data, type, constraint) {
    n <- nrow(data)
    p <- ncol(data)
    # Compute fresh starting values unless the caller supplied a complete
    # p x 3 matrix; "random" also takes the computed branch but replaces the
    # ability proxy with N(0, 1) draws.
    cmptStrVal <- is.null(start.val) || (start.val == "random" || (is.matrix(start.val) && length(start.val) != 3*p))
    randStrVal <- length(start.val) == 1 && start.val == "random"
    if (cmptStrVal) {
        # Crude per-respondent ability proxy: map each distinct raw total
        # score onto an equally spaced grid over [-3, 3].
        rs <- as.vector(rowSums(data, na.rm = TRUE))
        len.uni <- length(unique(rs))
        rs <- factor(rs, labels = 1:len.uni)
        rs <- as.numeric(levels(rs))[as.integer(rs)]
        z <- cbind(1, seq(-3, 3, len = len.uni)[rs])
        if (randStrVal)
            z[, 2] <- rnorm(n)
        # Promote warnings to errors so problematic glm fits are caught by
        # try() below; restore the previous setting on exit.
        old <- options(warn = (2))
        on.exit(options(old))
        coefs <- matrix(0, p, 2)
        for (i in 1:p) {
            # Per-item logistic regression of responses (NAs dropped) on the
            # ability proxy; fall back to c(0, 1) when the fit fails.
            y <- data[, i]
            na.ind <- !is.na(y)
            y. <- y[na.ind]
            z. <- z[na.ind, ]
            fm <- try(glm.fit(z., y., family = binomial()), silent = TRUE)
            coefs[i, ] <- if (!inherits(fm, "try-error")) {
                fm$coef
            } else {
                c(0, 1)
            }
        }
        # Prepend guessing starts (logit of values between 0.05 and 0.15),
        # assigned by rank of the item intercepts (order(order(...)) gives
        # the rank permutation).
        coefs <- cbind(qlogis(seq(0.05, 0.15, length = p))[order(order(coefs[, 1], decreasing = TRUE))], coefs)
        coefs <- if (type == "rasch") c(coefs[, 1:2], abs(mean(coefs[, 3]))) else as.vector(coefs)
    } else {
        # User-supplied matrix: move the guessing column to the logit scale;
        # under "rasch" collapse discrimination to one common value.
        coefs <- start.val
        coefs[, 1] <- qlogis(coefs[, 1])
        coefs <- if (type == "rasch") c(coefs[, 1:2], abs(mean(coefs[, 3]))) else as.vector(coefs)
    }
    # Remove positions of constrained (fixed) parameters from the vectorized
    # starting values; under "rasch" a constraint on parameter 3 removes the
    # final (common) discrimination element instead.
    if (!is.null(constraint)) {
        if (type == "rasch" && any(ind <- constraint[, 2] == 3))
            coefs[-c((constraint[!ind, 2] - 1) * p + constraint[!ind, 1], length(coefs))]
        else
            coefs[-((constraint[, 2] - 1) * p + constraint[, 1])]
    } else
        coefs
}
|
# 1. Load configuration (run parameters for the election simulator)
parameters = read_yaml('02_parameter/parameters.yml', fileEncoding = 'UTF-8')
# 0.1. Read raw vote data (semicolon separated, UTF-8 with BOM)
data_original = read.csv('01_input/data_original_concejales_2016.csv', sep=';', fileEncoding = 'UTF-8-BOM')
# 0.4. Seats ("cupos") per unit (commune/district), chosen by election mode
if(parameters['modo'][[1]] == 'concejales'){
	seats_raw = read.xlsx('02_parameter/01_diccionarios_cupos/Cupos_Concejales.xlsx')
	asientos <- seats_raw$Cupo
} else if(parameters['modo'][[1]] == 'convencionales'){
	seats_raw = read.xlsx('02_parameter/01_diccionarios_cupos/Cupos_Convencionales.xlsx')
	asientos <- seats_raw$Cupo
} else if(parameters['modo'][[1]] == 'diputados'){
	seats_raw = read.xlsx('02_parameter/01_diccionarios_cupos/Cupos_Diputados.xlsx')
	asientos <- seats_raw$Cupo
}
# 0.5. Load the party-acronym -> coalition dictionary
dicc_02_raw = read.xlsx("02_parameter/02_diccionario_coaliciones/diccionario_siglacoa_v2.xlsx")#, sep=';')
scenario = parameters['coaliciones'][[1]]
# 0.6. Feed the data into the simulation pipeline
data_dip17 = data_original
# 2. Build coalitions: keep the coalition column for the chosen scenario and
#    attach it to every vote record by party acronym ("Sigla")
dicc_02 = dicc_02_raw[,c("Sigla",scenario)]
colnames(dicc_02) <- c("Sigla","Coalicion")
data_original_coa = merge(data_original,dicc_02, by = "Sigla")
# Total number of seats in play across all districts
SEATS = sum(seats_raw$Cupo)
# 3. Construimos la generalización del D'Hondt
# 3.1. A nivel de Pacto
diccionario_PactoCupos <- function(distrito, data){
	# Returns the number of seats ("cupos") won by each coalition ("pacto")
	# in the given district, allocated by D'Hondt over coalition vote totals.
	# Relies on the global `asientos` vector for the district's seat count.
	votos_distrito <- data[data$ID == distrito, ]
	por_pacto <- votos_distrito %>%
		group_by(Coalicion) %>%
		summarise(Votacion = sum(Votacion), .groups = 'drop')
	por_pacto <- por_pacto[order(-por_pacto$Votacion), ]
	asignacion <- dHondt(por_pacto$Votacion, por_pacto$Coalicion, asientos[distrito])
	#rownames(por_pacto) = NULL
	# dHondt may return fewer entries than there are coalitions; in that
	# case pad the tail (least-voted coalitions) with zero seats.
	faltantes <- nrow(por_pacto) - length(asignacion)
	if (faltantes == 0) {
		por_pacto$Cupos <- asignacion
	} else {
		por_pacto$Cupos <- c(asignacion, rep(0, faltantes))
	}
	por_pacto$ID <- distrito
	return(por_pacto)
}
# 3.2. A nivel de Partido
# Distribute the seats each coalition won in a district among that
# coalition's member parties ("Sigla"), again via D'Hondt on party votes.
# distrito : district ID.
# data     : long-format vote data with columns ID, Sigla, Coalicion, Votacion.
# Returns a data frame with one row per (Coalicion, Sigla), the coalition
# seat count (Cupos), and party seats (Cupos_Partido), sorted by seats won.
diccionario_PartidoCupos <- function(distrito, data){
	transiente = data[data$ID == distrito,]
	# Party-level vote totals within the district.
	transiente_partido = transiente %>%
		group_by(Sigla, Coalicion) %>%
		summarise(Votacion = sum(Votacion), .groups = 'drop')
	# Seats per coalition for this district; drop helper columns before merge.
	reveal_cupos = diccionario_PactoCupos(distrito,data)
	reveal_cupos$Votacion=NULL
	reveal_cupos$ID=NULL
	transiente_partido = merge(transiente_partido,reveal_cupos, by = "Coalicion")
	transiente_partido$Cupos_Partido = 0
	lista_coalicion = unique(transiente_partido$Coalicion)
	output = data.frame(Coalicion = NULL, Sigla = NULL, Votacion = NULL, Cupos= NULL, Cupos_Partido=NULL)
	for(i in 1:length(lista_coalicion)){
		subtransiente= transiente_partido[transiente_partido$Coalicion == lista_coalicion[i],]
		if(subtransiente$Cupos[1]>1){
			# More than one seat: run D'Hondt among the coalition's parties;
			# pad with zeros when dHondt returns fewer entries than parties.
			subtransiente = subtransiente[order(-subtransiente$Votacion),]
			rownames(subtransiente) = NULL
			Cupos = subtransiente$Cupos[1]
			raw= dHondt(subtransiente$Votacion, subtransiente$Sigla,Cupos)
			largo_raw = length(raw)
			largo_full = nrow(subtransiente)
			reper = largo_full - largo_raw
			if(reper == 0){
				subtransiente$Cupos_Partido = raw
				output=rbind(output,subtransiente)
			}
			else{
				raw_modified = c(raw,rep(0,reper))
				subtransiente$Cupos_Partido = raw_modified
				output=rbind(output,subtransiente)
			}
		}
		else if(subtransiente$Cupos[1]==1){
			# Exactly one seat: it goes to the coalition's most voted party.
			maxi = max(subtransiente$Votacion)
			subtransiente$Cupos_Partido[subtransiente$Votacion == maxi] = 1
			output=rbind(output,subtransiente)
		}
		else{
			# No seats for this coalition.
			subtransiente$Cupos_Partido = 0
			output=rbind(output,subtransiente)
		}
	}
	output = output[order(-output$Cupos_Partido),]
	output$ID = distrito
	return(output)
}
# 4. Funcion de simulacion
ElectoSimulate <- function(data){
	# Simulate one full election: allocate party-level seats in every
	# district and stack the per-district results into one data frame.
	# NOTE: the per-district work below is embarrassingly parallel.
	resultados <- lapply(seq_along(asientos),
		function(z) diccionario_PartidoCupos(z, data))
	return(bind_rows(resultados))
}
# 5. Función simulación MÚLTIPLES escenario
# Run the full Monte Carlo election simulation and write all reports.
# Reads settings from the global `parameters` list (n_simulaciones, n_cores,
# experiment_tag, blancos_promedio, nulos_promedio, padron_total) and uses
# the globals full_new / full / asientos / SEATS plus helper functions
# defined elsewhere (Simulador_escenarios_parallel, ElectoSimulate, viz.R).
# Side effects: creates 98_output/<experiment_tag>/ with xlsx summaries,
# density plots and waffle charts, and prints summaries to the console.
SIMULATE_NOW_MANY <-function(){
	n_escenarios = parameters['n_simulaciones'][[1]]
	multiples_escenarios = Simulador_escenarios_parallel(full_new, full, n_escenarios)
	# Cap the worker count at the number of available cores minus one.
	nucleos = parameters['n_cores'][[1]]
	if(nucleos >= detectCores()){
		print(paste0('Parametro de nucleos excede el maximo, nucleos establecido en ',detectCores()-1))
		nucleos = detectCores() - 1
	}
	print('Simulando elecciones:')
	# Simulate every scenario in parallel; [3] keeps only the elapsed time.
	ptime_2 <- system.time({
		rr <- mclapply(multiples_escenarios, ElectoSimulate, mc.cores = nucleos)
		# rr <- mapply(ElectoSimulate, multiples_escenarios )
	})[3]
	print(paste0(n_escenarios, ' escenarios simulados en ',round(ptime_2,0),' segundos'))
	# Tag each simulated scenario with its index and stack into one frame,
	# keeping only rows with positive votes.
	vector_sim = 1:n_escenarios
	rr_2 <- mapply(cbind, rr, "Simulacion"=vector_sim, SIMPLIFY=F)
	rr_2_df = do.call(rbind.data.frame, rr_2)
	wrapper_total_detalle = rr_2_df[rr_2_df$Votacion>0,]
	# Seats won per coalition per simulation.
	wrapper_coa = wrapper_total_detalle %>%
		group_by(Coalicion, Simulacion) %>%
		summarise(Votacion = sum(Cupos_Partido), .groups = 'drop')
	colnames(wrapper_coa) = c('Coalicion', 'Simulacion','Asientos_ganados')
	# Median seats per coalition, rescaled so totals add up to SEATS.
	coa_global = wrapper_coa %>% group_by(Coalicion) %>%
		summarise(Asientos_ganados = median(Asientos_ganados), .groups = 'drop')
	coa_global$Asientos_ganados = round(SEATS*round(coa_global$Asientos_ganados/sum(coa_global$Asientos_ganados),4),0)
	# Same two summaries at the party level.
	wrapper_party = wrapper_total_detalle %>%
		group_by(Sigla, Simulacion) %>%
		summarise(Votacion = sum(Cupos_Partido), .groups = 'drop')
	colnames(wrapper_party) = c('Partido', 'Simulacion','Asientos_ganados')
	party_global = wrapper_party %>% group_by(Partido) %>%
		summarise(Asientos_ganados = median(Asientos_ganados), .groups = 'drop')
	party_global$Asientos_ganados = round(SEATS*round(party_global$Asientos_ganados/sum(party_global$Asientos_ganados),4),0)
	# Persist the parameters used plus all detail/summary tables to the run
	# folder named after the experiment tag.
	new_output_path = paste0('98_output/',parameters['experiment_tag'][[1]])
	dir.create(new_output_path)
	write_yaml(parameters, file=paste0(new_output_path, '/used_parameter.yml'), fileEncoding = "UTF-8")
	write.xlsx(wrapper_coa, file=paste0(new_output_path, '/simulacion_coa.xlsx'), row.names = FALSE)
	write.xlsx(wrapper_party, file=paste0(new_output_path,'/simulacion_partido.xlsx'), row.names = FALSE)
	write.xlsx(coa_global, file=paste0(new_output_path,'/coalicion_resumen_median.xlsx'), row.names = FALSE)
	write.xlsx(party_global, file=paste0(new_output_path,'/partido_resumen_median.xlsx'), row.names = FALSE)
	write.xlsx(wrapper_total_detalle, file=paste0(new_output_path,'/simulacion_detalle.xlsx'), row.names = FALSE)
	# Visual reports (plot helpers are defined in viz.R).
	source('00_code/02_reporting/viz.R')
	create_densities_party(wrapper_party, new_output_path)
	create_densities_coalition(wrapper_coa, new_output_path)
	print(as.data.frame(coa_global[coa_global$Asientos_ganados>0,]))
	print(as.data.frame(party_global[party_global$Asientos_ganados>0,]))
	#create_chamber(wrapper_coa, new_output_path)
	create_waffle(as.data.frame(coa_global), new_output_path,'/waffle-coa.png', 'Resultado por coalicion')
	create_waffle(as.data.frame(party_global), new_output_path,'/waffle-party.png', 'Resultado por partido')
	# Report turnout: simulated valid votes plus expected blank/null votes,
	# also expressed as a percentage of the electoral roll (padron_total).
	participacion = wrapper_total_detalle %>% group_by(Simulacion) %>%
		summarise(Participacion = sum(Votacion), .groups = 'drop')
	print(paste0('Participacion estimada:'))
	print(summary(participacion$Participacion)[1])
	print(summary(participacion$Participacion)[2])
	print(summary(participacion$Participacion)[3])
	print(summary(participacion$Participacion)[4])
	print(summary(participacion$Participacion)[5])
	print(summary(participacion$Participacion)[6])
	nulos_y_blancos = (parameters['blancos_promedio'][[1]]+parameters['nulos_promedio'][[1]])*parameters['padron_total'][[1]]
	print(round(100*(median(participacion$Participacion) + nulos_y_blancos)/parameters['padron_total'][[1]],1))
}
|
/00_code/01_data_science/simulador.R
|
no_license
|
goyanedelv/general-election-simulator-chile
|
R
| false
| false
| 8,172
|
r
|
# 1. Load configuration (run parameters for the election simulator)
parameters = read_yaml('02_parameter/parameters.yml', fileEncoding = 'UTF-8')
# 0.1. Read raw vote data (semicolon separated, UTF-8 with BOM)
data_original = read.csv('01_input/data_original_concejales_2016.csv', sep=';', fileEncoding = 'UTF-8-BOM')
# 0.4. Seats ("cupos") per unit (commune/district), chosen by election mode
if(parameters['modo'][[1]] == 'concejales'){
	seats_raw = read.xlsx('02_parameter/01_diccionarios_cupos/Cupos_Concejales.xlsx')
	asientos <- seats_raw$Cupo
} else if(parameters['modo'][[1]] == 'convencionales'){
	seats_raw = read.xlsx('02_parameter/01_diccionarios_cupos/Cupos_Convencionales.xlsx')
	asientos <- seats_raw$Cupo
} else if(parameters['modo'][[1]] == 'diputados'){
	seats_raw = read.xlsx('02_parameter/01_diccionarios_cupos/Cupos_Diputados.xlsx')
	asientos <- seats_raw$Cupo
}
# 0.5. Load the party-acronym -> coalition dictionary
dicc_02_raw = read.xlsx("02_parameter/02_diccionario_coaliciones/diccionario_siglacoa_v2.xlsx")#, sep=';')
scenario = parameters['coaliciones'][[1]]
# 0.6. Feed the data into the simulation pipeline
data_dip17 = data_original
# 2. Build coalitions: keep the coalition column for the chosen scenario and
#    attach it to every vote record by party acronym ("Sigla")
dicc_02 = dicc_02_raw[,c("Sigla",scenario)]
colnames(dicc_02) <- c("Sigla","Coalicion")
data_original_coa = merge(data_original,dicc_02, by = "Sigla")
# Total number of seats in play across all districts
SEATS = sum(seats_raw$Cupo)
# 3. Construimos la generalización del D'Hondt
# 3.1. A nivel de Pacto
diccionario_PactoCupos <- function(distrito, data){
	# Returns the number of seats ("cupos") won by each coalition ("pacto")
	# in the given district, allocated by D'Hondt over coalition vote totals.
	# Relies on the global `asientos` vector for the district's seat count.
	votos_distrito <- data[data$ID == distrito, ]
	por_pacto <- votos_distrito %>%
		group_by(Coalicion) %>%
		summarise(Votacion = sum(Votacion), .groups = 'drop')
	por_pacto <- por_pacto[order(-por_pacto$Votacion), ]
	asignacion <- dHondt(por_pacto$Votacion, por_pacto$Coalicion, asientos[distrito])
	#rownames(por_pacto) = NULL
	# dHondt may return fewer entries than there are coalitions; in that
	# case pad the tail (least-voted coalitions) with zero seats.
	faltantes <- nrow(por_pacto) - length(asignacion)
	if (faltantes == 0) {
		por_pacto$Cupos <- asignacion
	} else {
		por_pacto$Cupos <- c(asignacion, rep(0, faltantes))
	}
	por_pacto$ID <- distrito
	return(por_pacto)
}
# 3.2. A nivel de Partido
# Distribute the seats each coalition won in a district among that
# coalition's member parties ("Sigla"), again via D'Hondt on party votes.
# distrito : district ID.
# data     : long-format vote data with columns ID, Sigla, Coalicion, Votacion.
# Returns a data frame with one row per (Coalicion, Sigla), the coalition
# seat count (Cupos), and party seats (Cupos_Partido), sorted by seats won.
diccionario_PartidoCupos <- function(distrito, data){
	transiente = data[data$ID == distrito,]
	# Party-level vote totals within the district.
	transiente_partido = transiente %>%
		group_by(Sigla, Coalicion) %>%
		summarise(Votacion = sum(Votacion), .groups = 'drop')
	# Seats per coalition for this district; drop helper columns before merge.
	reveal_cupos = diccionario_PactoCupos(distrito,data)
	reveal_cupos$Votacion=NULL
	reveal_cupos$ID=NULL
	transiente_partido = merge(transiente_partido,reveal_cupos, by = "Coalicion")
	transiente_partido$Cupos_Partido = 0
	lista_coalicion = unique(transiente_partido$Coalicion)
	output = data.frame(Coalicion = NULL, Sigla = NULL, Votacion = NULL, Cupos= NULL, Cupos_Partido=NULL)
	for(i in 1:length(lista_coalicion)){
		subtransiente= transiente_partido[transiente_partido$Coalicion == lista_coalicion[i],]
		if(subtransiente$Cupos[1]>1){
			# More than one seat: run D'Hondt among the coalition's parties;
			# pad with zeros when dHondt returns fewer entries than parties.
			subtransiente = subtransiente[order(-subtransiente$Votacion),]
			rownames(subtransiente) = NULL
			Cupos = subtransiente$Cupos[1]
			raw= dHondt(subtransiente$Votacion, subtransiente$Sigla,Cupos)
			largo_raw = length(raw)
			largo_full = nrow(subtransiente)
			reper = largo_full - largo_raw
			if(reper == 0){
				subtransiente$Cupos_Partido = raw
				output=rbind(output,subtransiente)
			}
			else{
				raw_modified = c(raw,rep(0,reper))
				subtransiente$Cupos_Partido = raw_modified
				output=rbind(output,subtransiente)
			}
		}
		else if(subtransiente$Cupos[1]==1){
			# Exactly one seat: it goes to the coalition's most voted party.
			maxi = max(subtransiente$Votacion)
			subtransiente$Cupos_Partido[subtransiente$Votacion == maxi] = 1
			output=rbind(output,subtransiente)
		}
		else{
			# No seats for this coalition.
			subtransiente$Cupos_Partido = 0
			output=rbind(output,subtransiente)
		}
	}
	output = output[order(-output$Cupos_Partido),]
	output$ID = distrito
	return(output)
}
# 4. Funcion de simulacion
ElectoSimulate <- function(data){
	# Simulate one full election: allocate party-level seats in every
	# district and stack the per-district results into one data frame.
	# NOTE: the per-district work below is embarrassingly parallel.
	resultados <- lapply(seq_along(asientos),
		function(z) diccionario_PartidoCupos(z, data))
	return(bind_rows(resultados))
}
# 5. Función simulación MÚLTIPLES escenario
# Run the full Monte Carlo election simulation and write all reports.
# Reads settings from the global `parameters` list (n_simulaciones, n_cores,
# experiment_tag, blancos_promedio, nulos_promedio, padron_total) and uses
# the globals full_new / full / asientos / SEATS plus helper functions
# defined elsewhere (Simulador_escenarios_parallel, ElectoSimulate, viz.R).
# Side effects: creates 98_output/<experiment_tag>/ with xlsx summaries,
# density plots and waffle charts, and prints summaries to the console.
SIMULATE_NOW_MANY <-function(){
	n_escenarios = parameters['n_simulaciones'][[1]]
	multiples_escenarios = Simulador_escenarios_parallel(full_new, full, n_escenarios)
	# Cap the worker count at the number of available cores minus one.
	nucleos = parameters['n_cores'][[1]]
	if(nucleos >= detectCores()){
		print(paste0('Parametro de nucleos excede el maximo, nucleos establecido en ',detectCores()-1))
		nucleos = detectCores() - 1
	}
	print('Simulando elecciones:')
	# Simulate every scenario in parallel; [3] keeps only the elapsed time.
	ptime_2 <- system.time({
		rr <- mclapply(multiples_escenarios, ElectoSimulate, mc.cores = nucleos)
		# rr <- mapply(ElectoSimulate, multiples_escenarios )
	})[3]
	print(paste0(n_escenarios, ' escenarios simulados en ',round(ptime_2,0),' segundos'))
	# Tag each simulated scenario with its index and stack into one frame,
	# keeping only rows with positive votes.
	vector_sim = 1:n_escenarios
	rr_2 <- mapply(cbind, rr, "Simulacion"=vector_sim, SIMPLIFY=F)
	rr_2_df = do.call(rbind.data.frame, rr_2)
	wrapper_total_detalle = rr_2_df[rr_2_df$Votacion>0,]
	# Seats won per coalition per simulation.
	wrapper_coa = wrapper_total_detalle %>%
		group_by(Coalicion, Simulacion) %>%
		summarise(Votacion = sum(Cupos_Partido), .groups = 'drop')
	colnames(wrapper_coa) = c('Coalicion', 'Simulacion','Asientos_ganados')
	# Median seats per coalition, rescaled so totals add up to SEATS.
	coa_global = wrapper_coa %>% group_by(Coalicion) %>%
		summarise(Asientos_ganados = median(Asientos_ganados), .groups = 'drop')
	coa_global$Asientos_ganados = round(SEATS*round(coa_global$Asientos_ganados/sum(coa_global$Asientos_ganados),4),0)
	# Same two summaries at the party level.
	wrapper_party = wrapper_total_detalle %>%
		group_by(Sigla, Simulacion) %>%
		summarise(Votacion = sum(Cupos_Partido), .groups = 'drop')
	colnames(wrapper_party) = c('Partido', 'Simulacion','Asientos_ganados')
	party_global = wrapper_party %>% group_by(Partido) %>%
		summarise(Asientos_ganados = median(Asientos_ganados), .groups = 'drop')
	party_global$Asientos_ganados = round(SEATS*round(party_global$Asientos_ganados/sum(party_global$Asientos_ganados),4),0)
	# Persist the parameters used plus all detail/summary tables to the run
	# folder named after the experiment tag.
	new_output_path = paste0('98_output/',parameters['experiment_tag'][[1]])
	dir.create(new_output_path)
	write_yaml(parameters, file=paste0(new_output_path, '/used_parameter.yml'), fileEncoding = "UTF-8")
	write.xlsx(wrapper_coa, file=paste0(new_output_path, '/simulacion_coa.xlsx'), row.names = FALSE)
	write.xlsx(wrapper_party, file=paste0(new_output_path,'/simulacion_partido.xlsx'), row.names = FALSE)
	write.xlsx(coa_global, file=paste0(new_output_path,'/coalicion_resumen_median.xlsx'), row.names = FALSE)
	write.xlsx(party_global, file=paste0(new_output_path,'/partido_resumen_median.xlsx'), row.names = FALSE)
	write.xlsx(wrapper_total_detalle, file=paste0(new_output_path,'/simulacion_detalle.xlsx'), row.names = FALSE)
	# Visual reports (plot helpers are defined in viz.R).
	source('00_code/02_reporting/viz.R')
	create_densities_party(wrapper_party, new_output_path)
	create_densities_coalition(wrapper_coa, new_output_path)
	print(as.data.frame(coa_global[coa_global$Asientos_ganados>0,]))
	print(as.data.frame(party_global[party_global$Asientos_ganados>0,]))
	#create_chamber(wrapper_coa, new_output_path)
	create_waffle(as.data.frame(coa_global), new_output_path,'/waffle-coa.png', 'Resultado por coalicion')
	create_waffle(as.data.frame(party_global), new_output_path,'/waffle-party.png', 'Resultado por partido')
	# Report turnout: simulated valid votes plus expected blank/null votes,
	# also expressed as a percentage of the electoral roll (padron_total).
	participacion = wrapper_total_detalle %>% group_by(Simulacion) %>%
		summarise(Participacion = sum(Votacion), .groups = 'drop')
	print(paste0('Participacion estimada:'))
	print(summary(participacion$Participacion)[1])
	print(summary(participacion$Participacion)[2])
	print(summary(participacion$Participacion)[3])
	print(summary(participacion$Participacion)[4])
	print(summary(participacion$Participacion)[5])
	print(summary(participacion$Participacion)[6])
	nulos_y_blancos = (parameters['blancos_promedio'][[1]]+parameters['nulos_promedio'][[1]])*parameters['padron_total'][[1]]
	print(round(100*(median(participacion$Participacion) + nulos_y_blancos)/parameters['padron_total'][[1]],1))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chadPlot.R
\name{chadPlot}
\alias{chadPlot}
\title{Print a data url'd image to console}
\usage{
chadPlot()
}
\description{
This function switches off the current graphics device, then prints out an image
tag with a data URL of the file. This is intended for use in chad notebooks.
}
|
/man/chadPlot.Rd
|
no_license
|
nfultz/chad
|
R
| false
| true
| 364
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chadPlot.R
\name{chadPlot}
\alias{chadPlot}
\title{Print a data url'd image to console}
\usage{
chadPlot()
}
\description{
This function switches off the current graphics device, then prints out an image
tag with a data URL of the file. This is intended for use in chad notebooks.
}
|
rankhospital <- function(state, outcome, num = "best") {
    # Return the name of the hospital in `state` with the requested rank for
    # 30-day mortality on `outcome` ("heart attack", "heart failure", or
    # "pneumonia").  `num` may be "best", "worst", or a numeric rank; ties
    # are broken alphabetically by hospital name.  Returns NA when the rank
    # exceeds the number of hospitals with available data.
    outcomeDf <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
    if(!(state %in% unique(outcomeDf$State))) {
        stop("invalid state")
    }
    # FIX: map each outcome to its mortality-rate column via switch(),
    # replacing three near-identical copy-pasted branches; switch()'s final
    # unnamed argument is the fall-through for an invalid outcome.
    rateCol <- switch(outcome,
        "heart attack"  = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack",
        "heart failure" = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure",
        "pneumonia"     = "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia",
        stop("invalid outcome")
    )
    # Keep only this state's hospitals with an available rate, then sort by
    # rate (ascending) with hospital name as the tie-breaker.
    outcomeStateDf <- outcomeDf[outcomeDf$State == state &
                    outcomeDf[[rateCol]] != "Not Available",]
    sortedDf <- outcomeStateDf[order(as.numeric(outcomeStateDf[[rateCol]])
                    , outcomeStateDf$Hospital.Name),]
    # Resolve the requested rank to a row index.
    # FIX: the original pre-checked `num` against the unfiltered row count,
    # which was redundant -- the post-filter check below already covers it.
    numRank <- if(num == "best") {
        1
    } else if(num == "worst") {
        nrow(sortedDf)
    } else {
        as.numeric(num)
    }
    if(numRank > nrow(sortedDf)) {
        return (NA)
    }
    # Column 2 is Hospital.Name.
    as.character(sortedDf[numRank,][2])
}
|
/rankhospital.R
|
no_license
|
abhinavg6/DataScience-RProg-Project
|
R
| false
| false
| 2,366
|
r
|
rankhospital <- function(state, outcome, num = "best") {
    # Return the name of the hospital in `state` with the requested rank for
    # 30-day mortality on `outcome` ("heart attack", "heart failure", or
    # "pneumonia").  `num` may be "best", "worst", or a numeric rank; ties
    # are broken alphabetically by hospital name.  Returns NA when the rank
    # exceeds the number of hospitals with available data.
    outcomeDf <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
    if(!(state %in% unique(outcomeDf$State))) {
        stop("invalid state")
    }
    # FIX: map each outcome to its mortality-rate column via switch(),
    # replacing three near-identical copy-pasted branches; switch()'s final
    # unnamed argument is the fall-through for an invalid outcome.
    rateCol <- switch(outcome,
        "heart attack"  = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack",
        "heart failure" = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure",
        "pneumonia"     = "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia",
        stop("invalid outcome")
    )
    # Keep only this state's hospitals with an available rate, then sort by
    # rate (ascending) with hospital name as the tie-breaker.
    outcomeStateDf <- outcomeDf[outcomeDf$State == state &
                    outcomeDf[[rateCol]] != "Not Available",]
    sortedDf <- outcomeStateDf[order(as.numeric(outcomeStateDf[[rateCol]])
                    , outcomeStateDf$Hospital.Name),]
    # Resolve the requested rank to a row index.
    # FIX: the original pre-checked `num` against the unfiltered row count,
    # which was redundant -- the post-filter check below already covers it.
    numRank <- if(num == "best") {
        1
    } else if(num == "worst") {
        nrow(sortedDf)
    } else {
        as.numeric(num)
    }
    if(numRank > nrow(sortedDf)) {
        return (NA)
    }
    # Column 2 is Hospital.Name.
    as.character(sortedDf[numRank,][2])
}
|
# Thin launcher: run an R script from a directory given on the command line.
# Usage: Rscript sql_wrapper.R <script_dir>
args = commandArgs(trailingOnly=TRUE)
# Base directory; a relative args[1] below is resolved against it.
setwd("c:/scripts/")
setwd(args[1])
# Execute the target script (UTF-8 so non-ASCII characters parse correctly).
source("run.R",encoding = "UTF-8")
|
/scripts/sql_wrapper.R
|
no_license
|
danielfm123/sqlsaturday2018_conecting_sql_and_r
|
R
| false
| false
| 115
|
r
|
# Thin launcher: run an R script from a directory given on the command line.
# Usage: Rscript sql_wrapper.R <script_dir>
args = commandArgs(trailingOnly=TRUE)
# Base directory; a relative args[1] below is resolved against it.
setwd("c:/scripts/")
setwd(args[1])
# Execute the target script (UTF-8 so non-ASCII characters parse correctly).
source("run.R",encoding = "UTF-8")
|
###### Automated lottery recommendation workflow
#setwd("E:/LBH/Dropbox/GitHub/Lottery/")
source("lotteryAnalysis.R",encoding="UTF-8")
######################################################
#autoAnalysisProcess(crawler = T)
######################################################
### Draw six numbers uniformly at random and validate them against the
### historical record.
temp1_1 <- sample(1:49,6)
temp1_2 <- historyRecordFN49(examineResult = temp1_1)
temp1_3 <- historyRecordCombo3FN49(examineResult = temp1_1,historyRecord = temp1_2 )
######################################################
### Draw six numbers following the empirical (real-world) probability
### distribution and validate them against the historical record.
temp2_1 <- chooseBall49FN()
temp2_2 <- historyRecordFN49(examineResult = temp2_1)
temp2_3 <- historyRecordCombo3FN49(examineResult = temp2_1,historyRecord = temp2_2)
######################################################
### Regenerate the partial matrix; its inner product produces the
### recommendation matrix, from which high-scoring numbers are selected.
temp3_1 <- partialMatrix49FN(chooseBall49FN(c(2,3,4,5)))
temp3_2 <- recommendMatrix49FN(recommendMatrix = temp3_1 )
temp3_3 <- recommendResultFN49(recommendResult = temp3_2,score=105)
temp3_4 <- historyRecordFN49(examineResult = temp3_3[,1])
temp3_5 <- historyRecordCombo3FN49(examineResult = temp3_3[,1],historyRecord = temp3_4 )
######################################################
# Greedy build-up: pick one ball, then extend the reserved set by one
# recommended number per step until six numbers are chosen.
firstBall <- chooseBall49FN(1)
#itemMatrix49 <- itemMatrix49FN(,terms=c(3,4))
recommendMX49 <- partialMatrix49FN(firstBall)
recommendResult49<- recommendMatrix49FN()
finalResult <- recommendResultFN49(score=0)
# step2
#itemMatrix49 <- itemMatrix49FN(,terms=c(4,5))
recommendMX49 <- partialMatrix49FN(finalResult[c(1,2),1])
recommendResult49<- recommendMatrix49FN()
finalResult <- recommendResultFN49(score=0,reserve=finalResult[c(1,2),1])
# step3
#itemMatrix49 <- itemMatrix49FN(,terms=c(5,6))
recommendMX49 <- partialMatrix49FN(finalResult[c(1,2,3),1])
recommendResult49<- recommendMatrix49FN()
finalResult <- recommendResultFN49(score=0,reserve=finalResult[c(1,2,3),1])
# step4
#itemMatrix49 <- itemMatrix49FN(,terms=c(6,7))
recommendMX49 <- partialMatrix49FN(finalResult[c(1,2,3,4),1])
recommendResult49<- recommendMatrix49FN()
finalResult <- recommendResultFN49(score=0,reserve=finalResult[c(1,2,3,4),1])
# step5
#itemMatrix49 <- itemMatrix49FN(,terms=c(7,8))
recommendMX49 <- partialMatrix49FN(finalResult[c(1,2,3,4,5),1])
recommendResult49<- recommendMatrix49FN()
# FIX: removed a stray double comma ("score=0,,reserve=") that passed an
# empty (missing) argument to recommendResultFN49.
finalResult <- recommendResultFN49(score=0,reserve=finalResult[c(1,2,3,4,5),1])
temp1 <- finalResult
|
/AutoLotteryAnalysis.R
|
no_license
|
BingHongLi/LotteryRecomm
|
R
| false
| false
| 2,432
|
r
|
###### Automated lottery recommendation workflow
#setwd("E:/LBH/Dropbox/GitHub/Lottery/")
source("lotteryAnalysis.R",encoding="UTF-8")
######################################################
#autoAnalysisProcess(crawler = T)
######################################################
### Draw six numbers uniformly at random and validate them against the
### historical record.
temp1_1 <- sample(1:49,6)
temp1_2 <- historyRecordFN49(examineResult = temp1_1)
temp1_3 <- historyRecordCombo3FN49(examineResult = temp1_1,historyRecord = temp1_2 )
######################################################
### Draw six numbers following the empirical (real-world) probability
### distribution and validate them against the historical record.
temp2_1 <- chooseBall49FN()
temp2_2 <- historyRecordFN49(examineResult = temp2_1)
temp2_3 <- historyRecordCombo3FN49(examineResult = temp2_1,historyRecord = temp2_2)
######################################################
### Regenerate the partial matrix; its inner product produces the
### recommendation matrix, from which high-scoring numbers are selected.
temp3_1 <- partialMatrix49FN(chooseBall49FN(c(2,3,4,5)))
temp3_2 <- recommendMatrix49FN(recommendMatrix = temp3_1 )
temp3_3 <- recommendResultFN49(recommendResult = temp3_2,score=105)
temp3_4 <- historyRecordFN49(examineResult = temp3_3[,1])
temp3_5 <- historyRecordCombo3FN49(examineResult = temp3_3[,1],historyRecord = temp3_4 )
######################################################
# Greedy build-up: pick one ball, then extend the reserved set by one
# recommended number per step until six numbers are chosen.
firstBall <- chooseBall49FN(1)
#itemMatrix49 <- itemMatrix49FN(,terms=c(3,4))
recommendMX49 <- partialMatrix49FN(firstBall)
recommendResult49<- recommendMatrix49FN()
finalResult <- recommendResultFN49(score=0)
# step2
#itemMatrix49 <- itemMatrix49FN(,terms=c(4,5))
recommendMX49 <- partialMatrix49FN(finalResult[c(1,2),1])
recommendResult49<- recommendMatrix49FN()
finalResult <- recommendResultFN49(score=0,reserve=finalResult[c(1,2),1])
# step3
#itemMatrix49 <- itemMatrix49FN(,terms=c(5,6))
recommendMX49 <- partialMatrix49FN(finalResult[c(1,2,3),1])
recommendResult49<- recommendMatrix49FN()
finalResult <- recommendResultFN49(score=0,reserve=finalResult[c(1,2,3),1])
# step4
#itemMatrix49 <- itemMatrix49FN(,terms=c(6,7))
recommendMX49 <- partialMatrix49FN(finalResult[c(1,2,3,4),1])
recommendResult49<- recommendMatrix49FN()
finalResult <- recommendResultFN49(score=0,reserve=finalResult[c(1,2,3,4),1])
# step5
#itemMatrix49 <- itemMatrix49FN(,terms=c(7,8))
recommendMX49 <- partialMatrix49FN(finalResult[c(1,2,3,4,5),1])
recommendResult49<- recommendMatrix49FN()
# FIX: removed a stray double comma ("score=0,,reserve=") that passed an
# empty (missing) argument to recommendResultFN49.
finalResult <- recommendResultFN49(score=0,reserve=finalResult[c(1,2,3,4,5),1])
temp1 <- finalResult
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/flashmatrix.R
\name{fm.set.conf}
\alias{fm.set.conf}
\title{Reconfigure FlashMatrix}
\usage{
fm.set.conf(conf.file)
}
\arguments{
\item{conf.file}{The configuration file.}
}
\description{
This reconfigures FlashMatrix with the settings in the configuration file.
The configuration file contains a list of key-value pairs. Each line in
the file is a key-value pair in the form of "key_name=value".
}
\author{
Da Zheng <dzheng5@jhu.edu>
}
|
/Rpkg/man/fm.set.conf.Rd
|
permissive
|
zheng-da/FlashX
|
R
| false
| false
| 524
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/flashmatrix.R
\name{fm.set.conf}
\alias{fm.set.conf}
\title{Reconfigure FlashMatrix}
\usage{
fm.set.conf(conf.file)
}
\arguments{
\item{conf.file}{The configuration file.}
}
\description{
This reconfigures FlashMatrix with the settings in the configuration file.
The configuration file contains a list of key-value pairs. Each line in
the file is a key-value pair in the form of "key_name=value".
}
\author{
Da Zheng <dzheng5@jhu.edu>
}
|
#' Do PCA for each geneSet based on bootstraps by genes
#'
#' For every gene set it randomly chooses the same number of genes as the set
#' size, extracts that expression sub-matrix and runs a PCA test on it
#' (delegated to \code{PCA_gene_single}).
#'
#' @param prefix The prefix for all of the output files (currently unused;
#'   kept as a placeholder for interface stability).
#' @param exprSet Matrix of microarray expression values; rownames must be
#'   genes, colnames must be samples.
#' @param geneSet_list A list of character vectors, one vector per gene set.
#' @param n Times for random permutation/bootstrap by genes, default 1000
#'   (currently unused in this function; presumably consumed inside
#'   \code{PCA_gene_single} — TODO confirm).
#' @return A data.frame with one row per gene set: \code{geneSet_name},
#'   \code{p} (the PCA test p-value) and \code{size} (number of genes).
#'   (Doc fix: the original claimed a numeric vector, which did not match
#'   the code.)
#' @export
#' @keywords PCA
#' @examples
#' PCA_gene_multiple(exprSet=exprSet, geneSet_list=geneSet_list)
PCA_gene_multiple <- function(prefix = 'test', exprSet, geneSet_list, n = 1000) {
  # Developer scratch block (never executed): builds example inputs.
  if (FALSE) {
    library(CLL)
    data(sCLLex)
    suppressMessages(library(limma))
    exprSet = exprs(sCLLex)
    pdata = pData(sCLLex)
    group_list = pdata$Disease
    geneSet_list <- list(set1 = sample(rownames(exprSet), 50),
                         set2 = sample(rownames(exprSet), 100),
                         set3 = sample(rownames(exprSet), 150)
    )
  }
  # NOTE(review): group_list is a free variable here (only defined in the dead
  # block above), so it must exist in the calling environment. Consider
  # promoting it to a formal argument.
  # vapply enforces a single numeric p-value per gene set.
  p <- vapply(geneSet_list, function(this_geneSet) {
    PCA_gene_single(exprSet = exprSet, group_list = group_list,
                    this_geneSet = this_geneSet)
  }, numeric(1))
  # lengths() returns the size of each gene set in one vectorized call.
  size <- lengths(geneSet_list)
  data.frame(geneSet_name = names(geneSet_list),
             p = p,
             size = size)
}
|
/R/PCA_gene_multiple.R
|
no_license
|
y461650833y/geneSet
|
R
| false
| false
| 1,666
|
r
|
#' Do PCA for each geneSet based on bootstraps by genes
#'
#' For every gene set it randomly chooses the same number of genes as the set
#' size, extracts that expression sub-matrix and runs a PCA test on it
#' (delegated to \code{PCA_gene_single}).
#'
#' @param prefix The prefix for all of the output files (currently unused;
#'   kept as a placeholder for interface stability).
#' @param exprSet Matrix of microarray expression values; rownames must be
#'   genes, colnames must be samples.
#' @param geneSet_list A list of character vectors, one vector per gene set.
#' @param n Times for random permutation/bootstrap by genes, default 1000
#'   (currently unused in this function; presumably consumed inside
#'   \code{PCA_gene_single} — TODO confirm).
#' @return A data.frame with one row per gene set: \code{geneSet_name},
#'   \code{p} (the PCA test p-value) and \code{size} (number of genes).
#'   (Doc fix: the original claimed a numeric vector, which did not match
#'   the code.)
#' @export
#' @keywords PCA
#' @examples
#' PCA_gene_multiple(exprSet=exprSet, geneSet_list=geneSet_list)
PCA_gene_multiple <- function(prefix = 'test', exprSet, geneSet_list, n = 1000) {
  # Developer scratch block (never executed): builds example inputs.
  if (FALSE) {
    library(CLL)
    data(sCLLex)
    suppressMessages(library(limma))
    exprSet = exprs(sCLLex)
    pdata = pData(sCLLex)
    group_list = pdata$Disease
    geneSet_list <- list(set1 = sample(rownames(exprSet), 50),
                         set2 = sample(rownames(exprSet), 100),
                         set3 = sample(rownames(exprSet), 150)
    )
  }
  # NOTE(review): group_list is a free variable here (only defined in the dead
  # block above), so it must exist in the calling environment. Consider
  # promoting it to a formal argument.
  # vapply enforces a single numeric p-value per gene set.
  p <- vapply(geneSet_list, function(this_geneSet) {
    PCA_gene_single(exprSet = exprSet, group_list = group_list,
                    this_geneSet = this_geneSet)
  }, numeric(1))
  # lengths() returns the size of each gene set in one vectorized call.
  size <- lengths(geneSet_list)
  data.frame(geneSet_name = names(geneSet_list),
             p = p,
             size = size)
}
|
# Exploratory scratchpad: NEI/SCC emissions data plus ChickWeight/mtcars
# practice plots. Several console typos from the original transcript are
# fixed below (marked BUG FIX).
scc <- readRDS("Source_Classification_Code.rds")
mydata <- readRDS("summarySCC_PM25.rds")
str(mydata)
str(scc)
head(scc)
head(mydata)
mtcars
plot(mtcars$cyl, mtcars$mpg, type = "l")
?plot
NEI <- readRDS("summarySCC_PM25.rds")
names(NEI)
aggdata <- aggregate(NEI$Emissions, list(year = NEI$year), sum)
data <- ChickWeight
head(data)
length(unique(data$Chick))
length(unique(data$Time))
# aggregate() names its value column "x".
aggdata <- aggregate(data$weight, list(year = data$Time), mean)
aggdata
plot(aggdata$year, aggdata$x, type = "b")
boxplot(aggdata$x)
plot(density(aggdata$x))
hist(aggdata$x)
?barplot
barplot(aggdata$x, names = aggdata$year)
# BUG FIX: the aggregated value column is "x", not "weight".
summary(aggdata$x)
is.na(aggdata$x)
boxplot(mtcars$mpg)
library(ggplot2)
# BUG FIX: removed stray fragment "aggdata$weight)" (a syntax error left over
# from editing); inspect the aggregated column instead.
aggdata$x
aggdata
# BUG FIX: the package is ggplot2; library(ggplot) does not exist.
library(ggplot2)
plot1 <- ggplot(data = aggdata, aes(y = aggdata$x, x = aggdata$year)) +
    geom_bar(stat = "identity")
plot1
chick1 <- data[which(data$Chick == 1), ]
# BUG FIX: "Chick == 1 | 2 | 3" compares against TRUE rather than the values
# 2 and 3, and the original line also had an unbalanced bracket; use %in%.
chick1 <- data[which(data$Chick %in% c(1, 2, 3)), ]
chicklist <- subset(data, data$Chick < 5)
z <- which(data$Chick == 8 | data$Chick == 9)
# BUG FIX: the assignment was left unfinished in the transcript; index by z.
chicklist2 <- data[z, ]
str(data)
str(data$Chick)
data$Chick <- as.numeric(data$Chick)
?as.numeric
unique(data$Chick)
head(airmiles)
airmiles
str(data)
c1 <- subset(data, data$Chick < "10")
c1
data <- data.frame(data)
str(mtcars)
mtcars
mtcars2 <- subset(mtcars, mtcars$mpg > 1 & mtcars$mpg < 20)
data2 <- subset(data, data$Chick > 0 & data$Chick < 10)
g <- ggplot(data = c1, aes(x = Time, y = weight,
                           color = Chick)) + geom_point() + geom_line()
g + facet_grid(Diet ~ .)
barplot(c1$weight)
mtcars
mtcars$type <- rownames(mtcars)
rownames(mtcars)
ggplot(data = subset(mtcars, mtcars$type == "Valiant" | mtcars$type == "Fiat 128"),
       aes(x = cyl, y = mpg)) + geom_bar(stat = "identity") + facet_grid(type ~ .)
str(mtcars$type)
test <- subset(mtcars, mtcars$type == "Valiant" | mtcars$type == "Fiat 128")
test
# BUG FIX: aggregate() was missing its FUN argument (trailing comma); use mean.
aggdata2 <- aggregate(data$weight, list(year = data$Time, chick = data$Chick), mean)
|
/exploratory_analysis/project2scratchpad.R
|
no_license
|
sdevine188/coursera_code
|
R
| false
| false
| 1,921
|
r
|
# Exploratory scratchpad: NEI/SCC emissions data plus ChickWeight/mtcars
# practice plots. Several console typos from the original transcript are
# fixed below (marked BUG FIX).
scc <- readRDS("Source_Classification_Code.rds")
mydata <- readRDS("summarySCC_PM25.rds")
str(mydata)
str(scc)
head(scc)
head(mydata)
mtcars
plot(mtcars$cyl, mtcars$mpg, type = "l")
?plot
NEI <- readRDS("summarySCC_PM25.rds")
names(NEI)
aggdata <- aggregate(NEI$Emissions, list(year = NEI$year), sum)
data <- ChickWeight
head(data)
length(unique(data$Chick))
length(unique(data$Time))
# aggregate() names its value column "x".
aggdata <- aggregate(data$weight, list(year = data$Time), mean)
aggdata
plot(aggdata$year, aggdata$x, type = "b")
boxplot(aggdata$x)
plot(density(aggdata$x))
hist(aggdata$x)
?barplot
barplot(aggdata$x, names = aggdata$year)
# BUG FIX: the aggregated value column is "x", not "weight".
summary(aggdata$x)
is.na(aggdata$x)
boxplot(mtcars$mpg)
library(ggplot2)
# BUG FIX: removed stray fragment "aggdata$weight)" (a syntax error left over
# from editing); inspect the aggregated column instead.
aggdata$x
aggdata
# BUG FIX: the package is ggplot2; library(ggplot) does not exist.
library(ggplot2)
plot1 <- ggplot(data = aggdata, aes(y = aggdata$x, x = aggdata$year)) +
    geom_bar(stat = "identity")
plot1
chick1 <- data[which(data$Chick == 1), ]
# BUG FIX: "Chick == 1 | 2 | 3" compares against TRUE rather than the values
# 2 and 3, and the original line also had an unbalanced bracket; use %in%.
chick1 <- data[which(data$Chick %in% c(1, 2, 3)), ]
chicklist <- subset(data, data$Chick < 5)
z <- which(data$Chick == 8 | data$Chick == 9)
# BUG FIX: the assignment was left unfinished in the transcript; index by z.
chicklist2 <- data[z, ]
str(data)
str(data$Chick)
data$Chick <- as.numeric(data$Chick)
?as.numeric
unique(data$Chick)
head(airmiles)
airmiles
str(data)
c1 <- subset(data, data$Chick < "10")
c1
data <- data.frame(data)
str(mtcars)
mtcars
mtcars2 <- subset(mtcars, mtcars$mpg > 1 & mtcars$mpg < 20)
data2 <- subset(data, data$Chick > 0 & data$Chick < 10)
g <- ggplot(data = c1, aes(x = Time, y = weight,
                           color = Chick)) + geom_point() + geom_line()
g + facet_grid(Diet ~ .)
barplot(c1$weight)
mtcars
mtcars$type <- rownames(mtcars)
rownames(mtcars)
ggplot(data = subset(mtcars, mtcars$type == "Valiant" | mtcars$type == "Fiat 128"),
       aes(x = cyl, y = mpg)) + geom_bar(stat = "identity") + facet_grid(type ~ .)
str(mtcars$type)
test <- subset(mtcars, mtcars$type == "Valiant" | mtcars$type == "Fiat 128")
test
# BUG FIX: aggregate() was missing its FUN argument (trailing comma); use mean.
aggdata2 <- aggregate(data$weight, list(year = data$Time, chick = data$Chick), mean)
|
#' gscraper - Basic web scraping to generate local cache of remote website
#'
#' @name gscraper
#' @docType package
NULL
|
/R/gscraper-package.r
|
no_license
|
jefferis/gscraper
|
R
| false
| false
| 121
|
r
|
#' gscraper - Basic web scraping to generate local cache of remote website
#'
#' @name gscraper
#' @docType package
NULL
|
#######################################################################
## Exercises: variable recoding
# Load the data
library('tidyverse')
library('haven')
library('readxl')
# NOTE(review): setwd() in a script hurts portability; prefer project-relative paths.
setwd("D:/TidyData/data")
gss_panel = read_dta("data_gss_panel06.dta")
# Recoding exercise 1: two or more children (1) vs. fewer (0)
gss_panel %>%
  mutate(
    child_atleast2=ifelse(childs_1 < 2,0,1)
  ) %>% count(child_atleast2)
# Recoding exercise 2: race (white = 0 vs. non-white = 1)
gss_panel %>%
  mutate(
    nowhite_1=ifelse(race_1==1,0,1)
  ) %>% count(nowhite_1)
# Recoding exercise 3: the cut() function — bin religious activity
count(gss_panel,as_factor(relactiv_1))
gss_panel %>%
  mutate(
    religiosity4=cut(relactiv_1,c(0,1,4,6,10),
                     c('none','year','month','week'))
  ) %>% count(religiosity4)
# Recoding exercise 4: cross-tabulate race by number-of-children quartile
gss_panel %>%
  mutate(
    nowhite_1=ifelse(race_1==1,0,1),
    child_4group=cut_interval(childs_1,n=4)
  ) %>%
  count(nowhite_1,child_4group) %>%
  drop_na()
# Recoding exercise 5: collapse factor levels
count(gss_panel, as_factor(caremost_1))
gss_panel %>%
  mutate(
    global_warming_concern=fct_collapse(as.character(caremost_1),
                                        "climate"=c("2","5"),
                                        "animals"=c("1","3"),
                                        "Inuit"="4")
  ) %>% count(global_warming_concern)
# Recoding exercise 6
# Load the data
data_foreign_aid = read_xlsx("data_foreign_aid.xlsx")
# Recode the total development-aid variables as required
data_foreign_aid2 = data_foreign_aid %>%
  mutate(
    total_development_aid=str_replace(total_development_aid,"\\$",""),
    development_aid_per_capita=as.double(str_replace(development_aid_per_capita,"\\$","")),
    GDP_percent=as.double(GDP_percent)
  ) %>%
  separate(total_development_aid,c("total_development_aid","char"),sep=" ")
# Check which amount unit was used
count(data_foreign_aid2,char)
# Final clean-up: convert millions into absolute amounts
data_foreign_aid2 = data_foreign_aid2 %>%
  mutate(
    total_development_aid=as.double(total_development_aid) * 10^6
  ) %>% select(-char)
data_foreign_aid2
# Recoding exercise 7: recoding with a user-defined function
# After handling missing values, reverse-code (e.g. 1 -> 7, 7 -> 1)
data_131 = read_spss("data_TESS3_131.sav")
reverse_coding=function(myvariable){
  myvariable=ifelse(myvariable >=1 & myvariable <= 7,
                    myvariable,NA)
  myvariable=(8-myvariable)
}
# FIX: funs() is deprecated in dplyr; passing the function directly to
# mutate_at() is equivalent and works across dplyr versions.
data_131 = data_131 %>%
  mutate(
    Q1r=Q1,Q2r=Q2,Q3r=Q3
  ) %>%
  mutate_at(
    vars(Q1r,Q2r,Q3r),
    reverse_coding
  )
count(data_131,Q1,Q1r)
count(data_131,Q2,Q2r)
count(data_131,Q3,Q3r)
# Recoding exercise 8
# After handling missing values, recode into a strength variable
# (distance from the scale midpoint 4)
make_strength_variable=function(myvariable){
  myvariable=ifelse(myvariable >=1 & myvariable <= 7,
                    myvariable,NA)
  myvariable=abs(myvariable-4)
}
data_131 = data_131 %>%
  mutate(
    Q1s=Q1,Q2s=Q2,Q3s=Q3
  ) %>%
  mutate_at(
    vars(Q1s,Q2s,Q3s),
    make_strength_variable
  )
count(data_131,Q1,Q1s)
count(data_131,Q2,Q2s)
count(data_131,Q3,Q3s)
|
/data/tidyverse_practice_2_2_2_variable_recoding.R
|
no_license
|
harryyang1982/ds_with_tidyverse
|
R
| false
| false
| 3,239
|
r
|
#######################################################################
## Exercises: variable recoding
# Load the data
library('tidyverse')
library('haven')
library('readxl')
# NOTE(review): setwd() in a script hurts portability; prefer project-relative paths.
setwd("D:/TidyData/data")
gss_panel = read_dta("data_gss_panel06.dta")
# Recoding exercise 1: two or more children (1) vs. fewer (0)
gss_panel %>%
  mutate(
    child_atleast2=ifelse(childs_1 < 2,0,1)
  ) %>% count(child_atleast2)
# Recoding exercise 2: race (white = 0 vs. non-white = 1)
gss_panel %>%
  mutate(
    nowhite_1=ifelse(race_1==1,0,1)
  ) %>% count(nowhite_1)
# Recoding exercise 3: the cut() function — bin religious activity
count(gss_panel,as_factor(relactiv_1))
gss_panel %>%
  mutate(
    religiosity4=cut(relactiv_1,c(0,1,4,6,10),
                     c('none','year','month','week'))
  ) %>% count(religiosity4)
# Recoding exercise 4: cross-tabulate race by number-of-children quartile
gss_panel %>%
  mutate(
    nowhite_1=ifelse(race_1==1,0,1),
    child_4group=cut_interval(childs_1,n=4)
  ) %>%
  count(nowhite_1,child_4group) %>%
  drop_na()
# Recoding exercise 5: collapse factor levels
count(gss_panel, as_factor(caremost_1))
gss_panel %>%
  mutate(
    global_warming_concern=fct_collapse(as.character(caremost_1),
                                        "climate"=c("2","5"),
                                        "animals"=c("1","3"),
                                        "Inuit"="4")
  ) %>% count(global_warming_concern)
# Recoding exercise 6
# Load the data
data_foreign_aid = read_xlsx("data_foreign_aid.xlsx")
# Recode the total development-aid variables as required
data_foreign_aid2 = data_foreign_aid %>%
  mutate(
    total_development_aid=str_replace(total_development_aid,"\\$",""),
    development_aid_per_capita=as.double(str_replace(development_aid_per_capita,"\\$","")),
    GDP_percent=as.double(GDP_percent)
  ) %>%
  separate(total_development_aid,c("total_development_aid","char"),sep=" ")
# Check which amount unit was used
count(data_foreign_aid2,char)
# Final clean-up: convert millions into absolute amounts
data_foreign_aid2 = data_foreign_aid2 %>%
  mutate(
    total_development_aid=as.double(total_development_aid) * 10^6
  ) %>% select(-char)
data_foreign_aid2
# Recoding exercise 7: recoding with a user-defined function
# After handling missing values, reverse-code (e.g. 1 -> 7, 7 -> 1)
data_131 = read_spss("data_TESS3_131.sav")
reverse_coding=function(myvariable){
  myvariable=ifelse(myvariable >=1 & myvariable <= 7,
                    myvariable,NA)
  myvariable=(8-myvariable)
}
# FIX: funs() is deprecated in dplyr; passing the function directly to
# mutate_at() is equivalent and works across dplyr versions.
data_131 = data_131 %>%
  mutate(
    Q1r=Q1,Q2r=Q2,Q3r=Q3
  ) %>%
  mutate_at(
    vars(Q1r,Q2r,Q3r),
    reverse_coding
  )
count(data_131,Q1,Q1r)
count(data_131,Q2,Q2r)
count(data_131,Q3,Q3r)
# Recoding exercise 8
# After handling missing values, recode into a strength variable
# (distance from the scale midpoint 4)
make_strength_variable=function(myvariable){
  myvariable=ifelse(myvariable >=1 & myvariable <= 7,
                    myvariable,NA)
  myvariable=abs(myvariable-4)
}
data_131 = data_131 %>%
  mutate(
    Q1s=Q1,Q2s=Q2,Q3s=Q3
  ) %>%
  mutate_at(
    vars(Q1s,Q2s,Q3s),
    make_strength_variable
  )
count(data_131,Q1,Q1s)
count(data_131,Q2,Q2s)
count(data_131,Q3,Q3s)
|
# Fuzz-test replay: call dynutils::project_to_segments with a fixed argument
# set (a 5x7 matrix containing extreme doubles) and print the result structure.
fuzz_x <- matrix(c(4.65661649758392e-10, 1.37982776272053e-309,
                   2.32903286132618e+96, rep(0, 32)),
                 nrow = 5L, ncol = 7L)
testlist <- list(end = NULL,
                 start = NULL,
                 x = fuzz_x,
                 segment_end = matrix(0, nrow = 1L, ncol = 1L),
                 segment_start = matrix(0, nrow = 1L, ncol = 1L))
result <- do.call(dynutils::project_to_segments, testlist)
str(result)
|
/dynutils/inst/testfiles/project_to_segments/AFL_project_to_segments/project_to_segments_valgrind_files/1609871305-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 409
|
r
|
# Fuzz-test replay: call dynutils::project_to_segments with a fixed argument
# set (a 5x7 matrix containing extreme doubles) and print the result structure.
fuzz_x <- matrix(c(4.65661649758392e-10, 1.37982776272053e-309,
                   2.32903286132618e+96, rep(0, 32)),
                 nrow = 5L, ncol = 7L)
testlist <- list(end = NULL,
                 start = NULL,
                 x = fuzz_x,
                 segment_end = matrix(0, nrow = 1L, ncol = 1L),
                 segment_start = matrix(0, nrow = 1L, ncol = 1L))
result <- do.call(dynutils::project_to_segments, testlist)
str(result)
|
# Data-visualization homework (reports 1-4): Chosun-dynasty kings chart,
# school-song word cloud, Titanic mosaic plot, and a Korea map with
# fine-dust (PM) labels per city.
#library(tidyverse)
#library(showtext) # fonts, install.packages("showtext")
#library(extrafont) # install.packages("extrafont")
#font_import(prompt = F, pattern = "D2")
#loadfonts(quiet = F)
par(family = "AppleGothic")
# report 1
# FIX: "\ " is not a valid escape sequence in an R string and fails to parse;
# a plain space works fine inside file paths.
kings = read.table("datavis/chosun kings.txt", header=T)
P <- cumsum(kings$Life)
plot(1:27, P, type="n", xlab="Order", ylab="Accumlated Life",
     main="Chosun Dynasty")
polygon(c(0,0,1,1), c(0,P[1],P[1],0))
for(i in 2:27) {
  polygon(c(i-1,i-1,i,i), c(P[i-1], P[i], P[i], P[i-1]),
          col=rainbow(27)[i])
}
segments(0, 0, 27, 1243, lty="dotted", lwd=2, col="darkgreen")
# report 2
# https://www.knou.ac.kr/knou/pbre/EHPSchlSong.jsp
#install.packages(c("tm","wordcloud"))
#install.packages("rJava")
#install.packages("KoNLP")
library(tm)
library(wordcloud)
library(rJava)
library(KoNLP)
par(family="Gulim")
ktext = Corpus(DirSource("datavis/gyoga/",
                         encoding="UTF-8", recursive = T))
words1 = unlist(sapply(ktext[[1]]$content, extractNoun, USE.NAMES = F))
words1freq = table(words1)
# Drop single-character Korean stop words before plotting.
words1freq <- words1freq[!(names(words1freq)
                           %in% c("곳", "교", "리", "속", "앞",
                                  "한"))]
sort(words1freq, decreasing=T)[1:12]
wordcloud(names(words1freq), freq=words1freq, max.words=50)
# report 3
data(Titanic)
mosaicplot(~ Class+Survived, data=Titanic, color=c("grey", "red"))
# report 4
#install.packages("sp")
library(sp)
#gadm = readRDS("datavis/gadm36_KOR_0_sp.rds")
gadm0 = readRDS("datavis/KOR_adm0.rds")
plot(gadm0)
gadm2 = readRDS("datavis/KOR_adm2.rds")
seoul = gadm2[gadm2$NAME_1=="Seoul",]
plot(seoul)
gadm1 = readRDS("datavis/KOR_adm1.rds")
seoul = gadm1[gadm1$NAME_1 == "Seoul",]
plot(seoul, col="green")
library(sp)
gadm1 = readRDS("datavis/KOR_adm1.rds")
plot(gadm1, col="grey")
pollution = read.table("datavis/pollution.txt", header=T)
pollution$width = 2/5
pollution$height = 0.1
pollution$space = 0.1
spaceDif = 0.05
# draw a point on each city (경도 = longitude, 위도 = latitude columns)
for (i in 1:dim(pollution)[1]) {
  coords = SpatialPoints(data.frame(
    cbind(pollution$경도[i], pollution$위도[i]))
    , proj4string = CRS("+proj=longlat"))
  plot(coords, col = "red3", pch = 20, cex = 1.5, add = T)
}
# draw a white rectangle behind each city label
for (i in 1:dim(pollution)[1]) {
  a <- c(pollution$경도[i] - pollution$width[i],
         pollution$경도[i] + pollution$width[i],
         pollution$경도[i] + pollution$width[i],
         pollution$경도[i] - pollution$width[i])
  b <- c(pollution$위도[i] + pollution$space[i]
         - pollution$height[i] + spaceDif,
         pollution$위도[i] + pollution$space[i]
         - pollution$height[i] + spaceDif,
         pollution$위도[i] + pollution$space[i]
         + pollution$height[i] + spaceDif,
         pollution$위도[i] + pollution$space[i]
         + pollution$height[i] + spaceDif)
  polygon(x=a, y=b, col="white")
}
library(stringr)
cityLabels <- str_c(pollution$시도, pollution$미세먼지농도)
cityCoord <- matrix(c(t(pollution$경도),
                      t(pollution$위도 + pollution$space
                        + spaceDif)),
                    dim(pollution)[1])
# NOTE(review): text() has no `bg` argument; it is silently ignored here.
text(cityCoord, labels = cityLabels, cex = 0.6, bg="white")
text(128, 38.6, labels = "도시별 미세먼지농도", cex = 2)
|
/R/datavis.r.R
|
permissive
|
tolkien/misc
|
R
| false
| false
| 3,271
|
r
|
# Data-visualization homework (reports 1-4): Chosun-dynasty kings chart,
# school-song word cloud, Titanic mosaic plot, and a Korea map with
# fine-dust (PM) labels per city.
#library(tidyverse)
#library(showtext) # fonts, install.packages("showtext")
#library(extrafont) # install.packages("extrafont")
#font_import(prompt = F, pattern = "D2")
#loadfonts(quiet = F)
par(family = "AppleGothic")
# report 1
# FIX: "\ " is not a valid escape sequence in an R string and fails to parse;
# a plain space works fine inside file paths.
kings = read.table("datavis/chosun kings.txt", header=T)
P <- cumsum(kings$Life)
plot(1:27, P, type="n", xlab="Order", ylab="Accumlated Life",
     main="Chosun Dynasty")
polygon(c(0,0,1,1), c(0,P[1],P[1],0))
for(i in 2:27) {
  polygon(c(i-1,i-1,i,i), c(P[i-1], P[i], P[i], P[i-1]),
          col=rainbow(27)[i])
}
segments(0, 0, 27, 1243, lty="dotted", lwd=2, col="darkgreen")
# report 2
# https://www.knou.ac.kr/knou/pbre/EHPSchlSong.jsp
#install.packages(c("tm","wordcloud"))
#install.packages("rJava")
#install.packages("KoNLP")
library(tm)
library(wordcloud)
library(rJava)
library(KoNLP)
par(family="Gulim")
ktext = Corpus(DirSource("datavis/gyoga/",
                         encoding="UTF-8", recursive = T))
words1 = unlist(sapply(ktext[[1]]$content, extractNoun, USE.NAMES = F))
words1freq = table(words1)
# Drop single-character Korean stop words before plotting.
words1freq <- words1freq[!(names(words1freq)
                           %in% c("곳", "교", "리", "속", "앞",
                                  "한"))]
sort(words1freq, decreasing=T)[1:12]
wordcloud(names(words1freq), freq=words1freq, max.words=50)
# report 3
data(Titanic)
mosaicplot(~ Class+Survived, data=Titanic, color=c("grey", "red"))
# report 4
#install.packages("sp")
library(sp)
#gadm = readRDS("datavis/gadm36_KOR_0_sp.rds")
gadm0 = readRDS("datavis/KOR_adm0.rds")
plot(gadm0)
gadm2 = readRDS("datavis/KOR_adm2.rds")
seoul = gadm2[gadm2$NAME_1=="Seoul",]
plot(seoul)
gadm1 = readRDS("datavis/KOR_adm1.rds")
seoul = gadm1[gadm1$NAME_1 == "Seoul",]
plot(seoul, col="green")
library(sp)
gadm1 = readRDS("datavis/KOR_adm1.rds")
plot(gadm1, col="grey")
pollution = read.table("datavis/pollution.txt", header=T)
pollution$width = 2/5
pollution$height = 0.1
pollution$space = 0.1
spaceDif = 0.05
# draw a point on each city (경도 = longitude, 위도 = latitude columns)
for (i in 1:dim(pollution)[1]) {
  coords = SpatialPoints(data.frame(
    cbind(pollution$경도[i], pollution$위도[i]))
    , proj4string = CRS("+proj=longlat"))
  plot(coords, col = "red3", pch = 20, cex = 1.5, add = T)
}
# draw a white rectangle behind each city label
for (i in 1:dim(pollution)[1]) {
  a <- c(pollution$경도[i] - pollution$width[i],
         pollution$경도[i] + pollution$width[i],
         pollution$경도[i] + pollution$width[i],
         pollution$경도[i] - pollution$width[i])
  b <- c(pollution$위도[i] + pollution$space[i]
         - pollution$height[i] + spaceDif,
         pollution$위도[i] + pollution$space[i]
         - pollution$height[i] + spaceDif,
         pollution$위도[i] + pollution$space[i]
         + pollution$height[i] + spaceDif,
         pollution$위도[i] + pollution$space[i]
         + pollution$height[i] + spaceDif)
  polygon(x=a, y=b, col="white")
}
library(stringr)
cityLabels <- str_c(pollution$시도, pollution$미세먼지농도)
cityCoord <- matrix(c(t(pollution$경도),
                      t(pollution$위도 + pollution$space
                        + spaceDif)),
                    dim(pollution)[1])
# NOTE(review): text() has no `bg` argument; it is silently ignored here.
text(cityCoord, labels = cityLabels, cex = 0.6, bg="white")
text(128, 38.6, labels = "도시별 미세먼지농도", cex = 2)
|
## Split the sampled "No Prof" corpus into a training set and a held-off test
## set. NOTE(review): the original comment said ".5%" and "*_data20*" paths,
## but the code samples 2% into the *_data5* directories — behavior kept
## as-is, mismatch flagged for the author to confirm.
con.FileNames <- list.files(path = "/Users/saurabh/Desktop/Data Science/Capstone/final/en_US_NoProf") # list of files in the clean corpus
r <- length(con.FileNames) # number of text files in the clean corpus
for (i in seq_len(r)) {
  # Destination paths for this file's test/train splits.
  # FIX: paste0() has no `sep` argument; the stray `sep = NULL` was silently
  # dropped as a zero-length element and has been removed.
  testfile2write <- paste0("/Users/saurabh/Desktop/Data Science/Capstone/final/test_data5/test.", con.FileNames[i])
  trainfile2write <- paste0("/Users/saurabh/Desktop/Data Science/Capstone/final/train_data5minus/train.", con.FileNames[i])
  # open connections to write the files
  conTest <- file(testfile2write, open = "wt")
  conTrain <- file(trainfile2write, open = "wt")
  file2read <- paste0("/Users/saurabh/Desktop/Data Science/Capstone/final/train_data5/train.", con.FileNames[i])
  textdata <- readLines(file2read)
  l <- length(textdata)
  # Randomly pick 2% of the lines for the held-off set.
  sample.indicies <- sample.int(l, size = l * .02)
  if (length(sample.indicies) > 0) {
    test.subset <- textdata[sample.indicies]
    newtrain.set <- textdata[-sample.indicies]
  } else {
    # FIX: guard the empty-sample case — x[-integer(0)] selects NOTHING in R,
    # which would silently drop every training line for tiny files.
    test.subset <- character(0)
    newtrain.set <- textdata
  }
  writeLines(test.subset, conTest)
  writeLines(newtrain.set, conTrain)
  close(conTest); close(conTrain) # close connections
}
|
/SampleHeldOff.R
|
no_license
|
soniasharma/Laguage-Model---Word-Prediction
|
R
| false
| false
| 1,451
|
r
|
## Split the sampled "No Prof" corpus into a training set and a held-off test
## set. NOTE(review): the original comment said ".5%" and "*_data20*" paths,
## but the code samples 2% into the *_data5* directories — behavior kept
## as-is, mismatch flagged for the author to confirm.
con.FileNames <- list.files(path = "/Users/saurabh/Desktop/Data Science/Capstone/final/en_US_NoProf") # list of files in the clean corpus
r <- length(con.FileNames) # number of text files in the clean corpus
for (i in seq_len(r)) {
  # Destination paths for this file's test/train splits.
  # FIX: paste0() has no `sep` argument; the stray `sep = NULL` was silently
  # dropped as a zero-length element and has been removed.
  testfile2write <- paste0("/Users/saurabh/Desktop/Data Science/Capstone/final/test_data5/test.", con.FileNames[i])
  trainfile2write <- paste0("/Users/saurabh/Desktop/Data Science/Capstone/final/train_data5minus/train.", con.FileNames[i])
  # open connections to write the files
  conTest <- file(testfile2write, open = "wt")
  conTrain <- file(trainfile2write, open = "wt")
  file2read <- paste0("/Users/saurabh/Desktop/Data Science/Capstone/final/train_data5/train.", con.FileNames[i])
  textdata <- readLines(file2read)
  l <- length(textdata)
  # Randomly pick 2% of the lines for the held-off set.
  sample.indicies <- sample.int(l, size = l * .02)
  if (length(sample.indicies) > 0) {
    test.subset <- textdata[sample.indicies]
    newtrain.set <- textdata[-sample.indicies]
  } else {
    # FIX: guard the empty-sample case — x[-integer(0)] selects NOTHING in R,
    # which would silently drop every training line for tiny files.
    test.subset <- character(0)
    newtrain.set <- textdata
  }
  writeLines(test.subset, conTest)
  writeLines(newtrain.set, conTrain)
  close(conTest); close(conTrain) # close connections
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Meshfns.R
\name{convert_INLA_mesh_MVST}
\alias{convert_INLA_mesh_MVST}
\title{Convert INLA mesh object MVST FEM class}
\usage{
convert_INLA_mesh_MVST(mesh)
}
\arguments{
\item{mesh}{The INLA mesh to convert (inla.mesh)}
}
\value{
MVST finite element mesh object
}
\description{
The MVST R package requires the FEM mesh components to be in a different object class
to that of 'inla.mesh'. This function converts the 'inla.mesh' class objects to required
MVST class, without export of the mesh to disk.
}
|
/man/convert_INLA_mesh_MVST.Rd
|
permissive
|
andrewzm/MVST
|
R
| false
| true
| 584
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Meshfns.R
\name{convert_INLA_mesh_MVST}
\alias{convert_INLA_mesh_MVST}
\title{Convert INLA mesh object MVST FEM class}
\usage{
convert_INLA_mesh_MVST(mesh)
}
\arguments{
\item{mesh}{The INLA mesh to convert (inla.mesh)}
}
\value{
MVST finite element mesh object
}
\description{
The MVST R package requires the FEM mesh components to be in a different object class
to that of 'inla.mesh'. This function converts the 'inla.mesh' class objects to required
MVST class, without export of the mesh to disk.
}
|
##################################################################################
# Create data for Nashville OD Data Visulization using ActivityViz
# Author: Aditya Gore
##################################################################################
#### Sections
# 1. Load/Install required packages
# 2. Define Constants
# 3. Load required databases
# 4. Create output data
# 4a. Passive Data Scenario
# 5. Write output data
### Load/Install required packages ###############################################
##################################################################################
library(data.table)
library(jsonlite)
library(stringr)
library(sf)
library(geojsonsf)
library(omxr)
library(rmapshaper)
library(tigris)
library(tidyverse)
### Define Constants #############################################################
##################################################################################
# Input files
# ODME demand matrices (CSV exports), one per time-of-day period.
data_dir = file.path(getwd(), "data")
od_AM_mx_file = file.path(data_dir, "raw_data", "ODME_AM_i7.csv")
od_MD_mx_file = file.path(data_dir, "raw_data", "ODME_MD_i7.csv")
od_PM_mx_file = file.path(data_dir, "raw_data", "ODME_PM_i7.csv")
od_OP_mx_file = file.path(data_dir, "raw_data", "ODME_OP_i7.csv")
# Geography input files
taz_file = file.path(data_dir, "raw_shapefile", "TAZ", "TAZ_nashville_split.shp")
ext_zone_file = file.path(data_dir, "raw_shapefile", "ExtZones", "nashville_nodes_2010_ExtSt.shp")
# Output files
od_output_dir = file.path(getwd(), "OD")
# Passive_Data
taz_shapefile_file = file.path(getwd(),
                               "taz.json") # TAZ Shapefile
county_shapefile_file = file.path(getwd(),
                                  "counties.json") # Counties Shapefile
county_filter_file = file.path(od_output_dir,
                               "counties.csv") # Counties Shapefile
daily_dest_file = file.path(od_output_dir,
                            "daily_dest_trips.csv") #
daily_overall_file = file.path(od_output_dir,
                               "daily_overall_trips.csv") # Daily overall passive data OD charts
daily_overall_time_file = file.path(od_output_dir,
                                    "daily_overall_period_trips.csv") # Daily overall passive data OD
# charts by Timezone
daily_am_file = file.path(od_output_dir,
                          "daily_am_trips.csv") # Daily am passive data OD charts
daily_md_file = file.path(od_output_dir,
                          "daily_md_trips.csv") # Daily md passive data OD charts
daily_pm_file = file.path(od_output_dir,
                          "daily_pm_trips.csv") # Daily pm passive data OD charts
daily_op_file = file.path(od_output_dir,
                          "daily_op_trips.csv") # Daily op passive data OD charts
daily_tod_file = file.path(od_output_dir,
                           "trip_tod.csv") #
### Load required datasets #######################################################
##################################################################################
od_AM_dt = fread(od_AM_mx_file)
od_MD_dt = fread(od_MD_mx_file)
od_PM_dt = fread(od_PM_mx_file)
od_OP_dt = fread(od_OP_mx_file)
# ODME estimation can yield small negative volumes; clamp them to zero
# before aggregating (in-place := updates).
od_AM_dt[Auto_Residents < 0, Auto_Residents:=0]
od_MD_dt[Auto_Residents < 0, Auto_Residents:=0]
od_PM_dt[Auto_Residents < 0, Auto_Residents:=0]
od_OP_dt[Auto_Residents < 0, Auto_Residents:=0]
od_AM_dt[Auto_Visitors < 0, Auto_Visitors:=0]
od_MD_dt[Auto_Visitors < 0, Auto_Visitors:=0]
od_PM_dt[Auto_Visitors < 0, Auto_Visitors:=0]
od_OP_dt[Auto_Visitors < 0, Auto_Visitors:=0]
# Tag each period table, then stack into one long trip table.
od_AM_dt[, TYPE:="AM"]
od_MD_dt[, TYPE:="MIDDAY"]
od_PM_dt[, TYPE:="PM"]
od_OP_dt[, TYPE:="OFFPEAK"]
trip_dt = rbindlist(list(od_AM_dt, od_MD_dt, od_PM_dt, od_OP_dt),
                    use.names = TRUE,
                    fill = TRUE)
taz_sf = st_read(taz_file)
taz_dt = data.table(taz_sf)
ext_zones_sf = st_read(ext_zone_file)
ext_zones_dt = data.table(ext_zones_sf)
### Create output data ###########################################################
##################################################################################
### Simplify shapefile ###########################################################
##################################################################################
# format(object.size(taz_sf), units="Mb")
# taz_gg = ggplot(taz_sf) + geom_sf()
# Attach County Data
state_fips = "47" # Tennessee
# county_fips = "037" # Davidson County
county_fips = NULL
# bg_sf = st_as_sf(block_groups(state_fips, county_fips))
# Pull Tennessee county polygons (tigris) and match the TAZ CRS before
# intersecting.
county_sf = st_as_sf(counties(state_fips))
county_sf = st_transform(county_sf, st_crs(taz_sf))
taz_add_sf = st_intersection(taz_sf, county_sf[,c("COUNTYFP", "NAME", "NAMELSAD", "geometry")])
taz_add_sf = taz_add_sf[!is.na(taz_add_sf$ID_NEW_NEW),]
# Drop sliver intersections: keep only TAZ/county pieces covering more than
# 10% of the TAZ's area, then carry the county NAME back onto taz_sf.
taz_add_sf$IAREA = units::set_units(st_area(taz_add_sf), "mi^2")
taz_add_sf$prop = as.numeric(taz_add_sf$IAREA)/taz_add_sf$AREA
taz_add_sf = taz_add_sf[taz_add_sf$prop > 1e-1,]
taz_sf$NAME = taz_add_sf$NAME[match(taz_sf$ID_NEW_NEW, taz_add_sf$ID_NEW_NEW)]
taz_add_dt = data.table(taz_add_sf)
# Simplify TAZ polygons (rmapshaper) to shrink the output JSON; keep_shapes
# prevents small zones from vanishing entirely.
taz_simplify_sf = st_as_sf(ms_simplify(input = as(taz_add_sf[,c("ID_NEW_NEW",
                                                                "NAME",
                                                                "geometry")], "Spatial"),
                                       keep = 0.04,
                                       weighting = 0.8,
                                       keep_shapes = TRUE))
colnames(taz_simplify_sf) = c("id", "NAME", "geometry")
taz_simplify_sf = taz_simplify_sf[order(taz_simplify_sf$id),]
# county_simplify_sf = taz_add_sf %>% group_by(NAME, NAMELSAD) %>% summarize(AREA=sum(AREA))
# county_simplify_sf = st_as_sf(ms_simplify(input = as(county_simplify_sf[,c("NAME",
#                                                                            "NAMELSAD",
#                                                                            "geometry")], "Spatial"),
#                                           keep = 0.04,
#                                           weighting = 0.8,
#                                           keep_shapes = TRUE))
# format(object.size(taz_simplify_sf), units="Mb")
# taz_simplify_gg = ggplot(taz_simplify_sf) + geom_sf()
#
# gridExtra::grid.arrange(taz_gg, taz_simplify_gg, nrow = 1)
### Passive Data Scenario ########################################################
##################################################################################
# County Filter File
# Identity matrix over counties -> one indicator column per county for the
# ActivityViz filter CSV.
county_filter_sf = county_sf[county_sf$NAME %in% taz_simplify_sf$NAME,c("NAME", "geometry")]
county_mx = diag(nrow=nrow(county_filter_sf))
colnames(county_mx) = county_filter_sf$NAME
county_filter_dt = data.table(ID=seq_along(county_filter_sf$NAME),
                              COUNTY=county_filter_sf$NAME,
                              data.table(county_mx))
# County File
# Order counties with "External" first, the rest reverse-alphabetical, and
# assign sequential IDs for the chart geography.
county_filter_sf = county_filter_sf[order(county_filter_sf$NAME),c("NAME", "geometry")]
ext_zones_sf$NAME = "External"
ext_add_sf = ext_zones_sf[,c("NAME", "geometry")]
order_names = c("External", rev(sort(county_filter_sf$NAME)))
# county_filter_sf = rbind(county_filter_sf, ext_add_sf)
county_filter_sf = county_filter_sf %>% group_by(NAME) %>% summarise() %>% ungroup()
county_filter_sf =county_filter_sf[order(-(match(county_filter_sf$NAME,order_names))),]
county_filter_sf$ID = seq_len(nrow(county_filter_sf))
county_filter_sf = county_filter_sf[,c("ID", "NAME", "geometry")]
# Chord Diagram
# Join county names onto each trip end (update-joins); zones only found in
# the external-station layer are labelled "External".
trip_dt[taz_add_dt,COUNTY_O:=i.NAME, on=.(origin=ID_NEW_NEW)]
trip_dt[taz_add_dt,COUNTY_D:=i.NAME, on=.(destination=ID_NEW_NEW)]
trip_dt[origin %in% ext_zones_dt$ID_NEW & is.na(COUNTY_O),
        COUNTY_O:="External"]
trip_dt[destination %in% ext_zones_dt$ID_NEW & is.na(COUNTY_D),
        COUNTY_D:="External"]
# Overall
# Daily Destination
daily_dest_dt = trip_dt[,.(#ALL      =round(sum(Auto_Residents+Auto_Visitors), 2),
                           RESIDENTS=round(sum(Auto_Residents), 2),
                           VISITORS =round(sum(Auto_Visitors), 2)),
                        by = .(ZONE = destination, COUNTY = COUNTY_D)]
# daily_dest_dt[,ZONE:=county_filter_sf$ID[match(COUNTY,county_filter_sf$NAME)]]
setcolorder(daily_dest_dt, c("ZONE"))
# Long format: one row per zone/county/residency with its trip quantity.
daily_dest_dt = melt.data.table(daily_dest_dt,
                                id.vars = c("ZONE", "COUNTY"),
                                variable.name = "RESIDENCY",
                                variable.factor = FALSE,
                                value.name = "QUANTITY",
                                value.factor = FALSE)
daily_dest_dt = daily_dest_dt[order(ZONE, COUNTY, match(RESIDENCY,c("RESIDENTS", "VISITORS", "ALL")))]
# Daily Total
daily_overall_dt = trip_dt[,.(TOTAL    =round(sum(Auto_Residents+Auto_Visitors), 2),
                              RESIDENTS=round(sum(Auto_Residents), 2),
                              VISITORS =round(sum(Auto_Visitors), 2)),
                           .(FROM = COUNTY_O,
                             TO   = COUNTY_D)]
# Complete the FROM/TO grid (CJ = cross join on the key) so every county
# pair appears, then zero-fill pairs with no observed trips.
setkey(daily_overall_dt, FROM, TO)
daily_overall_dt = daily_overall_dt[CJ(FROM, TO, unique = TRUE)]
daily_overall_dt[is.na(TOTAL), TOTAL:=0]
daily_overall_dt[is.na(RESIDENTS), RESIDENTS:=0]
daily_overall_dt[is.na(VISITORS), VISITORS:=0]
## TIME Distribution
daily_time_dt = trip_dt[,.(TOTAL    =round(sum(Auto_Residents+Auto_Visitors), 2),
                           RESIDENTS=round(sum(Auto_Residents), 2),
                           VISITORS =round(sum(Auto_Visitors), 2)),
                        .(FROM     = COUNTY_O,
                          TO       = COUNTY_D,
                          TIMEZONE = TYPE)]
# Cast each measure wide by period, then merge so the suffixes yield columns
# like AM_TOTAL / AM_RESIDENTS / AM_VISITORS per FROM/TO pair.
time_temp_dt = dcast.data.table(daily_time_dt, FROM+TO~TIMEZONE, value.var = "TOTAL")
time_temp_dt = merge(time_temp_dt,
                     dcast.data.table(daily_time_dt, FROM+TO~TIMEZONE, value.var = "RESIDENTS"),
                     by = c("FROM", "TO"),
                     all = TRUE,
                     suffixes = c("_TOTAL", ""))
time_temp_dt = merge(time_temp_dt,
                     dcast.data.table(daily_time_dt, FROM+TO~TIMEZONE, value.var = "VISITORS"),
                     by = c("FROM", "TO"),
                     all = TRUE,
                     suffixes = c("_RESIDENTS", "_VISITORS"))
daily_time_dt = copy(time_temp_dt)
rm(time_temp_dt)
setkey(daily_time_dt, FROM, TO)
daily_time_dt = daily_time_dt[CJ(FROM, TO, unique = TRUE)]
# Zero-fill every measure column introduced by the grid completion above.
trip_names = setdiff(names(daily_time_dt), c("FROM", "TO"))
daily_time_dt[, c(trip_names):=lapply(.SD, function(x) {x[is.na(x)] = 0; x}),
              .SDcols=c(trip_names)]
# Time Total Resident Visitor
time_trip_dt = trip_dt[,.(TOTAL    =round(sum(Auto_Residents+Auto_Visitors), 2),
                          RESIDENTS=round(sum(Auto_Residents), 2),
                          VISITORS =round(sum(Auto_Visitors), 2)),
                       .(FROM     = COUNTY_O,
                         TO       = COUNTY_D,
                         TIMEZONE = TYPE)]
## AM
am_trips_dt = time_trip_dt[TIMEZONE=="AM"][,TIMEZONE:=NULL][]
setkey(am_trips_dt, FROM, TO)
am_trips_dt = am_trips_dt[CJ(FROM, TO, unique = TRUE)]
am_trips_dt[is.na(TOTAL), TOTAL:=0]
am_trips_dt[is.na(RESIDENTS), RESIDENTS:=0]
am_trips_dt[is.na(VISITORS), VISITORS:=0]
## MD
md_trips_dt = time_trip_dt[TIMEZONE=="MIDDAY"][,TIMEZONE:=NULL][]
setkey(md_trips_dt, FROM, TO)
md_trips_dt = md_trips_dt[CJ(FROM, TO, unique = TRUE)]
md_trips_dt[is.na(TOTAL), TOTAL:=0]
md_trips_dt[is.na(RESIDENTS), RESIDENTS:=0]
md_trips_dt[is.na(VISITORS), VISITORS:=0]
## PM
pm_trips_dt = time_trip_dt[TIMEZONE=="PM"][,TIMEZONE:=NULL][]
setkey(pm_trips_dt, FROM, TO)
pm_trips_dt = pm_trips_dt[CJ(FROM, TO, unique = TRUE)]
pm_trips_dt[is.na(TOTAL), TOTAL:=0]
pm_trips_dt[is.na(RESIDENTS), RESIDENTS:=0]
pm_trips_dt[is.na(VISITORS), VISITORS:=0]
## OP
op_trips_dt = time_trip_dt[TIMEZONE=="OFFPEAK"][,TIMEZONE:=NULL][]
setkey(op_trips_dt, FROM, TO)
op_trips_dt = op_trips_dt[CJ(FROM, TO, unique = TRUE)]
op_trips_dt[is.na(TOTAL), TOTAL:=0]
op_trips_dt[is.na(RESIDENTS), RESIDENTS:=0]
op_trips_dt[is.na(VISITORS), VISITORS:=0]
# Time of day vs Resident/Visitor
tod_trips_dt = trip_dt[,.(#ALL =round(sum(Auto_Residents+Auto_Visitors), 2),
RESIDENTS=round(sum(Auto_Residents), 2),
VISITORS =round(sum(Auto_Visitors), 2)),
.(`TIME OF DAY` = TYPE)]
tod_trips_dt = melt.data.table(tod_trips_dt, id.vars = c("TIME OF DAY"),
variable.name = "PERSON GROUP",
variable.factor = FALSE,
value.name = "TRIPS",
value.factor = FALSE)
tod_trips_dt[,CHART:="TRIPS BY TIME OF DAY"]
### Write output data ############################################################
##################################################################################
## Passive Data
# Shapefile
st_write(taz_simplify_sf, dsn = taz_shapefile_file, driver = "GeoJSON", delete_dsn = TRUE)
st_write(county_filter_sf,
dsn = county_shapefile_file, driver = "GeoJSON", delete_dsn = TRUE)
# Filter File
fwrite(county_filter_dt, file = county_filter_file)
# Trip OD
fwrite(daily_dest_dt[COUNTY!="External"], file = daily_dest_file)
fwrite(daily_overall_dt, file = daily_overall_file)
fwrite(daily_time_dt, file = daily_overall_time_file)
fwrite(am_trips_dt, file = daily_am_file)
fwrite(md_trips_dt, file = daily_md_file)
fwrite(pm_trips_dt, file = daily_pm_file)
fwrite(op_trips_dt, file = daily_op_file)
fwrite(tod_trips_dt, file = daily_tod_file)
|
/data/nashville/scripts/activity_viz_passive_data.R
|
no_license
|
steventrev/ActivityViz_Data
|
R
| false
| false
| 14,190
|
r
|
##################################################################################
# Create data for Nashville OD Data Visulization using ActivityViz
# Author: Aditya Gore
##################################################################################
#### Sections
# 1. Load/Install required packages
# 2. Define Constants
# 3. Load required databases
# 4. Create output data
# 4a. Passive Data Scenario
# 5. Write output data
### Load/Install required packages ###############################################
##################################################################################
library(data.table)
library(jsonlite)
library(stringr)
library(sf)
library(geojsonsf)
library(omxr)
library(rmapshaper)
library(tigris)
library(tidyverse)
### Define Constants #############################################################
##################################################################################
# Input files
data_dir = file.path(getwd(), "data")
od_AM_mx_file = file.path(data_dir, "raw_data", "ODME_AM_i7.csv")
od_MD_mx_file = file.path(data_dir, "raw_data", "ODME_MD_i7.csv")
od_PM_mx_file = file.path(data_dir, "raw_data", "ODME_PM_i7.csv")
od_OP_mx_file = file.path(data_dir, "raw_data", "ODME_OP_i7.csv")
# Geography input files
taz_file = file.path(data_dir, "raw_shapefile", "TAZ", "TAZ_nashville_split.shp")
ext_zone_file = file.path(data_dir, "raw_shapefile", "ExtZones", "nashville_nodes_2010_ExtSt.shp")
# Output files
od_output_dir = file.path(getwd(), "OD")
# Passive_Data
taz_shapefile_file = file.path(getwd(),
"taz.json") # TAZ Shapefile
county_shapefile_file = file.path(getwd(),
"counties.json") # Counties Shapefile
county_filter_file = file.path(od_output_dir,
"counties.csv") # Counties Shapefile
daily_dest_file = file.path(od_output_dir,
"daily_dest_trips.csv") #
daily_overall_file = file.path(od_output_dir,
"daily_overall_trips.csv") # Daily overall passive data OD charts
daily_overall_time_file = file.path(od_output_dir,
"daily_overall_period_trips.csv") # Daily overall passive data OD
# charts by Timezone
daily_am_file = file.path(od_output_dir,
"daily_am_trips.csv") # Daily am passive data OD charts
daily_md_file = file.path(od_output_dir,
"daily_md_trips.csv") # Daily md passive data OD charts
daily_pm_file = file.path(od_output_dir,
"daily_pm_trips.csv") # Daily pm passive data OD charts
daily_op_file = file.path(od_output_dir,
"daily_op_trips.csv") # Daily op passive data OD charts
daily_tod_file = file.path(od_output_dir,
"trip_tod.csv") #
### Load required datasets #######################################################
##################################################################################
od_AM_dt = fread(od_AM_mx_file)
od_MD_dt = fread(od_MD_mx_file)
od_PM_dt = fread(od_PM_mx_file)
od_OP_dt = fread(od_OP_mx_file)
od_AM_dt[Auto_Residents < 0, Auto_Residents:=0]
od_MD_dt[Auto_Residents < 0, Auto_Residents:=0]
od_PM_dt[Auto_Residents < 0, Auto_Residents:=0]
od_OP_dt[Auto_Residents < 0, Auto_Residents:=0]
od_AM_dt[Auto_Visitors < 0, Auto_Visitors:=0]
od_MD_dt[Auto_Visitors < 0, Auto_Visitors:=0]
od_PM_dt[Auto_Visitors < 0, Auto_Visitors:=0]
od_OP_dt[Auto_Visitors < 0, Auto_Visitors:=0]
od_AM_dt[, TYPE:="AM"]
od_MD_dt[, TYPE:="MIDDAY"]
od_PM_dt[, TYPE:="PM"]
od_OP_dt[, TYPE:="OFFPEAK"]
trip_dt = rbindlist(list(od_AM_dt, od_MD_dt, od_PM_dt, od_OP_dt),
use.names = TRUE,
fill = TRUE)
taz_sf = st_read(taz_file)
taz_dt = data.table(taz_sf)
ext_zones_sf = st_read(ext_zone_file)
ext_zones_dt = data.table(ext_zones_sf)
### Create output data ###########################################################
##################################################################################
### Simplify shapefile ###########################################################
##################################################################################
# format(object.size(taz_sf), units="Mb")
# taz_gg = ggplot(taz_sf) + geom_sf()
# Attach County Data
state_fips = "47" # Tennessee
# county_fips = "037" # Davidson County
county_fips = NULL
# bg_sf = st_as_sf(block_groups(state_fips, county_fips))
county_sf = st_as_sf(counties(state_fips))
county_sf = st_transform(county_sf, st_crs(taz_sf))
taz_add_sf = st_intersection(taz_sf, county_sf[,c("COUNTYFP", "NAME", "NAMELSAD", "geometry")])
taz_add_sf = taz_add_sf[!is.na(taz_add_sf$ID_NEW_NEW),]
taz_add_sf$IAREA = units::set_units(st_area(taz_add_sf), "mi^2")
taz_add_sf$prop = as.numeric(taz_add_sf$IAREA)/taz_add_sf$AREA
taz_add_sf = taz_add_sf[taz_add_sf$prop > 1e-1,]
taz_sf$NAME = taz_add_sf$NAME[match(taz_sf$ID_NEW_NEW, taz_add_sf$ID_NEW_NEW)]
taz_add_dt = data.table(taz_add_sf)
taz_simplify_sf = st_as_sf(ms_simplify(input = as(taz_add_sf[,c("ID_NEW_NEW",
"NAME",
"geometry")], "Spatial"),
keep = 0.04,
weighting = 0.8,
keep_shapes = TRUE))
colnames(taz_simplify_sf) = c("id", "NAME", "geometry")
taz_simplify_sf = taz_simplify_sf[order(taz_simplify_sf$id),]
# county_simplify_sf = taz_add_sf %>% group_by(NAME, NAMELSAD) %>% summarize(AREA=sum(AREA))
# county_simplify_sf = st_as_sf(ms_simplify(input = as(county_simplify_sf[,c("NAME",
# "NAMELSAD",
# "geometry")], "Spatial"),
# keep = 0.04,
# weighting = 0.8,
# keep_shapes = TRUE))
# format(object.size(taz_simplify_sf), units="Mb")
# taz_simplify_gg = ggplot(taz_simplify_sf) + geom_sf()
#
# gridExtra::grid.arrange(taz_gg, taz_simplify_gg, nrow = 1)
### Passive Data Scenario ########################################################
##################################################################################
# County Filter File
county_filter_sf = county_sf[county_sf$NAME %in% taz_simplify_sf$NAME,c("NAME", "geometry")]
county_mx = diag(nrow=nrow(county_filter_sf))
colnames(county_mx) = county_filter_sf$NAME
county_filter_dt = data.table(ID=seq_along(county_filter_sf$NAME),
COUNTY=county_filter_sf$NAME,
data.table(county_mx))
# County File
county_filter_sf = county_filter_sf[order(county_filter_sf$NAME),c("NAME", "geometry")]
ext_zones_sf$NAME = "External"
ext_add_sf = ext_zones_sf[,c("NAME", "geometry")]
order_names = c("External", rev(sort(county_filter_sf$NAME)))
# county_filter_sf = rbind(county_filter_sf, ext_add_sf)
county_filter_sf = county_filter_sf %>% group_by(NAME) %>% summarise() %>% ungroup()
county_filter_sf =county_filter_sf[order(-(match(county_filter_sf$NAME,order_names))),]
county_filter_sf$ID = seq_len(nrow(county_filter_sf))
county_filter_sf = county_filter_sf[,c("ID", "NAME", "geometry")]
# Chord Diagram
trip_dt[taz_add_dt,COUNTY_O:=i.NAME, on=.(origin=ID_NEW_NEW)]
trip_dt[taz_add_dt,COUNTY_D:=i.NAME, on=.(destination=ID_NEW_NEW)]
trip_dt[origin %in% ext_zones_dt$ID_NEW & is.na(COUNTY_O),
COUNTY_O:="External"]
trip_dt[destination %in% ext_zones_dt$ID_NEW & is.na(COUNTY_D),
COUNTY_D:="External"]
# Overall
# Daily Destination
daily_dest_dt = trip_dt[,.(#ALL =round(sum(Auto_Residents+Auto_Visitors), 2),
RESIDENTS=round(sum(Auto_Residents), 2),
VISITORS =round(sum(Auto_Visitors), 2)),
by = .(ZONE = destination, COUNTY = COUNTY_D)]
# daily_dest_dt[,ZONE:=county_filter_sf$ID[match(COUNTY,county_filter_sf$NAME)]]
setcolorder(daily_dest_dt, c("ZONE"))
daily_dest_dt = melt.data.table(daily_dest_dt,
id.vars = c("ZONE", "COUNTY"),
variable.name = "RESIDENCY",
variable.factor = FALSE,
value.name = "QUANTITY",
value.factor = FALSE)
daily_dest_dt = daily_dest_dt[order(ZONE, COUNTY, match(RESIDENCY,c("RESIDENTS", "VISITORS", "ALL")))]
# Daily Total
daily_overall_dt = trip_dt[,.(TOTAL =round(sum(Auto_Residents+Auto_Visitors), 2),
RESIDENTS=round(sum(Auto_Residents), 2),
VISITORS =round(sum(Auto_Visitors), 2)),
.(FROM = COUNTY_O,
TO = COUNTY_D)]
setkey(daily_overall_dt, FROM, TO)
daily_overall_dt = daily_overall_dt[CJ(FROM, TO, unique = TRUE)]
daily_overall_dt[is.na(TOTAL), TOTAL:=0]
daily_overall_dt[is.na(RESIDENTS), RESIDENTS:=0]
daily_overall_dt[is.na(VISITORS), VISITORS:=0]
## TIME Distribution
daily_time_dt = trip_dt[,.(TOTAL =round(sum(Auto_Residents+Auto_Visitors), 2),
RESIDENTS=round(sum(Auto_Residents), 2),
VISITORS =round(sum(Auto_Visitors), 2)),
.(FROM = COUNTY_O,
TO = COUNTY_D,
TIMEZONE = TYPE)]
time_temp_dt = dcast.data.table(daily_time_dt, FROM+TO~TIMEZONE, value.var = "TOTAL")
time_temp_dt = merge(time_temp_dt,
dcast.data.table(daily_time_dt, FROM+TO~TIMEZONE, value.var = "RESIDENTS"),
by = c("FROM", "TO"),
all = TRUE,
suffixes = c("_TOTAL", ""))
time_temp_dt = merge(time_temp_dt,
dcast.data.table(daily_time_dt, FROM+TO~TIMEZONE, value.var = "VISITORS"),
by = c("FROM", "TO"),
all = TRUE,
suffixes = c("_RESIDENTS", "_VISITORS"))
daily_time_dt = copy(time_temp_dt)
rm(time_temp_dt)
setkey(daily_time_dt, FROM, TO)
daily_time_dt = daily_time_dt[CJ(FROM, TO, unique = TRUE)]
trip_names = setdiff(names(daily_time_dt), c("FROM", "TO"))
daily_time_dt[, c(trip_names):=lapply(.SD, function(x) {x[is.na(x)] = 0; x}),
.SDcols=c(trip_names)]
# Time Total Resident Visitor
time_trip_dt = trip_dt[,.(TOTAL =round(sum(Auto_Residents+Auto_Visitors), 2),
RESIDENTS=round(sum(Auto_Residents), 2),
VISITORS =round(sum(Auto_Visitors), 2)),
.(FROM = COUNTY_O,
TO = COUNTY_D,
TIMEZONE = TYPE)]
## AM
am_trips_dt = time_trip_dt[TIMEZONE=="AM"][,TIMEZONE:=NULL][]
setkey(am_trips_dt, FROM, TO)
am_trips_dt = am_trips_dt[CJ(FROM, TO, unique = TRUE)]
am_trips_dt[is.na(TOTAL), TOTAL:=0]
am_trips_dt[is.na(RESIDENTS), RESIDENTS:=0]
am_trips_dt[is.na(VISITORS), VISITORS:=0]
## MD
md_trips_dt = time_trip_dt[TIMEZONE=="MIDDAY"][,TIMEZONE:=NULL][]
setkey(md_trips_dt, FROM, TO)
md_trips_dt = md_trips_dt[CJ(FROM, TO, unique = TRUE)]
md_trips_dt[is.na(TOTAL), TOTAL:=0]
md_trips_dt[is.na(RESIDENTS), RESIDENTS:=0]
md_trips_dt[is.na(VISITORS), VISITORS:=0]
## PM
pm_trips_dt = time_trip_dt[TIMEZONE=="PM"][,TIMEZONE:=NULL][]
setkey(pm_trips_dt, FROM, TO)
pm_trips_dt = pm_trips_dt[CJ(FROM, TO, unique = TRUE)]
pm_trips_dt[is.na(TOTAL), TOTAL:=0]
pm_trips_dt[is.na(RESIDENTS), RESIDENTS:=0]
pm_trips_dt[is.na(VISITORS), VISITORS:=0]
## OP
op_trips_dt = time_trip_dt[TIMEZONE=="OFFPEAK"][,TIMEZONE:=NULL][]
setkey(op_trips_dt, FROM, TO)
op_trips_dt = op_trips_dt[CJ(FROM, TO, unique = TRUE)]
op_trips_dt[is.na(TOTAL), TOTAL:=0]
op_trips_dt[is.na(RESIDENTS), RESIDENTS:=0]
op_trips_dt[is.na(VISITORS), VISITORS:=0]
# Time of day vs Resident/Visitor
tod_trips_dt = trip_dt[,.(#ALL =round(sum(Auto_Residents+Auto_Visitors), 2),
RESIDENTS=round(sum(Auto_Residents), 2),
VISITORS =round(sum(Auto_Visitors), 2)),
.(`TIME OF DAY` = TYPE)]
tod_trips_dt = melt.data.table(tod_trips_dt, id.vars = c("TIME OF DAY"),
variable.name = "PERSON GROUP",
variable.factor = FALSE,
value.name = "TRIPS",
value.factor = FALSE)
tod_trips_dt[,CHART:="TRIPS BY TIME OF DAY"]
### Write output data ############################################################
##################################################################################
## Passive Data
# Shapefile
st_write(taz_simplify_sf, dsn = taz_shapefile_file, driver = "GeoJSON", delete_dsn = TRUE)
st_write(county_filter_sf,
dsn = county_shapefile_file, driver = "GeoJSON", delete_dsn = TRUE)
# Filter File
fwrite(county_filter_dt, file = county_filter_file)
# Trip OD
fwrite(daily_dest_dt[COUNTY!="External"], file = daily_dest_file)
fwrite(daily_overall_dt, file = daily_overall_file)
fwrite(daily_time_dt, file = daily_overall_time_file)
fwrite(am_trips_dt, file = daily_am_file)
fwrite(md_trips_dt, file = daily_md_file)
fwrite(pm_trips_dt, file = daily_pm_file)
fwrite(op_trips_dt, file = daily_op_file)
fwrite(tod_trips_dt, file = daily_tod_file)
|
#!/bin/R
### Map plots from N reduce2grid
library(ggmap)
library(maptools)
library(gpclib)
library(sp)
library(raster)
library(rgdal)
library(dplyr)
library(Cairo)
library(scales)
library(rgeos)
gpclibPermit()
mat <- read.table("symbols_colors.txt", row.names = 1, stringsAsFactors = FALSE)
sites <- rbind(BI = c(41.184326, -71.574127), BP = c(41.2, -73.181154), ER = c(36.807026,
-76.290405), F = c(40.9, -73.139791), KC = c(37.3016, -76.4226), NBH = c(41.637174,
-70.914284), NYC = c(40.7006, -74.1223), SH = c(40.4, -74.0113))
colnames(sites) <- c("lat", "lon")
# qmap(c(lon= -74.632951,lat=39.433438),zoom=6,source =
# 'google',maptype='satellite')
# mm <- get_map(c(lon= -74.632951,lat=39.433438),zoom=6,source =
# 'google',maptype='satellite')
mm <- ggmap::get_map(c(lon = -74.632951, lat = 39.433438), zoom = 6, source = "stamen")
myloc <- c(-77.25, 36, -69.75, 42) #left bottom right top
mm <- ggmap::get_map(myloc, zoom = 6, source = "stamen", maptype = "terrain-background")
mm <- ggmap::get_map(myloc, source = "stamen")
box <- as(extent(as.numeric(attr(mm, "bb"))[c(2, 4, 1, 3)] + c(0.001, -0.001, 0.001,
-0.001)), "SpatialPolygons")
proj4string(box) <- CRS(summary(shp)[[4]])
shp <- rgdal::readOGR("~/cb_2015_us_state_500k/cb_2017_us_state_500k.shp")
tractSub <- gIntersection(shp, box, byid = TRUE, id = as.character(shp$GEOID))
tractSub <- fortify(tractSub, region = "GEOID")
# plotData <- left_join(tractSub, data, by = 'id')
eastcoast <- ggmap(mm)
eastcoast + geom_polygon(aes(x = long, y = lat, group = group), data = tractSub,
colour = "white", fill = "black", alpha = 0.4, size = 0.3) + geom_point(aes(x = lon,
y = lat), data = as.data.frame(sites), color = mat[, 5], size = 9) + geom_text(aes(label = c("S1",
"T2", "T4", "S2", "S4", "T1", "T3", "S3"), x = lon, y = lat), data = as.data.frame(sites))
shp <- readOGR(dsn = "/Users/noahreid/Downloads/cb_2015_us_state_500k", layer = "cb_2015_us_state_500k")
tractSub <- gIntersection(shp, box, byid = TRUE, id = as.character(shp$GEOID))
par(mar = c(0, 0, 0, 0), oma = c(1, 1, 1, 1))
plot(tractSub, col = rgb(0, 0, 0, 0.05))
points(sites[, 2], sites[, 1], pch = 20, cex = 5, col = mat[, 5])
text(sites[, 2], sites[, 1], c("S1", "T2", "T4", "S2", "S4", "T1", "T3", "S3"))
|
/FIG/map.NR.R
|
no_license
|
jthmiller/QTL_remap
|
R
| false
| false
| 2,269
|
r
|
#!/bin/R
### Map plots from N reduce2grid
library(ggmap)
library(maptools)
library(gpclib)
library(sp)
library(raster)
library(rgdal)
library(dplyr)
library(Cairo)
library(scales)
library(rgeos)
gpclibPermit()
mat <- read.table("symbols_colors.txt", row.names = 1, stringsAsFactors = FALSE)
sites <- rbind(BI = c(41.184326, -71.574127), BP = c(41.2, -73.181154), ER = c(36.807026,
-76.290405), F = c(40.9, -73.139791), KC = c(37.3016, -76.4226), NBH = c(41.637174,
-70.914284), NYC = c(40.7006, -74.1223), SH = c(40.4, -74.0113))
colnames(sites) <- c("lat", "lon")
# qmap(c(lon= -74.632951,lat=39.433438),zoom=6,source =
# 'google',maptype='satellite')
# mm <- get_map(c(lon= -74.632951,lat=39.433438),zoom=6,source =
# 'google',maptype='satellite')
mm <- ggmap::get_map(c(lon = -74.632951, lat = 39.433438), zoom = 6, source = "stamen")
myloc <- c(-77.25, 36, -69.75, 42) #left bottom right top
mm <- ggmap::get_map(myloc, zoom = 6, source = "stamen", maptype = "terrain-background")
mm <- ggmap::get_map(myloc, source = "stamen")
box <- as(extent(as.numeric(attr(mm, "bb"))[c(2, 4, 1, 3)] + c(0.001, -0.001, 0.001,
-0.001)), "SpatialPolygons")
proj4string(box) <- CRS(summary(shp)[[4]])
shp <- rgdal::readOGR("~/cb_2015_us_state_500k/cb_2017_us_state_500k.shp")
tractSub <- gIntersection(shp, box, byid = TRUE, id = as.character(shp$GEOID))
tractSub <- fortify(tractSub, region = "GEOID")
# plotData <- left_join(tractSub, data, by = 'id')
eastcoast <- ggmap(mm)
eastcoast + geom_polygon(aes(x = long, y = lat, group = group), data = tractSub,
colour = "white", fill = "black", alpha = 0.4, size = 0.3) + geom_point(aes(x = lon,
y = lat), data = as.data.frame(sites), color = mat[, 5], size = 9) + geom_text(aes(label = c("S1",
"T2", "T4", "S2", "S4", "T1", "T3", "S3"), x = lon, y = lat), data = as.data.frame(sites))
shp <- readOGR(dsn = "/Users/noahreid/Downloads/cb_2015_us_state_500k", layer = "cb_2015_us_state_500k")
tractSub <- gIntersection(shp, box, byid = TRUE, id = as.character(shp$GEOID))
par(mar = c(0, 0, 0, 0), oma = c(1, 1, 1, 1))
plot(tractSub, col = rgb(0, 0, 0, 0.05))
points(sites[, 2], sites[, 1], pch = 20, cex = 5, col = mat[, 5])
text(sites[, 2], sites[, 1], c("S1", "T2", "T4", "S2", "S4", "T1", "T3", "S3"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fsutils.r
\name{split_path}
\alias{split_path}
\title{Split file path into individual components (optionally including separators)}
\usage{
split_path(path, include.fseps = FALSE, omit.duplicate.fseps = FALSE,
fsep = .Platform$file.sep)
}
\arguments{
\item{path}{A path with directories separated by \code{fsep}s.}
\item{include.fseps}{Whether to include the separators in the returned
character vector (default \code{FALSE})}
\item{omit.duplicate.fseps}{Whether to omit duplicate file separators if
\code{include.fseps=TRUE} (default \code{FALSE}).}
\item{fsep}{The path separator (default to \code{.Platform$file.sep})}
}
\value{
A character vector with one element for each component in the path
(including path separators if \code{include.fseps=TRUE}).
}
\description{
Split file path into individual components (optionally including separators)
}
\examples{
split_path("/a/b/c")
split_path("a/b/c")
parts=split_path("/a/b/c", include.fseps=TRUE)
# join parts back up again
paste(parts, collapse = "")
split_path("a/b//c", include.fseps=TRUE, omit.duplicate.fseps=TRUE)
# Windows style
split_path("C:\\\\a\\\\b\\\\c", fsep="\\\\")
}
\seealso{
\code{\link{file.path}}
Other path_utils: \code{\link{abs2rel}},
\code{\link{common_path}}
}
|
/man/split_path.Rd
|
no_license
|
javieralexa/nat.utils
|
R
| false
| true
| 1,331
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fsutils.r
\name{split_path}
\alias{split_path}
\title{Split file path into individual components (optionally including separators)}
\usage{
split_path(path, include.fseps = FALSE, omit.duplicate.fseps = FALSE,
fsep = .Platform$file.sep)
}
\arguments{
\item{path}{A path with directories separated by \code{fsep}s.}
\item{include.fseps}{Whether to include the separators in the returned
character vector (default \code{FALSE})}
\item{omit.duplicate.fseps}{Whether to omit duplicate file separators if
\code{include.fseps=TRUE} (default \code{FALSE}).}
\item{fsep}{The path separator (default to \code{.Platform$file.sep})}
}
\value{
A character vector with one element for each component in the path
(including path separators if \code{include.fseps=TRUE}).
}
\description{
Split file path into individual components (optionally including separators)
}
\examples{
split_path("/a/b/c")
split_path("a/b/c")
parts=split_path("/a/b/c", include.fseps=TRUE)
# join parts back up again
paste(parts, collapse = "")
split_path("a/b//c", include.fseps=TRUE, omit.duplicate.fseps=TRUE)
# Windows style
split_path("C:\\\\a\\\\b\\\\c", fsep="\\\\")
}
\seealso{
\code{\link{file.path}}
Other path_utils: \code{\link{abs2rel}},
\code{\link{common_path}}
}
|
step.2 %>%
filter(ensembl.gene %in% picks) %>%
select(hgnc, gross.mean.abundance) %>%
arrange(gross.mean.abundance) %>%
mutate(hgnc = factor(hgnc, levels = unique(hgnc))) %>%
unique %>%
filter(!is.na(gross.mean.abundance)) %>%
mutate(tissue = 'gross mean') %>%
rename(gene = hgnc, level = gross.mean.abundance) %>%
PlotTissue(pdf = TRUE, file.name = '26_picks_gross_mean.pdf', width = 2.35, height = 10)
palette <- c(
'zero' = '#f7d4d4',
'low' = '#eb9494',
'medium' = '#de5454',
'high' = '#c12525')
PlotTissue <- function(events, faceting = FALSE, pdf = FALSE, file.name = 'plot_tissue.pdf', width = 20, height = 10, order = TRUE) {
if(order == TRUE) {
events <-
bind_rows(data_frame(
gene = as.character(unlist(events[1,'gene'])),
tissue = c('adipose tissue', 'adrenal', 'appendix', 'bladder', 'blood', 'bone', 'brain', 'breast', 'bronchus', 'cerumen', 'cervix', 'epididymis', 'eye', 'fallopian tube', 'gallbladder', 'gut', 'heart', 'kidney', 'laryngopharynx', 'liver', 'lung', 'lymph node', 'nasopharynx', 'oropharynx', 'ovary', 'pancreas', 'parathyroid', 'prostate', 'rectum', 'seminal', 'skeletal muscle', 'skin', 'smooth muscle', 'soft tissue', 'spinal cord', 'spleen', 'stomach', 'synovial fluid', 'testis', 'thyroid', 'tonsil', 'uterus', 'vagina'),
level = NA), events) %>%
mutate(gene = factor(gene, levels = unique(gene))) %>%
mutate(tissue = factor(tissue, levels = c(vital, non.vital)))
}
if(all(na.omit(events$level) %% 1 == 0)) { # check if integer, if so plot discrete
events %<>%
mutate(level =
ifelse(level == 0, 'zero',
ifelse(level == 1, 'low',
ifelse(level == 2, 'medium',
ifelse(level == 3, 'high',
NA))))) %>%
mutate(level = factor(level, levels = unique(level)))
m.gg <-
ggplot(events, aes(tissue, gene)) +
geom_tile(aes(fill = level, drop = FALSE), colour = 'grey') +
scale_fill_manual(
breaks = names(palette),
values = palette,
na.value = 'grey',
drop = FALSE,
guide = guide_legend(reverse = TRUE))
} else {
m.gg <-
ggplot(events, aes(tissue, gene)) +
geom_tile(aes(fill = level), colour = 'grey') +
scale_fill_gradientn(
colours = palette,
na.value = 'transparent',
breaks = 0:3,
labels = names(palette),
limits = c(0, 3))
}
if(faceting == TRUE) {
mt.gg <-
m.gg +
theme(
text = element_text(size = 10),
axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1),
panel.background = element_rect(fill = 'grey'),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_rect(colour = 'black', fill = NA, size = 1),
strip.background = element_blank(),
strip.text.x = element_blank()) +
facet_wrap(~ split, ncol = 1, scales = 'free_y')
} else {
mt.gg <-
m.gg +
theme(
legend.title = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
text = element_text(size = 10),
axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1),
panel.background = element_rect(fill = 'grey'),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_rect(colour = 'black', fill = NA, size = 1))
}
if(pdf == TRUE) {
pdf(file.name, width, height)
plot(mt.gg)
dev.off()
} else {
dev.new(width = width, height = height)
#getOption('device')()
plot(mt.gg)
}
}
line.order = c(
'msk.0.09aml',
'msk.0.tf',
'jpro.thp1',
'msk.0.thp1',
'msk.1.thp1',
'msk.2.thp1',
'msk.3.thp1',
'msk.0.kasum1',
'msk.1.kasumi',
'msk.2.kasumi',
'msk.3.kasumi',
'msk.0.monomac',
'msk.1.monomac',
'msk.2.monomac',
'msk.3.monomac',
'msk.0.molm13',
'msk.1.molm13',
'msk.2.molm13',
'msk.3.molm13')
step.2 %>%
filter(ensembl.gene %in% picks) %>%
arrange(gross.mean.abundance) %>%
select(hgnc,
msk.0.09aml,
msk.0.kasum1,
msk.0.molm13,
msk.0.monomac,
msk.0.tf,
msk.0.thp1,
msk.1.kasumi,
msk.2.kasumi,
msk.3.kasumi,
msk.1.thp1,
msk.2.thp1,
msk.3.thp1,
msk.1.monomac,
msk.2.monomac,
msk.3.monomac,
msk.1.molm13,
msk.2.molm13,
msk.3.molm13,
jpro.thp1) %>%
unique %>%
mutate(hgnc = factor(hgnc, levels = unique(hgnc))) %>%
unique %>%
gather(tissue, level, msk.0.09aml:jpro.thp1) %>%
arrange(tissue) %>%
mutate(tissue = factor(tissue, line.order)) %>%
rename(gene = hgnc) %>%
filter(!is.na(tissue)) %>%
filter(!is.na(level)) %>%
select(gene, level, tissue) %>%
PlotTissue(pdf = TRUE, file.name = 'malignant_picks.pdf', width = 9, height = 10, order = FALSE)
step.2 %>%
filter(ensembl.gene %in% picks) %>%
arrange(gross.mean.abundance) %>%
select(hgnc,
msk.1.kasumi,
msk.2.kasumi,
msk.3.kasumi,
msk.1.thp1,
msk.2.thp1,
msk.3.thp1,
msk.1.monomac,
msk.2.monomac,
msk.3.monomac,
msk.1.molm13,
msk.2.molm13,
msk.3.molm13) %>%
unique %>%
mutate(hgnc = factor(hgnc, levels = unique(hgnc))) %>%
unique %>%
gather(tissue, level, msk.1.kasumi:msk.3.molm13) %>%
rename(gene = hgnc) %>%
filter(!is.na(tissue)) %>%
filter(!is.na(level)) %>%
select(gene, level, tissue) %>%
PlotTissue(pdf = TRUE, file.name = 'triplicate_malignant_picks.pdf', width = 7, height = 10, order = FALSE)
step.2 %>%
filter(ensembl.gene %in% picks) %>%
arrange(gross.mean.abundance) %>%
rowwise %>%
mutate(msk.kasumi = mean(c(msk.1.kasumi, msk.2.kasumi, msk.3.kasumi))) %>%
mutate(msk.thp1 = mean(c(msk.1.thp1, msk.2.thp1, msk.3.thp1))) %>%
mutate(msk.monomac = mean(c(msk.1.monomac, msk.2.monomac, msk.3.monomac))) %>%
mutate(msk.molm13 = mean(c(msk.1.molm13, msk.2.molm13, msk.3.molm13))) %>%
select(hgnc,
msk.kasumi,
msk.thp1,
msk.monomac,
msk.molm13) %>%
unique %>%
mutate(hgnc = factor(hgnc, levels = unique(hgnc))) %>%
unique %>%
gather(tissue, level, msk.kasumi:msk.molm13) %>%
rename(gene = hgnc) %>%
filter(!is.na(tissue)) %>%
filter(!is.na(level)) %>%
select(gene, level, tissue) %>%
PlotTissue(pdf = TRUE, file.name = 'triplicate_collapsed_malignant_picks.pdf', width = 3.55, height = 10, order = FALSE)
step.2 %>%
filter(ensembl.gene %in% picks) %>%
arrange(gross.mean.abundance) %>%
rowwise %>%
mutate(level = mean(c(msk.1.kasumi, msk.2.kasumi, msk.3.kasumi, msk.1.thp1, msk.2.thp1, msk.3.thp1, msk.1.monomac, msk.2.monomac, msk.3.monomac, msk.1.molm13, msk.2.molm13, msk.3.molm13))) %>%
select(hgnc,
level) %>%
unique %>%
mutate(hgnc = factor(hgnc, levels = unique(hgnc))) %>%
unique %>%
rename(gene = hgnc) %>%
filter(!is.na(level)) %>%
mutate(tissue = 'malignant mean') %>%
select(gene, level, tissue) %>%
PlotTissue(pdf = TRUE, file.name = 'triplicate_mean_malignant_picks.pdf', width = 2.2, height = 10, order = FALSE)
micro <-
read_tsv('micro_patient.txt') %>%
rename(tissue = patient) %>%
gather(gene, level, GAGE1:MMP14) %>%
filter(gene %in% c(names(picks), 'EMR2', 'GPR86')) %>%
mutate(level = level - min(level)) %>%
mutate(level = level * (3/max(level))) %>%
group_by(gene) %>%
mutate(rank = mean(level)) %>%
arrange(rank) %>%
ungroup %>%
mutate(gene = factor(gene, unique(gene)))
micro %>%
PlotTissue(pdf = TRUE, file.name = 'micro_array_26.pdf', width = 15, height = 5, order = FALSE)
foo <- c(
"ABCC4",
"ANK1",
"ARID2",
"ATP11A",
"CBL",
"CCDC88A",
"CCR1",
"CD209",
"CD84",
"CD96",
"DOCK10",
"DOCK11",
"DTNA",
"ENG",
"EPB41",
"FCAR",
"GYPA",
"ITGA4",
"ITGB3",
"KIT",
"LILRA6",
"LILRB2",
"LILRB4",
"MTHFR",
"NOTCH2",
"PLXNC1",
"RABGAP1L",
"SIGLEC9",
"SLC16A7",
"SLC2A9",
"SLC31A1",
"SLC4A7",
"SORT1",
"ST14",
"VCPIP1",
"ZZEF1")
rna <-
read_tsv('rna_seq_08242015.txt') %>%
rename(DNMT3a_mut = `DNMT3a mut`) %>%
group_by(gene) %>%
mutate(DNMT3a_mut = mean(DNMT3a_mut), s_DNMT3a_WT = mean(s_DNMT3a_WT), s_MIGR1 = mean(s_MIGR1)) %>%
unique %>%
ungroup %>%
gather(tissue, level, DNMT3a_mut:s_MIGR1) %>%
mutate(level = log10(level)) %>%
mutate(level = level * 3/max(na.omit(level))) %>%
filter(gene %in% foo) %>%
group_by(gene) %>%
mutate(rank = mean(level)) %>%
arrange(rank) %>%
ungroup %>%
mutate(gene = factor(gene, unique(gene)))
rna %>%
PlotTissue(pdf = TRUE, file.name = 'rna_seq_36.pdf', width = 2.65, height = 10, order = FALSE)
|
/notebook/_fabiana_presentation.R
|
permissive
|
SadelainLab/ptolomy
|
R
| false
| false
| 8,097
|
r
|
step.2 %>%
filter(ensembl.gene %in% picks) %>%
select(hgnc, gross.mean.abundance) %>%
arrange(gross.mean.abundance) %>%
mutate(hgnc = factor(hgnc, levels = unique(hgnc))) %>%
unique %>%
filter(!is.na(gross.mean.abundance)) %>%
mutate(tissue = 'gross mean') %>%
rename(gene = hgnc, level = gross.mean.abundance) %>%
PlotTissue(pdf = TRUE, file.name = '26_picks_gross_mean.pdf', width = 2.35, height = 10)
palette <- c(
'zero' = '#f7d4d4',
'low' = '#eb9494',
'medium' = '#de5454',
'high' = '#c12525')
PlotTissue <- function(events, faceting = FALSE, pdf = FALSE, file.name = 'plot_tissue.pdf', width = 20, height = 10, order = TRUE) {
if(order == TRUE) {
events <-
bind_rows(data_frame(
gene = as.character(unlist(events[1,'gene'])),
tissue = c('adipose tissue', 'adrenal', 'appendix', 'bladder', 'blood', 'bone', 'brain', 'breast', 'bronchus', 'cerumen', 'cervix', 'epididymis', 'eye', 'fallopian tube', 'gallbladder', 'gut', 'heart', 'kidney', 'laryngopharynx', 'liver', 'lung', 'lymph node', 'nasopharynx', 'oropharynx', 'ovary', 'pancreas', 'parathyroid', 'prostate', 'rectum', 'seminal', 'skeletal muscle', 'skin', 'smooth muscle', 'soft tissue', 'spinal cord', 'spleen', 'stomach', 'synovial fluid', 'testis', 'thyroid', 'tonsil', 'uterus', 'vagina'),
level = NA), events) %>%
mutate(gene = factor(gene, levels = unique(gene))) %>%
mutate(tissue = factor(tissue, levels = c(vital, non.vital)))
}
if(all(na.omit(events$level) %% 1 == 0)) { # check if integer, if so plot discrete
events %<>%
mutate(level =
ifelse(level == 0, 'zero',
ifelse(level == 1, 'low',
ifelse(level == 2, 'medium',
ifelse(level == 3, 'high',
NA))))) %>%
mutate(level = factor(level, levels = unique(level)))
m.gg <-
ggplot(events, aes(tissue, gene)) +
geom_tile(aes(fill = level, drop = FALSE), colour = 'grey') +
scale_fill_manual(
breaks = names(palette),
values = palette,
na.value = 'grey',
drop = FALSE,
guide = guide_legend(reverse = TRUE))
} else {
m.gg <-
ggplot(events, aes(tissue, gene)) +
geom_tile(aes(fill = level), colour = 'grey') +
scale_fill_gradientn(
colours = palette,
na.value = 'transparent',
breaks = 0:3,
labels = names(palette),
limits = c(0, 3))
}
if(faceting == TRUE) {
mt.gg <-
m.gg +
theme(
text = element_text(size = 10),
axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1),
panel.background = element_rect(fill = 'grey'),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_rect(colour = 'black', fill = NA, size = 1),
strip.background = element_blank(),
strip.text.x = element_blank()) +
facet_wrap(~ split, ncol = 1, scales = 'free_y')
} else {
mt.gg <-
m.gg +
theme(
legend.title = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
text = element_text(size = 10),
axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1),
panel.background = element_rect(fill = 'grey'),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_rect(colour = 'black', fill = NA, size = 1))
}
if(pdf == TRUE) {
pdf(file.name, width, height)
plot(mt.gg)
dev.off()
} else {
dev.new(width = width, height = height)
#getOption('device')()
plot(mt.gg)
}
}
# Display order for cell-line columns in the malignant heatmaps below:
# patient/TF/JPro samples first, then each MSK line with its replicates
# (0 = bulk, 1-3 = triplicates) grouped together.  These strings must
# match column names in `step.2` exactly.
# NOTE(review): 'msk.0.kasum1' (no final "i") differs from the
# 'msk.N.kasumi' replicate names; the same spelling is used in select()
# below, so it presumably mirrors the upstream column name -- confirm
# before normalising the spelling.
line.order = c(
'msk.0.09aml',
'msk.0.tf',
'jpro.thp1',
'msk.0.thp1',
'msk.1.thp1',
'msk.2.thp1',
'msk.3.thp1',
'msk.0.kasum1',
'msk.1.kasumi',
'msk.2.kasumi',
'msk.3.kasumi',
'msk.0.monomac',
'msk.1.monomac',
'msk.2.monomac',
'msk.3.monomac',
'msk.0.molm13',
'msk.1.molm13',
'msk.2.molm13',
'msk.3.molm13')
# Heatmap of expression level for the picked genes across every malignant
# line/replicate (bulk + triplicates + patient samples), ordered by overall
# abundance; rendered by PlotTissue() to malignant_picks.pdf.
# The gather() range below relies on the exact select() column order.
step.2 %>%
filter(ensembl.gene %in% picks) %>%
arrange(gross.mean.abundance) %>%
select(hgnc,
msk.0.09aml,
msk.0.kasum1,
msk.0.molm13,
msk.0.monomac,
msk.0.tf,
msk.0.thp1,
msk.1.kasumi,
msk.2.kasumi,
msk.3.kasumi,
msk.1.thp1,
msk.2.thp1,
msk.3.thp1,
msk.1.monomac,
msk.2.monomac,
msk.3.monomac,
msk.1.molm13,
msk.2.molm13,
msk.3.molm13,
jpro.thp1) %>%
unique %>%
mutate(hgnc = factor(hgnc, levels = unique(hgnc))) %>% # lock row order by abundance
unique %>% # NOTE(review): second unique is redundant after the first -- confirm
gather(tissue, level, msk.0.09aml:jpro.thp1) %>%
arrange(tissue) %>%
mutate(tissue = factor(tissue, line.order)) %>% # column order from line.order
rename(gene = hgnc) %>%
filter(!is.na(tissue)) %>%
filter(!is.na(level)) %>%
select(gene, level, tissue) %>%
PlotTissue(pdf = TRUE, file.name = 'malignant_picks.pdf', width = 9, height = 10, order = FALSE)
# Same heatmap restricted to the four MSK triplicate series only
# (kasumi / thp1 / monomac / molm13); written to triplicate_malignant_picks.pdf.
step.2 %>%
filter(ensembl.gene %in% picks) %>%
arrange(gross.mean.abundance) %>%
select(hgnc,
msk.1.kasumi,
msk.2.kasumi,
msk.3.kasumi,
msk.1.thp1,
msk.2.thp1,
msk.3.thp1,
msk.1.monomac,
msk.2.monomac,
msk.3.monomac,
msk.1.molm13,
msk.2.molm13,
msk.3.molm13) %>%
unique %>%
mutate(hgnc = factor(hgnc, levels = unique(hgnc))) %>%
unique %>%
gather(tissue, level, msk.1.kasumi:msk.3.molm13) %>%
rename(gene = hgnc) %>%
filter(!is.na(tissue)) %>%
filter(!is.na(level)) %>%
select(gene, level, tissue) %>%
PlotTissue(pdf = TRUE, file.name = 'triplicate_malignant_picks.pdf', width = 7, height = 10, order = FALSE)
# Collapse each triplicate series to its per-gene mean (rowwise so mean()
# runs once per row), producing one column per cell line; written to
# triplicate_collapsed_malignant_picks.pdf.
step.2 %>%
filter(ensembl.gene %in% picks) %>%
arrange(gross.mean.abundance) %>%
rowwise %>%
mutate(msk.kasumi = mean(c(msk.1.kasumi, msk.2.kasumi, msk.3.kasumi))) %>%
mutate(msk.thp1 = mean(c(msk.1.thp1, msk.2.thp1, msk.3.thp1))) %>%
mutate(msk.monomac = mean(c(msk.1.monomac, msk.2.monomac, msk.3.monomac))) %>%
mutate(msk.molm13 = mean(c(msk.1.molm13, msk.2.molm13, msk.3.molm13))) %>%
select(hgnc,
msk.kasumi,
msk.thp1,
msk.monomac,
msk.molm13) %>%
unique %>%
mutate(hgnc = factor(hgnc, levels = unique(hgnc))) %>%
unique %>%
gather(tissue, level, msk.kasumi:msk.molm13) %>%
rename(gene = hgnc) %>%
filter(!is.na(tissue)) %>%
filter(!is.na(level)) %>%
select(gene, level, tissue) %>%
PlotTissue(pdf = TRUE, file.name = 'triplicate_collapsed_malignant_picks.pdf', width = 3.55, height = 10, order = FALSE)
# Single-column heatmap: grand mean over all 12 triplicate measurements
# per gene, labelled 'malignant mean'; written to
# triplicate_mean_malignant_picks.pdf.
step.2 %>%
filter(ensembl.gene %in% picks) %>%
arrange(gross.mean.abundance) %>%
rowwise %>%
mutate(level = mean(c(msk.1.kasumi, msk.2.kasumi, msk.3.kasumi, msk.1.thp1, msk.2.thp1, msk.3.thp1, msk.1.monomac, msk.2.monomac, msk.3.monomac, msk.1.molm13, msk.2.molm13, msk.3.molm13))) %>%
select(hgnc,
level) %>%
unique %>%
mutate(hgnc = factor(hgnc, levels = unique(hgnc))) %>%
unique %>%
rename(gene = hgnc) %>%
filter(!is.na(level)) %>%
mutate(tissue = 'malignant mean') %>%
select(gene, level, tissue) %>%
PlotTissue(pdf = TRUE, file.name = 'triplicate_mean_malignant_picks.pdf', width = 2.2, height = 10, order = FALSE)
# Microarray per-patient data: reshape to (tissue=patient, gene, level),
# keep picked genes plus legacy aliases EMR2/GPR86, linearly rescale
# levels onto [0, 3] to match the tissue-heatmap colour scale, and order
# genes by mean level.
micro <-
read_tsv('micro_patient.txt') %>%
rename(tissue = patient) %>%
gather(gene, level, GAGE1:MMP14) %>%
filter(gene %in% c(names(picks), 'EMR2', 'GPR86')) %>%
mutate(level = level - min(level)) %>%
mutate(level = level * (3/max(level))) %>%
group_by(gene) %>%
mutate(rank = mean(level)) %>%
arrange(rank) %>%
ungroup %>%
mutate(gene = factor(gene, unique(gene)))
micro %>%
PlotTissue(pdf = TRUE, file.name = 'micro_array_26.pdf', width = 15, height = 5, order = FALSE)
# HGNC gene symbols to keep in the RNA-seq heatmap below (rna_seq_36.pdf).
foo <- c(
"ABCC4",
"ANK1",
"ARID2",
"ATP11A",
"CBL",
"CCDC88A",
"CCR1",
"CD209",
"CD84",
"CD96",
"DOCK10",
"DOCK11",
"DTNA",
"ENG",
"EPB41",
"FCAR",
"GYPA",
"ITGA4",
"ITGB3",
"KIT",
"LILRA6",
"LILRB2",
"LILRB4",
"MTHFR",
"NOTCH2",
"PLXNC1",
"RABGAP1L",
"SIGLEC9",
"SLC16A7",
"SLC2A9",
"SLC31A1",
"SLC4A7",
"SORT1",
"ST14",
"VCPIP1",
"ZZEF1")
# RNA-seq data: average replicate columns per gene, reshape to long form,
# log10-transform and rescale so the maximum equals 3 (heatmap scale).
# NOTE(review): the 0-3 rescale uses the max over ALL genes because it
# happens before filtering to `foo` -- confirm that is intended.
rna <-
read_tsv('rna_seq_08242015.txt') %>%
rename(DNMT3a_mut = `DNMT3a mut`) %>%
group_by(gene) %>%
mutate(DNMT3a_mut = mean(DNMT3a_mut), s_DNMT3a_WT = mean(s_DNMT3a_WT), s_MIGR1 = mean(s_MIGR1)) %>%
unique %>%
ungroup %>%
gather(tissue, level, DNMT3a_mut:s_MIGR1) %>%
mutate(level = log10(level)) %>%
mutate(level = level * 3/max(na.omit(level))) %>%
filter(gene %in% foo) %>%
group_by(gene) %>%
mutate(rank = mean(level)) %>%
arrange(rank) %>%
ungroup %>%
mutate(gene = factor(gene, unique(gene)))
rna %>%
PlotTissue(pdf = TRUE, file.name = 'rna_seq_36.pdf', width = 2.65, height = 10, order = FALSE)
|
#' Scrape footywire player statistics.
#'
#' \code{get_footywire_stats} returns a dataframe containing player match stats from footywire from 2010 onwards.
#'
#' The dataframe contains both basic and advanced player statistics from each match specified in the match_id input.
#' To find match ID, find the relevant matches on footywire.com
#'
#' @param ids A vector containing match id's to return. Can be a single value or vector of values.
#' @return Returns a data frame containing player match stats for each match ID
#'
#' @examples
#' \dontrun{
#' get_footywire_stats(ids = 5000:5100)
#' }
#' @export
#' @importFrom magrittr %>%
#' @import dplyr
#' @importFrom rvest html_nodes
#' @importFrom rvest html_text
get_footywire_stats <- function(ids) {
  # Validate input before doing any network work.
  if (missing(ids)) stop("Please provide an ID between 1 and 9999")
  if (!is.numeric(ids)) stop("ID must be numeric between 1 and 9999")
  message("Getting data from footywire.com")
  # Progress bar: one tick per match id, only shown if the scrape is
  # estimated to take more than 5 seconds.
  pb <- progress_estimated(length(ids), min_time = 5)
  # Scrape each match and row-bind the per-match data frames.
  # (The previous pre-allocated 44x42 placeholder data frame was dead
  # code -- map_df builds the full result itself -- so it was removed.)
  dat <- ids %>%
    purrr::map_df(~{
      pb$tick()$print() # update the progress bar
      get_match_data(id = .x) # internal helper: scrape one match
    })
  # Stable, predictable row order for downstream consumers.
  dat <- dat %>%
    arrange(Date, Match_id, desc(Status))
  message("Finished getting data")
  return(dat)
}
#' Update the included footywire stats data to the specified date.
#'
#' \code{update_footywire_stats} returns a dataframe containing player match stats from [footywire](footywire.com)
#'
#' The dataframe contains both basic and advanced player statistics from each match from 2010 to the specified end date.
#'
#' This function utilised the included ID's dataset to map known ID's. It looks for any new data that isn't already loaded and proceeds to download it.
#' @param check_existing A logical specifying if we should check against existing dataset. Defaults to TRUE. Making it false will download all data from all history which will take some time.
#' @return Returns a data frame containing player match stats for each match ID
#'
#' @examples
#' \dontrun{
#' update_footywire_stats()
#' }
#' @export
#' @importFrom magrittr %>%
#' @import dplyr
update_footywire_stats <- function(check_existing = TRUE) {
  message("Getting match ID's...")
  # Scrape each season fixture page from 2010 (first year with advanced
  # stats) to the current year and extract the numeric match id from every
  # match-link href.
  fw_ids <- 2010:as.numeric(format(Sys.Date(), "%Y")) %>%
    purrr::map(~ paste0("https://www.footywire.com/afl/footy/ft_match_list?year=", .)) %>%
    purrr::map(xml2::read_html) %>%
    purrr::map(~ rvest::html_nodes(., ".data:nth-child(5) a")) %>%
    purrr::map(~ rvest::html_attr(., "href")) %>%
    purrr::map(~ stringr::str_extract(., "\\d+")) %>%
    purrr::map_if(is.character, as.numeric) %>%
    purrr::reduce(c)
  if (check_existing) {
    # Ids not already present in the bundled `player_stats` dataset.
    ids <- fw_ids[!fw_ids %in% player_stats$Match_id]
    if (length(ids) == 0) {
      message("Data is up to date. Returning original player_stats data")
      return(player_stats)
    } else {
      message(paste0("Downloading new data for ", length(ids), " matches..."))
      message("\nChecking Github")
      # The fitzRoy GitHub repo hosts a more frequently updated copy of
      # player_stats; prefer it before scraping footywire directly.
      dat_url <- "https://raw.githubusercontent.com/jimmyday12/fitzRoy/master/data-raw/player_stats/player_stats.rda"
      # Load an .rda file and return the (single) object it contains,
      # whatever its name.
      loadRData <- function(fileName) {
        load(fileName)
        get(ls()[ls() != "fileName"])
      }
      dat_git <- loadRData(url(dat_url))
      # Ids still missing after accounting for the GitHub copy.
      git_ids <- fw_ids[!fw_ids %in% dat_git$Match_id]
      # BUG FIX: the original used `ids[ids == git_ids]`, an elementwise
      # comparison that recycles when the vectors differ in length and can
      # silently select the wrong ids. Set membership is what is meant.
      ids <- ids[ids %in% git_ids]
      if (length(ids) == 0) {
        message("Finished getting data")
        dat_git
      } else {
        # NOTE(review): new rows are appended to the bundled player_stats,
        # not to dat_git -- confirm that is the intended base dataset.
        new_data <- get_footywire_stats(ids)
        player_stats %>% dplyr::bind_rows(new_data)
      }
    }
  } else {
    message("Downloading all data. Warning - this takes a long time")
    dat <- get_footywire_stats(fw_ids)
    return(dat)
  }
}
#' Get upcoming fixture from footywire.com
#'
#' \code{get_fixture} returns a dataframe containing upcoming AFL Men's season fixture.
#'
#' The dataframe contains the home and away team as well as venue.
#'
#' @param season Season to return, in yyyy format
#' @return Returns a data frame containing the date, teams and venue of each game
#'
#' @examples
#' \dontrun{
#' get_fixture(2018)
#' }
#' @export
#' @importFrom magrittr %>%
#' @import dplyr
get_fixture <- function(season = lubridate::year(Sys.Date())) {
  # Validate the season argument early with informative errors.
  if (!is.numeric(season)) stop(paste0("'season' must be in 4-digit year format. 'season' is currently ", season))
  if (nchar(season) != 4) stop(paste0("'season' must be in 4-digit year format (e.g. 2018). 'season' is currently ", season))
  url_fixture <- paste0("https://www.footywire.com/afl/footy/ft_match_list?year=", season)
  fixture_xml <- xml2::read_html(url_fixture)
  # Extract the text of every .data cell; the fixture table has 7 columns.
  games_text <- fixture_xml %>%
    rvest::html_nodes(".data") %>%
    rvest::html_text()
  # Reshape into rows; keep date, teams and venue (first three columns).
  # (byrow = TRUE spelled out; the single-letter T alias is reassignable.)
  # NOTE(review): as_data_frame() is deprecated in favour of as_tibble();
  # kept to preserve the returned class without a tibble version bump.
  games_df <- matrix(games_text, ncol = 7, byrow = TRUE) %>%
    as_data_frame() %>%
    select(V1:V3)
  names(games_df) <- c("Date", "Teams", "Venue")
  # Drop bye rounds - they have no opponent or venue.
  games_df <- games_df %>%
    filter(Venue != "BYE")
  # Round numbering: games Thursday through Wednesday share a round, so
  # Sunday-Wednesday games (wday 1-4) are shifted back one epiweek.
  games_df <- games_df %>%
    mutate(
      Date = lubridate::ydm_hm(paste(season, Date)),
      epiweek = lubridate::epiweek(Date),
      w.Day = lubridate::wday(Date),
      Round = ifelse(between(w.Day, 1, 4), epiweek - 1, epiweek),
      Round = as.integer(Round - min(Round) + 1)
    ) %>%
    select(Date, Round, Teams, Venue)
  # Split the "Home\nv \nAway" string into two team columns and strip
  # stray newlines/carriage returns.
  games_df <- games_df %>%
    group_by(Date, Round, Venue) %>%
    separate(Teams, into = c("Home.Team", "Away.Team"), sep = "\\\nv\\s\\\n") %>%
    mutate_at(c("Home.Team", "Away.Team"), stringr::str_remove_all, "[\r\n]")
  # Add season game number and season columns.
  games_df <- games_df %>%
    mutate(
      Season.Game = row_number(),
      Season = as.integer(season)
    )
  # Normalise team names via the internal replace_teams() lookup.
  games_df <- games_df %>%
    group_by(Season.Game) %>%
    mutate_at(c("Home.Team", "Away.Team"), replace_teams) %>%
    ungroup()
  # Tidy column order for the public return value.
  games_df <- games_df %>%
    select(Date, Season, Season.Game, Round, Home.Team, Away.Team, Venue)
  return(games_df)
}
|
/R/footywire-calcs.R
|
no_license
|
schmoopies/fitzRoy
|
R
| false
| false
| 6,584
|
r
|
#' Scrape footywire player statistics.
#'
#' \code{get_footywire_stats} returns a dataframe containing player match stats from footywire from 2010 onwards.
#'
#' The dataframe contains both basic and advanced player statistics from each match specified in the match_id input.
#' To find match ID, find the relevant matches on footywire.com
#'
#' @param ids A vector containing match id's to return. Can be a single value or vector of values.
#' @return Returns a data frame containing player match stats for each match ID
#'
#' @examples
#' \dontrun{
#' get_footywire_stats(ids = 5000:5100)
#' }
#' @export
#' @importFrom magrittr %>%
#' @import dplyr
#' @importFrom rvest html_nodes
#' @importFrom rvest html_text
get_footywire_stats <- function(ids) {
  # Validate input before doing any network work.
  if (missing(ids)) stop("Please provide an ID between 1 and 9999")
  if (!is.numeric(ids)) stop("ID must be numeric between 1 and 9999")
  message("Getting data from footywire.com")
  # Progress bar: one tick per match id, only shown if the scrape is
  # estimated to take more than 5 seconds.
  pb <- progress_estimated(length(ids), min_time = 5)
  # Scrape each match and row-bind the per-match data frames.
  # (The previous pre-allocated 44x42 placeholder data frame was dead
  # code -- map_df builds the full result itself -- so it was removed.)
  dat <- ids %>%
    purrr::map_df(~{
      pb$tick()$print() # update the progress bar
      get_match_data(id = .x) # internal helper: scrape one match
    })
  # Stable, predictable row order for downstream consumers.
  dat <- dat %>%
    arrange(Date, Match_id, desc(Status))
  message("Finished getting data")
  return(dat)
}
#' Update the included footywire stats data to the specified date.
#'
#' \code{update_footywire_stats} returns a dataframe containing player match stats from [footywire](footywire.com)
#'
#' The dataframe contains both basic and advanced player statistics from each match from 2010 to the specified end date.
#'
#' This function utilised the included ID's dataset to map known ID's. It looks for any new data that isn't already loaded and proceeds to download it.
#' @param check_existing A logical specifying if we should check against existing dataset. Defaults to TRUE. Making it false will download all data from all history which will take some time.
#' @return Returns a data frame containing player match stats for each match ID
#'
#' @examples
#' \dontrun{
#' update_footywire_stats()
#' }
#' @export
#' @importFrom magrittr %>%
#' @import dplyr
update_footywire_stats <- function(check_existing = TRUE) {
  message("Getting match ID's...")
  # Scrape each season fixture page from 2010 (first year with advanced
  # stats) to the current year and extract the numeric match id from every
  # match-link href.
  fw_ids <- 2010:as.numeric(format(Sys.Date(), "%Y")) %>%
    purrr::map(~ paste0("https://www.footywire.com/afl/footy/ft_match_list?year=", .)) %>%
    purrr::map(xml2::read_html) %>%
    purrr::map(~ rvest::html_nodes(., ".data:nth-child(5) a")) %>%
    purrr::map(~ rvest::html_attr(., "href")) %>%
    purrr::map(~ stringr::str_extract(., "\\d+")) %>%
    purrr::map_if(is.character, as.numeric) %>%
    purrr::reduce(c)
  if (check_existing) {
    # Ids not already present in the bundled `player_stats` dataset.
    ids <- fw_ids[!fw_ids %in% player_stats$Match_id]
    if (length(ids) == 0) {
      message("Data is up to date. Returning original player_stats data")
      return(player_stats)
    } else {
      message(paste0("Downloading new data for ", length(ids), " matches..."))
      message("\nChecking Github")
      # The fitzRoy GitHub repo hosts a more frequently updated copy of
      # player_stats; prefer it before scraping footywire directly.
      dat_url <- "https://raw.githubusercontent.com/jimmyday12/fitzRoy/master/data-raw/player_stats/player_stats.rda"
      # Load an .rda file and return the (single) object it contains,
      # whatever its name.
      loadRData <- function(fileName) {
        load(fileName)
        get(ls()[ls() != "fileName"])
      }
      dat_git <- loadRData(url(dat_url))
      # Ids still missing after accounting for the GitHub copy.
      git_ids <- fw_ids[!fw_ids %in% dat_git$Match_id]
      # BUG FIX: the original used `ids[ids == git_ids]`, an elementwise
      # comparison that recycles when the vectors differ in length and can
      # silently select the wrong ids. Set membership is what is meant.
      ids <- ids[ids %in% git_ids]
      if (length(ids) == 0) {
        message("Finished getting data")
        dat_git
      } else {
        # NOTE(review): new rows are appended to the bundled player_stats,
        # not to dat_git -- confirm that is the intended base dataset.
        new_data <- get_footywire_stats(ids)
        player_stats %>% dplyr::bind_rows(new_data)
      }
    }
  } else {
    message("Downloading all data. Warning - this takes a long time")
    dat <- get_footywire_stats(fw_ids)
    return(dat)
  }
}
#' Get upcoming fixture from footywire.com
#'
#' \code{get_fixture} returns a dataframe containing upcoming AFL Men's season fixture.
#'
#' The dataframe contains the home and away team as well as venue.
#'
#' @param season Season to return, in yyyy format
#' @return Returns a data frame containing the date, teams and venue of each game
#'
#' @examples
#' \dontrun{
#' get_fixture(2018)
#' }
#' @export
#' @importFrom magrittr %>%
#' @import dplyr
get_fixture <- function(season = lubridate::year(Sys.Date())) {
  # Validate the season argument early with informative errors.
  if (!is.numeric(season)) stop(paste0("'season' must be in 4-digit year format. 'season' is currently ", season))
  if (nchar(season) != 4) stop(paste0("'season' must be in 4-digit year format (e.g. 2018). 'season' is currently ", season))
  url_fixture <- paste0("https://www.footywire.com/afl/footy/ft_match_list?year=", season)
  fixture_xml <- xml2::read_html(url_fixture)
  # Extract the text of every .data cell; the fixture table has 7 columns.
  games_text <- fixture_xml %>%
    rvest::html_nodes(".data") %>%
    rvest::html_text()
  # Reshape into rows; keep date, teams and venue (first three columns).
  # (byrow = TRUE spelled out; the single-letter T alias is reassignable.)
  # NOTE(review): as_data_frame() is deprecated in favour of as_tibble();
  # kept to preserve the returned class without a tibble version bump.
  games_df <- matrix(games_text, ncol = 7, byrow = TRUE) %>%
    as_data_frame() %>%
    select(V1:V3)
  names(games_df) <- c("Date", "Teams", "Venue")
  # Drop bye rounds - they have no opponent or venue.
  games_df <- games_df %>%
    filter(Venue != "BYE")
  # Round numbering: games Thursday through Wednesday share a round, so
  # Sunday-Wednesday games (wday 1-4) are shifted back one epiweek.
  games_df <- games_df %>%
    mutate(
      Date = lubridate::ydm_hm(paste(season, Date)),
      epiweek = lubridate::epiweek(Date),
      w.Day = lubridate::wday(Date),
      Round = ifelse(between(w.Day, 1, 4), epiweek - 1, epiweek),
      Round = as.integer(Round - min(Round) + 1)
    ) %>%
    select(Date, Round, Teams, Venue)
  # Split the "Home\nv \nAway" string into two team columns and strip
  # stray newlines/carriage returns.
  games_df <- games_df %>%
    group_by(Date, Round, Venue) %>%
    separate(Teams, into = c("Home.Team", "Away.Team"), sep = "\\\nv\\s\\\n") %>%
    mutate_at(c("Home.Team", "Away.Team"), stringr::str_remove_all, "[\r\n]")
  # Add season game number and season columns.
  games_df <- games_df %>%
    mutate(
      Season.Game = row_number(),
      Season = as.integer(season)
    )
  # Normalise team names via the internal replace_teams() lookup.
  games_df <- games_df %>%
    group_by(Season.Game) %>%
    mutate_at(c("Home.Team", "Away.Team"), replace_teams) %>%
    ungroup()
  # Tidy column order for the public return value.
  games_df <- games_df %>%
    select(Date, Season, Season.Game, Round, Home.Team, Away.Team, Venue)
  return(games_df)
}
|
# Tiny demo: plot the square-root curve over a handful of sample points.
b <- c(1, 3, 7, 5, 3, 2)
f <- sqrt(b)
plot(b, f)
|
/Code_examples/r/plot.r
|
permissive
|
bodacea/datasciencecodingfordevelopment
|
R
| false
| false
| 43
|
r
|
# Tiny demo: plot the square-root curve over a handful of sample points.
b <- c(1, 3, 7, 5, 3, 2)
f <- sqrt(b)
plot(b, f)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/species_list.R
\name{species_list}
\alias{species_list}
\title{species_list}
\usage{
species_list(Class = NULL, Order = NULL, Family = NULL,
SubFamily = NULL, Genus = NULL, Species = NULL, SpecCode = NULL,
SpeciesRefNo = NULL, all_taxa = load_taxa())
}
\arguments{
\item{Class}{Request all species in this taxonomic Class}
\item{Order}{Request all species in this taxonomic Order}
\item{Family}{Request all species in this taxonomic Family}
\item{SubFamily}{Request all species in this taxonomic SubFamily}
\item{Genus}{Request all species in this taxonomic Genus}
\item{Species}{Request all species in this taxonomic Species}
\item{SpecCode}{Request species name of species matching this SpecCode}
\item{SpeciesRefNo}{Request species name of all species matching this SpeciesRefNo}
\item{all_taxa}{The data.frame of all taxa used for the lookup. By default will be loaded
from cache if available, otherwise must be downloaded from the server; about 13 MB, may be
slow.}
}
\description{
Return a species list for a given taxonomic group
}
\details{
The first time the function is called it will download and cache the complete taxa table.
}
\examples{
\dontrun{
## All species in the Family
species_list(Family = 'Scaridae')
## All species in the Genus
species_list(Genus = 'Labroides')
}
}
|
/man/species_list.Rd
|
no_license
|
GapData/rfishbase
|
R
| false
| true
| 1,376
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/species_list.R
\name{species_list}
\alias{species_list}
\title{species_list}
\usage{
species_list(Class = NULL, Order = NULL, Family = NULL,
SubFamily = NULL, Genus = NULL, Species = NULL, SpecCode = NULL,
SpeciesRefNo = NULL, all_taxa = load_taxa())
}
\arguments{
\item{Class}{Request all species in this taxonomic Class}
\item{Order}{Request all species in this taxonomic Order}
\item{Family}{Request all species in this taxonomic Family}
\item{SubFamily}{Request all species in this taxonomic SubFamily}
\item{Genus}{Request all species in this taxonomic Genus}
\item{Species}{Request all species in this taxonomic Species}
\item{SpecCode}{Request species name of species matching this SpecCode}
\item{SpeciesRefNo}{Request species name of all species matching this SpeciesRefNo}
\item{all_taxa}{The data.frame of all taxa used for the lookup. By default will be loaded
from cache if available, otherwise must be downloaded from the server; about 13 MB, may be
slow.}
}
\description{
Return a species list for a given taxonomic group
}
\details{
The first time the function is called it will download and cache the complete taxa table.
}
\examples{
\dontrun{
## All species in the Family
species_list(Family = 'Scaridae')
## All species in the Genus
species_list(Genus = 'Labroides')
}
}
|
# Script for running rstanarm models on server.
# This script: test run of a model with aspe and aspn
library(rstanarm)
library(dplyr)
library(tidyr)
# Set working directory
# NOTE(review): setwd() ties the script to one machine's directory layout;
# prefer setting the working directory externally when invoking the script.
setwd('~/poorcast/bayesian')
##### Read in data, remove NAs, add slope and aspect
# Keep only complete rows, then derive east/north components of the aspect
# angle (asp2, degrees) and a mean-centred slope for use as predictors.
all.sp = read.csv('input/veg_all_predictors.csv') %>%
select(-c(slope1, elev1, asp1, mNPP, veg_class)) %>%
filter(apply(., 1, function(x) all(!is.na(x)))) %>%
mutate(asp.e = sin(pi * asp2 / 180),
asp.n = cos(pi * asp2 / 180),
c.slope = slope2 - mean(slope2))
##### Create training and testing data.
# Per-species split: 1995-2015 for training, 2016-2018 held out for
# validation; obsno is a row id used as an observation-level random effect.
dece.train = all.sp %>% filter(species %in% 'DECE' & year %in% 1995:2015) %>% mutate(obsno = 1:nrow(.))
dece.valid = all.sp %>% filter(species %in% 'DECE' & year %in% 2016:2018) %>% mutate(obsno = 1:nrow(.))
komy.train = all.sp %>% filter(species %in% 'KOMY' & year %in% 1995:2015) %>% mutate(obsno = 1:nrow(.))
komy.valid = all.sp %>% filter(species %in% 'KOMY' & year %in% 2016:2018) %>% mutate(obsno = 1:nrow(.))
gero.train = all.sp %>% filter(species %in% 'GEROT' & year %in% 1995:2015) %>% mutate(obsno = 1:nrow(.))
gero.valid = all.sp %>% filter(species %in% 'GEROT' & year %in% 2016:2018) %>% mutate(obsno = 1:nrow(.))
##### Fit models
# Each species gets the same aspect-only binomial GLMM (plot, year and
# observation-level random intercepts).  The posterior-summary step was
# copy-pasted three times in the original; it is factored into a helper.
# Summarise a posterior_predict() matrix (draws x observations) into one row
# per observation with mean / median / 95% and 68% interval bounds, tagged
# with the species label and the model name.
summarise_preds = function(pred_mat, sp_label) {
  pred_mat %>%
    t() %>%
    as.data.frame() %>%
    mutate(i = 1:nrow(.)) %>%
    gather(key = draw, val = pred, -c(i)) %>%
    group_by(i) %>%
    summarise(yhat_mean = mean(pred),
              yhat_medn = median(pred),
              yhat_q975 = quantile(pred, 0.975),
              yhat_q025 = quantile(pred, 0.025),
              yhat_q841 = quantile(pred, 0.841),
              yhat_q159 = quantile(pred, 0.159)) %>%
    mutate(sp = sp_label, model = 'aspen')
}
### Deschampsia
dece.en = stan_glmer(cbind(n.obs, 100 - n.obs) ~ asp.e + asp.n + (1 | plot) + (1 | year) + (1 | obsno),
                     family = 'binomial',
                     cores = 4,
                     seed = 67000,
                     data = dece.train)
print('deschampsia')
# Posterior predictions for held-out years, re-using only plot-level effects.
dece.pred = posterior_predict(dece.en, newdata = dece.valid,
                              re.form = ~ (1 | plot),
                              seed = 3515,
                              draws = 4000)
dece.pred.summ = summarise_preds(dece.pred, 'dece')
### Kobresia
komy.en = stan_glmer(cbind(n.obs, 100 - n.obs) ~ asp.e + asp.n + (1 | plot) + (1 | year) + (1 | obsno),
                     family = 'binomial',
                     cores = 4,
                     seed = 734392,
                     data = komy.train)
print('kobresia')
komy.pred = posterior_predict(komy.en, newdata = komy.valid,
                              re.form = ~ (1 | plot),
                              seed = 19899,
                              draws = 4000)
komy.pred.summ = summarise_preds(komy.pred, 'komy')
### Geum
gero.en = stan_glmer(cbind(n.obs, 100 - n.obs) ~ asp.e + asp.n + (1 | plot) + (1 | year) + (1 | obsno),
                     family = 'binomial',
                     cores = 4,
                     seed = 189230,
                     data = gero.train)
print('geum')
gero.pred = posterior_predict(gero.en, newdata = gero.valid,
                              re.form = ~ (1 | plot),
                              seed = 22399,
                              draws = 4000)
gero.pred.summ = summarise_preds(gero.pred, 'gero')
# Attach each model's mean total log-likelihood and write a single combined
# summary CSV, then save the fitted model objects.
write.csv(rbind(dece.pred.summ %>%
                  mutate(loglik = log_lik(dece.en) %>% apply(1, sum) %>% mean()),
                komy.pred.summ %>%
                  mutate(loglik = log_lik(komy.en) %>% apply(1, sum) %>% mean()),
                gero.pred.summ %>%
                  mutate(loglik = log_lik(gero.en) %>% apply(1, sum) %>% mean())),
          row.names = FALSE,
          file = 'output/all_aspen_summary.csv')
save(dece.en, komy.en, gero.en, file = 'output/en_mods.RData')
|
/02_fit_species_models/bayes_on_server/all_aspens.R
|
no_license
|
EBIO6100Spring2020/saddle-plants
|
R
| false
| false
| 4,823
|
r
|
# Script for running rstanarm models on server.
# This script: test run of a model with aspe and aspn
library(rstanarm)
library(dplyr)
library(tidyr)
# Set working directory
setwd('~/poorcast/bayesian')
##### Read in data, remove NAs, add slope and aspect
all.sp = read.csv('input/veg_all_predictors.csv') %>%
select(-c(slope1, elev1, asp1, mNPP, veg_class)) %>%
filter(apply(., 1, function(x) all(!is.na(x)))) %>%
mutate(asp.e = sin(pi * asp2 / 180),
asp.n = cos(pi * asp2 / 180),
c.slope = slope2 - mean(slope2))
##### Create training and testing data.
dece.train = all.sp %>% filter(species %in% 'DECE' & year %in% 1995:2015) %>% mutate(obsno = 1:nrow(.))
dece.valid = all.sp %>% filter(species %in% 'DECE' & year %in% 2016:2018) %>% mutate(obsno = 1:nrow(.))
komy.train = all.sp %>% filter(species %in% 'KOMY' & year %in% 1995:2015) %>% mutate(obsno = 1:nrow(.))
komy.valid = all.sp %>% filter(species %in% 'KOMY' & year %in% 2016:2018) %>% mutate(obsno = 1:nrow(.))
gero.train = all.sp %>% filter(species %in% 'GEROT' & year %in% 1995:2015) %>% mutate(obsno = 1:nrow(.))
gero.valid = all.sp %>% filter(species %in% 'GEROT' & year %in% 2016:2018) %>% mutate(obsno = 1:nrow(.))
##### Fit models
### Deschampsia
dece.en = stan_glmer(cbind(n.obs, 100 - n.obs) ~ asp.e + asp.n + (1 | plot) + (1 | year) + (1 | obsno),
family = 'binomial',
cores = 4,
seed = 67000,
data = dece.train)
print('deschampsia')
# Generate posterior predictions
dece.pred = posterior_predict(dece.en, newdata = dece.valid,
re.form = ~ (1 | plot),
seed = 3515,
draws = 4000)
# Generate summary statistics for posterior draws
dece.pred.summ = dece.pred %>%
t() %>%
as.data.frame() %>%
mutate(i = 1:nrow(.)) %>%
gather(key = draw, val = pred, -c(i)) %>%
group_by(i) %>%
summarise(yhat_mean = mean(pred),
yhat_medn = median(pred),
yhat_q975 = quantile(pred, 0.975),
yhat_q025 = quantile(pred, 0.025),
yhat_q841 = quantile(pred, 0.841),
yhat_q159 = quantile(pred, 0.159)) %>%
mutate(sp = 'dece', model = 'aspen')
### Kobresia
komy.en = stan_glmer(cbind(n.obs, 100 - n.obs) ~ asp.e + asp.n + (1 | plot) + (1 | year) + (1 | obsno),
family = 'binomial',
cores = 4,
seed = 734392,
data = komy.train)
print('kobresia')
# Generate posterior predictions
komy.pred = posterior_predict(komy.en, newdata = komy.valid,
re.form = ~ (1 | plot),
seed = 19899,
draws = 4000)
# Generate summary statistics for posterior draws
komy.pred.summ = komy.pred %>%
t() %>%
as.data.frame() %>%
mutate(i = 1:nrow(.)) %>%
gather(key = draw, val = pred, -c(i)) %>%
group_by(i) %>%
summarise(yhat_mean = mean(pred),
yhat_medn = median(pred),
yhat_q975 = quantile(pred, 0.975),
yhat_q025 = quantile(pred, 0.025),
yhat_q841 = quantile(pred, 0.841),
yhat_q159 = quantile(pred, 0.159)) %>%
mutate(sp = 'komy', model = 'aspen')
### Geum
gero.en = stan_glmer(cbind(n.obs, 100 - n.obs) ~ asp.e + asp.n + (1 | plot) + (1 | year) + (1 | obsno),
family = 'binomial',
cores = 4,
seed = 189230,
data = gero.train)
print('geum')
# Generate posterior predictions
gero.pred = posterior_predict(gero.en, newdata = gero.valid,
re.form = ~ (1 | plot),
seed = 22399,
draws = 4000)
# Generate summary statistics for posterior draws
gero.pred.summ = gero.pred %>%
t() %>%
as.data.frame() %>%
mutate(i = 1:nrow(.)) %>%
gather(key = draw, val = pred, -c(i)) %>%
group_by(i) %>%
summarise(yhat_mean = mean(pred),
yhat_medn = median(pred),
yhat_q975 = quantile(pred, 0.975),
yhat_q025 = quantile(pred, 0.025),
yhat_q841 = quantile(pred, 0.841),
yhat_q159 = quantile(pred, 0.159)) %>%
mutate(sp = 'gero', model = 'aspen')
write.csv(rbind(dece.pred.summ %>%
mutate(loglik = log_lik(dece.en) %>% apply(1, sum) %>% mean()),
komy.pred.summ %>%
mutate(loglik = log_lik(komy.en) %>% apply(1, sum) %>% mean()),
gero.pred.summ %>%
mutate(loglik = log_lik(gero.en) %>% apply(1, sum) %>% mean())),
row.names = FALSE,
file = 'output/all_aspen_summary.csv')
save(dece.en, komy.en, gero.en, file = 'output/en_mods.RData')
|
load('CE_project.RData')
## Mixed effects
library(dplyr)
# Set up data -------------------------------------------------------------
# library(lme4)
library(hglm)
library(readxl) # install.packages("readxl") or install.packages("tidyverse")
library(plyr)
library(tibble)
library(data.table)
library(dplyr)
# State name -> abbreviation lookup; trim stray whitespace from character
# columns so joins on abbreviations match cleanly.
state_name_abbr = read.table(file='~/Documents/CE/klepikhina-masters-ce/data/state_to_abbr.csv',header = TRUE, sep=',')
cols_to_be_rectified <- names(state_name_abbr)[vapply(state_name_abbr, is.character, logical(1))]
state_name_abbr[,cols_to_be_rectified] <- lapply(state_name_abbr[,cols_to_be_rectified], trimws)
# Urbanization classification per county, joined to full state names;
# drop bookkeeping columns and strip the " County" suffix for later merges.
urb = read.table(file='~/Documents/CE/klepikhina-masters-ce/data/urbanization_classification.csv',header = TRUE, sep=',')
urb = left_join(urb, state_name_abbr, by = "State.Abr.")
drops = c("State.Abr.", "CBSA.title", "CBSA.2012.pop", "County.2012.pop", "X1990.based.code", "X")
urb = urb[ , !(names(urb) %in% drops)]
colnames(urb) <- c("FIPS", "County", "urb_code_2013", "urb_code_2006", "State")
urb$County = gsub("(.*?)\\sCounty$", "\\1", urb$County)
# NOTE(review): sapply(column, as.factor) applies as.factor elementwise and
# then simplifies, which can collapse values to per-element level codes;
# as.factor(urb[,3]) is likely what was intended -- verify the result.
urb[,3] <- sapply(urb[,3],as.factor)
urb[,4] <- sapply(urb[,4],as.factor)
# County health rankings workbook, sheet 3: outcome/factor Z-scores & ranks.
h_ranks = as.data.table(read_excel(path = "~/Documents/CE/klepikhina-masters-ce/data/county_health_rankings_2013.xls", sheet=3))
# Promote the first row of `df` to be its column names and drop that row.
header.true <- function(df) {
  hdr <- as.character(unlist(df[1, ]))
  out <- df[-1, ]
  names(out) <- hdr
  out
}
# Sheet 3 carries its header in the first data row; promote it, coerce
# FIPS to integer, rename, drop state-level rows (County is NA), and make
# the Z-score/rank columns numeric.
h_ranks = header.true(h_ranks)
h_ranks[, 1] <- sapply(h_ranks[, 1], as.integer)
colnames(h_ranks) <- c("FIPS", "State", "County", "Mortality_Z_Score", "Mortality_Rank", "Morbidity_Z_Score", "Morbidity_Rank", "Health_Behaviors_Z_Score", "Health_Behaviors_Rank", "Clinical_Care_Z_Score", "Clinical_Care_Rank", "Soc_Econ_Factors_Z_Score", "Soc_Econ_Factors_Rank", "Physical_Env_Z_Score", "Physical_Env_Rank")
h_ranks=h_ranks[!is.na(h_ranks$County),]
h_ranks[,4:15] <- lapply(h_ranks[,4:15],as.numeric)
# Sheet 4: ranked measure data.  The chained negative-index drops below
# prune CI/unreliable columns down to the 47 kept measures.
# NOTE(review): these magic column indices are brittle against any change
# in the workbook layout -- consider selecting by column name instead.
h_factors = as.data.table(read_excel(path = "~/Documents/CE/klepikhina-masters-ce/data/county_health_rankings_2013.xls", sheet=4))
h_factors = header.true(h_factors)
h_factors[, 1] <- sapply(h_factors[, 1], as.integer)
h_factors=h_factors[!is.na(h_factors$County),]
h_factors = h_factors[,!c(4:30)]
h_factors = h_factors[,!c(6:8,10:12,14:16,19:21,24:26,29,33:35,38:40,44,51,54:56,59:61,64:66,68,72:74,78,81:83,86:88,91:94,97,99,102,105,108,111)]
h_factors = h_factors[,!c(20,21,23:27)]
h_factors = h_factors[,!c(24,29,46)]
# h_factors = h_factors[-ix, ]#subset(h_factors, select=-c("PCP Rate","PCP Ratio"))
colnames(h_factors) <- c("FIPS", "State", "County",
"Smoker_Sample_Size", "Perc_Smoker", "Perc_Obese",
"Perc_Phys_Inactive", "Excessive_Drinking_Sample_Size", "Perc_Excessive_Drinking",
"MV_Deaths", "MV_Mortality_Rate", "Chlamydia_Cases",
"Chlamydia_Rate", "Teen_Births", "Teen_Pop",
"Teen_Birth_Rate", "Uninsured", "Perc_Uninsured",
"Num_Physicians", "Num_Dentists", "Num_Medicare_Enrolled_Amb_Care",
"Amb_Care_Rate", "Num_Diabetics", "Num_Medicare_Enrolled_Mammography",
"Perc_Mammography", "Perc_HS_Grad", "Num_Some_College",
"Perc_Some_College", "Num_Unemployed", "Labor_Force",
"Perc_Unemployed", "Num_Children_Poverty", "Perc_Children_Poverty",
"Inadeq_Social_Support_Sample_Size", "Perc_No_Social_Support",
"Num_Single_Parent_House",
"Num_Households", "Annual_Violent_Crimes", "Violent_Crime_Rate",
"Avg_Daily_Particulate_Matter", "Perc_Pop_In_Violation_Drinking_Water_Safety",
"Num_Pop_In_Violation_Drinking_Water_Safety",
"Num_Rec_Fac", "Num_Limited_Access_To_Healthy_Food",
"Perc_Limited_Access_To_Healthy_Food", "Num_Fast_Food", "Perc_Fast_Food")
h_factors[,4:47] <- lapply(h_factors[,4:47],as.numeric)
# Sheet 5: county demographics; drop trailing columns 16:61, promote the
# header row, coerce FIPS to integer and measures to numeric, and drop
# state-level rows (County is NA).
demographics = as.data.table(read_excel(path = "~/Documents/CE/klepikhina-masters-ce/data/county_health_rankings_2013.xls", sheet=5))[, (16:61) := NULL]
demographics = header.true(demographics)
colnames(demographics) <- c("FIPS", "State", "County", "Population", "perc_under_18", "perc_over_65", "perc_AfAm", "perc_AmIn_AlNa", "perc_As", "perc_NaHI_PaIs", "perc_Hisp", "perc_NonHispWh", "non_profi_en", "perc_non_profi_en", "perc_female")
demographics=demographics[!is.na(demographics$County),]
demographics[, 1] <- sapply(demographics[, 1], as.integer)
demographics[,4:15] <- lapply(demographics[,4:15],as.numeric)
h_outcomes = as.data.table(read_excel(path = "~/Documents/CE/klepikhina-masters-ce/data/county_health_rankings_2013.xls", sheet=4))[, (31:138) := NULL]
h_outcomes = header.true(h_outcomes)
h_outcomes = header.true(h_outcomes)
h_outcomes[, 1] <- sapply(h_outcomes[, 1], as.integer)
colnames(h_outcomes) <- c("FIPS", "State", "County", "premature_deaths", "premature_death_YPLL_rate", "premature_death_YPLL_rate_CI_low", "premature_death_YPLL_rate_CI_high", "premature_death_YPLL_rate_Z_score", "poor_health_sample_size", "poor_health_perc", "poor_health_CI_low", "poor_health_CI_high", "poor_health_Z_score", "poor_phys_health_sample_size", "poor_phys_health_avg_over_30_days", "poor_phys_health_avg_over_30_days_CI_low", "poor_phys_health_avg_over_30_days_CI_high", "poor_phys_health_avg_over_30_days_Z_score", "poor_ment_health_sample_size", "poor_ment_health_avg_over_30_days", "poor_ment_health_avg_over_30_days_CI_low", "poor_ment_health_avg_over_30_days_CI_high", "poor_ment_health_avg_over_30_days_Z_score", "unreliable_data", "low_birthweight_births", "live_births", "low_birthweight_perc", "low_birthweight_perc_CI_low", "low_birthweight_perc_CI_high", "low_birthweight_perc_Z_score")
h_outcomes=h_outcomes[!is.na(h_outcomes$County),]
h_outcomes[,4:23] <- lapply(h_outcomes[,4:23],as.numeric)
h_outcomes$unreliable_data <- ifelse(grepl("x", h_outcomes$unreliable_data), 1, 0)
h_outcomes$unreliable_data <- sapply(h_outcomes$unreliable_data,as.factor)
h_outcomes[,25:30] <- lapply(h_outcomes[,25:30],as.numeric)
merge_cols <- c("FIPS", "County", "State")
df <- merge(h_ranks, h_outcomes, by = merge_cols, all.x = TRUE)
df <- merge(df, demographics, by = merge_cols, all.x = TRUE)
df <- merge(df, urb, by = merge_cols, all.x = TRUE)
df <- merge(df, h_factors, by = merge_cols, all.x = TRUE)
df[,1] <- sapply(demographics[,1],as.factor)
df$urb_code_2013 <- factor(df$urb_code_2013)
df$poor_health_estimate = round(df$poor_health_perc*(df$poor_health_sample_size*0.01),0)
tmp = df[complete.cases(df), ] # complete dataset -- no NAs
# Imports For Bayes -------------------------------------------------------
library(rstanarm)
library(mice)
# md.pattern(df)
library(VIM)
library(broom.mixed)
library(shinystan)
library(brms)
library(dplyr)
library(rstan)
library(stringr)
library(BayesianFROC)
# Impute Data -------------------------------------------------------------
# Drop the raw poor-health columns (poor_health_estimate, derived above, is
# the modelled quantity) and impute remaining missingness with CART trees.
# m = 3 imputed datasets, one iteration; seed fixed for reproducibility.
df = df[, !(names(df) %in% c("poor_health_perc", "poor_health_sample_size"))]
imputed_Data <- mice(df, m=3, maxit = 1, method = 'cart', seed = 500)
# Create Summary Table 1 ----------------------------------------------------
# Build a county-level summary table from a brms/rstan fit summary for the
# intercept-only models (model "1": outcome ~ (1|State/County)).
#
# fit_summary: matrix from summary(fit)$summary; rows are parameters and the
#   "mean" column holds posterior means.
# county_idx:  row positions of the county-level random effects.  The default
#   55:3195 matches the Poisson/binomial intercept-only fits in this script;
#   Gaussian fits have one extra parameter (sigma), so pass 56:3196 there.
# state_idx:   row positions of the state-level random effects (default 4:54,
#   i.e. 51 states + DC).
# Returns a data.frame with one row per county: state, county-effect name and
# value, state-effect value, and the global intercept (repeated per row).
# The print() calls are intentional debug output preserved from the original.
get_df <- function(fit_summary, county_idx = 55:3195, state_idx = 4:54) {
  post_mean_counties <- fit_summary[, c("mean")][county_idx]
  post_mean_state <- fit_summary[, c("mean")][state_idx]
  intercept <- fit_summary[, c("mean")][1]
  print(dim(fit_summary))  # debug: confirm expected parameter count
  row_name_counties <- names(post_mean_counties)
  row_values_counties <- unname(post_mean_counties)
  # Parameter names look like r_State:County[STATE_County.Name,Intercept];
  # extract the state abbreviation between "[" and the first "_".
  state <- str_extract(row_name_counties, '(?<=\\[)(.*?)(?=\\_)') #"(?<=\\[)([^\\[]*)(?=_)")
  intercept_col <- rep(intercept, length(row_name_counties))
  class.df.counties <- data.frame(state, row_name_counties, row_values_counties, intercept_col)
  print(tail(unique(row_name_counties)))
  print(unique(state))
  print(dim(class.df.counties))
  row_name_state <- names(post_mean_state)
  row_values_state <- unname(post_mean_state)
  # State-level names look like r_State[STATE,Intercept]: take text up to ",".
  state <- str_extract(row_name_state, "(?<=\\[)(.*?)(?=\\,)")
  intercept_col <- rep(intercept, length(row_name_state))
  class.df.states <- data.frame(state, row_name_state, row_values_state, intercept_col)
  print(unique(row_name_state))
  print(unique(state))
  print(dim(class.df.states))
  # Join county and state effects on state; intercept_col is constant, so
  # including it in the key does not change the join result.
  class.df <- merge(class.df.counties, class.df.states, c("state", "intercept_col")) #by="state")
  print(dim(class.df))
  return(class.df)
}
# Get Summary Table 2 -----------------------------------------------------
# Counterpart of get_df() for the covariate models (model "2"): additionally
# extracts the fixed-effect posterior means and attaches the urbanicity dummy
# coefficient that applies to each county.  Row positions are hard-coded for
# fits with 13 fixed-effect rows followed by 2 sd terms; Gaussian fits add a
# sigma row and use shifted indices (handled inline elsewhere in the script).
get_df2 <- function(fit_summary) {
print(dim(fit_summary))
intercept<- fit_summary[,c("mean")][1]
b_perc_AfAm<- fit_summary[,c("mean")][2]
b_perc_As<- fit_summary[,c("mean")][3]
b_perc_AmIn_AlNa<- fit_summary[,c("mean")][4]
b_perc_Hisp<- fit_summary[,c("mean")][5]
# Urbanicity dummies; the summary lists them in this (non-sequential) order.
b_urb_code_20134<- fit_summary[,c("mean")][6]
b_urb_code_20136<- fit_summary[,c("mean")][7]
b_urb_code_20132<- fit_summary[,c("mean")][8]
b_urb_code_20135<- fit_summary[,c("mean")][9]
b_urb_code_20131<- fit_summary[,c("mean")][10]
b_perc_female<- fit_summary[,c("mean")][11]
b_perc_under_18<- fit_summary[,c("mean")][12]
b_perc_over_65<- fit_summary[,c("mean")][13]
# sd_state <- fit_summary[,c("mean")][14]
# sd_counties <- fit_summary[,c("mean")][15]
post_mean_state <- fit_summary[,c("mean")][16:66]
post_mean_counties <- fit_summary[,c("mean")][67:3207]
row_name_counties = names(post_mean_counties)
row_values_counties = unname(post_mean_counties)
# County name sits between the "_" after the state code and the ",": dots in
# parameter names stand for spaces, so they are converted back.
counties = str_extract(row_name_counties, "(?<=_)([^_]+)(?=,)")
counties = gsub('\\.', ' ', counties)
state = str_extract(row_name_counties, '(?<=\\[)(.*?)(?=\\_)') #"(?<=\\[)([^\\[]*)(?=_)")
intercept_col = rep(intercept, length(row_name_counties))
class.df.counties<- data.frame(state, counties, row_name_counties, row_values_counties, intercept_col,
b_perc_AfAm, b_perc_As, b_perc_AmIn_AlNa, b_perc_Hisp, b_perc_female,
b_perc_under_18, b_perc_over_65)
# Map each county to its urbanicity coefficient.  Urb code 3 is the reference
# level, covered by the NA -> 0 fill below.
# NOTE(review): the condition compares class.df.counties$counties to df$County
# ELEMENTWISE -- this is only correct if both vectors have identical length
# and row order.  Confirm, or match on county/state keys instead.
class.df.counties$b_urb[class.df.counties$counties == df$County & df$urb_code_2013 == "1"] <- b_urb_code_20131
class.df.counties$b_urb[class.df.counties$counties == df$County & df$urb_code_2013 == "2"] <- b_urb_code_20132
class.df.counties$b_urb[class.df.counties$counties == df$County & df$urb_code_2013 == "4"] <- b_urb_code_20134
class.df.counties$b_urb[class.df.counties$counties == df$County & df$urb_code_2013 == "5"] <- b_urb_code_20135
class.df.counties$b_urb[class.df.counties$counties == df$County & df$urb_code_2013 == "6"] <- b_urb_code_20136
class.df.counties$b_urb[is.na(class.df.counties$b_urb)] <- 0
print(dim(class.df.counties))
row_name_state = names(post_mean_state)
row_values_state = unname(post_mean_state)
state = str_extract(row_name_state, "(?<=\\[)(.*?)(?=\\,)")
intercept_col = rep(intercept, length(row_name_state))
class.df.states<- data.frame(state, row_name_state, row_values_state, intercept_col)
print(dim(class.df.states))
# Join county and state effects on state; intercept_col is constant so the
# extra key is harmless.
class.df = merge(class.df.counties, class.df.states,c("state","intercept_col")) #by="state")
print(dim(class.df))
return(class.df)
}
# SEL Ranking -------------------------------------------------------------
# Squared-error loss between model-based county ranks and published ranks.
#
# data: data.frame with numeric columns my_ranks and true_ranks; rows with an
#   NA in ANY column are dropped before scoring (na.omit on the whole frame,
#   matching the original behaviour).
# Returns the mean of (my_ranks - true_ranks)^2 over complete rows.  When no
# complete rows remain, returns NA_real_ explicitly (the original computed
# (1/0) * 0 and silently produced NaN).
sel <- function(data) {
  complete <- na.omit(data)
  if (nrow(complete) == 0) {
    return(NA_real_)
  }
  mean((complete$my_ranks - complete$true_ranks)^2)
}
save.image('CE_project.RData')
############################################################################################################################
#################################################### premature deaths 1 ####################################################
# Model 1: intercept-only Poisson model of premature death counts with nested
# State/County random intercepts, fitted to each imputed dataset and pooled
# by brm_multiple.  gamma(7.5, 1) keeps the log-scale intercept positive and
# near typical log-count magnitudes.
premature_deaths.1.prior <- c(
prior(gamma(7.5, 1), class = Intercept)
)
premature_deaths.bayes.1 = brm_multiple(premature_deaths ~ (1|State/County), data=imputed_Data,
family = poisson(link = "log"), prior=premature_deaths.1.prior,
backend = "rstan", silent = 0, iter=4000)
# Checkpoint after the long fit; the immediate load() is a no-op in a single
# straight-through run and exists to support interactive restarts.
save.image('CE_project.RData')
load('CE_project.RData')
launch_shinystan(premature_deaths.bayes.1)
fit_summary.pd1 <- summary(premature_deaths.bayes.1$fit)
premature_deaths.1.df = get_df(fit_summary.pd1$summary)
# Expected count per county: exp(intercept + state effect + county effect),
# i.e. the inverse of the Poisson log link.
premature_deaths.1.df.summed = premature_deaths.1.df[,c("state", "row_name_counties")]
premature_deaths.1.df.summed$summed = exp(
premature_deaths.1.df$intercept_col +
premature_deaths.1.df$row_values_state +
premature_deaths.1.df$row_values_counties)
# order(order(...)) turns the within-state ordering into ranks; with
# decreasing=TRUE the largest expected count gets rank 1.
rank.premature_deaths.1 = premature_deaths.1.df.summed %>%
group_by(state) %>%
mutate(my_ranks = order(order(summed, row_name_counties, decreasing=TRUE)))
rank.premature_deaths.1 = rank.premature_deaths.1[with(rank.premature_deaths.1, order(row_name_counties)), ]
# NOTE(review): assigning df$Mortality_Rank positionally assumes df and the
# ranked table share the same county order after the sort above -- confirm
# before trusting the loss values.
rank.premature_deaths.1$true_ranks = df$Mortality_Rank
# Per-state squared-error loss between model ranks and published ranks.
sel.prem_death.1 <-rank.premature_deaths.1 %>%
group_by(state) %>%
do(data.frame(standard.error.loss.mortality.1=sel(.)))
sel.prem_death.1 = sel.prem_death.1[with(sel.prem_death.1, order(standard.error.loss.mortality.1)), ]
sel.prem_death.1
# Bar chart of per-state loss, one bar per state.
g1 <- ggplot(data = sel.prem_death.1, mapping = aes(x = as.factor(state), y = standard.error.loss.mortality.1)) +
geom_bar(stat = "identity") +
labs(x = "state") +
ggtitle("Mortality Rank Mean Squared Error Loss Model 1") +
xlab("") +
ylab("Mean Squared Error Loss") +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1),
axis.text=element_text(size=12),
axis.title=element_text(size=14),
plot.title=element_text(size=20))
print(g1)
#################################################### premature deaths 2 ####################################################
# Model 2: same Poisson outcome, now with demographic fixed effects plus the
# nested State/County random intercepts.  normal(0, 10) is a weakly
# informative prior on the regression coefficients.
premature_deaths.2.prior <- c(
prior(gamma(7.5, 1), class = Intercept),
prior(normal(0, 10), class = b)
)
# NOTE(review): urb_code_2013 is passed bare here, while later models wrap it
# in factor(); df$urb_code_2013 was converted to a factor earlier, so verify
# the imputed datasets preserved that type.
premature_deaths.bayes.2 = brm_multiple(premature_deaths ~ perc_AfAm + perc_As + perc_AmIn_AlNa +
perc_Hisp + urb_code_2013 + perc_female + perc_under_18 +
perc_over_65 + (1|State/County), data=imputed_Data,
family = poisson(link = "log"), prior=premature_deaths.2.prior,
backend = "rstan", silent = 0, iter=4000)
# Checkpoint; load() is a no-op in a straight-through run (see model 1).
save.image('CE_project.RData')
load('CE_project.RData')
launch_shinystan(premature_deaths.bayes.2)
fit_summary.pd2 = summary(premature_deaths.bayes.2$fit)
premature_deaths.2.df = get_df2(fit_summary.pd2$summary)
# Expected count per county: exp of the full linear predictor (intercept +
# all fixed effects + state and county random effects).
premature_deaths.2.df.summed = premature_deaths.2.df[,c("state", "row_name_counties")]
premature_deaths.2.df.summed$summed = exp(premature_deaths.2.df$intercept_col +
premature_deaths.2.df$b_perc_AfAm +
premature_deaths.2.df$b_perc_As +
premature_deaths.2.df$b_perc_AmIn_AlNa +
premature_deaths.2.df$b_perc_Hisp+
premature_deaths.2.df$b_urb+
premature_deaths.2.df$b_perc_female+
premature_deaths.2.df$b_perc_under_18+
premature_deaths.2.df$b_perc_over_65+
premature_deaths.2.df$row_values_state+
premature_deaths.2.df$row_values_counties)
# Within-state ranks; decreasing=TRUE gives rank 1 to the largest value.
rank.premature_deaths.2 = premature_deaths.2.df.summed %>%
group_by(state) %>%
mutate(my_ranks = order(order(summed, row_name_counties, decreasing=TRUE)))
rank.premature_deaths.2 = rank.premature_deaths.2[with(rank.premature_deaths.2, order(row_name_counties)), ]
# NOTE(review): same positional-alignment assumption as model 1 -- confirm.
rank.premature_deaths.2$true_ranks = df$Mortality_Rank
sel.prem_death.2 <-rank.premature_deaths.2 %>%
group_by(state) %>%
do(data.frame(standard.error.loss.mortality.2=sel(.)))
sel.prem_death.2 = sel.prem_death.2[with(sel.prem_death.2, order(standard.error.loss.mortality.2)), ]
sel.prem_death.2
# Bar chart of per-state loss for the covariate model.
g2 <- ggplot(data = sel.prem_death.2, mapping = aes(x = as.factor(state), y = standard.error.loss.mortality.2)) +
geom_bar(stat = "identity") +
labs(x = "state") +
ggtitle("Mortality Rank Mean Squared Error Loss Model 2") +
xlab("") +
ylab("Mean Squared Error Loss") +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1),
axis.text=element_text(size=12),
axis.title=element_text(size=14),
plot.title=element_text(size=20))
print(g2)
############################################################################################################################
#################################################### poor_phys_health_avg_over_30_days 1 ###################################
# Model 1 for average poor-physical-health days: Gaussian with identity link,
# nested State/County random intercepts.
poor_phys_avg.1.prior <- c(
prior(normal(3, 10), class = Intercept),
prior(normal(3, 10), class = sigma)
)
poor_phys_avg.bayes.1 = brm_multiple(poor_phys_health_avg_over_30_days ~ (1|State/County), data=imputed_Data,
family = gaussian(link = "identity"), prior=poor_phys_avg.1.prior,
backend = "rstan", silent = 0, iter=4000, control=list(adapt_delta=0.8))
# Checkpoint; load() is a no-op in a straight-through run.
save.image('CE_project.RData')
load('CE_project.RData')
launch_shinystan(poor_phys_avg.bayes.1)
# list_of_draws <- extract(testing$fit)
# Inline replica of get_df(): the Gaussian fit carries an extra sigma
# parameter, so every index is shifted by one (56:3196 / 5:55 instead of the
# default 55:3195 / 4:54).
fit_summary.ppa1 = summary(poor_phys_avg.bayes.1$fit)
print(dim(fit_summary.ppa1$summary))
post_mean_counties <- fit_summary.ppa1$summary[,c("mean")][56:3196]
post_mean_state <- fit_summary.ppa1$summary[,c("mean")][5:55]
intercept<- fit_summary.ppa1$summary[,c("mean")][1]
row_name_counties = names(post_mean_counties)
row_values_counties = unname(post_mean_counties)
# State abbreviation sits between "[" and the first "_" in parameter names.
state = str_extract(row_name_counties, '(?<=\\[)(.*?)(?=\\_)') #"(?<=\\[)([^\\[]*)(?=_)")
intercept_col = rep(intercept, length(row_name_counties))
poor_phys_avg.1.df.counties<- data.frame(state, row_name_counties, row_values_counties, intercept_col)
print(unique(state))
print(dim(poor_phys_avg.1.df.counties))
row_name_state = names(post_mean_state)
row_values_state = unname(post_mean_state)
state = str_extract(row_name_state, "(?<=\\[)(.*?)(?=\\,)")
intercept_col = rep(intercept, length(row_name_state))
poor_phys_avg.1.df.states<- data.frame(state, row_name_state, row_values_state, intercept_col)
print(unique(row_name_state))
print(unique(state))
print(dim(poor_phys_avg.1.df.states))
poor_phys_avg.1.df = merge(poor_phys_avg.1.df.counties, poor_phys_avg.1.df.states,c("state","intercept_col"))
print(dim(poor_phys_avg.1.df))
# Clear the scratch vectors used for the inline extraction.
rm(post_mean_counties, row_values_counties, state, intercept_col, poor_phys_avg.1.df.counties,
row_name_state, row_values_state, poor_phys_avg.1.df.states)
# poor_phys_avg.1.df = get_df(fit_summary.ppa1$summary)
# Identity link: the fitted value is the plain sum of the effects (no exp).
poor_phys_avg.1.df.summed = poor_phys_avg.1.df[,c("state", "row_name_counties")]
poor_phys_avg.1.df.summed$summed = poor_phys_avg.1.df$intercept_col +
poor_phys_avg.1.df$row_values_state +
poor_phys_avg.1.df$row_values_counties
# rank.poor_phys_avg.1 = poor_phys_avg.1.df.summed %>%
# group_by(state) %>%
# mutate(my_ranks = order(order(summed, row_name_counties, decreasing=TRUE)))
#################################################### poor_phys_health_avg_over_30_days 2 ####################################
# Model 2 for average poor-physical-health days: Gaussian with demographic
# fixed effects plus nested State/County random intercepts.
poor_phys_avg.2.prior <- c(
prior(normal(0, 10), class = Intercept),
prior(normal(0, 10), class = b),
prior(normal(0, 10), class = sigma)
) ## HAS SUPER BAD COUNTY INTERCEPT
poor_phys_avg.bayes.2 = brm_multiple(poor_phys_health_avg_over_30_days ~ perc_AfAm + perc_As + perc_AmIn_AlNa +
perc_Hisp + factor(urb_code_2013) + perc_female + perc_under_18 +
perc_over_65 + (1|State/County), data=imputed_Data,
family = gaussian(link = "identity"), prior=poor_phys_avg.2.prior,
backend = "rstan", silent = 0, iter=4000)
# Checkpoint; load() is a no-op in a straight-through run.
save.image('CE_project.RData')
load('CE_project.RData')
launch_shinystan(poor_phys_avg.bayes.2)
# Inline replica of get_df2(): the Gaussian fit has an extra sigma parameter,
# so the random-effect indices shift by one (17:67 / 68:3208 instead of
# 16:66 / 67:3207).
fit_summary.ppa2 = summary(poor_phys_avg.bayes.2$fit)
print(dim(fit_summary.ppa2$summary))
intercept<- fit_summary.ppa2$summary[,c("mean")][1]
b_perc_AfAm<- fit_summary.ppa2$summary[,c("mean")][2]
b_perc_As<- fit_summary.ppa2$summary[,c("mean")][3]
b_perc_AmIn_AlNa<- fit_summary.ppa2$summary[,c("mean")][4]
b_perc_Hisp<- fit_summary.ppa2$summary[,c("mean")][5]
# Urbanicity dummies appear in this (non-sequential) order in the summary.
b_urb_code_20134<- fit_summary.ppa2$summary[,c("mean")][6]
b_urb_code_20136<- fit_summary.ppa2$summary[,c("mean")][7]
b_urb_code_20132<- fit_summary.ppa2$summary[,c("mean")][8]
b_urb_code_20135<- fit_summary.ppa2$summary[,c("mean")][9]
b_urb_code_20131<- fit_summary.ppa2$summary[,c("mean")][10]
b_perc_female<- fit_summary.ppa2$summary[,c("mean")][11]
b_perc_under_18<- fit_summary.ppa2$summary[,c("mean")][12]
b_perc_over_65<- fit_summary.ppa2$summary[,c("mean")][13]
post_mean_state <- fit_summary.ppa2$summary[,c("mean")][17:67]
post_mean_counties <- fit_summary.ppa2$summary[,c("mean")][68:3208]
row_name_counties = names(post_mean_counties)
row_values_counties = unname(post_mean_counties)
# County name between "_" and ","; dots stand for spaces in parameter names.
counties = str_extract(row_name_counties, "(?<=_)([^_]+)(?=,)")
counties = gsub('\\.', ' ', counties)
state = str_extract(row_name_counties, '(?<=\\[)(.*?)(?=\\_)') #"(?<=\\[)([^\\[]*)(?=_)")
intercept_col = rep(intercept, length(row_name_counties))
poor_phys_avg.2.df.counties<- data.frame(state, counties, row_name_counties, row_values_counties, intercept_col,
b_perc_AfAm, b_perc_As, b_perc_AmIn_AlNa, b_perc_Hisp, b_perc_female,
b_perc_under_18, b_perc_over_65)
# Attach the urbanicity coefficient per county (code 3 is the reference level,
# covered by the NA -> 0 fill).  NOTE(review): elementwise comparison against
# df$County assumes identical length and row order -- confirm.
poor_phys_avg.2.df.counties$b_urb[poor_phys_avg.2.df.counties$counties == df$County & df$urb_code_2013 == "1"] <- b_urb_code_20131
poor_phys_avg.2.df.counties$b_urb[poor_phys_avg.2.df.counties$counties == df$County & df$urb_code_2013 == "2"] <- b_urb_code_20132
poor_phys_avg.2.df.counties$b_urb[poor_phys_avg.2.df.counties$counties == df$County & df$urb_code_2013 == "4"] <- b_urb_code_20134
poor_phys_avg.2.df.counties$b_urb[poor_phys_avg.2.df.counties$counties == df$County & df$urb_code_2013 == "5"] <- b_urb_code_20135
poor_phys_avg.2.df.counties$b_urb[poor_phys_avg.2.df.counties$counties == df$County & df$urb_code_2013 == "6"] <- b_urb_code_20136
poor_phys_avg.2.df.counties$b_urb[is.na(poor_phys_avg.2.df.counties$b_urb)] <- 0
print(dim(poor_phys_avg.2.df.counties))
row_name_state = names(post_mean_state)
row_values_state = unname(post_mean_state)
state = str_extract(row_name_state, "(?<=\\[)(.*?)(?=\\,)")
intercept_col = rep(intercept, length(row_name_state))
poor_phys_avg.2.df.states<- data.frame(state, row_name_state, row_values_state, intercept_col)
print(dim(poor_phys_avg.2.df.states))
poor_phys_avg.2.df = merge(poor_phys_avg.2.df.counties, poor_phys_avg.2.df.states,c("state","intercept_col")) #by="state")
print(dim(poor_phys_avg.2.df))
# Clear the scratch variables used for the inline extraction.
rm(post_mean_counties, row_values_counties, state, intercept_col, poor_phys_avg.2.df.counties,
row_name_state, row_values_state, poor_phys_avg.2.df.states, b_perc_AfAm, b_perc_As,
b_perc_AmIn_AlNa, b_perc_Hisp, b_perc_female, b_perc_under_18, b_perc_over_65)
# poor_phys_avg.2.df = get_df2(fit_summary.ppa2$summary)
# Identity link: fitted value is the plain sum of intercept, fixed effects,
# and random effects (no exp).
poor_phys_avg.2.df.summed = poor_phys_avg.2.df[,c("state", "row_name_counties")]
poor_phys_avg.2.df.summed$summed = poor_phys_avg.2.df$intercept_col +
poor_phys_avg.2.df$b_perc_AfAm +
poor_phys_avg.2.df$b_perc_As +
poor_phys_avg.2.df$b_perc_AmIn_AlNa +
poor_phys_avg.2.df$b_perc_Hisp+
poor_phys_avg.2.df$b_urb+
poor_phys_avg.2.df$b_perc_female+
poor_phys_avg.2.df$b_perc_under_18+
poor_phys_avg.2.df$b_perc_over_65+
poor_phys_avg.2.df$row_values_state+
poor_phys_avg.2.df$row_values_counties
# rank.poor_phys_avg.2 = poor_phys_avg.2.df.summed %>%
# group_by(state) %>%
# mutate(my_ranks = order(order(summed, row_name_counties, decreasing=TRUE)))
# rank.poor_phys_avg.2 = rank.poor_phys_avg.2[with(rank.poor_phys_avg.2, order(row_name_counties)), ]
############################################################################################################################
#################################################### poor_ment_health_avg_over_30_days 1 ###################################
# Model 1 for average poor-mental-health days: Gaussian, identity link,
# nested State/County random intercepts.
poor_ment_avg.1.prior <- c(
prior(normal(0, 10), class = Intercept),
prior(normal(0, 10), class = sigma)
) ## HAS SUPER BAD COUNTY INTERCEPT
poor_ment_avg.bayes.1 = brm_multiple(poor_ment_health_avg_over_30_days ~ (1|State/County), data=imputed_Data,
family = gaussian(link = "identity"), prior=poor_ment_avg.1.prior,
backend = "rstan", silent = 0, iter=4000)
# Checkpoint; load() is a no-op in a straight-through run.
save.image('CE_project.RData')
load('CE_project.RData')
launch_shinystan(poor_ment_avg.bayes.1)
# Inline replica of get_df() with sigma-shifted indices (56:3196 / 5:55),
# same as the Gaussian physical-health model above.
fit_summary.pma1 = summary(poor_ment_avg.bayes.1$fit)
print(dim(fit_summary.pma1$summary))
post_mean_counties <- fit_summary.pma1$summary[,c("mean")][56:3196]
post_mean_state <- fit_summary.pma1$summary[,c("mean")][5:55]
intercept<- fit_summary.pma1$summary[,c("mean")][1]
row_name_counties = names(post_mean_counties)
row_values_counties = unname(post_mean_counties)
state = str_extract(row_name_counties, '(?<=\\[)(.*?)(?=\\_)') #"(?<=\\[)([^\\[]*)(?=_)")
intercept_col = rep(intercept, length(row_name_counties))
poor_ment_avg.1.df.counties<- data.frame(state, row_name_counties, row_values_counties, intercept_col)
print(unique(state))
print(dim(poor_ment_avg.1.df.counties))
row_name_state = names(post_mean_state)
row_values_state = unname(post_mean_state)
state = str_extract(row_name_state, "(?<=\\[)(.*?)(?=\\,)")
intercept_col = rep(intercept, length(row_name_state))
poor_ment_avg.1.df.states<- data.frame(state, row_name_state, row_values_state, intercept_col)
print(unique(row_name_state))
print(unique(state))
print(dim(poor_ment_avg.1.df.states))
poor_ment_avg.1.df = merge(poor_ment_avg.1.df.counties, poor_ment_avg.1.df.states,c("state","intercept_col"))
print(dim(poor_ment_avg.1.df))
# Clear the scratch vectors used for the inline extraction.
rm(post_mean_counties, row_values_counties, state, intercept_col, poor_ment_avg.1.df.counties,
row_name_state, row_values_state, poor_ment_avg.1.df.states)
# Identity link: fitted value is the plain sum of the effects.
poor_ment_avg.1.df.summed = poor_ment_avg.1.df[,c("state", "row_name_counties")]
poor_ment_avg.1.df.summed$summed = poor_ment_avg.1.df$intercept_col +
poor_ment_avg.1.df$row_values_state +
poor_ment_avg.1.df$row_values_counties
# rank.poor_ment_avg.1 = poor_ment_avg.1.df.summed %>%
# group_by(state) %>%
# mutate(my_ranks = order(order(summed, row_name_counties, decreasing=TRUE)))
#
# rank.poor_ment_avg.1 = rank.poor_ment_avg.1[with(rank.poor_ment_avg.1, order(row_name_counties)), ]
#################################################### poor_ment_health_avg_over_30_days 2 ####################################
# Model 2 for average poor-mental-health days: Gaussian with demographic fixed
# effects plus nested State/County random intercepts.  Tighter normal(0, 1)
# priors than the physical-health model.
poor_ment_avg.2.prior <- c(
prior(normal(0, 1), class = Intercept),
prior(normal(0, 1), class = b),
prior(normal(0, 1), class = sigma)
) ## HAS SUPER BAD COUNTY INTERCEPT
poor_ment_avg.bayes.2 = brm_multiple(poor_ment_health_avg_over_30_days ~ perc_AfAm + perc_As + perc_AmIn_AlNa +
perc_Hisp + factor(urb_code_2013) + perc_female + perc_under_18 +
perc_over_65 + (1|State/County), data=imputed_Data,
family = gaussian(link = "identity"), prior=poor_ment_avg.2.prior,
backend = "rstan", silent = 0, iter=4000)
# Checkpoint; load() is a no-op in a straight-through run.
save.image('CE_project.RData')
load('CE_project.RData')
launch_shinystan(poor_ment_avg.bayes.2)
# Inline replica of get_df2() with sigma-shifted indices (17:67 / 68:3208).
fit_summary.pma2 = summary(poor_ment_avg.bayes.2$fit)
print(dim(fit_summary.pma2$summary))
intercept<- fit_summary.pma2$summary[,c("mean")][1]
b_perc_AfAm<- fit_summary.pma2$summary[,c("mean")][2]
b_perc_As<- fit_summary.pma2$summary[,c("mean")][3]
b_perc_AmIn_AlNa<- fit_summary.pma2$summary[,c("mean")][4]
b_perc_Hisp<- fit_summary.pma2$summary[,c("mean")][5]
# Urbanicity dummies appear in this (non-sequential) order in the summary.
b_urb_code_20134<- fit_summary.pma2$summary[,c("mean")][6]
b_urb_code_20136<- fit_summary.pma2$summary[,c("mean")][7]
b_urb_code_20132<- fit_summary.pma2$summary[,c("mean")][8]
b_urb_code_20135<- fit_summary.pma2$summary[,c("mean")][9]
b_urb_code_20131<- fit_summary.pma2$summary[,c("mean")][10]
b_perc_female<- fit_summary.pma2$summary[,c("mean")][11]
b_perc_under_18<- fit_summary.pma2$summary[,c("mean")][12]
b_perc_over_65<- fit_summary.pma2$summary[,c("mean")][13]
post_mean_state <- fit_summary.pma2$summary[,c("mean")][17:67]
post_mean_counties <- fit_summary.pma2$summary[,c("mean")][68:3208]
row_name_counties = names(post_mean_counties)
row_values_counties = unname(post_mean_counties)
counties = str_extract(row_name_counties, "(?<=_)([^_]+)(?=,)")
counties = gsub('\\.', ' ', counties)
state = str_extract(row_name_counties, '(?<=\\[)(.*?)(?=\\_)') #"(?<=\\[)([^\\[]*)(?=_)")
intercept_col = rep(intercept, length(row_name_counties))
poor_ment_avg.2.df.counties<- data.frame(state, counties, row_name_counties, row_values_counties, intercept_col,
b_perc_AfAm, b_perc_As, b_perc_AmIn_AlNa, b_perc_Hisp, b_perc_female,
b_perc_under_18, b_perc_over_65)
# Attach urbanicity coefficient (code 3 is the reference, NA -> 0 below).
# NOTE(review): elementwise comparison against df$County assumes identical
# length and row order -- confirm.
poor_ment_avg.2.df.counties$b_urb[poor_ment_avg.2.df.counties$counties == df$County & df$urb_code_2013 == "1"] <- b_urb_code_20131
poor_ment_avg.2.df.counties$b_urb[poor_ment_avg.2.df.counties$counties == df$County & df$urb_code_2013 == "2"] <- b_urb_code_20132
poor_ment_avg.2.df.counties$b_urb[poor_ment_avg.2.df.counties$counties == df$County & df$urb_code_2013 == "4"] <- b_urb_code_20134
poor_ment_avg.2.df.counties$b_urb[poor_ment_avg.2.df.counties$counties == df$County & df$urb_code_2013 == "5"] <- b_urb_code_20135
poor_ment_avg.2.df.counties$b_urb[poor_ment_avg.2.df.counties$counties == df$County & df$urb_code_2013 == "6"] <- b_urb_code_20136
poor_ment_avg.2.df.counties$b_urb[is.na(poor_ment_avg.2.df.counties$b_urb)] <- 0
print(dim(poor_ment_avg.2.df.counties))
row_name_state = names(post_mean_state)
row_values_state = unname(post_mean_state)
state = str_extract(row_name_state, "(?<=\\[)(.*?)(?=\\,)")
intercept_col = rep(intercept, length(row_name_state))
poor_ment_avg.2.df.states<- data.frame(state, row_name_state, row_values_state, intercept_col)
print(dim(poor_ment_avg.2.df.states))
poor_ment_avg.2.df = merge(poor_ment_avg.2.df.counties, poor_ment_avg.2.df.states,c("state","intercept_col")) #by="state")
print(dim(poor_ment_avg.2.df))
# Clear the scratch variables used for the inline extraction.
rm(post_mean_counties, row_values_counties, state, intercept_col, poor_ment_avg.2.df.counties,
row_name_state, row_values_state, poor_ment_avg.2.df.states, b_perc_AfAm, b_perc_As,
b_perc_AmIn_AlNa, b_perc_Hisp, b_perc_female, b_perc_under_18, b_perc_over_65)
# Identity link: fitted value is the plain sum of intercept, fixed effects,
# and random effects.
poor_ment_avg.2.df.summed = poor_ment_avg.2.df[,c("state", "row_name_counties")]
poor_ment_avg.2.df.summed$summed = poor_ment_avg.2.df$intercept_col +
poor_ment_avg.2.df$b_perc_AfAm +
poor_ment_avg.2.df$b_perc_As +
poor_ment_avg.2.df$b_perc_AmIn_AlNa +
poor_ment_avg.2.df$b_perc_Hisp+
poor_ment_avg.2.df$b_urb+
poor_ment_avg.2.df$b_perc_female+
poor_ment_avg.2.df$b_perc_under_18+
poor_ment_avg.2.df$b_perc_over_65+
poor_ment_avg.2.df$row_values_state+
poor_ment_avg.2.df$row_values_counties
# rank.poor_ment_avg.2 = poor_ment_avg.2.df.summed %>%
# group_by(state) %>%
# mutate(my_ranks = order(order(summed, row_name_counties, decreasing=TRUE)))
#
# rank.poor_ment_avg.2 = rank.poor_ment_avg.2[with(rank.poor_ment_avg.2, order(row_name_counties)), ]
############################################################################################################################
#################################################### low_birthweight_births 1 ###################################
# Model 1 for low-birthweight births: binomial/logit with nested State/County
# random intercepts.  NOTE(review): the response is a raw count with no
# `| trials(live_births)` term -- confirm brms treats this as intended
# (bernoulli-style 0/1 data would be the usual shape for bare binomial).
low_bwb.1.prior <- c(
prior(normal(1, 1), class = Intercept)
) ## HAS KINDA BAD COUNTY INTERCEPT
low_bwb.bayes.1 = brm_multiple(low_birthweight_births ~ (1|State/County), data=imputed_Data,
family = binomial(link = "logit"), prior=low_bwb.1.prior,
backend = "rstan", silent = 0, iter=4000)
# Checkpoint; load() is a no-op in a straight-through run.
save.image('CE_project.RData')
load('CE_project.RData')
launch_shinystan(low_bwb.bayes.1)
# Inline replica of get_df(); no sigma parameter here, so the indices match
# the get_df defaults (55:3195 / 4:54).
fit_summary.lbwb1 = summary(low_bwb.bayes.1$fit)
print(dim(fit_summary.lbwb1$summary))
post_mean_counties <- fit_summary.lbwb1$summary[,c("mean")][55:3195]
post_mean_state <- fit_summary.lbwb1$summary[,c("mean")][4:54]
intercept<- fit_summary.lbwb1$summary[,c("mean")][1]
row_name_counties = names(post_mean_counties)
row_values_counties = unname(post_mean_counties)
state = str_extract(row_name_counties, '(?<=\\[)(.*?)(?=\\_)') #"(?<=\\[)([^\\[]*)(?=_)")
intercept_col = rep(intercept, length(row_name_counties))
low_bwb.1.df.counties<- data.frame(state, row_name_counties, row_values_counties, intercept_col)
print(unique(state))
print(dim(low_bwb.1.df.counties))
row_name_state = names(post_mean_state)
row_values_state = unname(post_mean_state)
state = str_extract(row_name_state, "(?<=\\[)(.*?)(?=\\,)")
intercept_col = rep(intercept, length(row_name_state))
low_bwb.1.df.states<- data.frame(state, row_name_state, row_values_state, intercept_col)
print(unique(row_name_state))
print(unique(state))
print(dim(low_bwb.1.df.states))
low_bwb.1.df = merge(low_bwb.1.df.counties, low_bwb.1.df.states,c("state","intercept_col"))
print(dim(low_bwb.1.df))
# Clear the scratch vectors used for the inline extraction.
rm(post_mean_counties, row_values_counties, state, intercept_col, low_bwb.1.df.counties,
row_name_state, row_values_state, low_bwb.1.df.states)
# Posterior-mean fitted probability per county: inverse-logit of
# (intercept + state effect + county effect).  plogis(x) from the stats
# package equals exp(x) / (1 + exp(x)) but is numerically stable and avoids
# writing the three-term sum twice.
low_bwb.1.df.summed = low_bwb.1.df[,c("state", "row_name_counties")]
low_bwb.1.df.summed$summed = plogis(low_bwb.1.df$intercept_col +
low_bwb.1.df$row_values_state +
low_bwb.1.df$row_values_counties)
# rank.low_bwb.1 = low_bwb.1.df.summed %>%
# group_by(state) %>%
# mutate(my_ranks = order(order(summed, row_name_counties, decreasing=TRUE)))
#
# rank.low_bwb.1 = rank.low_bwb.1[with(rank.low_bwb.1, order(row_name_counties)), ]
#################################################### low_birthweight_births 2 ####################################
# Model 2 for low-birthweight births: binomial/logit with demographic fixed
# effects plus nested State/County random intercepts.
low_bwb.2.prior <- c(
prior(normal(1, 1), class = Intercept),
prior(normal(1, 1), class = b)
)
# NOTE(review): brms expects `inits` as "random"/"0" or a list of named lists
# per chain -- a bare numeric vector c(15, 5) is likely invalid or silently
# ignored; confirm the intent.
low_bwb.bayes.2 = brm_multiple(low_birthweight_births ~ perc_AfAm + perc_As + perc_AmIn_AlNa +
perc_Hisp + factor(urb_code_2013) + perc_female + perc_under_18 +
perc_over_65 + (1|State/County), data=imputed_Data,
family = binomial(link = "logit"), prior=low_bwb.2.prior,
backend = "rstan", silent = 0, inits=c(15, 5), iter=4000)
# Checkpoint; load() is a no-op in a straight-through run.
save.image('CE_project.RData')
load('CE_project.RData')
launch_shinystan(low_bwb.bayes.2)
# Inline replica of get_df2(); no sigma parameter, so indices match the
# get_df2 hard-coded values (16:66 / 67:3207).
fit_summary.lbwb2 = summary(low_bwb.bayes.2$fit)
print(dim(fit_summary.lbwb2$summary))
intercept<- fit_summary.lbwb2$summary[,c("mean")][1]
b_perc_AfAm<- fit_summary.lbwb2$summary[,c("mean")][2]
b_perc_As<- fit_summary.lbwb2$summary[,c("mean")][3]
b_perc_AmIn_AlNa<- fit_summary.lbwb2$summary[,c("mean")][4]
b_perc_Hisp<- fit_summary.lbwb2$summary[,c("mean")][5]
# Urbanicity dummies appear in this (non-sequential) order in the summary.
b_urb_code_20134<- fit_summary.lbwb2$summary[,c("mean")][6]
b_urb_code_20136<- fit_summary.lbwb2$summary[,c("mean")][7]
b_urb_code_20132<- fit_summary.lbwb2$summary[,c("mean")][8]
b_urb_code_20135<- fit_summary.lbwb2$summary[,c("mean")][9]
b_urb_code_20131<- fit_summary.lbwb2$summary[,c("mean")][10]
b_perc_female<- fit_summary.lbwb2$summary[,c("mean")][11]
b_perc_under_18<- fit_summary.lbwb2$summary[,c("mean")][12]
b_perc_over_65<- fit_summary.lbwb2$summary[,c("mean")][13]
post_mean_state <- fit_summary.lbwb2$summary[,c("mean")][16:66]
post_mean_counties <- fit_summary.lbwb2$summary[,c("mean")][67:3207]
row_name_counties = names(post_mean_counties)
row_values_counties = unname(post_mean_counties)
counties = str_extract(row_name_counties, "(?<=_)([^_]+)(?=,)")
counties = gsub('\\.', ' ', counties)
state = str_extract(row_name_counties, '(?<=\\[)(.*?)(?=\\_)') #"(?<=\\[)([^\\[]*)(?=_)")
intercept_col = rep(intercept, length(row_name_counties))
low_bwb.2.df.counties<- data.frame(state, counties, row_name_counties, row_values_counties, intercept_col,
b_perc_AfAm, b_perc_As, b_perc_AmIn_AlNa, b_perc_Hisp, b_perc_female,
b_perc_under_18, b_perc_over_65)
# Attach urbanicity coefficient (code 3 is the reference, NA -> 0 below).
# NOTE(review): elementwise comparison against df$County assumes identical
# length and row order -- confirm.
low_bwb.2.df.counties$b_urb[low_bwb.2.df.counties$counties == df$County & df$urb_code_2013 == "1"] <- b_urb_code_20131
low_bwb.2.df.counties$b_urb[low_bwb.2.df.counties$counties == df$County & df$urb_code_2013 == "2"] <- b_urb_code_20132
low_bwb.2.df.counties$b_urb[low_bwb.2.df.counties$counties == df$County & df$urb_code_2013 == "4"] <- b_urb_code_20134
low_bwb.2.df.counties$b_urb[low_bwb.2.df.counties$counties == df$County & df$urb_code_2013 == "5"] <- b_urb_code_20135
low_bwb.2.df.counties$b_urb[low_bwb.2.df.counties$counties == df$County & df$urb_code_2013 == "6"] <- b_urb_code_20136
low_bwb.2.df.counties$b_urb[is.na(low_bwb.2.df.counties$b_urb)] <- 0
print(dim(low_bwb.2.df.counties))
row_name_state = names(post_mean_state)
row_values_state = unname(post_mean_state)
state = str_extract(row_name_state, "(?<=\\[)(.*?)(?=\\,)")
intercept_col = rep(intercept, length(row_name_state))
low_bwb.2.df.states<- data.frame(state, row_name_state, row_values_state, intercept_col)
print(dim(low_bwb.2.df.states))
low_bwb.2.df = merge(low_bwb.2.df.counties, low_bwb.2.df.states,c("state","intercept_col")) #by="state")
print(dim(low_bwb.2.df))
# Clear the scratch variables used for the inline extraction.
rm(post_mean_counties, row_values_counties, state, intercept_col, low_bwb.2.df.counties,
row_name_state, row_values_state, low_bwb.2.df.states, b_perc_AfAm, b_perc_As,
b_perc_AmIn_AlNa, b_perc_Hisp, b_perc_female, b_perc_under_18, b_perc_over_65)
low_bwb.2.df.summed = low_bwb.2.df[,c("state", "row_name_counties")]
# Linear predictor on the logit scale: intercept + fixed effects + state and
# county random effects.  Computed once so the 11-term sum is written (and
# audited) in a single place.
low_bwb.2.lp = low_bwb.2.df$intercept_col +
low_bwb.2.df$b_perc_AfAm +
low_bwb.2.df$b_perc_As +
low_bwb.2.df$b_perc_AmIn_AlNa +
low_bwb.2.df$b_perc_Hisp +
low_bwb.2.df$b_urb +
low_bwb.2.df$b_perc_female +
low_bwb.2.df$b_perc_under_18 +
low_bwb.2.df$b_perc_over_65 +
low_bwb.2.df$row_values_state +
low_bwb.2.df$row_values_counties
# Inverse-logit via plogis(): numerically stable equivalent of
# exp(x) / (1 + exp(x)), replacing the previous duplicated expression.
low_bwb.2.df.summed$summed = plogis(low_bwb.2.lp)
# rank.low_bwb.2 = low_bwb.2.df.summed %>%
# group_by(state) %>%
# mutate(my_ranks = order(order(summed, row_name_counties, decreasing=TRUE)))
#
# rank.low_bwb.2 = rank.low_bwb.2[with(rank.low_bwb.2, order(row_name_counties)), ]
# ############################################################################################################################
# #################################################### poor_health_perc 1 ###################################
# Intercept-only hierarchical logistic model for the estimated count of
# respondents reporting poor health, fit on each mice imputation.
poor_health_perc.1.prior <- c(
  prior(normal(1,1), class = Intercept)
)
poor_health_perc.bayes.1 = brm_multiple(poor_health_estimate ~ (1|State/County), data=imputed_Data,
                                        family = binomial(link = "logit"), prior=poor_health_perc.1.prior,
                                        backend = "rstan", silent = 0, iter=4000)
# poor_health_perc.1.prior <- c(
#   prior(beta(2, 2), class = Intercept)
# )
# poor_health_num = round(poor_health_sample_size * poor_health_percent)
# poor_health_perc.bayes.1 = brm_multiple(poor_health_num ~ (1|State/County), data=imputed_Data,
#                                      family = binomial(link = "logit"), prior=poor_health_perc.1.prior,
#                                      backend = "rstan", silent = 0, iter=4000)
# Checkpoint the (long-running) fit so the script can resume here.
save.image('CE_project.RData')
load('CE_project.RData')
launch_shinystan(poor_health_perc.bayes.1)
# list_of_draws <- extract(testing$fit)
fit_summary.php1 = summary(poor_health_perc.bayes.1$fit)
print(dim(fit_summary.php1$summary))
# Hard-coded summary-row offsets: 55:3195 = county effects, 4:54 = state
# effects, 1 = intercept.  NOTE(review): specific to this model/data size.
post_mean_counties <- fit_summary.php1$summary[,c("mean")][55:3195]
post_mean_state <- fit_summary.php1$summary[,c("mean")][4:54]
intercept<- fit_summary.php1$summary[,c("mean")][1]
row_name_counties = names(post_mean_counties)
row_values_counties = unname(post_mean_counties)
# State name sits between "[" and the first "_" in the county parameter name.
state = str_extract(row_name_counties, '(?<=\\[)(.*?)(?=\\_)') #"(?<=\\[)([^\\[]*)(?=_)")
intercept_col = rep(intercept, length(row_name_counties))
poor_health_perc.1.df.counties<- data.frame(state, row_name_counties, row_values_counties, intercept_col)
print(unique(state))
print(dim(poor_health_perc.1.df.counties))
row_name_state = names(post_mean_state)
row_values_state = unname(post_mean_state)
state = str_extract(row_name_state, "(?<=\\[)(.*?)(?=\\,)")
intercept_col = rep(intercept, length(row_name_state))
poor_health_perc.1.df.states<- data.frame(state, row_name_state, row_values_state, intercept_col)
print(unique(row_name_state))
print(unique(state))
print(dim(poor_health_perc.1.df.states))
poor_health_perc.1.df = merge(poor_health_perc.1.df.counties, poor_health_perc.1.df.states,c("state","intercept_col"))
print(dim(poor_health_perc.1.df))
rm(post_mean_counties, row_values_counties, state, intercept_col, poor_health_perc.1.df.counties,
   row_name_state, row_values_state, poor_health_perc.1.df.states)
# Inverse-logit of intercept + state effect + county effect = fitted
# poor-health probability per county.
poor_health_perc.1.df.summed = poor_health_perc.1.df[,c("state", "row_name_counties")]
poor_health_perc.1.df.summed$summed = exp(poor_health_perc.1.df$intercept_col +poor_health_perc.1.df$row_values_state +poor_health_perc.1.df$row_values_counties)/
  (1+exp(poor_health_perc.1.df$intercept_col +poor_health_perc.1.df$row_values_state +poor_health_perc.1.df$row_values_counties))
#################################################### poor_health_perc 2 ####################################
# Covariate-adjusted hierarchical logistic model for poor-health counts.
# NOTE(review): a beta(2,2) prior on the (logit-scale) intercept constrains it
# to (0, 1) -- confirm this is intentional rather than a carry-over from a
# probability-scale parameterization.
poor_health_perc.2.prior <- c(
  prior(beta(2, 2), class = Intercept),
  prior(normal(0, 1), class = b)
)
poor_health_perc.bayes.2 = brm_multiple(poor_health_estimate ~ perc_AfAm + perc_As + perc_AmIn_AlNa +
                                          perc_Hisp + factor(urb_code_2013) + perc_female + perc_under_18 +
                                          perc_over_65 + (1|State/County), data=imputed_Data,
                                        family = binomial(link = "logit"), prior=poor_health_perc.2.prior,
                                        backend = "rstan", silent = 0, iter=4000)
# Checkpoint the fit before interactive inspection.
save.image('CE_project.RData')
load('CE_project.RData')
launch_shinystan(poor_health_perc.bayes.2)
fit_summary.php2 = summary(poor_health_perc.bayes.2$fit)
print(dim(fit_summary.php2$summary))
# Fixed-effect posterior means by summary-row position.
# NOTE(review): the urb_code coefficient order (4, 6, 2, 5, 1) is taken from
# this particular fit's parameter ordering -- verify against the summary's
# row names before reuse.
intercept<- fit_summary.php2$summary[,c("mean")][1]
b_perc_AfAm<- fit_summary.php2$summary[,c("mean")][2]
b_perc_As<- fit_summary.php2$summary[,c("mean")][3]
b_perc_AmIn_AlNa<- fit_summary.php2$summary[,c("mean")][4]
b_perc_Hisp<- fit_summary.php2$summary[,c("mean")][5]
b_urb_code_20134<- fit_summary.php2$summary[,c("mean")][6]
b_urb_code_20136<- fit_summary.php2$summary[,c("mean")][7]
b_urb_code_20132<- fit_summary.php2$summary[,c("mean")][8]
b_urb_code_20135<- fit_summary.php2$summary[,c("mean")][9]
b_urb_code_20131<- fit_summary.php2$summary[,c("mean")][10]
b_perc_female<- fit_summary.php2$summary[,c("mean")][11]
b_perc_under_18<- fit_summary.php2$summary[,c("mean")][12]
b_perc_over_65<- fit_summary.php2$summary[,c("mean")][13]
# Random-effect rows: 16:66 = states, 67:3207 = counties (model-specific).
post_mean_state <- fit_summary.php2$summary[,c("mean")][16:66]
post_mean_counties <- fit_summary.php2$summary[,c("mean")][67:3207]
row_name_counties = names(post_mean_counties)
row_values_counties = unname(post_mean_counties)
# County name sits between "_" and ","; dots were substituted for spaces.
counties = str_extract(row_name_counties, "(?<=_)([^_]+)(?=,)")
counties = gsub('\\.', ' ', counties)
state = str_extract(row_name_counties, '(?<=\\[)(.*?)(?=\\_)') #"(?<=\\[)([^\\[]*)(?=_)")
intercept_col = rep(intercept, length(row_name_counties))
poor_health_perc.2.df.counties<- data.frame(state, counties, row_name_counties, row_values_counties, intercept_col,
                                            b_perc_AfAm, b_perc_As, b_perc_AmIn_AlNa, b_perc_Hisp, b_perc_female,
                                            b_perc_under_18, b_perc_over_65)
# Per-county urbanization coefficient; NAs (reference level / unmatched rows)
# are zeroed.  NOTE(review): relies on row alignment with the global `df`.
poor_health_perc.2.df.counties$b_urb[poor_health_perc.2.df.counties$counties == df$County & df$urb_code_2013 == "1"] <- b_urb_code_20131
poor_health_perc.2.df.counties$b_urb[poor_health_perc.2.df.counties$counties == df$County & df$urb_code_2013 == "2"] <- b_urb_code_20132
poor_health_perc.2.df.counties$b_urb[poor_health_perc.2.df.counties$counties == df$County & df$urb_code_2013 == "4"] <- b_urb_code_20134
poor_health_perc.2.df.counties$b_urb[poor_health_perc.2.df.counties$counties == df$County & df$urb_code_2013 == "5"] <- b_urb_code_20135
poor_health_perc.2.df.counties$b_urb[poor_health_perc.2.df.counties$counties == df$County & df$urb_code_2013 == "6"] <- b_urb_code_20136
poor_health_perc.2.df.counties$b_urb[is.na(poor_health_perc.2.df.counties$b_urb)] <- 0
print(dim(poor_health_perc.2.df.counties))
row_name_state = names(post_mean_state)
row_values_state = unname(post_mean_state)
state = str_extract(row_name_state, "(?<=\\[)(.*?)(?=\\,)")
intercept_col = rep(intercept, length(row_name_state))
poor_health_perc.2.df.states<- data.frame(state, row_name_state, row_values_state, intercept_col)
print(dim(poor_health_perc.2.df.states))
poor_health_perc.2.df = merge(poor_health_perc.2.df.counties, poor_health_perc.2.df.states,c("state","intercept_col")) #by="state")
print(dim(poor_health_perc.2.df))
rm(post_mean_counties, row_values_counties, state, intercept_col, poor_health_perc.2.df.counties,
   row_name_state, row_values_state, poor_health_perc.2.df.states, b_perc_AfAm, b_perc_As,
   b_perc_AmIn_AlNa, b_perc_Hisp, b_perc_female, b_perc_under_18, b_perc_over_65)
# Inverse-logit of the full linear predictor = fitted probability per county.
poor_health_perc.2.df.summed = poor_health_perc.2.df[,c("state", "row_name_counties")]
poor_health_perc.2.df.summed$summed = exp(poor_health_perc.2.df$intercept_col +
                                            poor_health_perc.2.df$b_perc_AfAm +
                                            poor_health_perc.2.df$b_perc_As +
                                            poor_health_perc.2.df$b_perc_AmIn_AlNa +
                                            poor_health_perc.2.df$b_perc_Hisp+
                                            poor_health_perc.2.df$b_urb+
                                            poor_health_perc.2.df$b_perc_female+
                                            poor_health_perc.2.df$b_perc_under_18+
                                            poor_health_perc.2.df$b_perc_over_65+
                                            poor_health_perc.2.df$row_values_state+
                                            poor_health_perc.2.df$row_values_counties)/
  (1+exp(poor_health_perc.2.df$intercept_col +
           poor_health_perc.2.df$b_perc_AfAm +
           poor_health_perc.2.df$b_perc_As +
           poor_health_perc.2.df$b_perc_AmIn_AlNa +
           poor_health_perc.2.df$b_perc_Hisp+
           poor_health_perc.2.df$b_urb+
           poor_health_perc.2.df$b_perc_female+
           poor_health_perc.2.df$b_perc_under_18+
           poor_health_perc.2.df$b_perc_over_65+
           poor_health_perc.2.df$row_values_state+
           poor_health_perc.2.df$row_values_counties))
# poor_health_perc.2.df.summed = poor_health_perc.2.df[,c("state", "row_name_counties")]
# poor_health_perc.2.df.summed$summed = exp(poor_health_perc.2.df$intercept_col + poor_health_perc.2.df$row_values_state + poor_health_perc.2.df$row_values_counties)/
#   (1+exp(poor_health_perc.2.df$intercept_col + poor_health_perc.2.df$row_values_state + poor_health_perc.2.df$row_values_counties))
#
# rank.poor_health_perc.2 = poor_health_perc.2.df.summed %>%
#   group_by(state) %>%
#   mutate(my_ranks = order(order(summed, row_name_counties, decreasing=TRUE)))
#
# rank.poor_health_perc.2[with(rank.poor_health_perc.2, order(row_name_counties)), ]
# Morbidity Rank 1 ----------------------------------------------------------
# Combine the four model-1 morbidity outcomes into one score per county,
# rank counties within each state, and compare to the published ranks.
# NOTE(review): this assumes the four *.1.df.summed frames are row-aligned
# (same county order) -- confirm the upstream merges preserve ordering.
all.df.summed.1 <- poor_phys_avg.1.df.summed[, c("state", "row_name_counties")]
# Element-wise mean of the four outcome scores.  (The original wrapped the
# sum in mean(), which collapsed it to a single scalar, so every county got
# the same score and the within-state ranking degenerated to name order.)
all.df.summed.1$summed <- (poor_phys_avg.1.df.summed$summed +
                             poor_ment_avg.1.df.summed$summed +
                             low_bwb.1.df.summed$summed +
                             poor_health_perc.1.df.summed$summed) / 4
# Rank within state; largest combined score = rank 1, ties broken by name.
rank.morbidity.1 = all.df.summed.1 %>%
  group_by(state) %>%
  mutate(my_ranks = order(order(summed, row_name_counties, decreasing=TRUE)))
rank.morbidity.1 = rank.morbidity.1[with(rank.morbidity.1, order(row_name_counties)), ]
rank.morbidity.1$true_ranks = df$Morbidity_Rank
# Per-state mean squared error loss between estimated and true ranks.
sel.morbidity.1 <-rank.morbidity.1 %>%
  group_by(state) %>%
  do(data.frame(standard.error.loss.morbidity.1=sel(.)))
sel.morbidity.1 = sel.morbidity.1[with(sel.morbidity.1, order(standard.error.loss.morbidity.1)), ]
sel.morbidity.1
g3 <- ggplot(data = sel.morbidity.1, mapping = aes(x = as.factor(state), y = standard.error.loss.morbidity.1)) +
  geom_bar(stat = "identity") +
  labs(x = "state") +
  ggtitle("Morbidity Rank Mean Squared Error Loss Model 1") +
  xlab("") +
  ylab("Mean Squared Error Loss") +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1),
        axis.text=element_text(size=12),
        axis.title=element_text(size=14),
        plot.title=element_text(size=20))
print(g3)
# Morbidity Rank 2 --------------------------------------------------------
# Same combination/ranking/loss pipeline as Morbidity Rank 1, but for the
# covariate-adjusted (model 2) fits.
# NOTE(review): assumes the four *.2.df.summed frames are row-aligned.
all.df.summed.2 <- poor_phys_avg.2.df.summed[, c("state", "row_name_counties")]
# Element-wise mean of the four outcome scores.  (The original wrapped the
# sum in mean(), which collapsed it to a single scalar and broke the ranking.)
all.df.summed.2$summed <- (poor_phys_avg.2.df.summed$summed +
                             poor_ment_avg.2.df.summed$summed +
                             low_bwb.2.df.summed$summed +
                             poor_health_perc.2.df.summed$summed) / 4
# Rank within state; largest combined score = rank 1, ties broken by name.
rank.morbidity.2 = all.df.summed.2 %>%
  group_by(state) %>%
  mutate(my_ranks = order(order(summed, row_name_counties, decreasing=TRUE)))
rank.morbidity.2 = rank.morbidity.2[with(rank.morbidity.2, order(row_name_counties)), ]
rank.morbidity.2$true_ranks = df$Morbidity_Rank
# Per-state mean squared error loss between estimated and true ranks.
sel.morbidity.2 <-rank.morbidity.2 %>%
  group_by(state) %>%
  do(data.frame(standard.error.loss.morbidity.2=sel(.)))
sel.morbidity.2 = sel.morbidity.2[with(sel.morbidity.2, order(standard.error.loss.morbidity.2)), ]
sel.morbidity.2
g4 <- ggplot(data = sel.morbidity.2, mapping = aes(x = as.factor(state), y = standard.error.loss.morbidity.2)) +
  geom_bar(stat = "identity") +
  labs(x = "state") +
  ggtitle("Morbidity Rank Mean Squared Error Loss Model 2") +
  xlab("") +
  ylab("Mean Squared Error Loss") +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1),
        axis.text=element_text(size=12),
        axis.title=element_text(size=14),
        plot.title=element_text(size=20))
print(g4)
# Plotting model 1 vs 2----------------------------------------------------------------
# Scatter the per-state losses of model 2 against model 1 (mortality, then
# morbidity), each with an OLS trend line.
plot(sel.prem_death.2$standard.error.loss.mortality.2 ~sel.prem_death.1$standard.error.loss.mortality.1,
     main ="Relationship Between Model 1 and Model 2 Mortality Mean Squared Error Loss",
     xlab = "Mean Squared Error Loss Model 1",
     ylab = "Mean Squared Error Loss Model 2", cex=1.5)
abline(lm(sel.prem_death.2$standard.error.loss.mortality.2 ~ sel.prem_death.1$standard.error.loss.mortality.1))
plot(sel.morbidity.2$standard.error.loss.morbidity.2 ~ sel.morbidity.1$standard.error.loss.morbidity.1,
     main ="Relationship Between Model 1 and Model 2 Morbidity Mean Squared Error Loss",
     xlab = "Mean Squared Error Loss Model 1",
     ylab = "Mean Squared Error Loss Model 2", cex=1.5)
abline(lm(sel.morbidity.2$standard.error.loss.morbidity.2 ~ sel.morbidity.1$standard.error.loss.morbidity.1))
# Plotting model vs true --------------------------------------------------
# 2x2 panel: estimated vs published county ranks for each model/outcome,
# with an OLS trend line per panel.
# NOTE(review): axis labels appear swapped relative to the plotted data --
# my_ranks is on x but labelled "True Rank"; confirm intent.
par(mfrow=c(2,2))
plot(rank.premature_deaths.1$my_ranks, rank.premature_deaths.1$true_ranks,
     main ="Mortality Model 1 True Rank vs Estimated Rank",
     xlab = "True Rank",
     ylab = "Estimated Rank")
abline(lm(rank.premature_deaths.1$true_ranks ~ rank.premature_deaths.1$my_ranks))
plot(rank.premature_deaths.2$my_ranks, rank.premature_deaths.2$true_ranks,
     main ="Mortality Model 2 True Rank vs Estimated Rank",
     xlab = "True Rank",
     ylab = "Estimated Rank")
abline(lm(rank.premature_deaths.2$true_ranks ~ rank.premature_deaths.2$my_ranks))
plot(rank.morbidity.1$my_ranks, rank.morbidity.1$true_ranks,
     main ="Morbidity Model 1 True Rank vs Estimated Rank",
     xlab = "True Rank",
     ylab = "Estimated Rank")
abline(lm(rank.morbidity.1$true_ranks ~ rank.morbidity.1$my_ranks))
plot(rank.morbidity.2$my_ranks, rank.morbidity.2$true_ranks,
     main ="Morbidity Model 2 True Rank vs Estimated Rank",
     xlab = "True Rank",
     ylab = "Estimated Rank")
abline(lm(rank.morbidity.2$true_ranks ~ rank.morbidity.2$my_ranks))
# Plotting sel and pop ----------------------------------------------------
# Per-state loss vs state population, for each model/outcome.
# Each loss table is re-sorted by state so it aligns with the aggregate()
# output, which returns states in sorted order.
sel.prem_death.1 = sel.prem_death.1[with(sel.prem_death.1, order(state)), ]
sel.prem_death.1$population = aggregate(df$Population, by=list(State=df$State), FUN=sum)$x
plot(sel.prem_death.1$population, sel.prem_death.1$standard.error.loss.mortality.1,
     main ="Mortality Model 1 Mean Squared Error Loss vs State Population",
     xlab = "State Population",
     ylab = "Mortality Model 1 Mean Squared Error Loss")
abline(lm(sel.prem_death.1$standard.error.loss.mortality.1 ~ sel.prem_death.1$population))
sel.prem_death.2 = sel.prem_death.2[with(sel.prem_death.2, order(state)), ]
sel.prem_death.2$population = aggregate(df$Population, by=list(State=df$State), FUN=sum)$x
plot(sel.prem_death.2$population, sel.prem_death.2$standard.error.loss.mortality.2,
     main ="Mortality Model 2 Mean Squared Error Loss vs State Population",
     xlab = "State Population",
     ylab = "Mortality Model 2 Mean Squared Error Loss")
abline(lm(sel.prem_death.2$standard.error.loss.mortality.2 ~ sel.prem_death.2$population))
sel.morbidity.1 = sel.morbidity.1[with(sel.morbidity.1, order(state)), ]
sel.morbidity.1$population = aggregate(df$Population, by=list(State=df$State), FUN=sum)$x
plot(sel.morbidity.1$population, sel.morbidity.1$standard.error.loss.morbidity.1,
     main ="Morbidity Model 1 Mean Squared Error Loss vs State Population",
     xlab = "State Population",
     ylab = "Morbidity Model 1 Mean Squared Error Loss")
abline(lm(sel.morbidity.1$standard.error.loss.morbidity.1 ~ sel.morbidity.1$population))
sel.morbidity.2 = sel.morbidity.2[with(sel.morbidity.2, order(state)), ]
sel.morbidity.2$population = aggregate(df$Population, by=list(State=df$State), FUN=sum)$x
plot(sel.morbidity.2$population, sel.morbidity.2$standard.error.loss.morbidity.2,
     main ="Morbidity Model 2 Mean Squared Error Loss vs State Population",
     xlab = "State Population",
     ylab = "Morbidity Model 2 Mean Squared Error Loss")
abline(lm(sel.morbidity.2$standard.error.loss.morbidity.2 ~ sel.morbidity.2$population))
# Plotting sel vs number of counties --------------------------------------
# Per-state loss vs the number of counties in the state, for each
# model/outcome.  Same sort-then-aggregate alignment as above.
par(mfrow=c(1,1))
sel.prem_death.1 = sel.prem_death.1[with(sel.prem_death.1, order(state)), ]
sel.prem_death.1$countycount = aggregate(df$County, by=list(State=df$State), FUN=length)$x
plot(sel.prem_death.1$countycount, sel.prem_death.1$standard.error.loss.mortality.1,
     main ="Mortality Model 1 Mean Squared Error Loss vs Number of Counties",
     xlab = "Number of Counties",
     ylab = "Mortality Model 1 Mean Squared Error Loss")
abline(lm(sel.prem_death.1$standard.error.loss.mortality.1 ~ sel.prem_death.1$countycount))
sel.prem_death.2 = sel.prem_death.2[with(sel.prem_death.2, order(state)), ]
sel.prem_death.2$countycount = aggregate(df$County, by=list(State=df$State), FUN=length)$x
plot(sel.prem_death.2$countycount, sel.prem_death.2$standard.error.loss.mortality.2,
     main ="Mortality Model 2 Mean Squared Error Loss vs Number of Counties",
     xlab = "Number of Counties",
     ylab = "Mortality Model 2 Mean Squared Error Loss")
abline(lm(sel.prem_death.2$standard.error.loss.mortality.2 ~ sel.prem_death.2$countycount))
sel.morbidity.1 = sel.morbidity.1[with(sel.morbidity.1, order(state)), ]
sel.morbidity.1$countycount = aggregate(df$County, by=list(State=df$State), FUN=length)$x
plot(sel.morbidity.1$countycount, sel.morbidity.1$standard.error.loss.morbidity.1,
     main ="Morbidity Model 1 Mean Squared Error Loss vs Number of Counties",
     xlab = "Number of Counties",
     ylab = "Morbidity Model 1 Mean Squared Error Loss")
abline(lm(sel.morbidity.1$standard.error.loss.morbidity.1 ~ sel.morbidity.1$countycount))
sel.morbidity.2 = sel.morbidity.2[with(sel.morbidity.2, order(state)), ]
sel.morbidity.2$countycount = aggregate(df$County, by=list(State=df$State), FUN=length)$x
plot(sel.morbidity.2$countycount, sel.morbidity.2$standard.error.loss.morbidity.2,
     main ="Morbidity Model 2 Mean Squared Error Loss vs Number of Counties",
     xlab = "Number of Counties",
     ylab = "Morbidity Model 2 Mean Squared Error Loss")
abline(lm(sel.morbidity.2$standard.error.loss.morbidity.2 ~ sel.morbidity.2$countycount))
|
/RCode/ce_ranking.R
|
no_license
|
klepikhina/klepikhina-masters-ce
|
R
| false
| false
| 57,534
|
r
|
# Resume from the saved workspace (fits are expensive to recompute).
load('CE_project.RData')
## Mixed effects
library(dplyr)
# Set up data -------------------------------------------------------------
# library(lme4)
library(hglm)
library(readxl) # install.packages("readxl") or install.packages("tidyverse")
# NOTE(review): loading plyr after dplyr masks dplyr verbs (e.g. summarise,
# mutate); dplyr is re-attached below, which restores its versions.
library(plyr)
library(tibble)
library(data.table)
library(dplyr)
# Load the state-name <-> abbreviation lookup and trim stray whitespace from
# every character column.
state_name_abbr = read.table(file='~/Documents/CE/klepikhina-masters-ce/data/state_to_abbr.csv',header = TRUE, sep=',')
cols_to_be_rectified <- names(state_name_abbr)[vapply(state_name_abbr, is.character, logical(1))]
state_name_abbr[,cols_to_be_rectified] <- lapply(state_name_abbr[,cols_to_be_rectified], trimws)
# Per-county urbanization classification; attach full state names, drop the
# columns we do not use, and standardize the remaining names.
urb = read.table(file='~/Documents/CE/klepikhina-masters-ce/data/urbanization_classification.csv',header = TRUE, sep=',')
urb = left_join(urb, state_name_abbr, by = "State.Abr.")
drops = c("State.Abr.", "CBSA.title", "CBSA.2012.pop", "County.2012.pop", "X1990.based.code", "X")
urb = urb[ , !(names(urb) %in% drops)]
colnames(urb) <- c("FIPS", "County", "urb_code_2013", "urb_code_2006", "State")
# Strip a trailing " County" so names match the health-rankings tables.
urb$County = gsub("(.*?)\\sCounty$", "\\1", urb$County)
# Convert the urbanization codes to factors.  (The original used
# sapply(x, as.factor), which applies as.factor element-by-element and
# simplifies each one-level factor back to the integer 1, collapsing every
# code in the column; as.factor on the whole vector keeps the levels.)
urb[,3] <- as.factor(urb[,3])
urb[,4] <- as.factor(urb[,4])
# 2013 County Health Rankings workbook: sheet 3 holds the outcome ranks.
h_ranks = as.data.table(read_excel(path = "~/Documents/CE/klepikhina-masters-ce/data/county_health_rankings_2013.xls", sheet=3))
# Promote the first row of `df` to be its column names and drop that row.
# Used for spreadsheets whose real header sits below a grouping row.
header.true <- function(df) {
  hdr <- as.character(unlist(df[1, ]))
  body <- df[-1, ]
  names(body) <- hdr
  body
}
# Promote the real header row, coerce FIPS to integer, rename, drop rows
# without a county (state-level summary rows), and make ranks numeric.
h_ranks = header.true(h_ranks)
h_ranks[, 1] <- sapply(h_ranks[, 1], as.integer)
colnames(h_ranks) <- c("FIPS", "State", "County", "Mortality_Z_Score", "Mortality_Rank", "Morbidity_Z_Score", "Morbidity_Rank", "Health_Behaviors_Z_Score", "Health_Behaviors_Rank", "Clinical_Care_Z_Score", "Clinical_Care_Rank", "Soc_Econ_Factors_Z_Score", "Soc_Econ_Factors_Rank", "Physical_Env_Z_Score", "Physical_Env_Rank")
h_ranks=h_ranks[!is.na(h_ranks$County),]
h_ranks[,4:15] <- lapply(h_ranks[,4:15],as.numeric)
# Health factors (sheet 4): same header promotion, then drop unused columns
# by position.  NOTE(review): the index lists below are tied to the exact
# 2013 workbook layout -- re-derive them if the source file changes.
h_factors = as.data.table(read_excel(path = "~/Documents/CE/klepikhina-masters-ce/data/county_health_rankings_2013.xls", sheet=4))
h_factors = header.true(h_factors)
h_factors[, 1] <- sapply(h_factors[, 1], as.integer)
h_factors=h_factors[!is.na(h_factors$County),]
h_factors = h_factors[,!c(4:30)]
h_factors = h_factors[,!c(6:8,10:12,14:16,19:21,24:26,29,33:35,38:40,44,51,54:56,59:61,64:66,68,72:74,78,81:83,86:88,91:94,97,99,102,105,108,111)]
h_factors = h_factors[,!c(20,21,23:27)]
h_factors = h_factors[,!c(24,29,46)]
# h_factors = h_factors[-ix, ]#subset(h_factors, select=-c("PCP Rate","PCP Ratio"))
colnames(h_factors) <- c("FIPS", "State", "County",
                         "Smoker_Sample_Size", "Perc_Smoker", "Perc_Obese",
                         "Perc_Phys_Inactive", "Excessive_Drinking_Sample_Size", "Perc_Excessive_Drinking",
                         "MV_Deaths", "MV_Mortality_Rate", "Chlamydia_Cases",
                         "Chlamydia_Rate", "Teen_Births", "Teen_Pop",
                         "Teen_Birth_Rate", "Uninsured", "Perc_Uninsured",
                         "Num_Physicians", "Num_Dentists", "Num_Medicare_Enrolled_Amb_Care",
                         "Amb_Care_Rate", "Num_Diabetics", "Num_Medicare_Enrolled_Mammography",
                         "Perc_Mammography", "Perc_HS_Grad", "Num_Some_College",
                         "Perc_Some_College", "Num_Unemployed", "Labor_Force",
                         "Perc_Unemployed", "Num_Children_Poverty", "Perc_Children_Poverty",
                         "Inadeq_Social_Support_Sample_Size", "Perc_No_Social_Support",
                         "Num_Single_Parent_House",
                         "Num_Households", "Annual_Violent_Crimes", "Violent_Crime_Rate",
                         "Avg_Daily_Particulate_Matter", "Perc_Pop_In_Violation_Drinking_Water_Safety",
                         "Num_Pop_In_Violation_Drinking_Water_Safety",
                         "Num_Rec_Fac", "Num_Limited_Access_To_Healthy_Food",
                         "Perc_Limited_Access_To_Healthy_Food", "Num_Fast_Food", "Perc_Fast_Food")
h_factors[,4:47] <- lapply(h_factors[,4:47],as.numeric)
# Demographics (sheet 5): keep the first 15 columns, promote the header row,
# rename, drop state-level rows (no County), and coerce types.
demographics = as.data.table(read_excel(path = "~/Documents/CE/klepikhina-masters-ce/data/county_health_rankings_2013.xls", sheet=5))[, (16:61) := NULL]
demographics = header.true(demographics)
colnames(demographics) <- c("FIPS", "State", "County", "Population", "perc_under_18", "perc_over_65", "perc_AfAm", "perc_AmIn_AlNa", "perc_As", "perc_NaHI_PaIs", "perc_Hisp", "perc_NonHispWh", "non_profi_en", "perc_non_profi_en", "perc_female")
demographics=demographics[!is.na(demographics$County),]
demographics[, 1] <- sapply(demographics[, 1], as.integer)
demographics[,4:15] <- lapply(demographics[,4:15],as.numeric)
# Health outcomes (sheet 4, first 30 columns): promote the real header row
# once, as done for h_factors from the same sheet.  (The original called
# header.true() twice, which promoted the first real data row to column
# names and silently dropped it -- the names are overwritten just below, so
# the net effect was losing one county of data.)
h_outcomes = as.data.table(read_excel(path = "~/Documents/CE/klepikhina-masters-ce/data/county_health_rankings_2013.xls", sheet=4))[, (31:138) := NULL]
h_outcomes = header.true(h_outcomes)
h_outcomes[, 1] <- sapply(h_outcomes[, 1], as.integer)
colnames(h_outcomes) <- c("FIPS", "State", "County", "premature_deaths", "premature_death_YPLL_rate", "premature_death_YPLL_rate_CI_low", "premature_death_YPLL_rate_CI_high", "premature_death_YPLL_rate_Z_score", "poor_health_sample_size", "poor_health_perc", "poor_health_CI_low", "poor_health_CI_high", "poor_health_Z_score", "poor_phys_health_sample_size", "poor_phys_health_avg_over_30_days", "poor_phys_health_avg_over_30_days_CI_low", "poor_phys_health_avg_over_30_days_CI_high", "poor_phys_health_avg_over_30_days_Z_score", "poor_ment_health_sample_size", "poor_ment_health_avg_over_30_days", "poor_ment_health_avg_over_30_days_CI_low", "poor_ment_health_avg_over_30_days_CI_high", "poor_ment_health_avg_over_30_days_Z_score", "unreliable_data", "low_birthweight_births", "live_births", "low_birthweight_perc", "low_birthweight_perc_CI_low", "low_birthweight_perc_CI_high", "low_birthweight_perc_Z_score")
h_outcomes=h_outcomes[!is.na(h_outcomes$County),]
h_outcomes[,4:23] <- lapply(h_outcomes[,4:23],as.numeric)
# Rows the source flags with "x" are marked unreliable; store as a 0/1 factor.
h_outcomes$unreliable_data <- ifelse(grepl("x", h_outcomes$unreliable_data), 1, 0)
# (The original used sapply(x, as.factor) here, which collapses the whole
# column to 1s; as.factor on the vector preserves the 0/1 levels.)
h_outcomes$unreliable_data <- as.factor(h_outcomes$unreliable_data)
h_outcomes[,25:30] <- lapply(h_outcomes[,25:30],as.numeric)
# Assemble the master county table: left-join everything onto h_ranks by
# (FIPS, County, State).
merge_cols <- c("FIPS", "County", "State")
df <- merge(h_ranks, h_outcomes, by = merge_cols, all.x = TRUE)
df <- merge(df, demographics, by = merge_cols, all.x = TRUE)
df <- merge(df, urb, by = merge_cols, all.x = TRUE)
df <- merge(df, h_factors, by = merge_cols, all.x = TRUE)
# NOTE(review): this copies demographics' FIPS column into df positionally;
# merge() sorts by key, so demographics' row order may no longer match df --
# verify alignment before trusting df$FIPS.
df[,1] <- sapply(demographics[,1],as.factor)
df$urb_code_2013 <- factor(df$urb_code_2013)
# Approximate count of poor-health respondents: percent x (sample size / 100).
df$poor_health_estimate = round(df$poor_health_perc*(df$poor_health_sample_size*0.01),0)
tmp = df[complete.cases(df), ] # complete dataset -- no NAs
# Imports For Bayes -------------------------------------------------------
library(rstanarm)
library(mice)
# md.pattern(df)
library(VIM)
library(broom.mixed)
library(shinystan)
library(brms)
library(dplyr)
library(rstan)
library(stringr)
library(BayesianFROC)
library(rstan)
# Impute Data -------------------------------------------------------------
# Drop the raw poor-health columns (poor_health_estimate, derived above,
# is the modeled outcome), then impute with CART.
# NOTE(review): m=3 imputations with maxit=1 iteration is very light for
# mice -- confirm this is a deliberate runtime trade-off.
df = df[, !(names(df) %in% c("poor_health_perc", "poor_health_sample_size"))]
imputed_Data <- mice(df, m=3, maxit = 1, method = 'cart', seed = 500)
# Create Summary Table 1 ----------------------------------------------------
# Build a per-county table of posterior means from a brms/rstan fit summary
# for the intercept-only models (outcome ~ (1|State/County)): one row per
# county, carrying the population intercept, its state's random effect, and
# its own county random effect.
get_df <- function(fit_summary) {
  # Hard-coded summary-row offsets: 55:3195 = county-within-state effects,
  # 4:54 = state effects, 1 = intercept.  NOTE(review): these offsets are
  # specific to this model/data size and break if either changes.
  post_mean_counties <- fit_summary[,c("mean")][55:3195]
  post_mean_state <- fit_summary[,c("mean")][4:54]
  intercept<- fit_summary[,c("mean")][1]
  print(dim(fit_summary))
  row_name_counties = names(post_mean_counties)
  row_values_counties = unname(post_mean_counties)
  # State name appears between "[" and the first "_" in the county-level
  # parameter names.
  state = str_extract(row_name_counties, '(?<=\\[)(.*?)(?=\\_)') #"(?<=\\[)([^\\[]*)(?=_)")
  intercept_col = rep(intercept, length(row_name_counties))
  class.df.counties<- data.frame(state, row_name_counties, row_values_counties, intercept_col)
  print(tail(unique(row_name_counties)))
  print(unique(state))
  print(dim(class.df.counties))
  row_name_state = names(post_mean_state)
  row_values_state = unname(post_mean_state)
  # State name appears between "[" and "," in the state-level names.
  state = str_extract(row_name_state, "(?<=\\[)(.*?)(?=\\,)")
  intercept_col = rep(intercept, length(row_name_state))
  class.df.states<- data.frame(state, row_name_state, row_values_state, intercept_col)
  print(unique(row_name_state))
  print(unique(state))
  print(dim(class.df.states))
  # Join so each county row carries both its state and county offsets.
  class.df = merge(class.df.counties, class.df.states,c("state","intercept_col")) #by="state")
  print(dim(class.df))
  return(class.df)
}
# Get Summary Table 2 -----------------------------------------------------
# Same idea as get_df(), but for the covariate-adjusted models: also pulls
# the fixed-effect coefficients (by summary-row position) and attaches the
# county-appropriate urbanization coefficient.
# NOTE(review): reads the global `df` for urb_code_2013 and relies on row
# alignment between the built table and `df` -- verify before reuse.
get_df2 <- function(fit_summary) {
  print(dim(fit_summary))
  # Fixed effects by position; urb_code coefficient order (4,6,2,5,1) is
  # taken from this fit's parameter ordering -- confirm against row names.
  intercept<- fit_summary[,c("mean")][1]
  b_perc_AfAm<- fit_summary[,c("mean")][2]
  b_perc_As<- fit_summary[,c("mean")][3]
  b_perc_AmIn_AlNa<- fit_summary[,c("mean")][4]
  b_perc_Hisp<- fit_summary[,c("mean")][5]
  b_urb_code_20134<- fit_summary[,c("mean")][6]
  b_urb_code_20136<- fit_summary[,c("mean")][7]
  b_urb_code_20132<- fit_summary[,c("mean")][8]
  b_urb_code_20135<- fit_summary[,c("mean")][9]
  b_urb_code_20131<- fit_summary[,c("mean")][10]
  b_perc_female<- fit_summary[,c("mean")][11]
  b_perc_under_18<- fit_summary[,c("mean")][12]
  b_perc_over_65<- fit_summary[,c("mean")][13]
  # sd_state <- fit_summary[,c("mean")][14]
  # sd_counties <- fit_summary[,c("mean")][15]
  # Random-effect rows: 16:66 = states, 67:3207 = counties (model-specific).
  post_mean_state <- fit_summary[,c("mean")][16:66]
  post_mean_counties <- fit_summary[,c("mean")][67:3207]
  row_name_counties = names(post_mean_counties)
  row_values_counties = unname(post_mean_counties)
  # County name sits between "_" and ","; dots were substituted for spaces.
  counties = str_extract(row_name_counties, "(?<=_)([^_]+)(?=,)")
  counties = gsub('\\.', ' ', counties)
  state = str_extract(row_name_counties, '(?<=\\[)(.*?)(?=\\_)') #"(?<=\\[)([^\\[]*)(?=_)")
  intercept_col = rep(intercept, length(row_name_counties))
  class.df.counties<- data.frame(state, counties, row_name_counties, row_values_counties, intercept_col,
                                 b_perc_AfAm, b_perc_As, b_perc_AmIn_AlNa, b_perc_Hisp, b_perc_female,
                                 b_perc_under_18, b_perc_over_65)
  # Assign the urbanization coefficient matching each county's 2013 code;
  # anything left NA (reference level / unmatched rows) contributes 0.
  class.df.counties$b_urb[class.df.counties$counties == df$County & df$urb_code_2013 == "1"] <- b_urb_code_20131
  class.df.counties$b_urb[class.df.counties$counties == df$County & df$urb_code_2013 == "2"] <- b_urb_code_20132
  class.df.counties$b_urb[class.df.counties$counties == df$County & df$urb_code_2013 == "4"] <- b_urb_code_20134
  class.df.counties$b_urb[class.df.counties$counties == df$County & df$urb_code_2013 == "5"] <- b_urb_code_20135
  class.df.counties$b_urb[class.df.counties$counties == df$County & df$urb_code_2013 == "6"] <- b_urb_code_20136
  class.df.counties$b_urb[is.na(class.df.counties$b_urb)] <- 0
  print(dim(class.df.counties))
  row_name_state = names(post_mean_state)
  row_values_state = unname(post_mean_state)
  state = str_extract(row_name_state, "(?<=\\[)(.*?)(?=\\,)")
  intercept_col = rep(intercept, length(row_name_state))
  class.df.states<- data.frame(state, row_name_state, row_values_state, intercept_col)
  print(dim(class.df.states))
  class.df = merge(class.df.counties, class.df.states,c("state","intercept_col")) #by="state")
  print(dim(class.df))
  return(class.df)
}
# SEL Ranking -------------------------------------------------------------
# Mean squared error loss between estimated and true ranks.
# Expects a data frame with columns `my_ranks` and `true_ranks`; rows with
# any NA are dropped before the loss is computed.
sel <- function(data) {
  complete <- na.omit(data)
  mean((complete$my_ranks - complete$true_ranks)^2)
}
# Checkpoint the prepared data before the long model fits.
save.image('CE_project.RData')
############################################################################################################################
#################################################### premature deaths 1 ####################################################
# Intercept-only hierarchical Poisson model for premature-death counts,
# fit on each mice imputation.
premature_deaths.1.prior <- c(
  prior(gamma(7.5, 1), class = Intercept)
)
premature_deaths.bayes.1 = brm_multiple(premature_deaths ~ (1|State/County), data=imputed_Data,
                                        family = poisson(link = "log"), prior=premature_deaths.1.prior,
                                        backend = "rstan", silent = 0, iter=4000)
save.image('CE_project.RData')
load('CE_project.RData')
launch_shinystan(premature_deaths.bayes.1)
# Extract posterior means into a per-county table (see get_df()).
fit_summary.pd1 <- summary(premature_deaths.bayes.1$fit)
premature_deaths.1.df = get_df(fit_summary.pd1$summary)
# exp() of intercept + state + county effects = fitted death count (log link).
premature_deaths.1.df.summed = premature_deaths.1.df[,c("state", "row_name_counties")]
premature_deaths.1.df.summed$summed = exp(
  premature_deaths.1.df$intercept_col +
    premature_deaths.1.df$row_values_state +
    premature_deaths.1.df$row_values_counties)
# Rank within state (largest fitted count = rank 1) and attach true ranks.
rank.premature_deaths.1 = premature_deaths.1.df.summed %>%
  group_by(state) %>%
  mutate(my_ranks = order(order(summed, row_name_counties, decreasing=TRUE)))
rank.premature_deaths.1 = rank.premature_deaths.1[with(rank.premature_deaths.1, order(row_name_counties)), ]
rank.premature_deaths.1$true_ranks = df$Mortality_Rank
# Per-state mean squared error loss between estimated and true ranks.
sel.prem_death.1 <-rank.premature_deaths.1 %>%
  group_by(state) %>%
  do(data.frame(standard.error.loss.mortality.1=sel(.)))
sel.prem_death.1 = sel.prem_death.1[with(sel.prem_death.1, order(standard.error.loss.mortality.1)), ]
sel.prem_death.1
g1 <- ggplot(data = sel.prem_death.1, mapping = aes(x = as.factor(state), y = standard.error.loss.mortality.1)) +
  geom_bar(stat = "identity") +
  labs(x = "state") +
  ggtitle("Mortality Rank Mean Squared Error Loss Model 1") +
  xlab("") +
  ylab("Mean Squared Error Loss") +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1),
        axis.text=element_text(size=12),
        axis.title=element_text(size=14),
        plot.title=element_text(size=20))
print(g1)
#################################################### premature deaths 2 ####################################################
# Covariate-adjusted hierarchical Poisson model for premature-death counts.
premature_deaths.2.prior <- c(
  prior(gamma(7.5, 1), class = Intercept),
  prior(normal(0, 10), class = b)
)
premature_deaths.bayes.2 = brm_multiple(premature_deaths ~ perc_AfAm + perc_As + perc_AmIn_AlNa +
                                          perc_Hisp + urb_code_2013 + perc_female + perc_under_18 +
                                          perc_over_65 + (1|State/County), data=imputed_Data,
                                        family = poisson(link = "log"), prior=premature_deaths.2.prior,
                                        backend = "rstan", silent = 0, iter=4000)
save.image('CE_project.RData')
load('CE_project.RData')
launch_shinystan(premature_deaths.bayes.2)
# Extract fixed and random effects into a per-county table (see get_df2()).
fit_summary.pd2 = summary(premature_deaths.bayes.2$fit)
premature_deaths.2.df = get_df2(fit_summary.pd2$summary)
# exp() of the full linear predictor = fitted death count (log link).
premature_deaths.2.df.summed = premature_deaths.2.df[,c("state", "row_name_counties")]
premature_deaths.2.df.summed$summed = exp(premature_deaths.2.df$intercept_col +
                                            premature_deaths.2.df$b_perc_AfAm +
                                            premature_deaths.2.df$b_perc_As +
                                            premature_deaths.2.df$b_perc_AmIn_AlNa +
                                            premature_deaths.2.df$b_perc_Hisp+
                                            premature_deaths.2.df$b_urb+
                                            premature_deaths.2.df$b_perc_female+
                                            premature_deaths.2.df$b_perc_under_18+
                                            premature_deaths.2.df$b_perc_over_65+
                                            premature_deaths.2.df$row_values_state+
                                            premature_deaths.2.df$row_values_counties)
# Rank within state and compare to the published mortality ranks.
rank.premature_deaths.2 = premature_deaths.2.df.summed %>%
  group_by(state) %>%
  mutate(my_ranks = order(order(summed, row_name_counties, decreasing=TRUE)))
rank.premature_deaths.2 = rank.premature_deaths.2[with(rank.premature_deaths.2, order(row_name_counties)), ]
rank.premature_deaths.2$true_ranks = df$Mortality_Rank
sel.prem_death.2 <-rank.premature_deaths.2 %>%
  group_by(state) %>%
  do(data.frame(standard.error.loss.mortality.2=sel(.)))
sel.prem_death.2 = sel.prem_death.2[with(sel.prem_death.2, order(standard.error.loss.mortality.2)), ]
sel.prem_death.2
g2 <- ggplot(data = sel.prem_death.2, mapping = aes(x = as.factor(state), y = standard.error.loss.mortality.2)) +
  geom_bar(stat = "identity") +
  labs(x = "state") +
  ggtitle("Mortality Rank Mean Squared Error Loss Model 2") +
  xlab("") +
  ylab("Mean Squared Error Loss") +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1),
        axis.text=element_text(size=12),
        axis.title=element_text(size=14),
        plot.title=element_text(size=20))
print(g2)
############################################################################################################################
#################################################### poor_phys_health_avg_over_30_days 1 ###################################
# Intercept-only hierarchical Gaussian model for average poor-physical-health
# days over 30 days.
poor_phys_avg.1.prior <- c(
  prior(normal(3, 10), class = Intercept),
  prior(normal(3, 10), class = sigma)
)
poor_phys_avg.bayes.1 = brm_multiple(poor_phys_health_avg_over_30_days ~ (1|State/County), data=imputed_Data,
                                     family = gaussian(link = "identity"), prior=poor_phys_avg.1.prior,
                                     backend = "rstan", silent = 0, iter=4000, control=list(adapt_delta=0.8))
save.image('CE_project.RData')
load('CE_project.RData')
launch_shinystan(poor_phys_avg.bayes.1)
# list_of_draws <- extract(testing$fit)
fit_summary.ppa1 = summary(poor_phys_avg.bayes.1$fit)
print(dim(fit_summary.ppa1$summary))
# Summary-row offsets differ from get_df() (56:3196 / 5:55 rather than
# 55:3195 / 4:54) -- presumably because this Gaussian model carries an extra
# sigma row; confirm against the summary's row names.
post_mean_counties <- fit_summary.ppa1$summary[,c("mean")][56:3196]
post_mean_state <- fit_summary.ppa1$summary[,c("mean")][5:55]
intercept<- fit_summary.ppa1$summary[,c("mean")][1]
row_name_counties = names(post_mean_counties)
row_values_counties = unname(post_mean_counties)
state = str_extract(row_name_counties, '(?<=\\[)(.*?)(?=\\_)') #"(?<=\\[)([^\\[]*)(?=_)")
intercept_col = rep(intercept, length(row_name_counties))
poor_phys_avg.1.df.counties<- data.frame(state, row_name_counties, row_values_counties, intercept_col)
print(unique(state))
print(dim(poor_phys_avg.1.df.counties))
row_name_state = names(post_mean_state)
row_values_state = unname(post_mean_state)
state = str_extract(row_name_state, "(?<=\\[)(.*?)(?=\\,)")
intercept_col = rep(intercept, length(row_name_state))
poor_phys_avg.1.df.states<- data.frame(state, row_name_state, row_values_state, intercept_col)
print(unique(row_name_state))
print(unique(state))
print(dim(poor_phys_avg.1.df.states))
# Join state and county effects for each county row.
poor_phys_avg.1.df = merge(poor_phys_avg.1.df.counties, poor_phys_avg.1.df.states,c("state","intercept_col"))
print(dim(poor_phys_avg.1.df))
rm(post_mean_counties, row_values_counties, state, intercept_col, poor_phys_avg.1.df.counties,
   row_name_state, row_values_state, poor_phys_avg.1.df.states)
# poor_phys_avg.1.df = get_df(fit_summary.ppa1$summary)
poor_phys_avg.1.df.summed = poor_phys_avg.1.df[,c("state", "row_name_counties")]
poor_phys_avg.1.df.summed$summed = poor_phys_avg.1.df$intercept_col +
poor_phys_avg.1.df$row_values_state +
poor_phys_avg.1.df$row_values_counties
# rank.poor_phys_avg.1 = poor_phys_avg.1.df.summed %>%
# group_by(state) %>%
# mutate(my_ranks = order(order(summed, row_name_counties, decreasing=TRUE)))
#################################################### poor_phys_health_avg_over_30_days 2 ####################################
# Model 2 for poor_phys_health_avg_over_30_days: adds county demographic
# covariates to the Gaussian hierarchical model (counties within states).
poor_phys_avg.2.prior <- c(
  prior(normal(0, 10), class = Intercept),
  prior(normal(0, 10), class = b),
  prior(normal(0, 10), class = sigma)
) ## HAS SUPER BAD COUNTY INTERCEPT
poor_phys_avg.bayes.2 = brm_multiple(poor_phys_health_avg_over_30_days ~ perc_AfAm + perc_As + perc_AmIn_AlNa +
                                       perc_Hisp + factor(urb_code_2013) + perc_female + perc_under_18 +
                                       perc_over_65 + (1|State/County), data=imputed_Data,
                                     family = gaussian(link = "identity"), prior=poor_phys_avg.2.prior,
                                     backend = "rstan", silent = 0, iter=4000)
# Checkpoint after the slow fit; load() is a no-op on a fresh run.
save.image('CE_project.RData')
load('CE_project.RData')
launch_shinystan(poor_phys_avg.bayes.2)
fit_summary.ppa2 = summary(poor_phys_avg.bayes.2$fit)
print(dim(fit_summary.ppa2$summary))
# Fixed-effect posterior means pulled by hard-coded summary row index --
# TODO confirm these offsets if the formula changes.
intercept<- fit_summary.ppa2$summary[,c("mean")][1]
b_perc_AfAm<- fit_summary.ppa2$summary[,c("mean")][2]
b_perc_As<- fit_summary.ppa2$summary[,c("mean")][3]
b_perc_AmIn_AlNa<- fit_summary.ppa2$summary[,c("mean")][4]
b_perc_Hisp<- fit_summary.ppa2$summary[,c("mean")][5]
# NOTE(review): urbanization dummies are read in the order 4,6,2,5,1 --
# verify this matches the row order of the stanfit summary.
b_urb_code_20134<- fit_summary.ppa2$summary[,c("mean")][6]
b_urb_code_20136<- fit_summary.ppa2$summary[,c("mean")][7]
b_urb_code_20132<- fit_summary.ppa2$summary[,c("mean")][8]
b_urb_code_20135<- fit_summary.ppa2$summary[,c("mean")][9]
b_urb_code_20131<- fit_summary.ppa2$summary[,c("mean")][10]
b_perc_female<- fit_summary.ppa2$summary[,c("mean")][11]
b_perc_under_18<- fit_summary.ppa2$summary[,c("mean")][12]
b_perc_over_65<- fit_summary.ppa2$summary[,c("mean")][13]
# Hard-coded random-intercept rows: 17:67 states, 68:3208 counties.
post_mean_state <- fit_summary.ppa2$summary[,c("mean")][17:67]
post_mean_counties <- fit_summary.ppa2$summary[,c("mean")][68:3208]
row_name_counties = names(post_mean_counties)
row_values_counties = unname(post_mean_counties)
# Parse county name (between "_" and ",") and state (between "[" and "_")
# out of the parameter names; dots are converted back to spaces.
counties = str_extract(row_name_counties, "(?<=_)([^_]+)(?=,)")
counties = gsub('\\.', ' ', counties)
state = str_extract(row_name_counties, '(?<=\\[)(.*?)(?=\\_)') #"(?<=\\[)([^\\[]*)(?=_)")
intercept_col = rep(intercept, length(row_name_counties))
poor_phys_avg.2.df.counties<- data.frame(state, counties, row_name_counties, row_values_counties, intercept_col,
                                         b_perc_AfAm, b_perc_As, b_perc_AmIn_AlNa, b_perc_Hisp, b_perc_female,
                                         b_perc_under_18, b_perc_over_65)
# Attach the urbanization dummy coefficient matching each county's code.
# NOTE(review): the element-wise `counties == df$County` comparison assumes
# both frames share identical row order (silent recycling if lengths
# differ) -- TODO confirm; a match()/merge on county would be safer.
poor_phys_avg.2.df.counties$b_urb[poor_phys_avg.2.df.counties$counties == df$County & df$urb_code_2013 == "1"] <- b_urb_code_20131
poor_phys_avg.2.df.counties$b_urb[poor_phys_avg.2.df.counties$counties == df$County & df$urb_code_2013 == "2"] <- b_urb_code_20132
poor_phys_avg.2.df.counties$b_urb[poor_phys_avg.2.df.counties$counties == df$County & df$urb_code_2013 == "4"] <- b_urb_code_20134
poor_phys_avg.2.df.counties$b_urb[poor_phys_avg.2.df.counties$counties == df$County & df$urb_code_2013 == "5"] <- b_urb_code_20135
poor_phys_avg.2.df.counties$b_urb[poor_phys_avg.2.df.counties$counties == df$County & df$urb_code_2013 == "6"] <- b_urb_code_20136
# Rows not assigned above remain NA; treat their urbanization term as 0.
poor_phys_avg.2.df.counties$b_urb[is.na(poor_phys_avg.2.df.counties$b_urb)] <- 0
print(dim(poor_phys_avg.2.df.counties))
row_name_state = names(post_mean_state)
row_values_state = unname(post_mean_state)
state = str_extract(row_name_state, "(?<=\\[)(.*?)(?=\\,)")
intercept_col = rep(intercept, length(row_name_state))
poor_phys_avg.2.df.states<- data.frame(state, row_name_state, row_values_state, intercept_col)
print(dim(poor_phys_avg.2.df.states))
poor_phys_avg.2.df = merge(poor_phys_avg.2.df.counties, poor_phys_avg.2.df.states,c("state","intercept_col")) #by="state")
print(dim(poor_phys_avg.2.df))
rm(post_mean_counties, row_values_counties, state, intercept_col, poor_phys_avg.2.df.counties,
   row_name_state, row_values_state, poor_phys_avg.2.df.states, b_perc_AfAm, b_perc_As,
   b_perc_AmIn_AlNa, b_perc_Hisp, b_perc_female, b_perc_under_18, b_perc_over_65)
# poor_phys_avg.2.df = get_df2(fit_summary.ppa2$summary)
# County score (identity link).
# NOTE(review): raw coefficients are summed without multiplying by the
# covariate values -- confirm this is the intended "score".
poor_phys_avg.2.df.summed = poor_phys_avg.2.df[,c("state", "row_name_counties")]
poor_phys_avg.2.df.summed$summed = poor_phys_avg.2.df$intercept_col +
  poor_phys_avg.2.df$b_perc_AfAm +
  poor_phys_avg.2.df$b_perc_As +
  poor_phys_avg.2.df$b_perc_AmIn_AlNa +
  poor_phys_avg.2.df$b_perc_Hisp+
  poor_phys_avg.2.df$b_urb+
  poor_phys_avg.2.df$b_perc_female+
  poor_phys_avg.2.df$b_perc_under_18+
  poor_phys_avg.2.df$b_perc_over_65+
  poor_phys_avg.2.df$row_values_state+
  poor_phys_avg.2.df$row_values_counties
# rank.poor_phys_avg.2 = poor_phys_avg.2.df.summed %>%
#   group_by(state) %>%
#   mutate(my_ranks = order(order(summed, row_name_counties, decreasing=TRUE)))
# rank.poor_phys_avg.2 = rank.poor_phys_avg.2[with(rank.poor_phys_avg.2, order(row_name_counties)), ]
############################################################################################################################
#################################################### poor_ment_health_avg_over_30_days 1 ###################################
# Model 1 for poor_ment_health_avg_over_30_days: intercept-only Gaussian
# hierarchical model (counties nested within states); same pipeline as the
# poor_phys_avg model 1 section above.
poor_ment_avg.1.prior <- c(
  prior(normal(0, 10), class = Intercept),
  prior(normal(0, 10), class = sigma)
) ## HAS SUPER BAD COUNTY INTERCEPT
poor_ment_avg.bayes.1 = brm_multiple(poor_ment_health_avg_over_30_days ~ (1|State/County), data=imputed_Data,
                                     family = gaussian(link = "identity"), prior=poor_ment_avg.1.prior,
                                     backend = "rstan", silent = 0, iter=4000)
# Checkpoint after the slow fit; load() is a no-op on a fresh run.
save.image('CE_project.RData')
load('CE_project.RData')
launch_shinystan(poor_ment_avg.bayes.1)
fit_summary.pma1 = summary(poor_ment_avg.bayes.1$fit)
print(dim(fit_summary.pma1$summary))
# Hard-coded summary rows: 5:55 state, 56:3196 county random intercepts --
# TODO confirm these offsets if the model changes.
post_mean_counties <- fit_summary.pma1$summary[,c("mean")][56:3196]
post_mean_state <- fit_summary.pma1$summary[,c("mean")][5:55]
intercept<- fit_summary.pma1$summary[,c("mean")][1]
row_name_counties = names(post_mean_counties)
row_values_counties = unname(post_mean_counties)
# Parse the state abbreviation (between "[" and "_") from parameter names.
state = str_extract(row_name_counties, '(?<=\\[)(.*?)(?=\\_)') #"(?<=\\[)([^\\[]*)(?=_)")
intercept_col = rep(intercept, length(row_name_counties))
poor_ment_avg.1.df.counties<- data.frame(state, row_name_counties, row_values_counties, intercept_col)
print(unique(state))
print(dim(poor_ment_avg.1.df.counties))
row_name_state = names(post_mean_state)
row_values_state = unname(post_mean_state)
state = str_extract(row_name_state, "(?<=\\[)(.*?)(?=\\,)")
intercept_col = rep(intercept, length(row_name_state))
poor_ment_avg.1.df.states<- data.frame(state, row_name_state, row_values_state, intercept_col)
print(unique(row_name_state))
print(unique(state))
print(dim(poor_ment_avg.1.df.states))
poor_ment_avg.1.df = merge(poor_ment_avg.1.df.counties, poor_ment_avg.1.df.states,c("state","intercept_col"))
print(dim(poor_ment_avg.1.df))
rm(post_mean_counties, row_values_counties, state, intercept_col, poor_ment_avg.1.df.counties,
   row_name_state, row_values_state, poor_ment_avg.1.df.states)
# County score (identity link): intercept + state + county effects.
poor_ment_avg.1.df.summed = poor_ment_avg.1.df[,c("state", "row_name_counties")]
poor_ment_avg.1.df.summed$summed = poor_ment_avg.1.df$intercept_col +
  poor_ment_avg.1.df$row_values_state +
  poor_ment_avg.1.df$row_values_counties
# rank.poor_ment_avg.1 = poor_ment_avg.1.df.summed %>%
#   group_by(state) %>%
#   mutate(my_ranks = order(order(summed, row_name_counties, decreasing=TRUE)))
#
# rank.poor_ment_avg.1 = rank.poor_ment_avg.1[with(rank.poor_ment_avg.1, order(row_name_counties)), ]
#################################################### poor_ment_health_avg_over_30_days 2 ####################################
# Model 2 for poor_ment_health_avg_over_30_days: adds county demographic
# covariates; same pipeline as the poor_phys_avg model 2 section above.
poor_ment_avg.2.prior <- c(
  prior(normal(0, 1), class = Intercept),
  prior(normal(0, 1), class = b),
  prior(normal(0, 1), class = sigma)
) ## HAS SUPER BAD COUNTY INTERCEPT
poor_ment_avg.bayes.2 = brm_multiple(poor_ment_health_avg_over_30_days ~ perc_AfAm + perc_As + perc_AmIn_AlNa +
                                       perc_Hisp + factor(urb_code_2013) + perc_female + perc_under_18 +
                                       perc_over_65 + (1|State/County), data=imputed_Data,
                                     family = gaussian(link = "identity"), prior=poor_ment_avg.2.prior,
                                     backend = "rstan", silent = 0, iter=4000)
# Checkpoint after the slow fit; load() is a no-op on a fresh run.
save.image('CE_project.RData')
load('CE_project.RData')
launch_shinystan(poor_ment_avg.bayes.2)
fit_summary.pma2 = summary(poor_ment_avg.bayes.2$fit)
print(dim(fit_summary.pma2$summary))
# Fixed-effect posterior means by hard-coded summary row -- TODO confirm
# offsets if the formula changes.
intercept<- fit_summary.pma2$summary[,c("mean")][1]
b_perc_AfAm<- fit_summary.pma2$summary[,c("mean")][2]
b_perc_As<- fit_summary.pma2$summary[,c("mean")][3]
b_perc_AmIn_AlNa<- fit_summary.pma2$summary[,c("mean")][4]
b_perc_Hisp<- fit_summary.pma2$summary[,c("mean")][5]
# NOTE(review): urbanization dummies are read in the order 4,6,2,5,1 --
# verify this matches the row order of the stanfit summary.
b_urb_code_20134<- fit_summary.pma2$summary[,c("mean")][6]
b_urb_code_20136<- fit_summary.pma2$summary[,c("mean")][7]
b_urb_code_20132<- fit_summary.pma2$summary[,c("mean")][8]
b_urb_code_20135<- fit_summary.pma2$summary[,c("mean")][9]
b_urb_code_20131<- fit_summary.pma2$summary[,c("mean")][10]
b_perc_female<- fit_summary.pma2$summary[,c("mean")][11]
b_perc_under_18<- fit_summary.pma2$summary[,c("mean")][12]
b_perc_over_65<- fit_summary.pma2$summary[,c("mean")][13]
# Hard-coded random-intercept rows: 17:67 states, 68:3208 counties.
post_mean_state <- fit_summary.pma2$summary[,c("mean")][17:67]
post_mean_counties <- fit_summary.pma2$summary[,c("mean")][68:3208]
row_name_counties = names(post_mean_counties)
row_values_counties = unname(post_mean_counties)
# Parse county (between "_" and ",") and state (between "[" and "_");
# dots are converted back to spaces.
counties = str_extract(row_name_counties, "(?<=_)([^_]+)(?=,)")
counties = gsub('\\.', ' ', counties)
state = str_extract(row_name_counties, '(?<=\\[)(.*?)(?=\\_)') #"(?<=\\[)([^\\[]*)(?=_)")
intercept_col = rep(intercept, length(row_name_counties))
poor_ment_avg.2.df.counties<- data.frame(state, counties, row_name_counties, row_values_counties, intercept_col,
                                         b_perc_AfAm, b_perc_As, b_perc_AmIn_AlNa, b_perc_Hisp, b_perc_female,
                                         b_perc_under_18, b_perc_over_65)
# Attach the urbanization dummy coefficient matching each county's code.
# NOTE(review): element-wise `counties == df$County` assumes both frames
# share row order (silent recycling if lengths differ) -- TODO confirm.
poor_ment_avg.2.df.counties$b_urb[poor_ment_avg.2.df.counties$counties == df$County & df$urb_code_2013 == "1"] <- b_urb_code_20131
poor_ment_avg.2.df.counties$b_urb[poor_ment_avg.2.df.counties$counties == df$County & df$urb_code_2013 == "2"] <- b_urb_code_20132
poor_ment_avg.2.df.counties$b_urb[poor_ment_avg.2.df.counties$counties == df$County & df$urb_code_2013 == "4"] <- b_urb_code_20134
poor_ment_avg.2.df.counties$b_urb[poor_ment_avg.2.df.counties$counties == df$County & df$urb_code_2013 == "5"] <- b_urb_code_20135
poor_ment_avg.2.df.counties$b_urb[poor_ment_avg.2.df.counties$counties == df$County & df$urb_code_2013 == "6"] <- b_urb_code_20136
# Rows not assigned above remain NA; treat their urbanization term as 0.
poor_ment_avg.2.df.counties$b_urb[is.na(poor_ment_avg.2.df.counties$b_urb)] <- 0
print(dim(poor_ment_avg.2.df.counties))
row_name_state = names(post_mean_state)
row_values_state = unname(post_mean_state)
state = str_extract(row_name_state, "(?<=\\[)(.*?)(?=\\,)")
intercept_col = rep(intercept, length(row_name_state))
poor_ment_avg.2.df.states<- data.frame(state, row_name_state, row_values_state, intercept_col)
print(dim(poor_ment_avg.2.df.states))
poor_ment_avg.2.df = merge(poor_ment_avg.2.df.counties, poor_ment_avg.2.df.states,c("state","intercept_col")) #by="state")
print(dim(poor_ment_avg.2.df))
rm(post_mean_counties, row_values_counties, state, intercept_col, poor_ment_avg.2.df.counties,
   row_name_state, row_values_state, poor_ment_avg.2.df.states, b_perc_AfAm, b_perc_As,
   b_perc_AmIn_AlNa, b_perc_Hisp, b_perc_female, b_perc_under_18, b_perc_over_65)
# County score (identity link).
# NOTE(review): raw coefficients are summed without multiplying by the
# covariate values -- confirm this is the intended "score".
poor_ment_avg.2.df.summed = poor_ment_avg.2.df[,c("state", "row_name_counties")]
poor_ment_avg.2.df.summed$summed = poor_ment_avg.2.df$intercept_col +
  poor_ment_avg.2.df$b_perc_AfAm +
  poor_ment_avg.2.df$b_perc_As +
  poor_ment_avg.2.df$b_perc_AmIn_AlNa +
  poor_ment_avg.2.df$b_perc_Hisp+
  poor_ment_avg.2.df$b_urb+
  poor_ment_avg.2.df$b_perc_female+
  poor_ment_avg.2.df$b_perc_under_18+
  poor_ment_avg.2.df$b_perc_over_65+
  poor_ment_avg.2.df$row_values_state+
  poor_ment_avg.2.df$row_values_counties
# rank.poor_ment_avg.2 = poor_ment_avg.2.df.summed %>%
#   group_by(state) %>%
#   mutate(my_ranks = order(order(summed, row_name_counties, decreasing=TRUE)))
#
# rank.poor_ment_avg.2 = rank.poor_ment_avg.2[with(rank.poor_ment_avg.2, order(row_name_counties)), ]
############################################################################################################################
#################################################### low_birthweight_births 1 ###################################
# Model 1 for low_birthweight_births: intercept-only hierarchical logistic
# regression (counties nested within states), fit across the imputed data
# sets with brm_multiple.
low_bwb.1.prior <- c(
  prior(normal(1, 1), class = Intercept)
) ## HAS KINDA BAD COUNTY INTERCEPT
low_bwb.bayes.1 = brm_multiple(low_birthweight_births ~ (1|State/County), data=imputed_Data,
                               family = binomial(link = "logit"), prior=low_bwb.1.prior,
                               backend = "rstan", silent = 0, iter=4000)
# Checkpoint after the slow fit; load() is a no-op on a fresh run.
save.image('CE_project.RData')
load('CE_project.RData')
launch_shinystan(low_bwb.bayes.1)
fit_summary.lbwb1 = summary(low_bwb.bayes.1$fit)
print(dim(fit_summary.lbwb1$summary))
# Hard-coded summary rows: 4:54 state, 55:3195 county random intercepts --
# TODO confirm these offsets if the model changes.
post_mean_counties <- fit_summary.lbwb1$summary[,c("mean")][55:3195]
post_mean_state <- fit_summary.lbwb1$summary[,c("mean")][4:54]
intercept<- fit_summary.lbwb1$summary[,c("mean")][1]
row_name_counties = names(post_mean_counties)
row_values_counties = unname(post_mean_counties)
# Parse the state abbreviation (between "[" and "_") from parameter names.
state = str_extract(row_name_counties, '(?<=\\[)(.*?)(?=\\_)') #"(?<=\\[)([^\\[]*)(?=_)")
intercept_col = rep(intercept, length(row_name_counties))
low_bwb.1.df.counties<- data.frame(state, row_name_counties, row_values_counties, intercept_col)
print(unique(state))
print(dim(low_bwb.1.df.counties))
row_name_state = names(post_mean_state)
row_values_state = unname(post_mean_state)
state = str_extract(row_name_state, "(?<=\\[)(.*?)(?=\\,)")
intercept_col = rep(intercept, length(row_name_state))
low_bwb.1.df.states<- data.frame(state, row_name_state, row_values_state, intercept_col)
print(unique(row_name_state))
print(unique(state))
print(dim(low_bwb.1.df.states))
low_bwb.1.df = merge(low_bwb.1.df.counties, low_bwb.1.df.states,c("state","intercept_col"))
print(dim(low_bwb.1.df))
rm(post_mean_counties, row_values_counties, state, intercept_col, low_bwb.1.df.counties,
   row_name_state, row_values_state, low_bwb.1.df.states)
low_bwb.1.df.summed = low_bwb.1.df[,c("state", "row_name_counties")]
# Fitted county probability: inverse-logit of intercept + state and county
# random effects. plogis() replaces the hand-rolled exp(x)/(1+exp(x)),
# which overflows to NaN for large linear predictors.
low_bwb.1.df.summed$summed = plogis(low_bwb.1.df$intercept_col +
                                      low_bwb.1.df$row_values_state +
                                      low_bwb.1.df$row_values_counties)
# rank.low_bwb.1 = low_bwb.1.df.summed %>%
#   group_by(state) %>%
#   mutate(my_ranks = order(order(summed, row_name_counties, decreasing=TRUE)))
#
# rank.low_bwb.1 = rank.low_bwb.1[with(rank.low_bwb.1, order(row_name_counties)), ]
#################################################### low_birthweight_births 2 ####################################
# Model 2 for low_birthweight_births: hierarchical logistic regression with
# county-level demographic covariates (counties nested within states).
low_bwb.2.prior <- c(
  prior(normal(1, 1), class = Intercept),
  prior(normal(1, 1), class = b)
)
# NOTE(review): brms expects `inits` (newer versions: `init`) to be a list
# or function per chain; the bare numeric c(15, 5) looks suspect -- confirm
# it is intended before relying on these starting values.
low_bwb.bayes.2 = brm_multiple(low_birthweight_births ~ perc_AfAm + perc_As + perc_AmIn_AlNa +
                                 perc_Hisp + factor(urb_code_2013) + perc_female + perc_under_18 +
                                 perc_over_65 + (1|State/County), data=imputed_Data,
                               family = binomial(link = "logit"), prior=low_bwb.2.prior,
                               backend = "rstan", silent = 0, inits=c(15, 5), iter=4000)
# Checkpoint after the slow fit; load() is a no-op on a fresh run.
save.image('CE_project.RData')
load('CE_project.RData')
launch_shinystan(low_bwb.bayes.2)
fit_summary.lbwb2 = summary(low_bwb.bayes.2$fit)
print(dim(fit_summary.lbwb2$summary))
# Fixed-effect posterior means by hard-coded summary row -- TODO confirm
# offsets if the formula changes. Urbanization dummies are read in the
# order 4,6,2,5,1; verify against the summary row order.
intercept<- fit_summary.lbwb2$summary[,c("mean")][1]
b_perc_AfAm<- fit_summary.lbwb2$summary[,c("mean")][2]
b_perc_As<- fit_summary.lbwb2$summary[,c("mean")][3]
b_perc_AmIn_AlNa<- fit_summary.lbwb2$summary[,c("mean")][4]
b_perc_Hisp<- fit_summary.lbwb2$summary[,c("mean")][5]
b_urb_code_20134<- fit_summary.lbwb2$summary[,c("mean")][6]
b_urb_code_20136<- fit_summary.lbwb2$summary[,c("mean")][7]
b_urb_code_20132<- fit_summary.lbwb2$summary[,c("mean")][8]
b_urb_code_20135<- fit_summary.lbwb2$summary[,c("mean")][9]
b_urb_code_20131<- fit_summary.lbwb2$summary[,c("mean")][10]
b_perc_female<- fit_summary.lbwb2$summary[,c("mean")][11]
b_perc_under_18<- fit_summary.lbwb2$summary[,c("mean")][12]
b_perc_over_65<- fit_summary.lbwb2$summary[,c("mean")][13]
# Hard-coded random-intercept rows: 16:66 states, 67:3207 counties.
post_mean_state <- fit_summary.lbwb2$summary[,c("mean")][16:66]
post_mean_counties <- fit_summary.lbwb2$summary[,c("mean")][67:3207]
row_name_counties = names(post_mean_counties)
row_values_counties = unname(post_mean_counties)
# Parse county (between "_" and ",") and state (between "[" and "_");
# dots are converted back to spaces.
counties = str_extract(row_name_counties, "(?<=_)([^_]+)(?=,)")
counties = gsub('\\.', ' ', counties)
state = str_extract(row_name_counties, '(?<=\\[)(.*?)(?=\\_)') #"(?<=\\[)([^\\[]*)(?=_)")
intercept_col = rep(intercept, length(row_name_counties))
low_bwb.2.df.counties<- data.frame(state, counties, row_name_counties, row_values_counties, intercept_col,
                                   b_perc_AfAm, b_perc_As, b_perc_AmIn_AlNa, b_perc_Hisp, b_perc_female,
                                   b_perc_under_18, b_perc_over_65)
# Attach the urbanization dummy coefficient matching each county's code.
# NOTE(review): element-wise `counties == df$County` assumes both frames
# share row order (silent recycling if lengths differ) -- TODO confirm.
low_bwb.2.df.counties$b_urb[low_bwb.2.df.counties$counties == df$County & df$urb_code_2013 == "1"] <- b_urb_code_20131
low_bwb.2.df.counties$b_urb[low_bwb.2.df.counties$counties == df$County & df$urb_code_2013 == "2"] <- b_urb_code_20132
low_bwb.2.df.counties$b_urb[low_bwb.2.df.counties$counties == df$County & df$urb_code_2013 == "4"] <- b_urb_code_20134
low_bwb.2.df.counties$b_urb[low_bwb.2.df.counties$counties == df$County & df$urb_code_2013 == "5"] <- b_urb_code_20135
low_bwb.2.df.counties$b_urb[low_bwb.2.df.counties$counties == df$County & df$urb_code_2013 == "6"] <- b_urb_code_20136
# Rows not assigned above remain NA; treat their urbanization term as 0.
low_bwb.2.df.counties$b_urb[is.na(low_bwb.2.df.counties$b_urb)] <- 0
print(dim(low_bwb.2.df.counties))
row_name_state = names(post_mean_state)
row_values_state = unname(post_mean_state)
state = str_extract(row_name_state, "(?<=\\[)(.*?)(?=\\,)")
intercept_col = rep(intercept, length(row_name_state))
low_bwb.2.df.states<- data.frame(state, row_name_state, row_values_state, intercept_col)
print(dim(low_bwb.2.df.states))
low_bwb.2.df = merge(low_bwb.2.df.counties, low_bwb.2.df.states,c("state","intercept_col")) #by="state")
print(dim(low_bwb.2.df))
rm(post_mean_counties, row_values_counties, state, intercept_col, low_bwb.2.df.counties,
   row_name_state, row_values_state, low_bwb.2.df.states, b_perc_AfAm, b_perc_As,
   b_perc_AmIn_AlNa, b_perc_Hisp, b_perc_female, b_perc_under_18, b_perc_over_65)
low_bwb.2.df.summed = low_bwb.2.df[,c("state", "row_name_counties")]
# Logit-scale linear predictor, computed once instead of being duplicated
# in the numerator and denominator of the inverse-logit as the original did.
# NOTE(review): raw coefficients are summed without multiplying by the
# covariate values -- confirm this is the intended "score".
lp_low_bwb.2 <- low_bwb.2.df$intercept_col +
  low_bwb.2.df$b_perc_AfAm +
  low_bwb.2.df$b_perc_As +
  low_bwb.2.df$b_perc_AmIn_AlNa +
  low_bwb.2.df$b_perc_Hisp +
  low_bwb.2.df$b_urb +
  low_bwb.2.df$b_perc_female +
  low_bwb.2.df$b_perc_under_18 +
  low_bwb.2.df$b_perc_over_65 +
  low_bwb.2.df$row_values_state +
  low_bwb.2.df$row_values_counties
# plogis(x) == exp(x)/(1+exp(x)) but is numerically stable for large |x|
# (the hand-rolled form overflows to NaN).
low_bwb.2.df.summed$summed = plogis(lp_low_bwb.2)
# rank.low_bwb.2 = low_bwb.2.df.summed %>%
#   group_by(state) %>%
#   mutate(my_ranks = order(order(summed, row_name_counties, decreasing=TRUE)))
#
# rank.low_bwb.2 = rank.low_bwb.2[with(rank.low_bwb.2, order(row_name_counties)), ]
# ############################################################################################################################
# #################################################### poor_health_perc 1 ###################################
# Model 1 for poor_health_estimate: intercept-only hierarchical logistic
# model (counties nested within states).
poor_health_perc.1.prior <- c(
  prior(normal(1,1), class = Intercept)
)
poor_health_perc.bayes.1 = brm_multiple(poor_health_estimate ~ (1|State/County), data=imputed_Data,
                                        family = binomial(link = "logit"), prior=poor_health_perc.1.prior,
                                        backend = "rstan", silent = 0, iter=4000)
# Alternative specification kept for reference (count outcome, beta prior):
# poor_health_perc.1.prior <- c(
#   prior(beta(2, 2), class = Intercept)
# )
# poor_health_num = round(poor_health_sample_size * poor_health_percent)
# poor_health_perc.bayes.1 = brm_multiple(poor_health_num ~ (1|State/County), data=imputed_Data,
#                                         family = binomial(link = "logit"), prior=poor_health_perc.1.prior,
#                                         backend = "rstan", silent = 0, iter=4000)
# Checkpoint after the slow fit; load() is a no-op on a fresh run.
save.image('CE_project.RData')
load('CE_project.RData')
launch_shinystan(poor_health_perc.bayes.1)
# list_of_draws <- extract(testing$fit)
fit_summary.php1 = summary(poor_health_perc.bayes.1$fit)
print(dim(fit_summary.php1$summary))
# Hard-coded summary rows: 4:54 state, 55:3195 county random intercepts --
# TODO confirm these offsets if the model changes.
post_mean_counties <- fit_summary.php1$summary[,c("mean")][55:3195]
post_mean_state <- fit_summary.php1$summary[,c("mean")][4:54]
intercept<- fit_summary.php1$summary[,c("mean")][1]
row_name_counties = names(post_mean_counties)
row_values_counties = unname(post_mean_counties)
# Parse the state abbreviation (between "[" and "_") from parameter names.
state = str_extract(row_name_counties, '(?<=\\[)(.*?)(?=\\_)') #"(?<=\\[)([^\\[]*)(?=_)")
intercept_col = rep(intercept, length(row_name_counties))
poor_health_perc.1.df.counties<- data.frame(state, row_name_counties, row_values_counties, intercept_col)
print(unique(state))
print(dim(poor_health_perc.1.df.counties))
row_name_state = names(post_mean_state)
row_values_state = unname(post_mean_state)
state = str_extract(row_name_state, "(?<=\\[)(.*?)(?=\\,)")
intercept_col = rep(intercept, length(row_name_state))
poor_health_perc.1.df.states<- data.frame(state, row_name_state, row_values_state, intercept_col)
print(unique(row_name_state))
print(unique(state))
print(dim(poor_health_perc.1.df.states))
poor_health_perc.1.df = merge(poor_health_perc.1.df.counties, poor_health_perc.1.df.states,c("state","intercept_col"))
print(dim(poor_health_perc.1.df))
rm(post_mean_counties, row_values_counties, state, intercept_col, poor_health_perc.1.df.counties,
   row_name_state, row_values_state, poor_health_perc.1.df.states)
poor_health_perc.1.df.summed = poor_health_perc.1.df[,c("state", "row_name_counties")]
# Fitted county probability: inverse-logit of intercept + random effects.
# plogis() replaces the hand-rolled exp(x)/(1+exp(x)), which overflows to
# NaN for large linear predictors.
poor_health_perc.1.df.summed$summed = plogis(poor_health_perc.1.df$intercept_col +
                                               poor_health_perc.1.df$row_values_state +
                                               poor_health_perc.1.df$row_values_counties)
#################################################### poor_health_perc 2 ####################################
# Model 2 for poor_health_estimate: hierarchical logistic regression with
# county-level demographic covariates.
poor_health_perc.2.prior <- c(
  # NOTE(review): a beta(2, 2) prior constrains the (logit-scale)
  # intercept to (0, 1), which is unusual for a logit model -- confirm.
  prior(beta(2, 2), class = Intercept),
  prior(normal(0, 1), class = b)
)
poor_health_perc.bayes.2 = brm_multiple(poor_health_estimate ~ perc_AfAm + perc_As + perc_AmIn_AlNa +
                                          perc_Hisp + factor(urb_code_2013) + perc_female + perc_under_18 +
                                          perc_over_65 + (1|State/County), data=imputed_Data,
                                        family = binomial(link = "logit"), prior=poor_health_perc.2.prior,
                                        backend = "rstan", silent = 0, iter=4000)
# Checkpoint after the slow fit; load() is a no-op on a fresh run.
save.image('CE_project.RData')
load('CE_project.RData')
launch_shinystan(poor_health_perc.bayes.2)
fit_summary.php2 = summary(poor_health_perc.bayes.2$fit)
print(dim(fit_summary.php2$summary))
# Fixed-effect posterior means by hard-coded summary row -- TODO confirm
# offsets if the formula changes. Urbanization dummies are read in the
# order 4,6,2,5,1; verify against the summary row order.
intercept<- fit_summary.php2$summary[,c("mean")][1]
b_perc_AfAm<- fit_summary.php2$summary[,c("mean")][2]
b_perc_As<- fit_summary.php2$summary[,c("mean")][3]
b_perc_AmIn_AlNa<- fit_summary.php2$summary[,c("mean")][4]
b_perc_Hisp<- fit_summary.php2$summary[,c("mean")][5]
b_urb_code_20134<- fit_summary.php2$summary[,c("mean")][6]
b_urb_code_20136<- fit_summary.php2$summary[,c("mean")][7]
b_urb_code_20132<- fit_summary.php2$summary[,c("mean")][8]
b_urb_code_20135<- fit_summary.php2$summary[,c("mean")][9]
b_urb_code_20131<- fit_summary.php2$summary[,c("mean")][10]
b_perc_female<- fit_summary.php2$summary[,c("mean")][11]
b_perc_under_18<- fit_summary.php2$summary[,c("mean")][12]
b_perc_over_65<- fit_summary.php2$summary[,c("mean")][13]
# Hard-coded random-intercept rows: 16:66 states, 67:3207 counties.
post_mean_state <- fit_summary.php2$summary[,c("mean")][16:66]
post_mean_counties <- fit_summary.php2$summary[,c("mean")][67:3207]
row_name_counties = names(post_mean_counties)
row_values_counties = unname(post_mean_counties)
# Parse county (between "_" and ",") and state (between "[" and "_");
# dots are converted back to spaces.
counties = str_extract(row_name_counties, "(?<=_)([^_]+)(?=,)")
counties = gsub('\\.', ' ', counties)
state = str_extract(row_name_counties, '(?<=\\[)(.*?)(?=\\_)') #"(?<=\\[)([^\\[]*)(?=_)")
intercept_col = rep(intercept, length(row_name_counties))
poor_health_perc.2.df.counties<- data.frame(state, counties, row_name_counties, row_values_counties, intercept_col,
                                            b_perc_AfAm, b_perc_As, b_perc_AmIn_AlNa, b_perc_Hisp, b_perc_female,
                                            b_perc_under_18, b_perc_over_65)
# Attach the urbanization dummy coefficient matching each county's code.
# NOTE(review): element-wise `counties == df$County` assumes both frames
# share row order (silent recycling if lengths differ) -- TODO confirm.
poor_health_perc.2.df.counties$b_urb[poor_health_perc.2.df.counties$counties == df$County & df$urb_code_2013 == "1"] <- b_urb_code_20131
poor_health_perc.2.df.counties$b_urb[poor_health_perc.2.df.counties$counties == df$County & df$urb_code_2013 == "2"] <- b_urb_code_20132
poor_health_perc.2.df.counties$b_urb[poor_health_perc.2.df.counties$counties == df$County & df$urb_code_2013 == "4"] <- b_urb_code_20134
poor_health_perc.2.df.counties$b_urb[poor_health_perc.2.df.counties$counties == df$County & df$urb_code_2013 == "5"] <- b_urb_code_20135
poor_health_perc.2.df.counties$b_urb[poor_health_perc.2.df.counties$counties == df$County & df$urb_code_2013 == "6"] <- b_urb_code_20136
# Rows not assigned above remain NA; treat their urbanization term as 0.
poor_health_perc.2.df.counties$b_urb[is.na(poor_health_perc.2.df.counties$b_urb)] <- 0
print(dim(poor_health_perc.2.df.counties))
row_name_state = names(post_mean_state)
row_values_state = unname(post_mean_state)
state = str_extract(row_name_state, "(?<=\\[)(.*?)(?=\\,)")
intercept_col = rep(intercept, length(row_name_state))
poor_health_perc.2.df.states<- data.frame(state, row_name_state, row_values_state, intercept_col)
print(dim(poor_health_perc.2.df.states))
poor_health_perc.2.df = merge(poor_health_perc.2.df.counties, poor_health_perc.2.df.states,c("state","intercept_col")) #by="state")
print(dim(poor_health_perc.2.df))
rm(post_mean_counties, row_values_counties, state, intercept_col, poor_health_perc.2.df.counties,
   row_name_state, row_values_state, poor_health_perc.2.df.states, b_perc_AfAm, b_perc_As,
   b_perc_AmIn_AlNa, b_perc_Hisp, b_perc_female, b_perc_under_18, b_perc_over_65)
poor_health_perc.2.df.summed = poor_health_perc.2.df[,c("state", "row_name_counties")]
# Logit-scale linear predictor, computed once instead of being duplicated
# in the numerator and denominator of the inverse-logit as the original did.
# NOTE(review): raw coefficients are summed without multiplying by the
# covariate values -- confirm this is the intended "score".
lp_poor_health_perc.2 <- poor_health_perc.2.df$intercept_col +
  poor_health_perc.2.df$b_perc_AfAm +
  poor_health_perc.2.df$b_perc_As +
  poor_health_perc.2.df$b_perc_AmIn_AlNa +
  poor_health_perc.2.df$b_perc_Hisp +
  poor_health_perc.2.df$b_urb +
  poor_health_perc.2.df$b_perc_female +
  poor_health_perc.2.df$b_perc_under_18 +
  poor_health_perc.2.df$b_perc_over_65 +
  poor_health_perc.2.df$row_values_state +
  poor_health_perc.2.df$row_values_counties
# plogis(x) == exp(x)/(1+exp(x)) but is numerically stable for large |x|
# (the hand-rolled form overflows to NaN).
poor_health_perc.2.df.summed$summed = plogis(lp_poor_health_perc.2)
# poor_health_perc.2.df.summed = poor_health_perc.2.df[,c("state", "row_name_counties")]
# poor_health_perc.2.df.summed$summed = exp(poor_health_perc.2.df$intercept_col + poor_health_perc.2.df$row_values_state + poor_health_perc.2.df$row_values_counties)/
#   (1+exp(poor_health_perc.2.df$intercept_col + poor_health_perc.2.df$row_values_state + poor_health_perc.2.df$row_values_counties))
#
# rank.poor_health_perc.2 = poor_health_perc.2.df.summed %>%
#   group_by(state) %>%
#   mutate(my_ranks = order(order(summed, row_name_counties, decreasing=TRUE)))
#
# rank.poor_health_perc.2[with(rank.poor_health_perc.2, order(row_name_counties)), ]
# Morbidity Rank 1 ----------------------------------------------------------
# Combine the four model-1 morbidity outcomes into one county score, rank
# counties within each state, and plot the per-state rank loss.
all.df.summed.1 <- poor_phys_avg.1.df.summed[, c("state", "row_name_counties")]
# BUG FIX: the original wrapped the element-wise sum in mean(), which
# collapses the whole vector to a single scalar -- every county then got
# the same score and the within-state ranking degenerated to alphabetical
# tie-breaking. The intended quantity is the per-county mean of the four
# outcome scores.
# NOTE(review): assumes the four *.df.summed frames share identical row
# order -- TODO confirm.
all.df.summed.1$summed <- (poor_phys_avg.1.df.summed$summed +
                             poor_ment_avg.1.df.summed$summed +
                             low_bwb.1.df.summed$summed +
                             poor_health_perc.1.df.summed$summed) / 4
# Within-state ranking: nested order() converts scores to ranks (ties
# broken by county name); decreasing=TRUE so the highest score gets rank 1.
rank.morbidity.1 = all.df.summed.1 %>%
  group_by(state) %>%
  mutate(my_ranks = order(order(summed, row_name_counties, decreasing=TRUE)))
rank.morbidity.1 = rank.morbidity.1[with(rank.morbidity.1, order(row_name_counties)), ]
# Published ranks; assumes df row order matches the county sort above --
# TODO confirm.
rank.morbidity.1$true_ranks = df$Morbidity_Rank
# Per-state squared-error loss between estimated and true ranks; sel() is
# defined elsewhere in this file.
sel.morbidity.1 <-rank.morbidity.1 %>%
  group_by(state) %>%
  do(data.frame(standard.error.loss.morbidity.1=sel(.)))
sel.morbidity.1 = sel.morbidity.1[with(sel.morbidity.1, order(standard.error.loss.morbidity.1)), ]
sel.morbidity.1
# Bar chart of the per-state loss.
g3 <- ggplot(data = sel.morbidity.1, mapping = aes(x = as.factor(state), y = standard.error.loss.morbidity.1)) +
  geom_bar(stat = "identity") +
  labs(x = "state") +
  ggtitle("Morbidity Rank Mean Squared Error Loss Model 1") +
  xlab("") +
  ylab("Mean Squared Error Loss") +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1),
        axis.text=element_text(size=12),
        axis.title=element_text(size=14),
        plot.title=element_text(size=20))
print(g3)
# Morbidity Rank 2 --------------------------------------------------------
# Same combination/ranking/loss pipeline as Morbidity Rank 1, using the
# model-2 (covariate-adjusted) outcome scores.
all.df.summed.2 <- poor_phys_avg.2.df.summed[, c("state", "row_name_counties")]
# BUG FIX: mean() over the element-wise sum returned one scalar for all
# counties, making the ranking meaningless; use the per-county mean of the
# four outcome scores instead.
# NOTE(review): assumes the four *.df.summed frames share identical row
# order -- TODO confirm.
all.df.summed.2$summed <- (poor_phys_avg.2.df.summed$summed +
                             poor_ment_avg.2.df.summed$summed +
                             low_bwb.2.df.summed$summed +
                             poor_health_perc.2.df.summed$summed) / 4
# Within-state ranking (nested order() = ranks, highest score = rank 1).
rank.morbidity.2 = all.df.summed.2 %>%
  group_by(state) %>%
  mutate(my_ranks = order(order(summed, row_name_counties, decreasing=TRUE)))
rank.morbidity.2 = rank.morbidity.2[with(rank.morbidity.2, order(row_name_counties)), ]
# Published ranks; assumes df row order matches the county sort above --
# TODO confirm.
rank.morbidity.2$true_ranks = df$Morbidity_Rank
# Per-state squared-error loss; sel() is defined elsewhere in this file.
sel.morbidity.2 <-rank.morbidity.2 %>%
  group_by(state) %>%
  do(data.frame(standard.error.loss.morbidity.2=sel(.)))
sel.morbidity.2 = sel.morbidity.2[with(sel.morbidity.2, order(standard.error.loss.morbidity.2)), ]
sel.morbidity.2
# Bar chart of the per-state loss.
g4 <- ggplot(data = sel.morbidity.2, mapping = aes(x = as.factor(state), y = standard.error.loss.morbidity.2)) +
  geom_bar(stat = "identity") +
  labs(x = "state") +
  ggtitle("Morbidity Rank Mean Squared Error Loss Model 2") +
  xlab("") +
  ylab("Mean Squared Error Loss") +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1),
        axis.text=element_text(size=12),
        axis.title=element_text(size=14),
        plot.title=element_text(size=20))
print(g4)
# Plotting model 1 vs 2----------------------------------------------------------------
# Scatter each state's model-2 loss against its model-1 loss, with an OLS
# trend line, for mortality and then morbidity.
mort_loss_1 <- sel.prem_death.1$standard.error.loss.mortality.1
mort_loss_2 <- sel.prem_death.2$standard.error.loss.mortality.2
plot(mort_loss_2 ~ mort_loss_1,
     main ="Relationship Between Model 1 and Model 2 Mortality Mean Squared Error Loss",
     xlab = "Mean Squared Error Loss Model 1",
     ylab = "Mean Squared Error Loss Model 2", cex=1.5)
abline(lm(mort_loss_2 ~ mort_loss_1))
morb_loss_1 <- sel.morbidity.1$standard.error.loss.morbidity.1
morb_loss_2 <- sel.morbidity.2$standard.error.loss.morbidity.2
plot(morb_loss_2 ~ morb_loss_1,
     main ="Relationship Between Model 1 and Model 2 Morbidity Mean Squared Error Loss",
     xlab = "Mean Squared Error Loss Model 1",
     ylab = "Mean Squared Error Loss Model 2", cex=1.5)
abline(lm(morb_loss_2 ~ morb_loss_1))
# Plotting model vs true --------------------------------------------------
# 2x2 grid: estimated rank (x) against true rank (y) for each model/outcome,
# with an OLS fit line.
# BUG FIX: the original axis labels were swapped -- x is my_ranks (the
# model's estimated rank) and y is true_ranks, but x was labeled
# "True Rank" and y "Estimated Rank".
par(mfrow=c(2,2))
plot(rank.premature_deaths.1$my_ranks, rank.premature_deaths.1$true_ranks,
     main ="Mortality Model 1 True Rank vs Estimated Rank",
     xlab = "Estimated Rank",
     ylab = "True Rank")
abline(lm(rank.premature_deaths.1$true_ranks ~ rank.premature_deaths.1$my_ranks))
plot(rank.premature_deaths.2$my_ranks, rank.premature_deaths.2$true_ranks,
     main ="Mortality Model 2 True Rank vs Estimated Rank",
     xlab = "Estimated Rank",
     ylab = "True Rank")
abline(lm(rank.premature_deaths.2$true_ranks ~ rank.premature_deaths.2$my_ranks))
plot(rank.morbidity.1$my_ranks, rank.morbidity.1$true_ranks,
     main ="Morbidity Model 1 True Rank vs Estimated Rank",
     xlab = "Estimated Rank",
     ylab = "True Rank")
abline(lm(rank.morbidity.1$true_ranks ~ rank.morbidity.1$my_ranks))
plot(rank.morbidity.2$my_ranks, rank.morbidity.2$true_ranks,
     main ="Morbidity Model 2 True Rank vs Estimated Rank",
     xlab = "Estimated Rank",
     ylab = "True Rank")
abline(lm(rank.morbidity.2$true_ranks ~ rank.morbidity.2$my_ranks))
# Plotting sel and pop ----------------------------------------------------
# For each model/outcome: order states alphabetically, attach total state
# population (county populations summed via aggregate(), which also returns
# states in alphabetical order so rows line up), then plot loss vs population.
sel.prem_death.1 = sel.prem_death.1[with(sel.prem_death.1, order(state)), ]
sel.prem_death.1$population = aggregate(df$Population, by=list(State=df$State), FUN=sum)$x
plot(sel.prem_death.1$population, sel.prem_death.1$standard.error.loss.mortality.1,
main ="Mortality Model 1 Mean Squared Error Loss vs State Population",
xlab = "State Population",
ylab = "Mortality Model 1 Mean Squared Error Loss")
abline(lm(sel.prem_death.1$standard.error.loss.mortality.1 ~ sel.prem_death.1$population))
sel.prem_death.2 = sel.prem_death.2[with(sel.prem_death.2, order(state)), ]
sel.prem_death.2$population = aggregate(df$Population, by=list(State=df$State), FUN=sum)$x
plot(sel.prem_death.2$population, sel.prem_death.2$standard.error.loss.mortality.2,
main ="Mortality Model 2 Mean Squared Error Loss vs State Population",
xlab = "State Population",
ylab = "Mortality Model 2 Mean Squared Error Loss")
abline(lm(sel.prem_death.2$standard.error.loss.mortality.2 ~ sel.prem_death.2$population))
sel.morbidity.1 = sel.morbidity.1[with(sel.morbidity.1, order(state)), ]
sel.morbidity.1$population = aggregate(df$Population, by=list(State=df$State), FUN=sum)$x
plot(sel.morbidity.1$population, sel.morbidity.1$standard.error.loss.morbidity.1,
main ="Morbidity Model 1 Mean Squared Error Loss vs State Population",
xlab = "State Population",
ylab = "Morbidity Model 1 Mean Squared Error Loss")
abline(lm(sel.morbidity.1$standard.error.loss.morbidity.1 ~ sel.morbidity.1$population))
sel.morbidity.2 = sel.morbidity.2[with(sel.morbidity.2, order(state)), ]
sel.morbidity.2$population = aggregate(df$Population, by=list(State=df$State), FUN=sum)$x
plot(sel.morbidity.2$population, sel.morbidity.2$standard.error.loss.morbidity.2,
main ="Morbidity Model 2 Mean Squared Error Loss vs State Population",
xlab = "State Population",
ylab = "Morbidity Model 2 Mean Squared Error Loss")
abline(lm(sel.morbidity.2$standard.error.loss.morbidity.2 ~ sel.morbidity.2$population))
# Plotting sel vs number of counties --------------------------------------
# Same idea as the population plots above, but the covariate is the number
# of counties per state (aggregate(..., FUN=length) returns states
# alphabetically, matching the sort on each frame).
par(mfrow=c(1,1))
sel.prem_death.1 = sel.prem_death.1[with(sel.prem_death.1, order(state)), ]
sel.prem_death.1$countycount = aggregate(df$County, by=list(State=df$State), FUN=length)$x
plot(sel.prem_death.1$countycount, sel.prem_death.1$standard.error.loss.mortality.1,
main ="Mortality Model 1 Mean Squared Error Loss vs Number of Counties",
xlab = "Number of Counties",
ylab = "Mortality Model 1 Mean Squared Error Loss")
abline(lm(sel.prem_death.1$standard.error.loss.mortality.1 ~ sel.prem_death.1$countycount))
sel.prem_death.2 = sel.prem_death.2[with(sel.prem_death.2, order(state)), ]
sel.prem_death.2$countycount = aggregate(df$County, by=list(State=df$State), FUN=length)$x
plot(sel.prem_death.2$countycount, sel.prem_death.2$standard.error.loss.mortality.2,
main ="Mortality Model 2 Mean Squared Error Loss vs Number of Counties",
xlab = "Number of Counties",
ylab = "Mortality Model 2 Mean Squared Error Loss")
abline(lm(sel.prem_death.2$standard.error.loss.mortality.2 ~ sel.prem_death.2$countycount))
sel.morbidity.1 = sel.morbidity.1[with(sel.morbidity.1, order(state)), ]
sel.morbidity.1$countycount = aggregate(df$County, by=list(State=df$State), FUN=length)$x
plot(sel.morbidity.1$countycount, sel.morbidity.1$standard.error.loss.morbidity.1,
main ="Morbidity Model 1 Mean Squared Error Loss vs Number of Counties",
xlab = "Number of Counties",
ylab = "Morbidity Model 1 Mean Squared Error Loss")
abline(lm(sel.morbidity.1$standard.error.loss.morbidity.1 ~ sel.morbidity.1$countycount))
sel.morbidity.2 = sel.morbidity.2[with(sel.morbidity.2, order(state)), ]
sel.morbidity.2$countycount = aggregate(df$County, by=list(State=df$State), FUN=length)$x
plot(sel.morbidity.2$countycount, sel.morbidity.2$standard.error.loss.morbidity.2,
main ="Morbidity Model 2 Mean Squared Error Loss vs Number of Counties",
xlab = "Number of Counties",
ylab = "Morbidity Model 2 Mean Squared Error Loss")
abline(lm(sel.morbidity.2$standard.error.loss.morbidity.2 ~ sel.morbidity.2$countycount))
|
# Delhi PM2.5 time-series analysis: read hourly air-quality data, pad missing
# timestamps, impute missing PM2.5 values, and fit Holt's exponential
# smoothing models (train/test split, then the whole series).
Delhi <- read_excel("C:/Users/User/Downloads/Delhi.xlsx")
library(padr)
library(dplyr)
library(tidyr)
library(readxl)
library(magrittr)
library(forecast)
library(imputeTS)
library(DMwR)
# BUG FIX: the original `Delhi <- delhi` overwrote the freshly read data with
# an undefined lowercase object (R is case-sensitive), which errors; removed.
summary(Delhi)
str(Delhi)
plot(Delhi)
################## Find null values ##################
Delhi$pm25 <- as.numeric(Delhi$pm25)
sum(is.na(Delhi$pm25))
str(Delhi)
plot(Delhi)
plot(Delhi$pm25, type = "l")
## Counting NA values
sum(is.na(Delhi$pm25))
########## Find missing values in the date column ##########
library(padr)
# pad() inserts rows for missing timestamps in the date sequence.
Delhi1 <- pad(as.data.frame(Delhi$date))
colnames(Delhi1) <- 'date'
Newdata <- full_join(Delhi1, Delhi)
View(Delhi1)
sum(is.na(Newdata$pm25))
str(Newdata)
plotNA.distribution(Newdata$pm25)
#################### Convert the data to a time series ####################
Newdata$pm25 <- ts(Newdata$pm25, start = c(2018, 01), end = c(2018, 2617), frequency = 365 * 24)
str(Newdata$pm25)
plot(Newdata$pm25)
################## Imputation ##################
library(imputeTS)
library(ggplot2)
# Seasonal-split moving-average imputation of missing PM2.5 values.
Newdata$ma <- na_seasplit(Newdata$pm25, algorithm = "ma", find_frequency = TRUE)
#Newdata$interpolation<- na_seasplit(Newdata$pm25,algorithm = "interpolation",find_frequency=TRUE)
plot(Newdata$pm25)
str(Newdata$ma)
View(Newdata)
## Train/test split (first 2094 of the 2617 hourly observations).
train <- Newdata$ma[1:2094]
test <- Newdata$ma[2095:2617]
## Model building: Holt's linear trend method.
h_a <- holt(train, h = 523)
autoplot(h_a)
h_a$model
accuracy(h_a, test) ## 55.22
# Identify the optimal beta (trend smoothing) parameter.
# (The original comment said "alpha", but the grid below tunes beta.)
beta <- seq(.0001, .5, by = .001)
RMSE <- numeric(length(beta))  # preallocate instead of growing from NA
for (i in seq_along(beta)) {
  fit <- holt(train, beta = beta[i], h = 72)
  RMSE[i] <- accuracy(fit)[1, 2]  # training RMSE
}
# Collect results and identify the beta minimising training RMSE.
# (data_frame() is deprecated in tibble; use data.frame().)
beta.fit <- data.frame(beta, RMSE)
beta.min <- filter(beta.fit, RMSE == min(RMSE))
# Plot RMSE vs. beta, marking the minimum.
ggplot(beta.fit, aes(beta, RMSE)) +
  geom_line() +
  geom_point(data = beta.min, aes(beta, RMSE), size = 2, color = "blue")
# New model with the optimal beta.
holt.a.opt <- holt(train, h = 523, beta = 0.0001)
accuracy(holt.a.opt) ## Train RMSE = 55.17
fcast_holt <- forecast(holt.a.opt, h = 523)
autoplot(holt.a.opt)
accuracy(as.vector(fcast_holt$mean), test) ## Test RMSE = 141.45
######### RUN ON WHOLE DATA SET #################
holts_wd <- holt(Newdata$ma, h = 523, beta = 0.0001)
accuracy(holts_wd) ## RMSE = 53.61
# Accuracy of the first tuned model against the test set.
accuracy(holt.a.opt, test)
autoplot(holt.a.opt)
|
/Model_Holts.R
|
no_license
|
itsme020/Project-on-Delhi-Air-Pollution
|
R
| false
| false
| 2,571
|
r
|
# Delhi PM2.5 time-series analysis: read hourly air-quality data, pad missing
# timestamps, impute missing PM2.5 values, and fit Holt's exponential
# smoothing models (train/test split, then the whole series).
Delhi <- read_excel("C:/Users/User/Downloads/Delhi.xlsx")
library(padr)
library(dplyr)
library(tidyr)
library(readxl)
library(magrittr)
library(forecast)
library(imputeTS)
library(DMwR)
# BUG FIX: the original `Delhi <- delhi` overwrote the freshly read data with
# an undefined lowercase object (R is case-sensitive), which errors; removed.
summary(Delhi)
str(Delhi)
plot(Delhi)
################## Find null values ##################
Delhi$pm25 <- as.numeric(Delhi$pm25)
sum(is.na(Delhi$pm25))
str(Delhi)
plot(Delhi)
plot(Delhi$pm25, type = "l")
## Counting NA values
sum(is.na(Delhi$pm25))
########## Find missing values in the date column ##########
library(padr)
# pad() inserts rows for missing timestamps in the date sequence.
Delhi1 <- pad(as.data.frame(Delhi$date))
colnames(Delhi1) <- 'date'
Newdata <- full_join(Delhi1, Delhi)
View(Delhi1)
sum(is.na(Newdata$pm25))
str(Newdata)
plotNA.distribution(Newdata$pm25)
#################### Convert the data to a time series ####################
Newdata$pm25 <- ts(Newdata$pm25, start = c(2018, 01), end = c(2018, 2617), frequency = 365 * 24)
str(Newdata$pm25)
plot(Newdata$pm25)
################## Imputation ##################
library(imputeTS)
library(ggplot2)
# Seasonal-split moving-average imputation of missing PM2.5 values.
Newdata$ma <- na_seasplit(Newdata$pm25, algorithm = "ma", find_frequency = TRUE)
#Newdata$interpolation<- na_seasplit(Newdata$pm25,algorithm = "interpolation",find_frequency=TRUE)
plot(Newdata$pm25)
str(Newdata$ma)
View(Newdata)
## Train/test split (first 2094 of the 2617 hourly observations).
train <- Newdata$ma[1:2094]
test <- Newdata$ma[2095:2617]
## Model building: Holt's linear trend method.
h_a <- holt(train, h = 523)
autoplot(h_a)
h_a$model
accuracy(h_a, test) ## 55.22
# Identify the optimal beta (trend smoothing) parameter.
# (The original comment said "alpha", but the grid below tunes beta.)
beta <- seq(.0001, .5, by = .001)
RMSE <- numeric(length(beta))  # preallocate instead of growing from NA
for (i in seq_along(beta)) {
  fit <- holt(train, beta = beta[i], h = 72)
  RMSE[i] <- accuracy(fit)[1, 2]  # training RMSE
}
# Collect results and identify the beta minimising training RMSE.
# (data_frame() is deprecated in tibble; use data.frame().)
beta.fit <- data.frame(beta, RMSE)
beta.min <- filter(beta.fit, RMSE == min(RMSE))
# Plot RMSE vs. beta, marking the minimum.
ggplot(beta.fit, aes(beta, RMSE)) +
  geom_line() +
  geom_point(data = beta.min, aes(beta, RMSE), size = 2, color = "blue")
# New model with the optimal beta.
holt.a.opt <- holt(train, h = 523, beta = 0.0001)
accuracy(holt.a.opt) ## Train RMSE = 55.17
fcast_holt <- forecast(holt.a.opt, h = 523)
autoplot(holt.a.opt)
accuracy(as.vector(fcast_holt$mean), test) ## Test RMSE = 141.45
######### RUN ON WHOLE DATA SET #################
holts_wd <- holt(Newdata$ma, h = 523, beta = 0.0001)
accuracy(holts_wd) ## RMSE = 53.61
# Accuracy of the first tuned model against the test set.
accuracy(holt.a.opt, test)
autoplot(holt.a.opt)
|
## The set of functions will cache the inverse of a matrix so that repeated
## inverse operations on the same matrix will return a cached result.
## Usage:
## makeCacheMatrix(x): returns the matrix with inverse caching property
## x$set(y): where y is a matrix initializes x with the values in y
## cacheSolve(x): returns the inverse of x. First call will calculate
## the inverse and cache it - subsequent calls will return the
## cached value
## makeCacheMatrix(x): takes x and initializes it as a special type of matrix
## that supports 4 functions - set(to initialize the matrix),
## get(to get the matrix), setinverse(to cache the inverse) and
## getmatrix(to return the cached value)
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix together with a cache slot for its inverse.
  # Returns a list of accessors that all close over this environment:
  # set/get for the matrix, setinverse/getinverse for the cached inverse.
  cached_inverse <- NULL
  list(
    set = function(y) {
      # Replace the stored matrix and drop any stale cached inverse.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(inv) cached_inverse <<- inv,
    getinverse = function() cached_inverse
  )
}
## cacheSolve(x): returns the inverse of x. First call will calculate
## the inverse and cache it - subsequent calls will return the
## cached value
cacheSolve <- function(x, ...) {
  # Return the inverse of the special "cache matrix" 'x'.
  # Uses the cached inverse when present; otherwise computes it with
  # solve() (extra arguments are forwarded) and stores it for next time.
  inverse <- x$getinverse()
  if (is.null(inverse)) {
    # Cache miss: compute and remember the inverse.
    inverse <- solve(x$get(), ...)
    x$setinverse(inverse)
  } else {
    message("getting cached data")
  }
  inverse
}
|
/cachematrix.R
|
no_license
|
someshg/ProgrammingAssignment2
|
R
| false
| false
| 1,653
|
r
|
## The set of functions will cache the inverse of a matrix so that repeated
## inverse operations on the same matrix will return a cached result.
## Usage:
## makeCacheMatrix(x): returns the matrix with inverse caching property
## x$set(y): where y is a matrix initializes x with the values in y
## cacheSolve(x): returns the inverse of x. First call will calculate
## the inverse and cache it - subsequent calls will return the
## cached value
## makeCacheMatrix(x): takes x and initializes it as a special type of matrix
## that supports 4 functions - set(to initialize the matrix),
## get(to get the matrix), setinverse(to cache the inverse) and
## getmatrix(to return the cached value)
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix together with a cache slot for its inverse.
  # Returns a list of accessors that all close over this environment:
  # set/get for the matrix, setinverse/getinverse for the cached inverse.
  cached_inverse <- NULL
  list(
    set = function(y) {
      # Replace the stored matrix and drop any stale cached inverse.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(inv) cached_inverse <<- inv,
    getinverse = function() cached_inverse
  )
}
## cacheSolve(x): returns the inverse of x. First call will calculate
## the inverse and cache it - subsequent calls will return the
## cached value
cacheSolve <- function(x, ...) {
  # Return the inverse of the special "cache matrix" 'x'.
  # Uses the cached inverse when present; otherwise computes it with
  # solve() (extra arguments are forwarded) and stores it for next time.
  inverse <- x$getinverse()
  if (is.null(inverse)) {
    # Cache miss: compute and remember the inverse.
    inverse <- solve(x$get(), ...)
    x$setinverse(inverse)
  } else {
    message("getting cached data")
  }
  inverse
}
|
# Test script for the (internal) future:::objectSize() helper: measure a
# spread of object types, then a class whose length() method over-reports.
source("incl/start.R")
objectSize <- future:::objectSize
message("objectSize() ...")
# Environments: a plain one, one nesting the first, and a namespace
# (the comment below notes namespaces are skipped by objectSize).
env <- new.env()
env$a <- 3.14
env$b <- 1:100
env2 <- new.env()
env2$env <- env
## Namespaces will be skipped
env3 <- getNamespace("utils")
fcn <- function(...) TRUE
# Assorted objects whose sizes should all be measurable:
objs <- list(
NULL,
TRUE,
1L,
3.14,
"hello",
1:100,
1:100 + 0.1,
letters,
list(a = 3.14, b = 1:100),
list(a = 3.14, b = 1:100, c = list(a = 3.14, b = 1:100)),
env,
env2,
env3,
fcn,
as.FutureGlobals(list(a = 3.14, b = 1:100)),
list(x = as.FutureGlobals(list(a = 3.14, b = 1:100)))
)
# Run base object.size() and objectSize() side by side for each object.
for (kk in seq_along(objs)) {
obj <- objs[[kk]]
message(sprintf("objectSize(<%s>) ...", mode(obj)))
str(obj)
size0 <- object.size(obj)
str(size0)
size <- objectSize(obj)
str(size)
message(sprintf("objectSize(<%s>) ... DONE", mode(obj)))
}
message("*** objectSize() - globals with non-trustful length() ...")
# S3 length() method that over-reports by one element; the internal
# .length() should report the true underlying length.
length.CantTrustLength <- function(x) length(unclass(x)) + 1L
.length <- future:::.length
x <- structure(as.list(1:3), class = c("CantTrustLength", "list"))
str(list(n = length(x), n_true = .length(x)))
stopifnot(length(x) > .length(x))
size <- objectSize(x)
print(size)
message("*** objectSize() - globals with non-trustful length() ... DONE")
message("objectSize() ... DONE")
source("incl/end.R")
|
/tests/objectSize.R
|
no_license
|
rjcc/future
|
R
| false
| false
| 1,312
|
r
|
# Test script for the (internal) future:::objectSize() helper: measure a
# spread of object types, then a class whose length() method over-reports.
source("incl/start.R")
objectSize <- future:::objectSize
message("objectSize() ...")
# Environments: a plain one, one nesting the first, and a namespace
# (the comment below notes namespaces are skipped by objectSize).
env <- new.env()
env$a <- 3.14
env$b <- 1:100
env2 <- new.env()
env2$env <- env
## Namespaces will be skipped
env3 <- getNamespace("utils")
fcn <- function(...) TRUE
# Assorted objects whose sizes should all be measurable:
objs <- list(
NULL,
TRUE,
1L,
3.14,
"hello",
1:100,
1:100 + 0.1,
letters,
list(a = 3.14, b = 1:100),
list(a = 3.14, b = 1:100, c = list(a = 3.14, b = 1:100)),
env,
env2,
env3,
fcn,
as.FutureGlobals(list(a = 3.14, b = 1:100)),
list(x = as.FutureGlobals(list(a = 3.14, b = 1:100)))
)
# Run base object.size() and objectSize() side by side for each object.
for (kk in seq_along(objs)) {
obj <- objs[[kk]]
message(sprintf("objectSize(<%s>) ...", mode(obj)))
str(obj)
size0 <- object.size(obj)
str(size0)
size <- objectSize(obj)
str(size)
message(sprintf("objectSize(<%s>) ... DONE", mode(obj)))
}
message("*** objectSize() - globals with non-trustful length() ...")
# S3 length() method that over-reports by one element; the internal
# .length() should report the true underlying length.
length.CantTrustLength <- function(x) length(unclass(x)) + 1L
.length <- future:::.length
x <- structure(as.list(1:3), class = c("CantTrustLength", "list"))
str(list(n = length(x), n_true = .length(x)))
stopifnot(length(x) > .length(x))
size <- objectSize(x)
print(size)
message("*** objectSize() - globals with non-trustful length() ... DONE")
message("objectSize() ... DONE")
source("incl/end.R")
|
#' Recursive Directory Creation
#'
#' Allows the user to input pieces of directory names to quickly generate
#' multiple sub-directories with similar names nested in the same directory.
#'
#' @param \ldots The pieces of the names to put together. \code{rdirs} will use
#' R's recylcing rule with different length vectors.
#' @param path A character vector specifying the root directory path.
#' @param sep A character string to separate the terms.
#' @param pad.num logical. If \code{TRUE} numbers will be padded with leading
#' zeros (detects numeric strings supplied using the colon(\code{:}) operator or
#' combine (\code{c(}) function.
#' @param text.only logical. If \code{TRUE} rdirs does not create the
#' directories, but only returns the names. This allows the names to be passed
#' to \code{new_report} and \code{presentation}.
#' @return Generates recursive sub directories. Invisibly returns the names of
#' the sub-directories.
#' @seealso \code{\link[reports]{folder}},
#' \code{delete},
#' \code{\link[base]{dir.create}}
#' @keywords file, directory, folder
#' @export
#' @importFrom qdapTools pad
#' @examples
#' ## fx <- folder(delete_me)
#' ## owd <- getwd(); setwd(fx)
#' ## rdirs(admin, 1:15, c("d", "f", "w"), c(1, 4, 6))
#' rdirs(admin, 1:15, c("d", "f", "w"), c(1, 4, 6), text.only = TRUE)
#' ## rdirs(session, 1:12, seq(as.Date("2000/1/1"), by = "month", length.out = 12))
#'
#' x <- rdirs(admin, 1:15, c("d", "f", "w"), c(1, 4, 6), text.only = TRUE)
#' ## lapply(x, new_report)
#' ## setwd(owd); delete(fx)
rdirs <- function(..., path = getwd(), sep = "_", pad.num = TRUE,
    text.only = FALSE) {
    # Capture the unevaluated ... arguments as character strings.
    pieces <- as.character(match.call(expand.dots = FALSE)[[2]])
    plist <- lapply(pieces, "[")
    # Detect numeric-sequence arguments supplied via ':' or c()/qcv()/v().
    nums <- grepl("[0-9][:]|[c][\\(]|[qcv][\\(]", pieces)
    # Evaluate those pieces and optionally zero-pad the numbers.
    plist[nums] <- invisible(lapply(pieces[nums], function(x) {
        x <- eval(parse(text = x))
        if (pad.num) {
            x <- qdapTools::pad(x, sort = FALSE)
        }
        x
    }))
    # Combine the pieces (with recycling) into the directory names.
    nms <- paste2(plist, sep = sep)
    if (!text.only) {
        invisible(lapply(file.path(path, nms), dir.create))
        # BUG FIX: message typo "directories create in" -> "created in".
        message(paste0("directories created in: \n", path, "\n"))
        invisible(nms)
    } else {
        return(nms)
    }
}
|
/R/rdirs.R
|
no_license
|
2ndFloorStuff/reports
|
R
| false
| false
| 2,239
|
r
|
#' Recursive Directory Creation
#'
#' Allows the user to input pieces of directory names to quickly generate
#' multiple sub-directories with similar names nested in the same directory.
#'
#' @param \ldots The pieces of the names to put together. \code{rdirs} will use
#' R's recylcing rule with different length vectors.
#' @param path A character vector specifying the root directory path.
#' @param sep A character string to separate the terms.
#' @param pad.num logical. If \code{TRUE} numbers will be padded with leading
#' zeros (detects numeric strings supplied using the colon(\code{:}) operator or
#' combine (\code{c(}) function.
#' @param text.only logical. If \code{TRUE} rdirs does not create the
#' directories, but only returns the names. This allows the names to be passed
#' to \code{new_report} and \code{presentation}.
#' @return Generates recursive sub directories. Invisibly returns the names of
#' the sub-directories.
#' @seealso \code{\link[reports]{folder}},
#' \code{delete},
#' \code{\link[base]{dir.create}}
#' @keywords file, directory, folder
#' @export
#' @importFrom qdapTools pad
#' @examples
#' ## fx <- folder(delete_me)
#' ## owd <- getwd(); setwd(fx)
#' ## rdirs(admin, 1:15, c("d", "f", "w"), c(1, 4, 6))
#' rdirs(admin, 1:15, c("d", "f", "w"), c(1, 4, 6), text.only = TRUE)
#' ## rdirs(session, 1:12, seq(as.Date("2000/1/1"), by = "month", length.out = 12))
#'
#' x <- rdirs(admin, 1:15, c("d", "f", "w"), c(1, 4, 6), text.only = TRUE)
#' ## lapply(x, new_report)
#' ## setwd(owd); delete(fx)
rdirs <- function(..., path = getwd(), sep = "_", pad.num = TRUE,
    text.only = FALSE) {
    # Capture the unevaluated ... arguments as character strings.
    pieces <- as.character(match.call(expand.dots = FALSE)[[2]])
    plist <- lapply(pieces, "[")
    # Detect numeric-sequence arguments supplied via ':' or c()/qcv()/v().
    nums <- grepl("[0-9][:]|[c][\\(]|[qcv][\\(]", pieces)
    # Evaluate those pieces and optionally zero-pad the numbers.
    plist[nums] <- invisible(lapply(pieces[nums], function(x) {
        x <- eval(parse(text = x))
        if (pad.num) {
            x <- qdapTools::pad(x, sort = FALSE)
        }
        x
    }))
    # Combine the pieces (with recycling) into the directory names.
    nms <- paste2(plist, sep = sep)
    if (!text.only) {
        invisible(lapply(file.path(path, nms), dir.create))
        # BUG FIX: message typo "directories create in" -> "created in".
        message(paste0("directories created in: \n", path, "\n"))
        invisible(nms)
    } else {
        return(nms)
    }
}
|
# Plot simulated season records from the final-projections file as
# percent-scaled bar charts, one facet per projection column.
library(tidyverse)
final_proj <- read_csv("Final-Projections.csv")
# Freeze X1's factor levels to the file's row order so the x-axis keeps it.
final_proj$X1 <- factor(final_proj$X1,levels = unique(final_proj$X1))
#colnames(final_proj) <- final_proj[1,]
#final_proj <- final_proj[-1,]
final_proj <- as.data.frame(final_proj)
# Long format: one row per (X1, projection-column) pair.
melt_plot_dat <- reshape2::melt(final_proj,id="X1")
ggplot(melt_plot_dat,aes(x=X1,y=value)) +
geom_bar(stat='identity') +
facet_wrap(~variable) +
theme_bw() +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
scale_x_discrete(limits=final_proj$X1) +
labs(x="",y="Record",title="Simulated Records",caption="@msubbaiah1") +
scale_y_continuous(labels = scales::percent)
|
/Commish/final_proj_plots.R
|
no_license
|
meysubb/Fantasy_Football_League
|
R
| false
| false
| 650
|
r
|
# Plot simulated season records from the final-projections file as
# percent-scaled bar charts, one facet per projection column.
library(tidyverse)
final_proj <- read_csv("Final-Projections.csv")
# Freeze X1's factor levels to the file's row order so the x-axis keeps it.
final_proj$X1 <- factor(final_proj$X1,levels = unique(final_proj$X1))
#colnames(final_proj) <- final_proj[1,]
#final_proj <- final_proj[-1,]
final_proj <- as.data.frame(final_proj)
# Long format: one row per (X1, projection-column) pair.
melt_plot_dat <- reshape2::melt(final_proj,id="X1")
ggplot(melt_plot_dat,aes(x=X1,y=value)) +
geom_bar(stat='identity') +
facet_wrap(~variable) +
theme_bw() +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
scale_x_discrete(limits=final_proj$X1) +
labs(x="",y="Record",title="Simulated Records",caption="@msubbaiah1") +
scale_y_continuous(labels = scales::percent)
|
\name{Column and row-wise medians}
\alias{colMedians}
\alias{rowMedians}
\title{
Column and row-wise medians
}
\description{
Column and row-wise medians of a matrix.
}
\usage{
colMedians(x,na.rm = FALSE, parallel = FALSE)
rowMedians(x,na.rm = FALSE, parallel = FALSE)
}
\arguments{
\item{x}{
A matrix or data.frame with the data.
}
\item{parallel}{
Do you want to do it in parallel in C++? TRUE or FALSE.
}
\item{na.rm}{
TRUE or FALSE: whether to remove NAs if they exist.
}
}
\details{
The functions are written in C++ in order to be as fast as possible.
}
\value{
A vector with the column medians.
}
%\references{
%Tsagris M.T., Preston S. and Wood A.T.A. (2011). A data-based power transformation for compositional data. In Proceedings of the 4th Compositional Data Analysis Workshop, Girona, Spain.
%}
\author{
R implementation and documentation: Manos Papadakis <papadakm95@gmail.com>.
}
\seealso{
\code{\link{Median}, \link{colVars}, \link{colMeans} (built-in R function)
}
}
\examples{
x <- matrix( rnorm(100 * 100), ncol = 100 )
a <- apply(x, 2, median)
b1 <- colMedians(x)
all.equal(as.vector(a), b1)
x<-a<-b1<-NULL
}
\keyword{ Column-wise medians }
\keyword{ Row-wise medians }
|
/man/colMedians.Rd
|
no_license
|
cran/Rfast
|
R
| false
| false
| 1,249
|
rd
|
\name{Column and row-wise medians}
\alias{colMedians}
\alias{rowMedians}
\title{
Column and row-wise medians
}
\description{
Column and row-wise medians of a matrix.
}
\usage{
colMedians(x,na.rm = FALSE, parallel = FALSE)
rowMedians(x,na.rm = FALSE, parallel = FALSE)
}
\arguments{
\item{x}{
A matrix or data.frame with the data.
}
\item{parallel}{
Do you want to do it in parallel in C++? TRUE or FALSE.
}
\item{na.rm}{
TRUE or FALSE: whether to remove NAs if they exist.
}
}
\details{
The functions are written in C++ in order to be as fast as possible.
}
\value{
A vector with the column medians.
}
%\references{
%Tsagris M.T., Preston S. and Wood A.T.A. (2011). A data-based power transformation for compositional data. In Proceedings of the 4th Compositional Data Analysis Workshop, Girona, Spain.
%}
\author{
R implementation and documentation: Manos Papadakis <papadakm95@gmail.com>.
}
\seealso{
\code{\link{Median}, \link{colVars}, \link{colMeans} (built-in R function)
}
}
\examples{
x <- matrix( rnorm(100 * 100), ncol = 100 )
a <- apply(x, 2, median)
b1 <- colMedians(x)
all.equal(as.vector(a), b1)
x<-a<-b1<-NULL
}
\keyword{ Column-wise medians }
\keyword{ Row-wise medians }
|
\name{usquakeLR}
\alias{usquakeLR}
\title{California earthquake loss ratios}
\docType{data}
\description{
Loss ratios for earthquake insurance in California
between 1971 and 1994.
}
\usage{
data(usquakeLR)
}
\format{
\code{usquakeLR} is a data frame of 2 columns and 24 rows:
\describe{
\item{\code{Year}}{Year of the earthquake.}
\item{\code{LossRatio}}{Loss ratio.}
}
}
\references{
Dataset used
in Jaffee and Russell (1996),
\emph{Catastrophe Insurance, Capital Markets and Uninsurable Risks},
Philadelphia: Financial Institutions Center, The Wharton School, p. 96-112.
and
in Embrechts, Resnick and Samorodnitsky (1999).
\emph{Extreme Value Theory as a Risk Management Tool},
North American Actuarial Journal, Volume 3, Number 2.
}
\examples{
# (1) load of data
#
data(usquakeLR)
# (2) plot log scale
#
plot(usquakeLR$Year, usquakeLR$LossRatio+1e-3,
ylim=c(1e-3, 1e4), log="y", ylab="Loss Ratio", xlab="Year")
}
\keyword{datasets}
|
/pkg/man/usearthquake.Rd
|
no_license
|
TonyWU-git/CASdatasets
|
R
| false
| false
| 962
|
rd
|
\name{usquakeLR}
\alias{usquakeLR}
\title{California earthquake loss ratios}
\docType{data}
\description{
Loss ratios for earthquake insurance in California
between 1971 and 1994.
}
\usage{
data(usquakeLR)
}
\format{
\code{usquakeLR} is a data frame of 2 columns and 24 rows:
\describe{
\item{\code{Year}}{Year of the earthquake.}
\item{\code{LossRatio}}{Loss ratio.}
}
}
\references{
Dataset used
in Jaffee and Russell (1996),
\emph{Catastrophe Insurance, Capital Markets and Uninsurable Risks},
Philadelphia: Financial Institutions Center, The Wharton School, p. 96-112.
and
in Embrechts, Resnick and Samorodnitsky (1999).
\emph{Extreme Value Theory as a Risk Management Tool},
North American Actuarial Journal, Volume 3, Number 2.
}
\examples{
# (1) load of data
#
data(usquakeLR)
# (2) plot log scale
#
plot(usquakeLR$Year, usquakeLR$LossRatio+1e-3,
ylim=c(1e-3, 1e4), log="y", ylab="Loss Ratio", xlab="Year")
}
\keyword{datasets}
|
# Load Claddis library:
library(Claddis)
# Set working directory:
#setwd("~/Documents/Homepage/www.graemetlloyd.com")
setwd("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/")
# Get file list:
file.list <- list.files()
# Get just the group matrix pages:
# NOTE(review): grep() here matches an absolute path against the bare file
# names returned by list.files(), so this pattern can never match and
# file.list becomes empty -- confirm the intended pattern (e.g. "Nexus file").
file.list <- file.list[grep("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/Nexus file", file.list)]
# Vector for storing output:
results <- vector(mode = "character")
# Main loop: for each matrix page, extract every <p class="hangingindent">
# reference block and convert it into an XML-ish record appended to
# `results`. Fixes applied relative to the original:
#   * seq_along()/seq_len() instead of 1:length() so empty inputs skip the
#     loop instead of iterating over c(1, 0);
#   * the whitespace-collapse loop replaces double spaces with single ones
#     (the original replaced a space with a space, which never terminates);
#   * `results <- c(results, fulllines)` moved inside the "not already dealt
#     with" guard -- previously a stale `fulllines` from an earlier
#     iteration was re-appended for already-processed references, which is
#     the likely source of the duplicates removed later.
for(i in seq_along(file.list)) {
  # Read in ith file:
  X <- scan(file.list[i], what = "", sep = "\n", quiet = TRUE)
  # Find first p tag opening:
  begins <- grep("<p class=\"hangingindent\">", X)
  # Find last p tag closing:
  ends <- grep("</p>", X)
  # Reduce X to just the portion with references:
  X <- X[begins[1]:ends[length(ends)]]
  # Find where p tags open:
  begins <- grep("<p class=\"hangingindent\">", X)
  # Find where p tags close:
  ends <- grep("</p>", X)
  # Check p tags are closed and warn if not:
  if(length(begins) != length(ends)) print(paste("Error in", file.list[i]))
  # For each set of p tags:
  for(j in seq_along(ends)) {
    # Get full reference block:
    Y <- X[begins[j]:ends[j]]
    # Only proceed if this has not already been dealt with:
    if(length(grep("<a href", Y)) == 0) {
      # Remove bookmarks:
      Y <- gsub("</p>", "", gsub("<p class=\"hangingindent\">", "", Y))
      # Replace tabs with spaces:
      while(length(grep("\t", Y)) > 0) Y <- gsub("\t", " ", Y)
      # Collapse runs of multiple spaces down to single spaces:
      while(length(grep("  ", Y)) > 0) Y <- gsub("  ", " ", Y)
      # Strip out the single remaining leading whitespace character:
      for(k in seq_along(Y)) Y[k] <- paste(strsplit(Y[k], "")[[1]][2:length(strsplit(Y[k], "")[[1]])], collapse = "")
      # Isolate author and year (split on ". " between authors and " and "):
      authorandyear <- strsplit(gsub(" and ", "%%", gsub("\\., ", ".%%", Y[1])), "%%")[[1]]
      # Isolate title (second line of the reference block):
      title <- Y[2]
      # Split the locale line on commas and strip bold/italic tags:
      locale <- gsub("</b>", "", gsub("<b>", "", gsub("</em>", "", gsub("<em>", "", strsplit(gsub("\\.", "", gsub(", ", "%%", Y[3])), "%%")[[1]]))))
      # Build the <Author> element (all pieces except the trailing year):
      authorline <- paste("\t\t<Author>\n", paste("\t\t\t<List>", authorandyear[1:(length(authorandyear) - 1)], "</List>", sep = "", collapse = "\n"), "\n\t\t</Author>\n", sep = "")
      # Build the <Year> element:
      yearline <- paste("\t\t<Year>", gsub("\\.", "", authorandyear[length(authorandyear)]), "</Year>\n", sep = "")
      # Plain year string (used below to detect "in press"):
      year <- gsub("</Year>\n", "", gsub("\t\t<Year>", "", yearline))
      # Build the <Title> element, dropping a trailing period if present:
      titleline <- strsplit(title, "")[[1]]
      if(titleline[length(titleline)] == ".") titleline <- titleline[-length(titleline)]
      titleline <- paste(titleline, collapse = "")
      titleline <- paste("\t\t<Title>", titleline, "</Title>\n", sep = "")
      # Case if a book chapter:
      if(length(grep("In ", locale[1])) == 1) {
        # Restore locale to original line:
        locale <- Y[3]
        # Drop the italicised "In":
        locale <- gsub("<em>In</em> ", "", locale)
        # Insert first (editor(s)) separator:
        locale <- gsub(" \\(eds\\.\\) ", "%%", locale)
        # Insert first (editor(s)) separator:
        locale <- gsub(" \\(ed\\.\\) ", "%%", locale)
        # Insert first (editor(s)) separator:
        locale <- gsub(" \\(eds\\) ", "%%", locale)
        # Insert first (editor(s)) separator:
        locale <- gsub(" \\(ed\\) ", "%%", locale)
        # Isolate editors:
        editors <- strsplit(locale, "%%")[[1]][1]
        # Add "and" separator:
        editors <- gsub(" and ", "%%", editors)
        # Editors containing a comma:
        if(length(grep(",", editors)) > 0) {
          # Case if single editor in correct "Surname, Initials" format:
          if(length(grep("%%", editors)) == 0) editorsline <- paste("\t\t<Editor>\n", paste("\t\t\t<List>", editors, "</List>\n", sep = ""), "\t\t</Editor>\n", sep = "")
          # Case if authors are in incorrect "Initials Surname" format:
          if(strsplit(editors, "")[[1]][2] == ".") {
            # Add separator between names:
            editors <- gsub(", ", "%%", editors)
            editors <- strsplit(editors, "%%")[[1]]
            # Flip each name to "Surname, Initials":
            for(k in seq_along(editors)) {
              temp <- strsplit(editors[k], "\\. ")[[1]]
              editors[k] <- paste(temp[length(temp)], paste(temp[1:(length(temp) - 1)], ".", sep = "", collapse = " "), sep = ", ")
            }
            editorsline <- paste("\t\t<Editor>\n", paste("\t\t\t<List>", editors, "</List>\n", sep = "", collapse = ""), "\t\t</Editor>\n", sep = "")
          } else {
            # Add separator between names:
            editors <- gsub("\\., ", ".%%", editors)
            editorsline <- paste("\t\t<Editor>\n", paste("\t\t\t<List>", strsplit(editors, "%%")[[1]], "</List>\n", sep = "", collapse = ""), "\t\t</Editor>\n", sep = "")
          }
        } else {
          # Case if single editor in incorrect "Initials Surname" format:
          if(length(grep("%%", editors)) == 0) {
            editors <- strsplit(editors, "\\. ")[[1]]
            editors <- paste(paste(editors[length(editors)], ",", sep = ""), paste(editors[1:(length(editors) - 1)], ".", sep = "", collapse = " "), collapse = " ")
            editorsline <- paste("\t\t<Editor>\n", paste("\t\t\t<List>", editors, "</List>\n", sep = ""), "\t\t</Editor>\n", sep = "")
          # Case of two authors in incorrect "Initials Surname" format:
          } else {
            editors <- strsplit(editors, "%%")[[1]]
            # Flip each name to "Surname, Initials":
            for(k in seq_along(editors)) {
              temp <- strsplit(editors[k], "\\. ")[[1]]
              editors[k] <- paste(temp[length(temp)], paste(temp[1:(length(temp) - 1)], ".", sep = "", collapse = " "), sep = ", ")
            }
            editorsline <- paste("\t\t<Editor>\n", paste("\t\t\t<List>", editors, "</List>\n", sep = "", collapse = ""), "\t\t</Editor>\n", sep = "")
          }
        }
        # Remove editors from rest of book information:
        locale <- paste(strsplit(locale, "%%")[[1]][2:length(strsplit(locale, "%%")[[1]])], sep = "%%")
        # Find end of book title separator:
        locale <- gsub("\\. ", "%%", locale)
        # Remove trailing period:
        locale <- gsub("\\.", "", locale)
        # Isolate booktitle:
        booktitleline <- paste("\t\t<Booktitle>", strsplit(locale, "%%")[[1]][1], "</Booktitle>\n", sep = "")
        # Remove booktitle from rest of book information:
        locale <- paste(strsplit(locale, "%%")[[1]][2:length(strsplit(locale, "%%")[[1]])], sep = "%%")
        # Remove false gaps (collapse multi-element remainder):
        while(length(locale) > 1) locale <- paste(locale, collapse = ". ")
        # Separate remaining portions (publisher, city, pages):
        locale <- strsplit(locale, ", ")[[1]]
        publisherline <- paste("\t\t<Publisher>", locale[1], "</Publisher>\n", sep = "")
        cityline <- paste("\t\t<City>", locale[2], "</City>\n", sep = "")
        pagesline <- paste("\t\t<Pages>", gsub("<br>", "", gsub("p", "", locale[3])), "</Pages>\n", sep = "")
        # Assemble record with empty journal/volume fields:
        fulllines <- paste(authorline, yearline, titleline, "\t\t<Journal/>\n", "\t\t<Volume/>\n", pagesline, booktitleline, publisherline, cityline, editorsline, sep = "")
      # Case if a journal:
      } else {
        # "in press" references have no volume/pages yet:
        if(year == "in press") {
          # Case if journal title with commas:
          if(length(locale) > 2) {
            # Collapse journal title:
            locale[1] <- paste(locale[1], locale[2], sep = ", ")
            # Remove redundant second part:
            locale <- locale[-2]
          }
          # Delete empty volume value:
          if(locale[2] == "") locale <- locale[-2]
        }
        # Find journal titles with commas:
        while(length(locale) > 3) {
          # Collapse journal title:
          locale[1] <- paste(locale[1], locale[2], sep = ", ")
          # Remove redundant second part:
          locale <- locale[-2]
        }
        journalline <- paste("\t\t<Journal>", locale[1], "</Journal>\n", sep = "")
        # Volume, if present:
        if(length(locale) > 1) {
          volumeline <- paste("\t\t<Volume>", locale[2], "</Volume>\n", sep = "")
        } else {
          volumeline <- "\t\t<Volume/>\n"
        }
        # Pages, if present:
        if(length(locale) > 2) {
          pagesline <- paste("\t\t<Pages>", locale[3], "</Pages>\n", sep = "")
        } else {
          pagesline <- "\t\t<Pages/>\n"
        }
        # Assemble record with empty book fields:
        fulllines <- paste(authorline, yearline, titleline, journalline, volumeline, pagesline, "\t\t<Booktitle/>\n", "\t\t<Publisher/>\n", "\t\t<City/>\n","\t\t<Editor/>\n", sep = "")
      }
      # Append this (newly built) record only -- see header comment.
      results <- c(results, fulllines)
    }
  }
}
# Collapse to just unique references (duplicates can occur upstream):
results <- sort(unique(results))
# Create empty vector to store hypothetical file names:
filenames <- vector(mode = "character")
# Build a "Surname(s)_Year" style file name for each reference.
# (seq_along() replaces 1:length() so an empty `results` skips the loop.)
for(i in seq_along(results)) {
  # Isolate authors (text between the <List> tags inside <Author>):
  authors <- strsplit(strsplit(gsub("\n|\t", "", results[i]), split = "<Author>|</Author>")[[1]][2], split = "<List>|</List>")[[1]][which(nchar(strsplit(strsplit(gsub("\n|\t", "", results[i]), split = "<Author>|</Author>")[[1]][2], split = "<List>|</List>")[[1]]) > 0)]
  # Isolate surnames (portion before the comma):
  surnames <- unlist(lapply(strsplit(authors, split = ","), '[', 1))
  # Get publication year:
  year <- gsub(" ", "", strsplit(gsub("\n|\t", "", results[i]), split = "<Year>|</Year>")[[1]][2])
  # If a single author:
  if(length(surnames) == 1) filenames <- c(filenames, gsub("'", "", gsub(" ", "_", paste(surnames, year, sep = "_"))))
  # If two authors:
  if(length(surnames) == 2) filenames <- c(filenames, gsub("'", "", gsub(" ", "_", paste(paste(surnames, collapse = "_et_"), year, sep = "_"))))
  # If more than two authors:
  if(length(surnames) > 2) filenames <- c(filenames, gsub("'", "", gsub(" ", "_", paste(surnames[1], "etal", year, sep = "_"))))
}
# Isolate references that have multiple file names (i.e., two or more references could be contracted to the same name):
duplicates <- unique(filenames[duplicated(filenames)])
# Set working directory:
setwd("/Users/eargtl/Documents/Homepage/www.graemetlloyd.com/ToAdd")
# Get list of folders (names without a "." are assumed to be directories).
# NOTE(review): if no name contains ".", grep() returns integer(0) and
# x[-integer(0)] drops *everything* -- verify, or negate with grepl().
folder.list <- list.files()[-grep("\\.", list.files())]
# Get full paths for each folder (seq_along() so an empty list skips):
for(i in seq_along(folder.list)) folder.list[i] <- paste(getwd(), "/", folder.list[i], sep = "")
###########
# Vector for storing nexus file list:
file.list <- vector(mode = "character")
# Find all file paths for nexus files:
for(i in seq_along(folder.list)) {
  # Set working directory for current folder:
  setwd(folder.list[i])
  # Look for NEXUS files (note: "." is an unescaped regex wildcard here):
  if(length(grep(".nex", list.files())) > 0) {
    # Add any found to file list:
    file.list <- c(file.list, paste(folder.list[i], "/", list.files()[grep(".nex", list.files())], sep = ""))
  }
}
#########
# Load Claddis library:
library(Claddis)
# Set working directory:
#setwd("~/Documents/Homepage/www.graemetlloyd.com")
setwd("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/")
# Get file list:
file.list <- list.files()
# Get just the group matrix pages:
# NOTE(review): as above, this grep matches an absolute path against bare
# file names from list.files(), so it can never match -- file.list ends up
# empty here before being reassigned below.
file.list <- file.list[grep("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/Nexus file", file.list)]
# Get just the NEXUS file names (assumes path element 9 is the file name
# -- TODO confirm against the actual directory depth):
nexus.files <- unlist(lapply(strsplit(file.list, "/"), '[', 9))
# Reset working directory:
#setwd("/Users/eargtl/Documents/Homepage/www.graemetlloyd.com/ToAdd")
#######IDK if this will work corrently, trying to relace line 365####
#file.list <- list.files("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/Nexus files")
#file.list <-list.files("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/Nexus files", full.names = TRUE)
# Full paths for the NEXUS files actually used downstream:
file.list <-list.files("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/nexus 4", full.names = TRUE)
#######STILL NEED NEXUS.FILES#### DON'T KNOW WHat the difference is between file.list and nexus.files
#nexus.files <- list.files("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/Nexus files")
# Bare file names (parallel to file.list, which holds the full paths):
nexus.files <- list.files("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/nexus 4")
#######STILL NEED filenames####
# NOTE(review): this clobbers the filenames vector built earlier, so the
# stripped-name matching in the loop below has nothing to match against.
filenames <- vector(mode = "character")
# Create nexus, tnt and xml files:
# For each NEXUS matrix: read it with Claddis, write a TNT copy, and write a
# TNT-for-analysis copy whose scratch tree file gets a unique number.
# (Reference matching and XML output are currently commented out.)
for(i in 1:length(file.list)) {
# Start feedback:
cat("Attempting to read: ", file.list[i], "...")
# Get stripped verion of name (i.e., missing a, b, aa etc. ending):
# NOTE(review): "[:0-9:]{4}" is a character class of digits plus ":" repeated
# four times -- "[0-9]{4}" was presumably intended; both match a 4-digit year.
stripped_name <- gsub(strsplit(nexus.files[i], "[:0-9:]{4}|inpress")[[1]][2], "", nexus.files[i])
# Get hits for stripped name in filenames:
## hits <- grep(stripped_name, filenames)
# Check there is a match:
## if(length(hits) == 0) stop("No reference with matching name.")
# Create reference info:
## reference_info <- paste(results[hits], collapse = "\n\nOR\n\n")
# If multiple hits add to list so these can be manually checked later:
## if(length(hits) > 1) multi_hitters <- c(multi_hitters, nexus.files[i])
# Read in matrix:
mymatrix <- read_nexus_matrix(file.list[i])
# Update header text:
#?#mymatrix$Topper$Header <- "File downloaded from graemetlloyd.com"
# Make file name (basename without extension):
# NOTE(review): the dot in ".nex" is unescaped, so it matches any character
# followed by "nex"; "\\.nex" would be safer should a name contain e.g. "anex".
file.name <- gsub(".nex", "", strsplit(file.list[i], "/")[[1]][length(strsplit(file.list[i], "/")[[1]])])
# Write out NEXUS data:
#WriteMorphNexus(CladisticMatrix = mymatrix, filename = paste("/Users/eargtl/Documents/Homepage/www.graemetlloyd.com/nexus", "/", file.name, ".nex", sep = ""))
### write_nexus_matrix(CladisticMatrix = mymatrix, filename = paste("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/nexus1", "/", file.name, ".nex", sep = ""))
#?#write_nexus_matrix(mymatrix, paste("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/nexus1", "/", file.name, ".nex", sep = ""))
# Write out TNT data:
#WriteMorphTNT(CladisticMatrix = mymatrix, filename = paste("/Users/eargtl/Documents/Homepage/www.graemetlloyd.com/tnt", "/", file.name, ".tnt", sep = ""))
###write_tnt_matrix(CladisticMatrix = mymatrix, filename = paste("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/tnt", "/", file.name, ".tnt", sep = ""))
write_tnt_matrix(mymatrix, paste("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/tnt", "/", file.name, ".tnt", sep = ""))
# Write out TNT for analysis:
#write_tnt_matrix(CladisticMatrix = mymatrix, filename = paste("/Users/spencerhellert", "/", file.name, ".tnt", sep = ""), add.analysis.block = TRUE)
write_tnt_matrix(mymatrix, paste("/Users/spencerhellert", "/", file.name, ".tnt", sep = ""), add_analysis_block = TRUE)
# Re-read the analysis file so the scratch tree name can be made unique:
TNTFA <- readLines(paste("/Users/spencerhellert", "/", file.name, ".tnt", sep = ""))
# If scratch.tre is found:
if(length(grep("scratch.tre", TNTFA, fixed = TRUE)) > 0) {
# Replace scratch.tre with numbered version:
TNTFA <- gsub("scratch.tre", paste("scratch", scratch_counter, ".tre", sep = ""), TNTFA, fixed = TRUE)
# Overwrite TNT for analysis with numbered scratch.tre:
write(TNTFA, paste("/Users/spencerhellert", "/", file.name, ".tnt", sep = ""))
# Increment scratch counter:
scratch_counter <- scratch_counter + 1
}
# Make XML file:
## myxml <- paste(paste("<?xml version=\"1.0\" standalone=\"yes\"?>\n<SourceTree>\n\t<Source>\n", reference_info, "\t</Source>"), paste("\t<Taxa number=\"", length(mymatrix$Matrix_1$Matrix[, 1]), "\">", sep = ""), paste(paste("\t\t<List recon_name=\"DELETE\" recon_no=\"-1\">", rownames(mymatrix$Matrix_1$Matrix), "</List>", sep = ""), collapse = "\n"), "\t</Taxa>\n\t<Characters>\n\t\t<Molecular/>", paste("\t\t<Morphological number=\"", sum(unlist(lapply(lapply(mymatrix[2:length(mymatrix)], '[[', "Matrix"), ncol))), "\">", sep = ""), "\t\t\t<Type>Osteology</Type>\n\t\t</Morphological>\n\t\t<Behavioural/>\n\t\t<Other/>\n\t</Characters>\n\t<Analysis>\n\t\t<Type>Maximum Parsimony</Type>\n\t</Analysis>\n\t<Notes>Based on reanalysis of the original matrix.</Notes>", paste("\t<Filename>", gsub("\\.nex", "", strsplit(file.list[i], "/")[[1]][length(strsplit(file.list[i], "/")[[1]])]), "</Filename>", sep = ""), "\t<Parent/>\n\t<Sibling/>\n</SourceTree>", sep = "\n")
# Write out XML file:
##write(myxml, paste("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/xml1", "/", file.name, ".xml", sep = ""))
# Feedback:
cat("Done\n")
}
# List multiple hitters for checking:
sort(multi_hitters)
|
/RScripts/maketntandnexusandxml.R
|
no_license
|
shellert/MammalJawTree
|
R
| false
| false
| 17,713
|
r
|
# Load Claddis library:
library(Claddis)
# Set working directory:
#setwd("~/Documents/Homepage/www.graemetlloyd.com")
setwd("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/")
# Get file list:
file.list <- list.files()
# Get just the group matrix pages:
file.list <- file.list[grep("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/Nexus file", file.list)]
# Vector for storing output:
results <- vector(mode = "character")
# Main loop:
for(i in 1:length(file.list)) {
# Read in ith file:
X <- scan(file.list[i], what = "", sep = "\n", quiet = TRUE)
# Find first p tag opening:
begins <- grep("<p class=\"hangingindent\">", X)
# FInd last p tag closing:
ends <- grep("</p>", X)
# Reduce X to just the portion with references:
X <- X[begins[1]:ends[length(ends)]]
# Find where p tags open:
begins <- grep("<p class=\"hangingindent\">", X)
# Find where p tags close:
ends <- grep("</p>", X)
# Check p tags are closed and warn if not:
if(length(begins) != length(ends)) print(paste("Error in", file.list[i]))
# For each set of p tags:
for(j in 1:length(ends)) {
# Get full reference block:
Y <- X[begins[j]:ends[j]]
# Only proceed if this has not already been dealt with:
if(length(grep("<a href", Y)) == 0) {
# Remove bookmarks:
Y <- gsub("</p>", "", gsub("<p class=\"hangingindent\">", "", Y))
# Strip out leading whitespace:
while(length(grep("\t", Y)) > 0) Y <- gsub("\t", " ", Y)
# Strip out leading whitespace:
while(length(grep(" ", Y)) > 0) Y <- gsub(" ", " ", Y)
# Strip out last leading whitespace:
for(k in 1:length(Y)) Y[k] <- paste(strsplit(Y[k], "")[[1]][2:length(strsplit(Y[k], "")[[1]])], collapse = "")
# Isolate author and year:
authorandyear <- strsplit(gsub(" and ", "%%", gsub("\\., ", ".%%", Y[1])), "%%")[[1]]
# Isolate title:
title <- Y[2]
#
locale <- gsub("</b>", "", gsub("<b>", "", gsub("</em>", "", gsub("<em>", "", strsplit(gsub("\\.", "", gsub(", ", "%%", Y[3])), "%%")[[1]]))))
#
authorline <- paste("\t\t<Author>\n", paste("\t\t\t<List>", authorandyear[1:(length(authorandyear) - 1)], "</List>", sep = "", collapse = "\n"), "\n\t\t</Author>\n", sep = "")
#
yearline <- paste("\t\t<Year>", gsub("\\.", "", authorandyear[length(authorandyear)]), "</Year>\n", sep = "")
#
year <- gsub("</Year>\n", "", gsub("\t\t<Year>", "", yearline))
#
titleline <- strsplit(title, "")[[1]]
#
if(titleline[length(titleline)] == ".") titleline <- titleline[-length(titleline)]
#
titleline <- paste(titleline, collapse = "")
#
titleline <- paste("\t\t<Title>", titleline, "</Title>\n", sep = "")
# Case if a book chapter:
if(length(grep("In ", locale[1])) == 1) {
# Restore locale to original line:
locale <- Y[3]
#
locale <- gsub("<em>In</em> ", "", locale)
# Insert first (editor(s)) separator:
locale <- gsub(" \\(eds\\.\\) ", "%%", locale)
# Insert first (editor(s)) separator:
locale <- gsub(" \\(ed\\.\\) ", "%%", locale)
# Insert first (editor(s)) separator:
locale <- gsub(" \\(eds\\) ", "%%", locale)
# Insert first (editor(s)) separator:
locale <- gsub(" \\(ed\\) ", "%%", locale)
# Isolate editors
editors <- strsplit(locale, "%%")[[1]][1]
# Add "and" separator:
editors <- gsub(" and ", "%%", editors)
#
if(length(grep(",", editors)) > 0) {
# Case if single editor in correct "Surname, Initials" format:
if(length(grep("%%", editors)) == 0) editorsline <- paste("\t\t<Editor>\n", paste("\t\t\t<List>", editors, "</List>\n", sep = ""), "\t\t</Editor>\n", sep = "")
# Case if authors are in incorrect "Intitals Surname" format:
if(strsplit(editors, "")[[1]][2] == ".") {
# Add separator between names:
editors <- gsub(", ", "%%", editors)
#
editors <- strsplit(editors, "%%")[[1]]
#
for(k in 1:length(editors)) {
#
temp <- strsplit(editors[k], "\\. ")[[1]]
#
editors[k] <- paste(temp[length(temp)], paste(temp[1:(length(temp) - 1)], ".", sep = "", collapse = " "), sep = ", ")
}
#
editorsline <- paste("\t\t<Editor>\n", paste("\t\t\t<List>", editors, "</List>\n", sep = "", collapse = ""), "\t\t</Editor>\n", sep = "")
#
} else {
# Add separator between names:
editors <- gsub("\\., ", ".%%", editors)
#
editorsline <- paste("\t\t<Editor>\n", paste("\t\t\t<List>", strsplit(editors, "%%")[[1]], "</List>\n", sep = "", collapse = ""), "\t\t</Editor>\n", sep = "")
}
#
} else {
# Case if single editor in incorrect "Intitals Surname" format:
if(length(grep("%%",editors)) == 0) {
#
editors <- strsplit(editors, "\\. ")[[1]]
#
editors <- paste(paste(editors[length(editors)], ",", sep = ""), paste(editors[1:(length(editors) - 1)], ".", sep = "", collapse = " "), collapse = " ")
#
editorsline <- paste("\t\t<Editor>\n", paste("\t\t\t<List>", editors, "</List>\n", sep = ""), "\t\t</Editor>\n", sep = "")
# Case of two authors in incorrect "Intitals Surname" format:
} else {
#
editors <- strsplit(editors, "%%")[[1]]
#
for(k in 1:length(editors)) {
#
temp <- strsplit(editors[k], "\\. ")[[1]]
#
editors[k] <- paste(temp[length(temp)], paste(temp[1:(length(temp) - 1)], ".", sep = "", collapse = " "), sep = ", ")
}
#
editorsline <- paste("\t\t<Editor>\n", paste("\t\t\t<List>", editors, "</List>\n", sep = "", collapse = ""), "\t\t</Editor>\n", sep = "")
}
}
# Remove editors from rest of book information:
locale <- paste(strsplit(locale, "%%")[[1]][2:length(strsplit(locale, "%%")[[1]])], sep = "%%")
# Find end of book title separator:
locale <- gsub("\\. ", "%%", locale)
# Remove trailing period:
locale <- gsub("\\.", "", locale)
# Isolate booktitle:
booktitleline <- paste("\t\t<Booktitle>", strsplit(locale, "%%")[[1]][1], "</Booktitle>\n", sep = "")
# Remove booktitle from rest of book information:
locale <- paste(strsplit(locale, "%%")[[1]][2:length(strsplit(locale, "%%")[[1]])], sep = "%%")
# Remove false gaps:
while(length(locale) > 1) locale <- paste(locale, collapse = ". ")
# Separate remaining portions:
locale <- strsplit(locale, ", ")[[1]]
#
publisherline <- paste("\t\t<Publisher>", locale[1], "</Publisher>\n", sep = "")
#
cityline <- paste("\t\t<City>", locale[2], "</City>\n", sep = "")
#
pagesline <- paste("\t\t<Pages>", gsub("<br>", "", gsub("p", "", locale[3])), "</Pages>\n", sep = "")
#
fulllines <- paste(authorline, yearline, titleline, "\t\t<Journal/>\n", "\t\t<Volume/>\n", pagesline, booktitleline, publisherline, cityline, editorsline, sep = "")
# Case if a journal:
} else {
#
if(year == "in press") {
# Case if journal title with commas:
if(length(locale) > 2) {
# Collapse journal title:
locale[1] <- paste(locale[1], locale[2], sep = ", ")
# Remove redudnant second part
locale <- locale[-2]
}
# Delete empty volume value
if(locale[2] == "") locale <- locale[-2]
}
# Find journal titles with commas:
while(length(locale) > 3) {
# Collapse journal title:
locale[1] <- paste(locale[1], locale[2], sep = ", ")
# Remove redudnant second part:
locale <- locale[-2]
}
#
journalline <- paste("\t\t<Journal>", locale[1], "</Journal>\n", sep = "")
#
if(length(locale) > 1) {
#
volumeline <- paste("\t\t<Volume>", locale[2], "</Volume>\n", sep = "")
#
} else {
#
volumeline <- "\t\t<Volume/>\n"
}
#
if(length(locale) > 2) {
#
pagesline <- paste("\t\t<Pages>", locale[3], "</Pages>\n", sep = "")
#
} else {
#
pagesline <- "\t\t<Pages/>\n"
}
#
fulllines <- paste(authorline, yearline, titleline, journalline, volumeline, pagesline, "\t\t<Booktitle/>\n", "\t\t<Publisher/>\n", "\t\t<City/>\n","\t\t<Editor/>\n", sep = "")
}
}
#
results <- c(results, fulllines)
}
}
# Collapse to just unique references (not sure how duplicates ended up in here...):
results <- sort(unique(results))
# Create empty vector to store one hypothetical file name per reference:
filenames <- vector(mode = "character")
# For each reference:
for(i in 1:length(results)) {
# Strip newlines/tabs once so the XML record can be split on tag boundaries
# (the original recomputed this chain four times per reference):
ref <- gsub("\n|\t", "", results[i])
# Isolate the <Author> block, then keep only its non-empty <List> entries:
author_fields <- strsplit(strsplit(ref, split = "<Author>|</Author>")[[1]][2], split = "<List>|</List>")[[1]]
authors <- author_fields[which(nchar(author_fields) > 0)]
# Isolate surnames (the text before the first comma of each author):
surnames <- unlist(lapply(strsplit(authors, split = ","), '[', 1))
# Get publication year:
year <- gsub(" ", "", strsplit(ref, split = "<Year>|</Year>")[[1]][2])
# Build the name stem: "Surname_year", "A_et_B_year" or "A_etal_year".
# (stem stays length zero if no surname was parsed, so nothing is appended then,
# matching the original's behaviour of adding nothing in that case.)
stem <- character(0)
# If a single author:
if(length(surnames) == 1) stem <- paste(surnames, year, sep = "_")
# If two authors:
if(length(surnames) == 2) stem <- paste(paste(surnames, collapse = "_et_"), year, sep = "_")
# If more than two authors:
if(length(surnames) > 2) stem <- paste(surnames[1], "etal", year, sep = "_")
# Drop apostrophes, replace spaces with underscores, and store:
filenames <- c(filenames, gsub("'", "", gsub(" ", "_", stem)))
}
# Isolate references that have multiple file names (i.e., two or more references could be contracted to the same name):
duplicates <- unique(filenames[duplicated(filenames)])
# Set working directory:
setwd("/Users/eargtl/Documents/Homepage/www.graemetlloyd.com/ToAdd")
# Get list of folders (entries without a dot in their name):
folder.list <- list.files()[-grep("\\.", list.files())]
# Get full paths for each folder:
for(i in 1:length(folder.list)) folder.list[i] <- paste(getwd(), "/", folder.list[i], sep = "")
###########
# Vector for storing nexus file list:
file.list <- vector(mode = "character")
# Find all file paths for nexus files:
for(i in 1:length(folder.list)) {
# Set working directory for current folder:
setwd(folder.list[i])
# Look for NEXUS files. The dot is escaped ("\\.nex"): the original pattern
# ".nex" treated the dot as "any character", so names like "annex.txt" were
# false hits; "\\.nex" still matches both ".nex" and ".nexus" extensions.
if(length(grep("\\.nex", list.files())) > 0) {
# Add any found to file list:
file.list <- c(file.list, paste(folder.list[i], "/", list.files()[grep("\\.nex", list.files())], sep = ""))
}
}
#########
# Load Claddis library:
library(Claddis)
# Set working directory:
#setwd("~/Documents/Homepage/www.graemetlloyd.com")
setwd("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/")
# Get file list:
file.list <- list.files()
# Get just the group matrix pages:
# NOTE(review): this grep pattern is a full path, but list.files() above returns
# bare file names, so the grep can never match and file.list becomes empty here.
# Harmless only because file.list is overwritten below ("nexus 4" listing) -- confirm.
file.list <- file.list[grep("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/Nexus file", file.list)]
# Get just the NEXUS file names (9th path component):
# NOTE(review): with file.list empty at this point this yields NULL; it too is
# overwritten below.
nexus.files <- unlist(lapply(strsplit(file.list, "/"), '[', 9))
# Reset working directory:
#setwd("/Users/eargtl/Documents/Homepage/www.graemetlloyd.com/ToAdd")
#######IDK if this will work corrently, trying to relace line 365####
#file.list <- list.files("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/Nexus files")
#file.list <-list.files("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/Nexus files", full.names = TRUE)
# Full paths of the NEXUS matrices to convert:
file.list <-list.files("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/nexus 4", full.names = TRUE)
#######STILL NEED NEXUS.FILES#### DON'T KNOW WHat the difference is between file.list and nexus.files
# (Answer: file.list holds full paths; nexus.files holds the same files as bare names.)
#nexus.files <- list.files("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/Nexus files")
nexus.files <- list.files("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/nexus 4")
#######STILL NEED filenames####
# NOTE(review): this clobbers the reference-derived filenames built earlier,
# which disables the (already commented-out) reference matching in the loop below -- confirm intended.
filenames <- vector(mode = "character")
# Create vector to store multiple hits:
multi_hitters <- vector(mode = "character")
# Set scratch counter (used to number scratch.tre files in TNT analysis blocks):
scratch_counter <- 1
# Create nexus, tnt and xml files:
# For each NEXUS matrix: read it with Claddis, write a TNT copy, and write a
# TNT-for-analysis copy whose scratch tree file gets a unique number.
# (Reference matching and XML output are currently commented out.)
for(i in 1:length(file.list)) {
# Start feedback:
cat("Attempting to read: ", file.list[i], "...")
# Get stripped verion of name (i.e., missing a, b, aa etc. ending):
# NOTE(review): "[:0-9:]{4}" is a character class of digits plus ":" repeated
# four times -- "[0-9]{4}" was presumably intended; both match a 4-digit year.
stripped_name <- gsub(strsplit(nexus.files[i], "[:0-9:]{4}|inpress")[[1]][2], "", nexus.files[i])
# Get hits for stripped name in filenames:
## hits <- grep(stripped_name, filenames)
# Check there is a match:
## if(length(hits) == 0) stop("No reference with matching name.")
# Create reference info:
## reference_info <- paste(results[hits], collapse = "\n\nOR\n\n")
# If multiple hits add to list so these can be manually checked later:
## if(length(hits) > 1) multi_hitters <- c(multi_hitters, nexus.files[i])
# Read in matrix:
mymatrix <- read_nexus_matrix(file.list[i])
# Update header text:
#?#mymatrix$Topper$Header <- "File downloaded from graemetlloyd.com"
# Make file name (basename without extension):
# NOTE(review): the dot in ".nex" is unescaped, so it matches any character
# followed by "nex"; "\\.nex" would be safer should a name contain e.g. "anex".
file.name <- gsub(".nex", "", strsplit(file.list[i], "/")[[1]][length(strsplit(file.list[i], "/")[[1]])])
# Write out NEXUS data:
#WriteMorphNexus(CladisticMatrix = mymatrix, filename = paste("/Users/eargtl/Documents/Homepage/www.graemetlloyd.com/nexus", "/", file.name, ".nex", sep = ""))
### write_nexus_matrix(CladisticMatrix = mymatrix, filename = paste("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/nexus1", "/", file.name, ".nex", sep = ""))
#?#write_nexus_matrix(mymatrix, paste("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/nexus1", "/", file.name, ".nex", sep = ""))
# Write out TNT data:
#WriteMorphTNT(CladisticMatrix = mymatrix, filename = paste("/Users/eargtl/Documents/Homepage/www.graemetlloyd.com/tnt", "/", file.name, ".tnt", sep = ""))
###write_tnt_matrix(CladisticMatrix = mymatrix, filename = paste("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/tnt", "/", file.name, ".tnt", sep = ""))
write_tnt_matrix(mymatrix, paste("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/tnt", "/", file.name, ".tnt", sep = ""))
# Write out TNT for analysis:
#write_tnt_matrix(CladisticMatrix = mymatrix, filename = paste("/Users/spencerhellert", "/", file.name, ".tnt", sep = ""), add.analysis.block = TRUE)
write_tnt_matrix(mymatrix, paste("/Users/spencerhellert", "/", file.name, ".tnt", sep = ""), add_analysis_block = TRUE)
# Re-read the analysis file so the scratch tree name can be made unique:
TNTFA <- readLines(paste("/Users/spencerhellert", "/", file.name, ".tnt", sep = ""))
# If scratch.tre is found:
if(length(grep("scratch.tre", TNTFA, fixed = TRUE)) > 0) {
# Replace scratch.tre with numbered version:
TNTFA <- gsub("scratch.tre", paste("scratch", scratch_counter, ".tre", sep = ""), TNTFA, fixed = TRUE)
# Overwrite TNT for analysis with numbered scratch.tre:
write(TNTFA, paste("/Users/spencerhellert", "/", file.name, ".tnt", sep = ""))
# Increment scratch counter:
scratch_counter <- scratch_counter + 1
}
# Make XML file:
## myxml <- paste(paste("<?xml version=\"1.0\" standalone=\"yes\"?>\n<SourceTree>\n\t<Source>\n", reference_info, "\t</Source>"), paste("\t<Taxa number=\"", length(mymatrix$Matrix_1$Matrix[, 1]), "\">", sep = ""), paste(paste("\t\t<List recon_name=\"DELETE\" recon_no=\"-1\">", rownames(mymatrix$Matrix_1$Matrix), "</List>", sep = ""), collapse = "\n"), "\t</Taxa>\n\t<Characters>\n\t\t<Molecular/>", paste("\t\t<Morphological number=\"", sum(unlist(lapply(lapply(mymatrix[2:length(mymatrix)], '[[', "Matrix"), ncol))), "\">", sep = ""), "\t\t\t<Type>Osteology</Type>\n\t\t</Morphological>\n\t\t<Behavioural/>\n\t\t<Other/>\n\t</Characters>\n\t<Analysis>\n\t\t<Type>Maximum Parsimony</Type>\n\t</Analysis>\n\t<Notes>Based on reanalysis of the original matrix.</Notes>", paste("\t<Filename>", gsub("\\.nex", "", strsplit(file.list[i], "/")[[1]][length(strsplit(file.list[i], "/")[[1]])]), "</Filename>", sep = ""), "\t<Parent/>\n\t<Sibling/>\n</SourceTree>", sep = "\n")
# Write out XML file:
##write(myxml, paste("~/Desktop/Desktop_Spencers_MacBook_Pro_2/Grossnickle Tree/xml1", "/", file.name, ".xml", sep = ""))
# Feedback:
cat("Done\n")
}
# List multiple hitters for checking:
sort(multi_hitters)
|
### soil texture TERN
## spatial prediction: step 3: untangle compositional data
## modified: 26/3/20
## Finished:
### variables
vart<- "clay"
depth<- "d1"
batch<- 1
srt<- 1
fin<- 500
## libraries
library(parallel);library(sp);library(rgdal);library(doParallel);library(raster);library(compositions)
# root directories
root.tiles<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soiltexture/predictions/tiles/"
root.slurm<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soiltexture/rcode/digitalsoilmapping/spatialprediction/slurm/"
### Folders where the predictions are
fols<- as.numeric(list.files(root.tiles, full.names = FALSE))
length(fols)
###
# begin parallel cluster and register it with foreach
cpus<- 8
cl<- makeCluster(spec=cpus)
# register with foreach
registerDoParallel(cl)
# Apply model to each tile: back-transform the ilr-space predictions (mean,
# upper PI, lower PI) to clay/sand/silt proportions and write them out.
oper1<- foreach(i=srt:fin, .packages = c("raster", "sp", "rgdal", "compositions")) %dopar% {
  # inverse compositional (ilr) function: back-transform to proportions
  f2 <- function(x) (c(ilrInv(x)))
  # select the folder for this tile
  sfol <- fols[i]
  nm1 <- paste0(root.tiles, sfol)
  # Back-transform one prediction set; tag is "mean", "upPL" or "loPL".
  # (The original repeated this stanza verbatim three times.)
  process_tag <- function(tag) {
    # get the prediction rasters for this tag/depth
    # NOTE(review): the "." in the pattern is a regex any-char, as in the original.
    files <- list.files(path = nm1, pattern = paste0(tag, "_", depth, ".tif"), full.names = TRUE, recursive = FALSE)
    # stack the rasters
    s1 <- stack()
    for (j in 1:length(files)) {
      s1 <- stack(s1, raster(files[j]))
    }
    # inverse transform the stack
    s2 <- calc(s1, fun = f2)
    # write each composition component to its own GeoTIFF
    x.name <- c("clay", "sand", "silt")
    names(s2) <- x.name
    for (j in 1:nlayers(s2)) {
      out.name <- paste0(nm1, "/", "pred_", x.name[j], "_compos_", tag, "_", depth, ".tif")
      writeRaster(x = s2[[j]], filename = out.name, format = "GTiff", datatype = "FLT4S", overwrite = TRUE)
    }
  }
  ## MEAN
  process_tag("mean")
  ## UPPER PI
  process_tag("upPL")
  ## LOWER PI
  process_tag("loPL")
  # slurm sign of life: record tile index and timestamp
  itOuts <- c(i, as.character(Sys.time()))
  nmz <- paste0(root.slurm, vart, "/", depth, "/", batch, "/slurmckeck_", i, ".txt")
  write.table(itOuts,
              file = nmz,
              row.names = F, col.names = F, sep = ",")
}
##END
|
/Production/DSM/SoilTexture/digitalsoilmapping/spatialprediction/clay/step3/d1/spatialise_clay_d1_1.R
|
permissive
|
AusSoilsDSM/SLGA
|
R
| false
| false
| 3,325
|
r
|
### soil texture TERN
## spatial prediction: step 3: untangle compositional data
## modified: 26/3/20
## Finished:
### variables
vart<- "clay"
depth<- "d1"
batch<- 1
srt<- 1
fin<- 500
## libraries
library(parallel);library(sp);library(rgdal);library(doParallel);library(raster);library(compositions)
# root directories
root.tiles<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soiltexture/predictions/tiles/"
root.slurm<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soiltexture/rcode/digitalsoilmapping/spatialprediction/slurm/"
### Folders where the predictions are
fols<- as.numeric(list.files(root.tiles, full.names = FALSE))
length(fols)
###
# begin parallel cluster and register it with foreach
cpus<- 8
cl<- makeCluster(spec=cpus)
# register with foreach
registerDoParallel(cl)
# Apply model to each tile: back-transform the ilr-space predictions (mean,
# upper PI, lower PI) to clay/sand/silt proportions and write them out.
oper1<- foreach(i=srt:fin, .packages = c("raster", "sp", "rgdal", "compositions")) %dopar% {
  # inverse compositional (ilr) function: back-transform to proportions
  f2 <- function(x) (c(ilrInv(x)))
  # select the folder for this tile
  sfol <- fols[i]
  nm1 <- paste0(root.tiles, sfol)
  # Back-transform one prediction set; tag is "mean", "upPL" or "loPL".
  # (The original repeated this stanza verbatim three times.)
  process_tag <- function(tag) {
    # get the prediction rasters for this tag/depth
    # NOTE(review): the "." in the pattern is a regex any-char, as in the original.
    files <- list.files(path = nm1, pattern = paste0(tag, "_", depth, ".tif"), full.names = TRUE, recursive = FALSE)
    # stack the rasters
    s1 <- stack()
    for (j in 1:length(files)) {
      s1 <- stack(s1, raster(files[j]))
    }
    # inverse transform the stack
    s2 <- calc(s1, fun = f2)
    # write each composition component to its own GeoTIFF
    x.name <- c("clay", "sand", "silt")
    names(s2) <- x.name
    for (j in 1:nlayers(s2)) {
      out.name <- paste0(nm1, "/", "pred_", x.name[j], "_compos_", tag, "_", depth, ".tif")
      writeRaster(x = s2[[j]], filename = out.name, format = "GTiff", datatype = "FLT4S", overwrite = TRUE)
    }
  }
  ## MEAN
  process_tag("mean")
  ## UPPER PI
  process_tag("upPL")
  ## LOWER PI
  process_tag("loPL")
  # slurm sign of life: record tile index and timestamp
  itOuts <- c(i, as.character(Sys.time()))
  nmz <- paste0(root.slurm, vart, "/", depth, "/", batch, "/slurmckeck_", i, ".txt")
  write.table(itOuts,
              file = nmz,
              row.names = F, col.names = F, sep = ",")
}
##END
|
# Pool-adjacent-violators algorithm: weighted isotonic regression of the
# numeric vector 'x' (weights 'wt') under the simple increasing order.
# Returns the fitted values ('estim', same length as 'x') together with the
# level-set labels ('levelsets') identifying which entries were pooled.
# Based on the original implementation of 02 Sep 1994 / R.F. Raubertas.
pava <- function (x, wt=rep(1,length(x)))
{
  n <- length(x)
  # Trivial case: nothing to pool.
  if (n <= 1) return (list(estim=x,levelsets = 1))
  if (any(is.na(x)) || any(is.na(wt))) {
    stop ("Missing values in 'x' or 'wt' not allowed")
  }
  # Each observation starts in its own level set.
  blocks <- seq_len(n)
  repeat {
    # Locate adjacent pairs that violate monotonicity.
    bad <- which(diff(x) < 0)
    if (length(bad) == 0) break
    # Merge the first offending pair of level sets...
    first <- min(bad)
    members <- blocks == blocks[first] | blocks == blocks[first + 1]
    # ...and replace their values with the pooled weighted mean.
    x[members] <- weighted.mean(x[members], wt[members])
    blocks[members] <- blocks[first]
  }
  list(estim = x, levelsets = blocks)
}
# example
# pava(c(22.5,23.33,20.833,24.25),wt=c(3,3,3,2))
# One Simulation
# Simulate a single dose-finding trial and return the selected dose index.
# scenario: one row per dose level; the four columns are probabilities of the
#           four ordered patient outcomes (used via their cumulative sums).
# ssize:    total number of patients to enrol.
# Tau:      toxicity cap used in the de-escalation guard below (currently dead code, see NOTE).
# m:        minimum patients at the current dose before the rule may move.
# delta:    threshold on 1 - phat0 - phat123[,1] that forces a jump down.
one_sml <- function(scenario,ssize,Tau,m,delta)
{
maxdose <- nrow(scenario)
# Per-dose outcome probabilities. NOTE(review): p3 is never used below.
p0 <- scenario[,1]; p1 <- scenario[,2]; p2 <- scenario[,3]; p3 <- scenario[,4]
# Cumulative outcome probabilities per dose (last column fixed at 1):
cumpr=cbind(p0,p0+p1,p0+p1+p2,1)
# respmat[d, k]: count of outcome k at dose d; trials[d]: patients at dose d.
respmat <- matrix(0,maxdose,4)
trials <- rep(0,maxdose)
dosenum <- 1
count <- 0
# Isotonic estimates: phat0 (outcome-1 probability per dose),
# phat123 (probabilities of outcomes 2-4 within each dose).
phat0 <- rep(0,maxdose)
phat123 <- matrix(0,maxdose,3)
while (count < ssize) {
# simulate the result for one patient: the rank of a uniform draw among the
# cumulative probabilities picks the outcome category
otvet <- rank(c(runif(1),cumpr[dosenum,1:3]))[1]
respmat[dosenum,otvet] <- respmat[dosenum,otvet]+1
trials[dosenum] <-trials[dosenum]+1
# isotonic estimation of toxicity and response probability
dwp <- which(trials!=0) # doses have at least one patient
phat0[dwp] <- pava((respmat/trials)[dwp,1])$estim
for (j in dwp) {
# Within a dose, smooth outcomes 2-4 with PAVA, reversing the direction when
# the raw count of outcome 2 dominates outcome 4:
if (respmat[j,2]>=respmat[j,4]) {phat123[j,] <- pava((respmat/trials)[j,4:2])$estim[3:1]}
else {phat123[j,] <- pava((respmat/trials)[j,2:4])$estim}
}
# decide next dosenum
if (trials[dosenum]<m) {
# Stay until m patients have been treated at the current dose.
dosenum <- dosenum
} else if (length(which((1-phat0-phat123[,1])>delta))>0) {
# Some dose exceeds the delta threshold: jump to the lowest such dose.
dosenum <- min(which((1-phat0-phat123[,1])>delta))
} else if (phat123[dosenum,1]>phat0[dosenum]) {
# phat123[,1] above phat0 at the current dose: escalate one level.
dosenum <- min(dosenum+1,maxdose)
} else if (phat123[dosenum,1]<=phat0[dosenum]) {
# Otherwise move toward the crossing point of phat0 and phat123[,1]:
diff=phat0-phat123[,1]
if (length(which(diff==0))>0) {dosenum <- min(which(diff==0))}
else {
for (k in 1:max((dosenum-1),1)) {
if (diff[k]<0 & diff[k+1]>0) {
# Pick whichever side of the sign change is closer to zero.
if (abs(diff[k])>diff[k+1]) {dosenum <- min(k+1,maxdose); break}
else {dosenum <- k; break}
}
}
}
}
# NOTE(review): length(phat0) always equals maxdose, so this condition is never
# TRUE and the Tau de-escalation guard never runs -- confirm intended condition.
if (length(phat0)>maxdose) {
while (phat0[dosenum]>Tau & dosenum>1) {dosenum <- dosenum-1}
}
count=count+1
}
# find the optimal dose: doses with phat0 >= 0.5 are excluded (toltox = 0);
# the rest are ranked by a weighted sum of the outcome 2-4 probabilities.
toltox=ifelse(phat0<0.5,1,0)
score <- toltox*(phat123[,1]+2*phat123[,2]+3*phat123[,3])
bestdose <- order(score)[maxdose]
(bestdose)
}
# Simulation Results
# Each scenario matrix has one row per dose level and four columns giving the
# probabilities of the four ordered outcomes consumed by one_sml (rows sum to 1).
# The names appear to encode dose count and scenario index (e.g. 31 = 3 doses,
# scenario 1; 61 = 6 doses, scenario 1) -- verify against the write-up.
scenario31 = matrix(c(0.1,0.72,0.09,0.09,0.2,0.32,0.24,0.24,0.3,0.07,0.07,0.56),byrow=TRUE,ncol=4)
scenario32 = matrix(c(0.15,0.6375,0.17,0.0425,0.3,0.42,0.21,0.07,0.45,0.0275,0.11,0.4125),byrow=TRUE,ncol=4)
scenario33 = matrix(c(0.2,0.56,0.16,0.08,0.4,0.06,0.18,0.36,0.7,0.03,0.03,0.24),byrow=TRUE,ncol=4)
scenario41 = matrix(c(0.1,0.72,0.09,0.09,0.2,0.32,0.24,0.24,0.3,0.07,0.07,0.56,0.4,0.06,0.06,0.48),byrow=TRUE,ncol=4)
scenario42 = matrix(c(0.15,0.6375,0.17,0.0425,0.3,0.42,0.21,0.07,0.45,0.0275,0.11,0.4125,0.6,0.02,0.06,0.32),byrow=TRUE,ncol=4)
scenario43 = matrix(c(0.2,0.56,0.16,0.08,0.4,0.06,0.18,0.36,0.7,0.03,0.03,0.24,0.8,0.02,0.02,0.16),byrow=TRUE,ncol=4)
scenariob1 = matrix(c(0.1,0.00,0.45,0.45,0.2,0.32,0.24,0.24,0.3,0.07,0.28,0.35,0.4,0.4,0.1,0.1),byrow=TRUE,ncol=4)
scenariob2 = matrix(c(0.15,0.55,0.2,0.1,0.3,0.00,0.35,0.35,0.45,0.2,0.2,0.15,0.6,0.4,0.00,0.00),byrow=TRUE,ncol=4)
scenariob3 = matrix(c(0.0,0.6,0.4,0.0,0.1,0.7,0.2,0.0,0.7,0.1,0.1,0.1,0.8,0.2,0.0,0.0),byrow=TRUE,ncol=4)
scenario61 = matrix(c(0.1,0.5,0.4,0.0,0.2,0.3,0.3,0.2,0.3,0.1,0.25,0.35,0.4,0,0.1,0.5,0.5,0,0,0.5,0.6,0,0,0.4),byrow=TRUE,ncol=4)
scenario62 = matrix(c(0,1,0,0,0.1,0.9,0,0,0.2,0.7,0.1,0,0.3,0.4,0.2,0.1,0.4,0.1,0.2,0.3,0.5,0,0,0.5),byrow=TRUE,ncol=4)
scenario63 = matrix(c(0.15,0.4,0.35,0.1,0.3,0.3,0.25,0.15,0.45,0.1,0.15,0.3,0.5,0,0,0.5,0.75,0,0,0.25,0.9,0,0,0.1),byrow=TRUE,ncol=4)
scenario64 = matrix(c(0.3,0.3,0.2,0.2,0.35,0.3,0.2,0.15,0.4,0.15,0.15,0.3,0.45,0.05,0.1,0.4,0.5,0,0,0.5,0.55,0,0,0.45),byrow=TRUE,ncol=4)
# Run 10,000 simulated trials of the chosen scenario and tally selected doses:
result = matrix(0,10000)
for (i in 1:10000) {
result[i]=one_sml(scenario64,ssize=30,Tau=0.5,m=3,delta=0.7)
}
# Histogram counts give the selection frequency of each dose level:
a=hist(result)
a$counts
# TODO: still need to increase the number of dose levels.
|
/scripts/second_version.R
|
no_license
|
YixiaoD/A_New_Dose-Finding_Design
|
R
| false
| false
| 4,655
|
r
|
# Pool-adjacent-violators algorithm: weighted isotonic regression of the
# numeric vector 'x' (weights 'wt') under the simple increasing order.
# Returns the fitted values ('estim', same length as 'x') together with the
# level-set labels ('levelsets') identifying which entries were pooled.
# Based on the original implementation of 02 Sep 1994 / R.F. Raubertas.
pava <- function (x, wt=rep(1,length(x)))
{
  n <- length(x)
  # Trivial case: nothing to pool.
  if (n <= 1) return (list(estim=x,levelsets = 1))
  if (any(is.na(x)) || any(is.na(wt))) {
    stop ("Missing values in 'x' or 'wt' not allowed")
  }
  # Each observation starts in its own level set.
  blocks <- seq_len(n)
  repeat {
    # Locate adjacent pairs that violate monotonicity.
    bad <- which(diff(x) < 0)
    if (length(bad) == 0) break
    # Merge the first offending pair of level sets...
    first <- min(bad)
    members <- blocks == blocks[first] | blocks == blocks[first + 1]
    # ...and replace their values with the pooled weighted mean.
    x[members] <- weighted.mean(x[members], wt[members])
    blocks[members] <- blocks[first]
  }
  list(estim = x, levelsets = blocks)
}
# example
# pava(c(22.5,23.33,20.833,24.25),wt=c(3,3,3,2))
# One Simulation
# Simulate a single adaptive dose-finding trial and return the selected dose.
#
# scenario - matrix, one row per dose; columns are the probabilities of the
#            four ordered patient outcomes (rows sum to 1).
# ssize    - total number of patients enrolled.
# Tau      - toxicity cap used in the de-escalation check (see NOTE below).
# m        - minimum patients at the current dose before the dose-moving
#            rules apply.
# delta    - threshold for detecting doses with too much upper-category mass.
one_sml <- function(scenario,ssize,Tau,m,delta)
{
maxdose <- nrow(scenario)
# Per-category probabilities and their cumulative sums, per dose.
p0 <- scenario[,1]; p1 <- scenario[,2]; p2 <- scenario[,3]; p3 <- scenario[,4]
cumpr=cbind(p0,p0+p1,p0+p1+p2,1)
respmat <- matrix(0,maxdose,4)  # outcome counts: rows = doses, cols = categories
trials <- rep(0,maxdose)        # patients treated at each dose
dosenum <- 1                    # start at the lowest dose
count <- 0
phat0 <- rep(0,maxdose)         # isotonic estimate of the category-1 probability
phat123 <- matrix(0,maxdose,3)  # per-dose isotonic estimates for categories 2-4
while (count < ssize) {
# simulate the result for one patient: the rank of a uniform draw among the
# three cumulative probabilities yields the outcome category (1..4)
otvet <- rank(c(runif(1),cumpr[dosenum,1:3]))[1]
respmat[dosenum,otvet] <- respmat[dosenum,otvet]+1
trials[dosenum] <-trials[dosenum]+1
# isotonic estimation of toxicity and response probability
dwp <- which(trials!=0) # doses have at least one patient
phat0[dwp] <- pava((respmat/trials)[dwp,1])$estim
for (j in dwp) {
# Within a dose, PAVA-smooth the three upper categories; the smoothing
# direction depends on whether category 2 or category 4 counts dominate.
if (respmat[j,2]>=respmat[j,4]) {phat123[j,] <- pava((respmat/trials)[j,4:2])$estim[3:1]}
else {phat123[j,] <- pava((respmat/trials)[j,2:4])$estim}
}
# decide next dosenum
if (trials[dosenum]<m) {
# Too few patients at the current dose yet: stay.
dosenum <- dosenum
} else if (length(which((1-phat0-phat123[,1])>delta))>0) {
# Jump to the lowest dose whose combined upper-category mass exceeds delta.
dosenum <- min(which((1-phat0-phat123[,1])>delta))
} else if (phat123[dosenum,1]>phat0[dosenum]) {
# Category-2 estimate exceeds category-1 here: escalate one level (capped).
dosenum <- min(dosenum+1,maxdose)
} else if (phat123[dosenum,1]<=phat0[dosenum]) {
# Otherwise move toward the dose where the two estimates cross.
diff=phat0-phat123[,1]
if (length(which(diff==0))>0) {dosenum <- min(which(diff==0))}
else {
for (k in 1:max((dosenum-1),1)) {
if (diff[k]<0 & diff[k+1]>0) {
# Pick whichever side of the sign change is closer to zero.
if (abs(diff[k])>diff[k+1]) {dosenum <- min(k+1,maxdose); break}
else {dosenum <- k; break}
}
}
}
}
# NOTE(review): length(phat0) always equals maxdose, so this condition is
# never TRUE and the Tau-based de-escalation below is dead code — confirm
# what guard was intended here.
if (length(phat0)>maxdose) {
while (phat0[dosenum]>Tau & dosenum>1) {dosenum <- dosenum-1}
}
count=count+1
}
# find the optimal dose
# NOTE(review): the tolerated-toxicity cut-off is hard-coded at 0.5 rather
# than using the Tau argument — confirm this is intentional.
toltox=ifelse(phat0<0.5,1,0)
score <- toltox*(phat123[,1]+2*phat123[,2]+3*phat123[,3])
bestdose <- order(score)[maxdose]
(bestdose)
}
# Simulation Results
# Outcome scenarios: one row per dose level; the four columns hold the
# probabilities of the ordered patient outcomes and each row sums to one.
scenario31 <- matrix(c(0.1, 0.72, 0.09, 0.09, 0.2, 0.32, 0.24, 0.24, 0.3, 0.07, 0.07, 0.56), byrow = TRUE, ncol = 4)
scenario32 <- matrix(c(0.15, 0.6375, 0.17, 0.0425, 0.3, 0.42, 0.21, 0.07, 0.45, 0.0275, 0.11, 0.4125), byrow = TRUE, ncol = 4)
scenario33 <- matrix(c(0.2, 0.56, 0.16, 0.08, 0.4, 0.06, 0.18, 0.36, 0.7, 0.03, 0.03, 0.24), byrow = TRUE, ncol = 4)
scenario41 <- matrix(c(0.1, 0.72, 0.09, 0.09, 0.2, 0.32, 0.24, 0.24, 0.3, 0.07, 0.07, 0.56, 0.4, 0.06, 0.06, 0.48), byrow = TRUE, ncol = 4)
scenario42 <- matrix(c(0.15, 0.6375, 0.17, 0.0425, 0.3, 0.42, 0.21, 0.07, 0.45, 0.0275, 0.11, 0.4125, 0.6, 0.02, 0.06, 0.32), byrow = TRUE, ncol = 4)
scenario43 <- matrix(c(0.2, 0.56, 0.16, 0.08, 0.4, 0.06, 0.18, 0.36, 0.7, 0.03, 0.03, 0.24, 0.8, 0.02, 0.02, 0.16), byrow = TRUE, ncol = 4)
scenariob1 <- matrix(c(0.1, 0.00, 0.45, 0.45, 0.2, 0.32, 0.24, 0.24, 0.3, 0.07, 0.28, 0.35, 0.4, 0.4, 0.1, 0.1), byrow = TRUE, ncol = 4)
scenariob2 <- matrix(c(0.15, 0.55, 0.2, 0.1, 0.3, 0.00, 0.35, 0.35, 0.45, 0.2, 0.2, 0.15, 0.6, 0.4, 0.00, 0.00), byrow = TRUE, ncol = 4)
scenariob3 <- matrix(c(0.0, 0.6, 0.4, 0.0, 0.1, 0.7, 0.2, 0.0, 0.7, 0.1, 0.1, 0.1, 0.8, 0.2, 0.0, 0.0), byrow = TRUE, ncol = 4)
scenario61 <- matrix(c(0.1, 0.5, 0.4, 0.0, 0.2, 0.3, 0.3, 0.2, 0.3, 0.1, 0.25, 0.35, 0.4, 0, 0.1, 0.5, 0.5, 0, 0, 0.5, 0.6, 0, 0, 0.4), byrow = TRUE, ncol = 4)
scenario62 <- matrix(c(0, 1, 0, 0, 0.1, 0.9, 0, 0, 0.2, 0.7, 0.1, 0, 0.3, 0.4, 0.2, 0.1, 0.4, 0.1, 0.2, 0.3, 0.5, 0, 0, 0.5), byrow = TRUE, ncol = 4)
scenario63 <- matrix(c(0.15, 0.4, 0.35, 0.1, 0.3, 0.3, 0.25, 0.15, 0.45, 0.1, 0.15, 0.3, 0.5, 0, 0, 0.5, 0.75, 0, 0, 0.25, 0.9, 0, 0, 0.1), byrow = TRUE, ncol = 4)
scenario64 <- matrix(c(0.3, 0.3, 0.2, 0.2, 0.35, 0.3, 0.2, 0.15, 0.4, 0.15, 0.15, 0.3, 0.45, 0.05, 0.1, 0.4, 0.5, 0, 0, 0.5, 0.55, 0, 0, 0.45), byrow = TRUE, ncol = 4)
# Repeat the trial 10000 times under scenario64, recording the dose each
# simulated trial selects as best.
result <- matrix(0, 10000)
for (i in 1:10000) {
result[i] <- one_sml(scenario64, ssize = 30, Tau = 0.5, m = 3, delta = 0.7)
}
# Histogram of the selected doses; counts give the selection frequencies.
a <- hist(result)
a$counts
# TODO: also increase the number of DOSE levels.
|
###########################################################################/**
# @RdocClass Discretize
#
# @title "Discretize class"
#
# \description{
# Contains all methods related to discretizing a uni/bi-variate normal distribution. Both uniform and nonuniform discretization are possible.
# @classhierarchy
# }
#
# @synopsis
#
# \arguments{
# \item{...}{Not used.}
# }
#
# \section{Fields and Methods}{
# @allmethods ""
# }
#
# @examples "../RdocFiles/Discretize.Rex"
#
# \references{
# [1] Nielsen, L.R.; Jørgensen, E. & Højsgaard, S. Embedding a state space model into a Markov decision process Dept. of Genetics and Biotechnology, Aarhus University, 2008. \cr
# }
#
# @author
#*/###########################################################################
setConstructorS3("Discretize", function(...) {
  # Stateless class: it only bundles the discretization methods below.
  extend(Object(), "Discretize")
})
#########################################################################/**
# @RdocMethod volCube
#
# @title "Volume/length of cube"
#
# \description{
# Internal function.
# }
#
# @synopsis
#
# \arguments{
# \item{cube}{ The cube under consideration which is a (2x2) matrix containing the bounds (columnwise) (bivariate case) or vector of length 2 (univariate case). }
# \item{...}{Not used.}
# }
#
# @author
#
# \references{
# Based on
# \emph{Kozlov, A. & Koller, D. Nonuniform dynamic discretization in hybrid networks The Thirteenth Conference on Uncertainty in Artificial Intelligence (UAI-97), 1997, 314-325 }}
#
# @visibility "private"
#
#*/#########################################################################
setMethodS3("volCube", "Discretize", function(this, cube, ...){
  # Product of the side lengths of the cube (given columnwise as a 2-row
  # matrix of lower/upper bounds): area in 2D, interval length in 1D.
  sides <- cube[2, ] - cube[1, ]
  return(prod(sides))
})
#########################################################################/**
# @RdocMethod klBound1D
#
# @title "Upper bound on KL distance on a 1D cube"
#
# \description{
# Internal function.
# }
#
# @synopsis
#
# \arguments{
# \item{cube}{ The cube under consideration which is vector of length 2 containing the upper and lower bounds (univariate case). }
# \item{mu}{ The mean. }
# \item{sigma2}{ The variance. }
# \item{...}{ Not used. }
# }
#
# \value{
# @get "title".
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @visibility "private"
#
#*/#########################################################################
# NOTE(review): in the bound below, the first weight divides by
# (b$max - b$min) while the second divides by (b$max - b$mean); for a
# convex-combination style bound both weights would share the denominator
# (b$max - b$min). Verify against the Kozlov & Koller (UAI-97) bound before
# relying on these KL values. (klBound2D has the same form.)
setMethodS3("klBound1D", "Discretize", function(this,cube,mu,sigma2, ...){
# Truncate infinite interval ends to mu +/- 2.5*sd before evaluating the
# bound (this modifies a local copy; the caller's matrix is untouched).
if (cube[1,1]<= -Inf) cube[1,1]<-mu-2.5*sqrt(sigma2)
if (cube[2,1]>= Inf) cube[2,1]<-mu+2.5*sqrt(sigma2)
# Min/mean/max density over the interval.
b<-this$bounds1D(cube,mu,sigma2)
# Interpolated KL upper bound, scaled by the interval length.
return(((b$max-b$mean)/(b$max-b$min)*b$min*log(b$min/b$mean)+(b$mean-b$min)/(b$max-b$mean)*b$max*log(b$max/b$mean))*this$volCube(cube))
}, private=TRUE)
#########################################################################/**
# @RdocMethod klBound2D
#
# @title "Upper bound on KL distance on a 2D cube"
#
# \description{
# Internal function.
# }
#
# @synopsis
#
# \arguments{
# \item{cube}{ The cube under consideration which is a (2x2) matrix containing the bounds of the variables (columnwise) (bivariate case). }
# \item{mu}{ The mean. }
# \item{sigma}{ The covariate matrix. }
# \item{...}{Not used.}
# }
#
# \value{
# @get "title".
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @visibility "private"
#
#*/#########################################################################
# NOTE(review): as in klBound1D, the second weight divides by
# (b$max - b$mean) whereas the first divides by (b$max - b$min); for a convex
# combination both would use (b$max - b$min). Verify against the
# Kozlov & Koller (UAI-97) bound.
setMethodS3("klBound2D", "Discretize", function(this,cube,mu,sigma, ...){
# Min/mean/max density over the cube, then the interpolated KL upper bound
# scaled by the cube area.
b<-this$bounds2D(cube,mu,sigma)
return(((b$max-b$mean)/(b$max-b$min)*b$min*log(b$min/b$mean)+(b$mean-b$min)/(b$max-b$mean)*b$max*log(b$max/b$mean))*this$volCube(cube))
})
#########################################################################/**
# @RdocMethod bounds1D
#
# @title "Min, mean and max density on a 1D cube"
#
# \description{
# Internal function.
# }
#
# @synopsis
#
# \arguments{
# \item{cube}{ The cube under consideration which is a (2x2) matrix containing the bounds of the variables (columnwise) (bivariate case). }
# \item{mu}{ The mean. }
# \item{sigma2}{ The variance. }
# \item{len}{ The number of samples of each coordinate.}
# \item{...}{Not used.}
# }
#
# \value{
# @get "title".
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @visibility "private"
#
#*/#########################################################################
setMethodS3("bounds1D", "Discretize", function(this, cube,mu,sigma2,len=100, ...){
  # Sample 'len' equally spaced points along each column of the cube, take
  # their cartesian product, and evaluate the N(mu, sigma2) density at the
  # first coordinate of every grid point.
  nDim <- length(cube[1, ])
  coords <- matrix(NA, len, nDim)
  for (k in seq_len(nDim)) {
    coords[, k] <- seq(cube[1, k], cube[2, k], len = len)
  }
  grid <- expand.grid(as.list(as.data.frame(coords)))
  dens <- dnorm(grid[, 1], mu, sqrt(sigma2))
  # Extremes and average of the sampled density values.
  return(list(min = min(dens), mean = mean(dens), max = max(dens)))
})
#########################################################################/**
# @RdocMethod bounds2D
#
# @title "Min, mean and max density on a 2D cube"
#
# \description{
# Internal function.
# }
#
# @synopsis
#
# \arguments{
# \item{cube}{ The cube under consideration which is a (2x2) matrix containing the bounds of the variables (columnwise) (bivariate case). }
# \item{mu}{ The mean. }
# \item{sigma}{ The covariate matrix. }
# \item{len}{ The number of samples of each coordinate.}
# \item{...}{Not used.}
# }
#
# \value{
# @get "title".
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @visibility "private"
#
#*/#########################################################################
setMethodS3("bounds2D", "Discretize", function(this, cube,mu,sigma,len=100, ...){
  # Evaluate the bivariate normal density on a len x len grid spanning the
  # cube (columns of 'cube' hold lower/upper bounds per coordinate).
  nDim <- length(cube[1, ])
  coords <- matrix(NA, len, nDim)
  for (k in seq_len(nDim)) {
    coords[, k] <- seq(cube[1, k], cube[2, k], len = len)
  }
  grid <- expand.grid(as.list(as.data.frame(coords)))
  dens <- dmvnorm(grid, mean = mu, sigma = sigma)
  # Extremes and average of the sampled density values.
  return(list(min = min(dens), mean = mean(dens), max = max(dens)))
})
#########################################################################/**
# @RdocMethod ratio
#
# @title "Calc max divided by min density value"
#
# \description{
# Internal function.
# }
#
# @synopsis
#
# \arguments{
# \item{x}{ Values to calc. density for. }
# \item{mu}{ The mean. }
# \item{sigma}{ The covariate matrix. }
# \item{len}{ The number of samples of each coordinate.}
# \item{...}{Not used.}
# }
#
# \value{
# @get "title".
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @visibility "private"
#
#*/#########################################################################
setMethodS3("ratio", "Discretize", function(this, x,mu,sigma, ...){
  # Ratio between the largest and smallest bivariate-normal density value
  # over the supplied points.
  dens <- dmvnorm(x, mean = mu, sigma = sigma)
  return(max(dens) / min(dens))
})
#########################################################################/**
# @RdocMethod direc
#
# @title "Finds the optimal (approximate) direction to split a cube"
#
# \description{
# Internal function.
# }
#
# @synopsis
#
# \arguments{
# \item{cube}{ The cube under consideration which is a (2x2) matrix containing the bounds of the variables (columnwise) (bivariate case). }
# \item{mu}{ The mean. }
# \item{sigma}{ The covariate matrix. }
# \item{...}{Not used.}
# }
#
# \value{
# Return the variable index to split.
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @visibility "private"
#
#*/#########################################################################
setMethodS3("direc", "Discretize", function(this, cube,mu,sigma, ...){
  # Choose the coordinate to split: probe each axis with a line of 100 points
  # through the cube center and pick the axis whose density profile has the
  # largest max/min ratio (i.e. where the density varies the most).
  side <- cube[2, ] - cube[1, ]
  mid <- cube[1, ] + side / 2
  best <- 0
  bestRatio <- -Inf
  for (k in seq_len(length(cube[1, ]))) {
    probe <- matrix(mid, 100, length(mid), byrow = TRUE)
    probe[, k] <- seq(cube[1, k], cube[2, k], len = 100)
    r <- this$ratio(probe, mu, sigma)
    if (r > bestRatio) {
      best <- k
      bestRatio <- r
    }
  }
  return(best)
})
#########################################################################/**
# @RdocMethod plotCubes
#
# @title "Plot the cubes (only bivariate distributions)"
#
# \description{
# Internal function.
# }
#
# @synopsis
#
# \arguments{
# \item{cubes}{ The list of hypercubes. }
# \item{start}{ An cube used to set the plot area.}
# \item{colors}{ An integer vector of same length as the number of cubes used to give the cubes colors. The color is set by the integer value. }
# \item{...}{Further arguments passed to plot.}
# }
#
# \value{
# NULL
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @visibility "private"
#
#*/#########################################################################
setMethodS3("plotCubes", "Discretize", function(this, cubes, start, colors, ...) {
  # Open an empty plot region sized by the 'start' cube, then draw each cube
  # (its bordered bounds are stored in cubeB).
  plot(0, 0, xlim = c(start[1, 1], start[2, 1]), ylim = c(start[1, 2], start[2, 2]),
    type = "n", xlab = "", ylab = "", ...)
  if (is.null(colors)) {
    # Outlines only.
    for (k in seq_along(cubes)) {
      this$addCube(cubes[[k]]$cubeB)
    }
  } else {
    # Filled rectangles first, then black outlines on top.
    for (k in seq_along(cubes)) {
      this$addCubeCol(cubes[[k]]$cubeB, colors[k])
    }
    for (k in seq_along(cubes)) {
      this$addCube(cubes[[k]]$cubeB)
    }
  }
})
#########################################################################/**
# @RdocMethod addCube
#
# @title "Adds a 2D cube to the plot"
#
# \description{
# Internal function.
# }
#
# @synopsis
#
# \arguments{
# \item{cube}{ The cube under consideration which is a (2x2) matrix containing the bounds of the variables (columnwise) (bivariate case). }
# \item{col}{ Color of the lines. }
# \item{...}{Not used.}
# }
#
# \value{
# NULL
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @visibility "private"
#
#*/#########################################################################
setMethodS3("addCube", "Discretize", function(this, cube,col="black", ...) {
  # Trace the rectangle outline: bottom-left -> top-left -> top-right ->
  # bottom-right -> back to bottom-left.
  xs <- c(cube[1, 1], cube[1, 1], cube[2, 1], cube[2, 1], cube[1, 1])
  ys <- c(cube[1, 2], cube[2, 2], cube[2, 2], cube[1, 2], cube[1, 2])
  lines(xs, ys, col = col)
})
#########################################################################/**
# @RdocMethod addCubeCol
#
# @title "Adds a 2D cube with color to the plot"
#
# \description{
# Internal function.
# }
#
# @synopsis
#
# \arguments{
# \item{cube}{ The cube under consideration which is a (2x2) matrix containing the bounds of the variables (columnwise) (bivariate case). }
# \item{color}{ Color of the cube. }
# \item{...}{Not used.}
# }
#
# \value{
# NULL
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @visibility "private"
#
#*/#########################################################################
setMethodS3("addCubeCol", "Discretize", function(this, cube,color=NULL, ...) {
  # Filled rectangle with a black border; corners come from the columnwise
  # lower/upper bounds of the cube.
  lowerLeft <- c(cube[1, 1], cube[1, 2])
  upperRight <- c(cube[2, 1], cube[2, 2])
  rect(lowerLeft[1], lowerLeft[2], upperRight[1], upperRight[2],
    col = color, border = "black")
})
#########################################################################/**
# @RdocMethod addPoints
#
# @title "Adds center points to the plot"
#
# \description{
# Internal function.
# }
#
# @synopsis
#
# \arguments{
# \item{cubes}{ The list of hypercubes. }
# \item{...}{Not used.}
# }
#
# \value{
# NULL
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @visibility "private"
#
#*/#########################################################################
setMethodS3("addPoints", "Discretize", function(this, cubes, ...) {
  # Plot every cube's center as a small point on the current device.
  # Coordinates are extracted in one pass (the original grew x/y with c()
  # inside a 1:length loop, which is quadratic and fails on an empty list).
  x <- vapply(cubes, function(cb) cb$center[1], numeric(1))
  y <- vapply(cubes, function(cb) cb$center[2], numeric(1))
  points(x, y, pch = ".")
})
#########################################################################/**
# @RdocMethod addIdx
#
# @title "Add cube index to the plot"
#
# \description{
# Internal function.
# }
#
# @synopsis
#
# \arguments{
# \item{cubes}{ The list of hypercubes. }
# \item{...}{Not used.}
# }
#
# \value{
# NULL
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @visibility "private"
#
#*/#########################################################################
setMethodS3("addIdx", "Discretize", function(this, cubes, ...) {
  # Label each cube with its zero-based index at the cube's center point.
  # Fixes the original, which built an 'idx' vector and then ignored it,
  # recomputing the labels with a no-op paste(..., sep = "").
  x <- vapply(cubes, function(cb) cb$center[1], numeric(1))
  y <- vapply(cubes, function(cb) cb$center[2], numeric(1))
  idx <- seq_along(cubes) - 1
  text(x, y, labels = idx)
})
#########################################################################/**
# @RdocMethod addText
#
# @title "Add text to the plot"
#
# \description{
# Internal function.
# }
#
# @synopsis
#
# \arguments{
# \item{cubes}{ The list of hypercubes. }
# \item{text}{ Text to be added to each hypercube.}
# \item{...}{Not used.}
# }
#
# \value{
# NULL
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @visibility "private"
#
#*/#########################################################################
setMethodS3("addText", "Discretize", function(this, cubes, text, ...) {
  # Write 'text' (recycled by text()) at each cube's center on the current
  # device. Removes the original's unused 'yield' accumulator and replaces
  # quadratic vector growth with a single-pass extraction.
  x <- vapply(cubes, function(cb) cb$center[1], numeric(1))
  y <- vapply(cubes, function(cb) cb$center[2], numeric(1))
  text(x, y, labels = text)
})
#########################################################################/**
# @RdocMethod discretize1DUnifEqLth
#
# @title "Discretize a normal distribution such that intervals have equal length"
#
# \description{
# @get "title"
# }
#
# @synopsis
#
# \arguments{
# \item{mu}{ The mean. }
# \item{sigma2}{ The variance. }
# \item{n}{ Number of intervals. }
# \item{asDF}{ Return result as a data frame. If false return matrix. }
# \item{...}{Not used.}
# }
#
# \value{
# A list of intervals (data frame if \code{asDF = TRUE}).
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @examples "../RdocFiles/Discretize.Rex"
#
#*/#########################################################################
setMethodS3("discretize1DUnifEqLth", "Discretize", function(this, mu, sigma2,
  n, asDF=TRUE, ...)
{
  # Cut the range mu +/- 2.5*sd into n intervals of equal length; the first
  # and last interval are then opened up to -Inf / +Inf so the intervals
  # partition the whole real line. Centers stay at the finite midpoints.
  bounds <- c(mu - 2.5 * sqrt(sigma2), mu + 2.5 * sqrt(sigma2))
  width <- diff(bounds) / n
  dat <- data.frame(center = NA, min = NA, max = NA, idxA = seq_len(n) - 1)
  lo <- bounds[1]
  for (k in seq_len(n)) {
    dat$min[k] <- lo
    dat$center[k] <- lo + width / 2
    dat$max[k] <- lo + width
    lo <- lo + width
  }
  dat$min[1] <- -Inf
  dat$max[nrow(dat)] <- Inf
  if (!asDF) return(as.matrix(dat))
  return(dat)
})
#########################################################################/**
# @RdocMethod discretize1DUnifEqProb
#
# @title "Discretize a normal distribution such that intervals have equal probability"
#
# \description{
# @get "title"
# }
#
# @synopsis
#
# \arguments{
# \item{mu}{ The mean. }
# \item{sigma2}{ The variance. }
# \item{n}{ Number of intervals. }
# \item{asDF}{ Return result as a data frame. If false return matrix. }
# \item{...}{Not used.}
# }
#
# \value{
# A list of intervals (data frame if \code{asDF = TRUE}).
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @examples "../RdocFiles/Discretize.Rex"
#
#*/#########################################################################
setMethodS3("discretize1DUnifEqProb", "Discretize", function(this, mu, sigma2,
  n, asDF=TRUE, ...)
{
  # Discretize N(mu, sigma2) into n intervals of equal probability 1/n.
  # Interval boundaries are the quantiles; each interval's 'center' is the
  # conditional mean on that interval:
  #   E[X | a < X < b] = mu - sigma2 * (dnorm(b) - dnorm(a)) / P(a < X < b).
  #
  # Fixes vs original: the construction loop used 1:(n-1), which ran i = 1, 0
  # for n == 1 and produced garbage (now rejected explicitly); a duplicated
  # queue$elements assignment was removed; the unused 'q' variable and the
  # reliance on the leaked loop index for the last boundary were removed.
  if (n < 2) stop("'n' must be at least 2")
  pX <- 1/n  # probability mass in each interval
  meanX <- NULL
  x <- -Inf
  for (i in seq_len(n - 1)) {
    x <- c(x, qnorm(pX * i, mu, sqrt(sigma2)))
    if (i == 1) {
      # dnorm at -Inf is 0, so the first interval drops that term.
      meanX <- c(meanX, mu - sigma2*(dnorm(x[i+1], mu, sqrt(sigma2)))/pX)
    } else {
      meanX <- c(meanX, mu - sigma2*(dnorm(x[i+1], mu, sqrt(sigma2)) - dnorm(x[i], mu, sqrt(sigma2)))/pX)
    }
  }
  x <- c(x, Inf)
  # Last interval: dnorm at +Inf is 0; x[n] is its finite lower bound.
  meanX <- c(meanX, mu - sigma2*(0 - dnorm(x[n], mu, sqrt(sigma2)))/pX)
  # Build the interval descriptors (cube = 2x1 matrix of bounds).
  elements <- vector("list", n)
  queue <- list(elements = elements)
  for (i in seq_len(length(x) - 1)) {
    cube <- matrix(c(x[i], x[i+1]), 2, 1)
    queue$elements[[i]] <- list(center = meanX[i], cube = cube)
  }
  # Zero-based action index for each interval.
  for (i in seq_along(queue$elements)) {
    queue$elements[[i]]$idxA <- i - 1
  }
  # Total KL upper bound of the discretization (reported only).
  KL <- 0
  for (i in seq_along(queue$elements)) {
    KL <- KL + this$klBound1D(queue$elements[[i]]$cube, mu, sigma2)
  }
  cat(" KL-bound:",KL,"\n")
  if (!asDF) return(queue$elements)
  # Flatten to a data frame with columns center/min/max/idxA.
  dF <- NULL
  for (i in seq_len(length(x) - 1)) {
    e <- queue$elements[[i]]
    dF <- rbind(dF, c(center = e$center, min = e$cube[1,1], max = e$cube[2,1], idxA = e$idxA))
  }
  rownames(dF) <- 1:(length(x)-1)
  return(as.data.frame(dF))
})
#########################################################################/**
# @RdocMethod discretize1DVec
#
# @title "Discretize the real numbers according to a set of center points"
#
# \description{
# @get "title". Create intervals with center points as given in the argument.
# }
#
# @synopsis
#
# \arguments{
# \item{v}{ A vector of center points. }
# \item{inf}{ Value used for infinity. }
# \item{mInf}{ Value used for minus infinity. }
# \item{asDF}{ Return result as a data frame. If false return matrix. }
# \item{...}{Not used.}
# }
#
# \value{
# A list of intervals (data frame if \code{asDF = TRUE}).
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @examples "../RdocFiles/Discretize.Rex"
#
#*/#########################################################################
setMethodS3("discretize1DVec", "Discretize", function(this, v, inf=Inf, mInf=-inf, asDF=TRUE, ...)
{
  # Build intervals whose centers are the (sorted) points of 'v'; every
  # interior boundary is the midpoint between two neighbouring centers, and
  # the outermost bounds are mInf / inf.
  v <- sort(v)
  k <- length(v)
  dat <- data.frame(center = v, min = NA, max = NA)
  for (j in seq_len(k)) {
    if (j == 1) {
      dat$min[j] <- mInf
    } else {
      dat$min[j] <- dat$center[j] - (dat$center[j] - dat$center[j-1]) / 2
    }
    if (j == k) {
      dat$max[j] <- inf
    } else {
      dat$max[j] <- dat$center[j] + (dat$center[j+1] - dat$center[j]) / 2
    }
  }
  if (!asDF) return(as.matrix(dat))
  return(dat)
})
#########################################################################/**
# @RdocMethod discretize2DNonunif
#
# @title "Discretize a bivariate normal distribution using a non-uniform discretization "
#
# \description{
# Discretize a bivariate normal distribution into hypercubes (squares)
# such that the approximation have a certain Kulback Libler (KL) distance.
# }
#
# @synopsis
#
# \arguments{
# \item{mu}{ The mean (2-dim vector). }
# \item{sigma}{ The covariance (2x2 matrix). }
# \item{maxKL}{ Max KL distance. }
# \item{maxIte}{ Max number of iterations. }
# \item{modifyCenter}{ If no don't split the cubes around the mean center. If "split1" split the 4 cubes around the mean into 9 squares such that the mean is the center of a cube. If "split2" first add cubes such that the axis of the mean always in the center of the cubes. }
# \item{split}{ Only used if modifyCenter = "split2" to set the size of the nine cubes around the mean. }
# \item{...}{Not used.}
# }
#
# \value{
# A list where each element describe the cube and contains: KL - an upper bound on the KL-distance, cube - the bounds, center - the center, idxM - the index, cubeB - the fixed bounds (used for plotting).
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @examples "../RdocFiles/Discretize.Rex"
#
#*/#########################################################################
setMethodS3("discretize2DNonunif", "Discretize", function(this, mu, sigma,
maxKL=0.5, maxIte=500, modifyCenter="no", split=0.25, ...)
{
# Greedy non-uniform discretization: keep a queue of cubes with per-cube KL
# upper bounds and repeatedly split the cube with the largest bound until
# the total bound drops below maxKL (or maxIte iterations are reached).
# Working area: mu +/- 2.5 standard deviations in each coordinate.
xB<-c(mu[1]-2.5*sqrt(sigma[1,1]),mu[1]+2.5*sqrt(sigma[1,1]))
yB<-c(mu[2]-2.5*sqrt(sigma[2,2]),mu[2]+2.5*sqrt(sigma[2,2]))
cube0<-cube<-matrix(c(xB[1],xB[2],yB[1],yB[2]),2,2)
if (modifyCenter!="split2") {
# Start from a single cube covering the whole area.
KL<-this$klBound2D(cube,mu,sigma)
element<-list(KL=KL,cube=cube) # the first element in the queue
elements<-vector("list", 2) # empty list of 2 elements
elements[[1]]<-element
queue<-list(maxIdx=1, lastIdx=1, KL=KL,elements=elements)
}
if (modifyCenter=="split2") {
# add nine cubes split around zero (numbered from topleft to bottomright)
# so that the mean sits at the center of the middle cube; 'split' scales
# the size of the central band in units of the standard deviation.
x<-c(mu[1]-split*sqrt(sigma[1,1]),mu[1]+split*sqrt(sigma[1,1]))
y<-c(mu[2]-split*sqrt(sigma[2,2]),mu[2]+split*sqrt(sigma[2,2]))
cube<-list()
cube[[1]]<-matrix(c(xB[1],x[1],y[2],yB[2]),2,2)
cube[[2]]<-matrix(c(x[1],x[2],y[2],yB[2]),2,2)
cube[[3]]<-matrix(c(x[2],xB[2],y[2],yB[2]),2,2)
cube[[4]]<-matrix(c(xB[1],x[1],y[1],y[2]),2,2)
cube[[5]]<-matrix(c(x[1],x[2],y[1],y[2]),2,2) # the center cube
cube[[6]]<-matrix(c(x[2],xB[2],y[1],y[2]),2,2)
cube[[7]]<-matrix(c(xB[1],x[1],yB[1],y[1]),2,2)
cube[[8]]<-matrix(c(x[1],x[2],yB[1],y[1]),2,2)
cube[[9]]<-matrix(c(x[2],xB[2],yB[1],y[1]),2,2)
elements<-list() # empty list
KL<-maxI<-Max<-0
for (i in 1:9) {
# Track the cube with the largest bound; it will be split first.
cubeKL<-this$klBound2D(cube[[i]],mu,sigma)
if (cubeKL>Max) {
Max<-cubeKL
maxI<-i
}
KL<-KL+cubeKL
element<-list(KL=cubeKL,cube=cube[[i]])
elements[[i]]<-element
}
queue<-list(maxIdx=maxI, lastIdx=9, KL=KL,elements=elements)
}
# Main refinement loop: split the worst cube until the KL target is met.
ite<-1
while (queue$KL>maxKL & ite<maxIte){
maxIdx<-queue$maxIdx
#cat("Total KL = ",queue$KL,"\n")
# Remove the worst cube's contribution; it is replaced by its two halves.
KL<-queue$KL-queue$elements[[maxIdx]]$KL
cube<-queue$elements[[maxIdx]]$cube
#cat("Split cube:\n"); print(cube)
# Split along the coordinate chosen by direc(), at the midpoint.
splitIdx<-this$direc(cube,mu,sigma)
#cat("Split variable number ",splitIdx,"\n")
split<-cube[1,splitIdx]+(cube[2,splitIdx]-cube[1,splitIdx])/2
cube1<-cube2<-cube
cube1[2,splitIdx]<-split
cube2[1,splitIdx]<-split
KL1<-this$klBound2D(cube1,mu,sigma)
KL2<-this$klBound2D(cube2,mu,sigma)
queue$KL<-KL+KL1+KL2
element1<-list(KL=KL1,cube=cube1)
element2<-list(KL=KL2,cube=cube2)
# First half replaces the split cube; second half is appended.
queue$elements[[maxIdx]]<-element1
queue$lastIdx<-queue$lastIdx+1
queue$elements[[queue$lastIdx]]<-element2
#cat("The two new elements:\n"); print(element1); print(element2);
# Linear scan for the next-worst cube.
maxVal<- -Inf;
for (i in 1:queue$lastIdx) {
if (queue$elements[[i]]$KL>maxVal) {
maxIdx<-i; maxVal<-queue$elements[[i]]$KL
}
}
queue$maxIdx<-maxIdx; ite<-ite+1
}
if (modifyCenter=="split1") {
# split the 4 cubes close to mu such that mu becomes the center of a cube:
# the four cubes touching the mean are replaced by a 3x3 grid over their
# joint bounding box.
idx<-NULL
for (i in 1:queue$lastIdx) { # first find cubes
if (queue$elements[[i]]$cube[1,1]==mu[1] | queue$elements[[i]]$cube[2,1]==mu[1]) {
if (queue$elements[[i]]$cube[1,2]==mu[2] | queue$elements[[i]]$cube[2,2]==mu[2]) {
idx<-c(idx,i)
}
}
}
# Bounding box of the selected cubes; their KL is removed from the total.
maxY=maxX=-Inf
minY=minX=Inf
for (i in idx) {
maxX=max(maxX,queue$elements[[i]]$cube[2,1])
maxY=max(maxY,queue$elements[[i]]$cube[2,2])
minX=min(minX,queue$elements[[i]]$cube[1,1])
minY=min(minY,queue$elements[[i]]$cube[1,2])
queue$KL<-queue$KL-queue$elements[[i]]$KL
}
difX=(maxX-minX)/3
difY=(maxY-minY)/3
# Nine replacement cubes: the first few reuse the freed slots in 'idx',
# the rest are appended at the end of the queue.
for (i in 0:2) {
for (j in 0:2) {
x=c(minX+i*difX,minX+(i+1)*difX)
y=c(minY+j*difY,minY+(j+1)*difY)
cube<-matrix(c(x[1],x[2],y[1],y[2]),2,2)
KL<-this$klBound2D(cube,mu,sigma)
element<-list(KL=KL,cube=cube)
if (!is.null(idx)) { # if still some idx to change
queue$elements[[idx[1]]]<-element
if(length(idx)>1) {
idx<-idx[2:length(idx)]
} else {
idx<-NULL
}
} else {
queue$lastIdx<-queue$lastIdx+1
queue$elements[[queue$lastIdx]]<-element
}
queue$KL<-queue$KL+KL
}
}
}
# find center (geometric midpoint of each cube)
for (i in 1:queue$lastIdx) {
cube<-queue$elements[[i]]$cube
x<-cube[1,1]+(cube[2,1]-cube[1,1])/2
y<-cube[1,2]+(cube[2,2]-cube[1,2])/2
queue$elements[[i]]$center<-c(x,y)
}
# set index (zero-based idxM)
for (i in 1:queue$lastIdx) {
queue$elements[[i]]$idxM<- i-1
}
# remove borders (the one with borders saved in cubeB): any side lying on
# the outer boundary of the area is pushed to +/- Inf so that the cubes
# partition the whole plane.
cubes<-queue$elements
m<-matrix(c(Inf,-Inf,Inf,-Inf),nrow=2,ncol=2)
for (i in 1:length(cubes)) { # min and max values, i.e. borders
idx1<-cubes[[i]]$cube[1,]<m[1,]
idx2<-cubes[[i]]$cube[2,]>m[2,]
m[1,idx1]<-cubes[[i]]$cube[1,idx1]
m[2,idx2]<-cubes[[i]]$cube[2,idx2]
}
for (i in 1:length(cubes)) {
cubes[[i]]$cubeB<-cubes[[i]]$cube
if (cubes[[i]]$cube[1,1]==m[1,1]) cubes[[i]]$cube[1,1]<- -Inf
if (cubes[[i]]$cube[1,2]==m[1,2]) cubes[[i]]$cube[1,2]<- -Inf
if (cubes[[i]]$cube[2,1]==m[2,1]) cubes[[i]]$cube[2,1]<- Inf
if (cubes[[i]]$cube[2,2]==m[2,2]) cubes[[i]]$cube[2,2]<- Inf
}
cat("Total KL = ",queue$KL,"\n")
return(cubes)
})
#########################################################################/**
# @RdocMethod discretize2DUnifEqInv
#
# @title "Discretize a bivariate normal distribution using a uniform discretization with intervals of equal length "
#
# \description{
# @get "title"
# }
#
# @synopsis
#
# \arguments{
# \item{mu}{ The mean (2-dim vector). }
# \item{sigma}{ The covariance (2x2 matrix). }
# \item{lgdX}{ Number for intervals of x coordinate. }
# \item{lgdY}{ Number for intervals of y coordinate. }
# \item{...}{Not used.}
# }
#
# \value{
# A list where each element describe the cube and contains: KL - an upper bound on the KL-distance, cube - the bounds, center - the center, idxM - the index, cubeB - the fixed bounds (used for plotting).
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @examples "../RdocFiles/Discretize.Rex"
#
#*/#########################################################################
setMethodS3("discretize2DUnifEqInv", "Discretize", function(this, mu, sigma, lgdX, lgdY, ...){
# Uniform discretization of a bivariate normal: the area mu +/- 2.5*sd in
# each coordinate is cut into lgdX x lgdY cubes of equal side length.
# Returns the list of cube descriptors (KL, cube, center, idxM, cubeB).
#
# Fixes vs original: an unused density matrix 'z' (and the expand.grid 'g'
# feeding it) plus an unused initial 'cube0' were dead code and removed.
xB<-c(mu[1]-2.5*sqrt(sigma[1,1]),mu[1]+2.5*sqrt(sigma[1,1]))
yB<-c(mu[2]-2.5*sqrt(sigma[2,2]),mu[2]+2.5*sqrt(sigma[2,2]))
x<-seq(xB[1],xB[2],length=lgdX+1) # grid lines on the x axis
y<-seq(yB[1],yB[2],length=lgdY+1) # grid lines on the y axis
elements<-vector("list", 2) # initial list; grown as cubes are appended
queue<-list(maxIdx=NA, lastIdx=1, KL=0,elements=elements)
# One cube per grid cell, accumulating the total KL upper bound.
for (i in 1:(length(x)-1)) {
for (j in 1:(length(y)-1)) {
cube<-matrix(c(x[i],x[i+1],
y[j],y[j+1]),2,2)
KL<-this$klBound2D(cube,mu,sigma)
element<-list(KL=KL,cube=cube)
queue$KL=queue$KL+KL
queue$elements[[queue$lastIdx]]<-element
queue$lastIdx<-queue$lastIdx+1
}
}
queue$lastIdx<-queue$lastIdx-1
# calc center point (geometric midpoint of each cube)
for (i in 1:queue$lastIdx) {
cube<-queue$elements[[i]]$cube
x<-cube[1,1]+(cube[2,1]-cube[1,1])/2
y<-cube[1,2]+(cube[2,2]-cube[1,2])/2
queue$elements[[i]]$center<-c(x,y)
}
# set index (zero-based idxM)
for (i in 1:queue$lastIdx) {
queue$elements[[i]]$idxM <- i-1
}
# remove borders (the bordered bounds are kept in cubeB): outermost sides
# are pushed to +/- Inf so the cubes partition the whole plane
cubes<-queue$elements
m<-matrix(c(Inf,-Inf,Inf,-Inf),nrow=2,ncol=2)
for (i in 1:length(cubes)) {
idx1<-cubes[[i]]$cube[1,]<m[1,]
idx2<-cubes[[i]]$cube[2,]>m[2,]
m[1,idx1]<-cubes[[i]]$cube[1,idx1]
m[2,idx2]<-cubes[[i]]$cube[2,idx2]
}
for (i in 1:length(cubes)) {
cubes[[i]]$cubeB<-cubes[[i]]$cube
if (cubes[[i]]$cube[1,1]==m[1,1]) cubes[[i]]$cube[1,1]<- -Inf
if (cubes[[i]]$cube[1,2]==m[1,2]) cubes[[i]]$cube[1,2]<- -Inf
if (cubes[[i]]$cube[2,1]==m[2,1]) cubes[[i]]$cube[2,1]<- Inf
if (cubes[[i]]$cube[2,2]==m[2,2]) cubes[[i]]$cube[2,2]<- Inf
}
cat("Total KL = ",queue$KL,"\n")
return(cubes)
})
#########################################################################/**
# @RdocMethod discretize2DUnifEqProb
#
# @title "Discretize a bivariate normal distribution using a uniform discretization with intervals of equal probability "
#
# \description{
# @get "title"
# }
#
# @synopsis
#
# \arguments{
# \item{mu}{ The mean (2-dim vector). }
# \item{sigma}{ The covariance (2x2 matrix). }
# \item{lgdX}{ Number for intervals of x coordinate. }
# \item{lgdY}{ Number for intervals of y coordinate. }
# \item{...}{Not used.}
# }
#
# \value{
# A list where each element describe the cube and contains: KL - an upper bound on the KL-distance, cube - the bounds, center - the center, idxM - the index, cubeB - the fixed bounds (used for plotting).
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @examples "../RdocFiles/Discretize.Rex"
#
#*/#########################################################################
setMethodS3("discretize2DUnifEqProb", "Discretize", function(this,mu,sigma,lgdX,lgdY, ...){
# Discretize a bivariate normal into an lgdX x lgdY grid whose grid lines
# are marginal quantiles, so each strip carries equal marginal probability.
# meanX/meanY hold the conditional mean of each marginal interval:
#   E[X | a < X < b] = mu - sigma^2 * (dnorm(b) - dnorm(a)) / P(a < X < b).
#
# Fixes vs original: unused 'q', 'cube0', the density matrix 'z' and the two
# expand.grid products feeding it were dead code and removed.
pX<-1/lgdX # prob in each interval
pY<-1/lgdY
xB<-c(mu[1]-2.5*sqrt(sigma[1,1]),mu[1]+2.5*sqrt(sigma[1,1])) # bounds used
yB<-c(mu[2]-2.5*sqrt(sigma[2,2]),mu[2]+2.5*sqrt(sigma[2,2]))
# x-axis quantile grid and conditional means.
meanX<-NULL
x<-xB[1]
for (i in 1:(lgdX-1)) {
x<-c(x,qnorm(pX*i,mu[1],sqrt(sigma[1,1])))
if (i==1) {
meanX<-c(meanX,mu[1]-sqrt(sigma[1,1])^2*(dnorm(x[i+1],mu[1],sqrt(sigma[1,1])))/pX)
} else {
meanX<-c(meanX,mu[1]-sqrt(sigma[1,1])^2*(dnorm(x[i+1],mu[1],sqrt(sigma[1,1]))-dnorm(x[i],mu[1],sqrt(sigma[1,1])))/pX)
}
}
x<-c(x,xB[2])
i<-lgdX
meanX<-c(meanX,mu[1]-sqrt(sigma[1,1])^2*(0-dnorm(x[i],mu[1],sqrt(sigma[1,1])))/pX)
# y-axis quantile grid and conditional means.
meanY<-NULL
y<-yB[1]
for (i in 1:(lgdY-1)) {
y<-c(y,qnorm(pY*i,mu[2],sqrt(sigma[2,2])))
if (i==1) {
meanY<-c(meanY,mu[2]-sqrt(sigma[2,2])^2*(dnorm(y[i+1],mu[2],sqrt(sigma[2,2])))/pY)
} else {
meanY<-c(meanY,mu[2]-sqrt(sigma[2,2])^2*(dnorm(y[i+1],mu[2],sqrt(sigma[2,2]))-dnorm(y[i],mu[2],sqrt(sigma[2,2])))/pY)
}
}
y<-c(y,yB[2])
i<-lgdY
meanY<-c(meanY,mu[2]-sqrt(sigma[2,2])^2*(0-dnorm(y[i],mu[2],sqrt(sigma[2,2])))/pY)
elements<-vector("list", 2) # initial list; grown as cubes are appended
queue<-list(maxIdx=NA, lastIdx=1, KL=0,elements=elements)
# One cube per grid cell, accumulating the total KL upper bound.
for (i in 1:(length(x)-1)) {
for (j in 1:(length(y)-1)) {
cube<-matrix(c(x[i],x[i+1],
y[j],y[j+1]),2,2)
KL<-this$klBound2D(cube,mu,sigma)
center<-c(meanX[i],meanY[j])
element<-list(KL=KL,cube=cube,center=center)
queue$KL=queue$KL+KL
queue$elements[[queue$lastIdx]]<-element
queue$lastIdx<-queue$lastIdx+1
}
}
queue$lastIdx<-queue$lastIdx-1
# calc center point
# NOTE(review): this overwrites the conditional-mean centers stored above
# with geometric midpoints; if the equal-probability centers were intended,
# this loop should be dropped. Behavior kept as-is pending confirmation.
for (i in 1:queue$lastIdx) {
cube<-queue$elements[[i]]$cube
x<-cube[1,1]+(cube[2,1]-cube[1,1])/2
y<-cube[1,2]+(cube[2,2]-cube[1,2])/2
queue$elements[[i]]$center<-c(x,y)
}
# set index (zero-based idxM)
for (i in 1:queue$lastIdx) {
queue$elements[[i]]$idxM<-i-1
}
# remove borders (the bordered bounds are kept in cubeB): outermost sides
# are pushed to +/- Inf so the cubes partition the whole plane
cubes<-queue$elements
m<-matrix(c(Inf,-Inf,Inf,-Inf),nrow=2,ncol=2)
for (i in 1:length(cubes)) {
idx1<-cubes[[i]]$cube[1,]<m[1,]
idx2<-cubes[[i]]$cube[2,]>m[2,]
m[1,idx1]<-cubes[[i]]$cube[1,idx1]
m[2,idx2]<-cubes[[i]]$cube[2,idx2]
}
for (i in 1:length(cubes)) {
cubes[[i]]$cubeB<-cubes[[i]]$cube
if (cubes[[i]]$cube[1,1]==m[1,1]) cubes[[i]]$cube[1,1]<- -Inf
if (cubes[[i]]$cube[1,2]==m[1,2]) cubes[[i]]$cube[1,2]<- -Inf
if (cubes[[i]]$cube[2,1]==m[2,1]) cubes[[i]]$cube[2,1]<- Inf
if (cubes[[i]]$cube[2,2]==m[2,2]) cubes[[i]]$cube[2,2]<- Inf
}
cat("Total KL = ",queue$KL,"\n")
return(cubes)
})
#########################################################################/**
# @RdocMethod plotHypercubes
#
# @title "Plotting the discretization of a bivariate random normal variable "
#
# \description{
# @get "title"
# }
#
# @synopsis
#
# \arguments{
# \item{cubes}{ The list of hypercubes. }
# \item{text}{ Text to be added to each hypercube. Value 'center'
# shows the center point, 'index' shows the index of the cube, and if text is a vector of the same length as the number of cubes, that text is plotted. }
# \item{borders}{ Show the border of the hypercubes if true.}
# \item{colors}{ A integer vector of same length as the number of cubes used to give the cubes colors. The color is set by the integer value. }
# \item{...}{Not used.}
# }
#
# \value{
# A plot is produced.
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @examples "../RdocFiles/Discretize.Rex"
#
#*/#########################################################################
setMethodS3("plotHypercubes", "Discretize", function(this, cubes, text="center", borders=FALSE, colors=NULL, ...){
  # Draw the full set of hypercubes, optionally filled with 'colors' and
  # annotated per the 'text' argument.
  if (!is.null(colors)) {
    if (length(colors)!=length(cubes)) stop("Argument colors must have length equal to the number of cubes")
  }
  # Bounding box over all cubes (cubeB holds the finite, bordered bounds).
  box <- matrix(c(Inf, -Inf, Inf, -Inf), nrow = 2, ncol = 2)
  for (k in seq_along(cubes)) {
    lowMask <- cubes[[k]]$cubeB[1, ] < box[1, ]
    highMask <- cubes[[k]]$cubeB[2, ] > box[2, ]
    box[1, lowMask] <- cubes[[k]]$cubeB[1, lowMask]
    box[2, highMask] <- cubes[[k]]$cubeB[2, highMask]
  }
  this$plotCubes(cubes, box, colors)
  # Mask the outer frame unless borders were requested.
  if (!borders) this$addCube(box, col="white")
  if (length(text) == length(cubes)) {
    # Caller-supplied per-cube labels.
    this$addText(cubes, text)
  } else {
    if (text == "index") this$addText(cubes, 1:length(cubes) - 1)
    if (text == "center") this$addPoints(cubes)
  }
  title(xlab = expression(m[1]), ylab = expression(m[2]))
  cat(" Plotted", length(cubes), "cubes.\n")
  invisible(NULL)
})
#########################################################################/**
# @RdocMethod splitCube2D
#
# @title "Split a cube further up "
#
# \description{
# @get "title"
# }
#
# @synopsis
#
# \arguments{
# \item{cubes}{ The list of hypercubes. }
# \item{mu}{ The mean (2-dim vector). }
# \item{sigma}{ The covariance (2x2 matrix). }
# \item{iM}{ Index of the cube that we want to split. }
# \item{...}{Not used.}
# }
#
# \value{
# A list where each element describe the cube and contains: KL - an upper bound on the KL-distance, cube - the bounds, center - the center, idxM - the index, cubeB - the fixed bounds (used for plotting).
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @examples "../RdocFiles/Discretize.Rex"
#
#*/#########################################################################
setMethodS3("splitCube2D", "Discretize", function(this, cubes, mu, sigma, iM, ...) {
# Split the cube with (zero-based) index iM into two halves along the
# direction chosen by direc(); the first half keeps index iM, the second is
# appended with index length(cubes). Returns the updated cube list.
#
# cubes - list of cube descriptors (KL, cube, center, idxM, cubeB).
# mu    - mean vector (length 2); sigma - 2x2 covariance matrix.
# iM    - zero-based idxM of the cube to split.
#
# Fix vs original: the xB/yB area bounds were computed but never used (dead
# code) and have been removed; stale commented-out debug output dropped.
cube<-cubes[[iM+1]]$cubeB
splitIdx<-this$direc(cube,mu,sigma)
# Split at the midpoint of the chosen coordinate.
split<-cube[1,splitIdx]+(cube[2,splitIdx]-cube[1,splitIdx])/2
cube1<-cube2<-cube
cube1[2,splitIdx]<-split
cube2[1,splitIdx]<-split
KL1<-this$klBound2D(cube1,mu,sigma)
KL2<-this$klBound2D(cube2,mu,sigma)
element1<-list(KL=KL1,cube=cube1,center=NA,idxM=iM,cubeB=cube1)
element2<-list(KL=KL2,cube=cube2,center=NA,idxM=length(cubes),cubeB=cube2)
cubes[[iM+1]]<-element1
cubes[[length(cubes)+1]]<-element2
# find center (geometric midpoint) of the two new cubes
for (i in c(iM+1,length(cubes))) {
cube<-cubes[[i]]$cube
x<-cube[1,1]+(cube[2,1]-cube[1,1])/2
y<-cube[1,2]+(cube[2,2]-cube[1,2])/2
cubes[[i]]$center<-c(x,y)
}
# remove borders (the bordered version is kept in cubeB): sides of the new
# cubes lying on the overall boundary are pushed to +/- Inf
m<-matrix(c(Inf,-Inf,Inf,-Inf),nrow=2,ncol=2)
for (i in 1:length(cubes)) { # min and max values, i.e. borders of the cube
idx1<-cubes[[i]]$cubeB[1,]<m[1,]
idx2<-cubes[[i]]$cubeB[2,]>m[2,]
m[1,idx1]<-cubes[[i]]$cubeB[1,idx1]
m[2,idx2]<-cubes[[i]]$cubeB[2,idx2]
}
for (i in c(iM+1,length(cubes))) {
if (cubes[[i]]$cube[1,1]==m[1,1]) cubes[[i]]$cube[1,1]<- -Inf
if (cubes[[i]]$cube[1,2]==m[1,2]) cubes[[i]]$cube[1,2]<- -Inf
if (cubes[[i]]$cube[2,1]==m[2,1]) cubes[[i]]$cube[2,1]<- Inf
if (cubes[[i]]$cube[2,2]==m[2,2]) cubes[[i]]$cube[2,2]<- Inf
}
return(cubes)
})
|
/discretizeGaussian/R/discretize.R
|
permissive
|
relund/discretizeNormal
|
R
| false
| false
| 35,703
|
r
|
###########################################################################/**
# @RdocClass Discretize
#
# @title "Discretize class"
#
# \description{
# Containing all methods related to discretizing a uni/bi-variate normal distribution. Both uniform and nonuniform discretization possible.
# @classhierarchy
# }
#
# @synopsis
#
# \arguments{
# \item{...}{Not used.}
# }
#
# \section{Fields and Methods}{
# @allmethods ""
# }
#
# @examples "../RdocFiles/Discretize.Rex"
#
# \references{
# [1] Nielsen, L.R.; Jørgensen, E. & Højsgaard, S. Embedding a state space model into a Markov decision process Dept. of Genetics and Biotechnology, Aarhus University, 2008. \cr
# }
#
# @author
#*/###########################################################################
# Constructor: a Discretize object carries no fields of its own; all
# behaviour lives in the S3 methods defined on the "Discretize" class.
setConstructorS3("Discretize", function(...) {
  # Extend the R.oo base Object with the "Discretize" class tag.
  extend(Object(), "Discretize")
})
#########################################################################/**
# @RdocMethod volCube
#
# @title "Volume/length of cube"
#
# \description{
# Internal function.
# }
#
# @synopsis
#
# \arguments{
# \item{cube}{ The cube under consideration which is a (2x2) matrix containing the bounds (columnwise) (bivariate case) or vector of length 2 (univariate case). }
# \item{...}{Not used.}
# }
#
# @author
#
# \references{
# Based on
# \emph{Kozlov, A. & Koller, D. Nonuniform dynamic discretization in hybrid networks The Thirteenth Conference on Uncertainty in Artificial Intelligence (UAI-97), 1997, 314-325 }}
#
# @visibility "private"
#
#*/#########################################################################
setMethodS3("volCube", "Discretize", function(this, cube, ...){
  # Volume (2D) or length (1D) of a cube: the product of its side lengths.
  # Row 1 of `cube` holds lower bounds and row 2 upper bounds, one column
  # per coordinate, so side lengths are the row differences.
  sideLengths <- cube[2, ] - cube[1, ]
  prod(sideLengths)
})
#########################################################################/**
# @RdocMethod klBound1D
#
# @title "Upper bound on KL distance on a 1D cube"
#
# \description{
# Internal function.
# }
#
# @synopsis
#
# \arguments{
# \item{cube}{ The cube under consideration which is vector of length 2 containing the upper and lower bounds (univariate case). }
# \item{mu}{ The mean. }
# \item{sigma2}{ The variance. }
# \item{...}{ Not used. }
# }
#
# \value{
# @get "title".
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @visibility "private"
#
#*/#########################################################################
# Upper bound on the KL distance contributed by approximating the density
# with a constant over a 1D interval (cf. Kozlov & Koller 1997).
setMethodS3("klBound1D", "Discretize", function(this,cube,mu,sigma2, ...){
  # Infinite outer intervals are truncated at mu +/- 2.5 sd before the
  # density is sampled (cube is a 2x1 matrix: lower bound, upper bound).
  if (cube[1,1]<= -Inf) cube[1,1]<-mu-2.5*sqrt(sigma2)
  if (cube[2,1]>= Inf) cube[2,1]<-mu+2.5*sqrt(sigma2)
  # Min/mean/max of the density over the (truncated) interval, estimated
  # by regular sampling in bounds1D.
  b<-this$bounds1D(cube,mu,sigma2)
  # Weighted combination of log-density deviations times interval length.
  # NOTE(review): the second term's weight divides by (max - mean) while
  # the first divides by (max - min); verify against the bound in Kozlov &
  # Koller (1997) -- a (max - min) denominator in both terms may have been
  # intended.  klBound2D uses the identical formula.
  return(((b$max-b$mean)/(b$max-b$min)*b$min*log(b$min/b$mean)+(b$mean-b$min)/(b$max-b$mean)*b$max*log(b$max/b$mean))*this$volCube(cube))
}, private=TRUE)
#########################################################################/**
# @RdocMethod klBound2D
#
# @title "Upper bound on KL distance on a 2D cube"
#
# \description{
# Internal function.
# }
#
# @synopsis
#
# \arguments{
# \item{cube}{ The cube under consideration which is a (2x2) matrix containing the bounds of the variables (columnwise) (bivariate case). }
# \item{mu}{ The mean. }
# \item{sigma}{ The covariate matrix. }
# \item{...}{Not used.}
# }
#
# \value{
# @get "title".
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @visibility "private"
#
#*/#########################################################################
# Upper bound on the KL distance contributed by approximating the density
# with a constant over a 2D cube (cf. Kozlov & Koller 1997).
setMethodS3("klBound2D", "Discretize", function(this,cube,mu,sigma, ...){
  # Min/mean/max of the bivariate density over the cube, estimated by grid
  # sampling in bounds2D.  Callers pass cubes with finite bounds.
  b<-this$bounds2D(cube,mu,sigma)
  # Weighted combination of log-density deviations times cube volume.
  # NOTE(review): the second term divides by (max - mean) while the first
  # divides by (max - min); verify against Kozlov & Koller (1997) -- the
  # same formula appears in klBound1D.
  return(((b$max-b$mean)/(b$max-b$min)*b$min*log(b$min/b$mean)+(b$mean-b$min)/(b$max-b$mean)*b$max*log(b$max/b$mean))*this$volCube(cube))
})
#########################################################################/**
# @RdocMethod bounds1D
#
# @title "Min, mean and max density on a 1D cube"
#
# \description{
# Internal function.
# }
#
# @synopsis
#
# \arguments{
# \item{cube}{ The cube under consideration which is a (2x2) matrix containing the bounds of the variables (columnwise) (bivariate case). }
# \item{mu}{ The mean. }
# \item{sigma2}{ The variance. }
# \item{len}{ The number of samples of each coordinate.}
# \item{...}{Not used.}
# }
#
# \value{
# @get "title".
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @visibility "private"
#
#*/#########################################################################
setMethodS3("bounds1D", "Discretize", function(this, cube,mu,sigma2,len=100, ...){
  # Estimates the min, mean and max of the N(mu, sigma2) density over the
  # interval by evaluating it on a regular grid of `len` points per
  # coordinate of `cube` (in the 1D case only the first column is used by
  # dnorm below).
  nDim <- length(cube[1, ])
  grid <- matrix(NA, len, nDim)
  for (d in seq_len(nDim)) {
    grid[, d] <- seq(cube[1, d], cube[2, d], len = len)
  }
  pts <- expand.grid(as.list(as.data.frame(grid)))
  dens <- dnorm(pts[, 1], mu, sqrt(sigma2))
  list(min = min(dens), mean = mean(dens), max = max(dens))
})
#########################################################################/**
# @RdocMethod bounds2D
#
# @title "Min, mean and max density on a 2D cube"
#
# \description{
# Internal function.
# }
#
# @synopsis
#
# \arguments{
# \item{cube}{ The cube under consideration which is a (2x2) matrix containing the bounds of the variables (columnwise) (bivariate case). }
# \item{mu}{ The mean. }
# \item{sigma}{ The covariate matrix. }
# \item{len}{ The number of samples of each coordinate.}
# \item{...}{Not used.}
# }
#
# \value{
# @get "title".
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @visibility "private"
#
#*/#########################################################################
setMethodS3("bounds2D", "Discretize", function(this, cube,mu,sigma,len=100, ...){
  # Estimates the min, mean and max of the bivariate normal density over
  # the cube by evaluating it on a regular len x len grid spanning the
  # cube's bounds (row 1 = lower, row 2 = upper, one column per axis).
  nDim <- length(cube[1, ])
  grid <- matrix(NA, len, nDim)
  for (d in seq_len(nDim)) {
    grid[, d] <- seq(cube[1, d], cube[2, d], len = len)
  }
  pts <- expand.grid(as.list(as.data.frame(grid)))
  dens <- dmvnorm(pts, mean = mu, sigma = sigma)
  list(min = min(dens), mean = mean(dens), max = max(dens))
})
#########################################################################/**
# @RdocMethod ratio
#
# @title "Calc max divided by min density value"
#
# \description{
# Internal function.
# }
#
# @synopsis
#
# \arguments{
# \item{x}{ Values to calc. density for. }
# \item{mu}{ The mean. }
# \item{sigma}{ The covariate matrix. }
# \item{len}{ The number of samples of each coordinate.}
# \item{...}{Not used.}
# }
#
# \value{
# @get "title".
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @visibility "private"
#
#*/#########################################################################
setMethodS3("ratio", "Discretize", function(this, x,mu,sigma, ...){
  # Ratio of the largest to the smallest bivariate normal density value
  # over the sample points in `x` -- a measure of how much the density
  # varies along those points.
  dens <- dmvnorm(x, mean = mu, sigma = sigma)
  max(dens) / min(dens)
})
#########################################################################/**
# @RdocMethod direc
#
# @title "Finds the optimal (approximate) direction to split a cube"
#
# \description{
# Internal function.
# }
#
# @synopsis
#
# \arguments{
# \item{cube}{ The cube under consideration which is a (2x2) matrix containing the bounds of the variables (columnwise) (bivariate case). }
# \item{mu}{ The mean. }
# \item{sigma}{ The covariate matrix. }
# \item{...}{Not used.}
# }
#
# \value{
# Return the variable index to split.
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @visibility "private"
#
#*/#########################################################################
setMethodS3("direc", "Discretize", function(this, cube, mu, sigma, len=100, ...){
  # Finds the coordinate along which the density varies the most inside
  # the cube: for each coordinate, sample `len` points on the line through
  # the cube center parallel to that axis and compute the max/min density
  # ratio; return the coordinate with the largest ratio.
  #
  # cube  - (2 x d) matrix, row 1 = lower bounds, row 2 = upper bounds.
  # mu    - mean vector.
  # sigma - covariance matrix.
  # len   - number of sample points per coordinate (new parameter; the
  #         original hard-coded 100, which remains the default).
  #
  # Returns the index of the coordinate to split.
  l <- cube[2, ] - cube[1, ]    # side length of each coordinate
  center <- cube[1, ] + l / 2   # cube center
  idx <- 0
  maxV <- -Inf
  for (i in seq_len(length(cube[1, ]))) {
    # All coordinates fixed at the center except coordinate i, which is
    # swept across the cube.
    tmp <- matrix(center, len, length(center), byrow = TRUE)
    tmp[, i] <- seq(cube[1, i], cube[2, i], len = len)
    rat <- this$ratio(tmp, mu, sigma)
    if (rat > maxV) {
      idx <- i
      maxV <- rat
    }
  }
  return(idx)
})
#########################################################################/**
# @RdocMethod plotCubes
#
# @title "Plot the cubes (only bivariate distributions)"
#
# \description{
# Internal function.
# }
#
# @synopsis
#
# \arguments{
# \item{cubes}{ The list of hypercubes. }
# \item{start}{ An cube used to set the plot area.}
# \item{colors}{ An integer vector of same length as the number of cubes used to give the cubes colors. The color is set by the integer value. }
# \item{...}{Further arguments passed to plot.}
# }
#
# \value{
# NULL
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @visibility "private"
#
#*/#########################################################################
setMethodS3("plotCubes", "Discretize", function(this, cubes, start, colors, ...) {
  # Opens an empty plot spanning the `start` cube, then draws every cube's
  # finite bounds (cubeB).  When `colors` is given, filled rectangles are
  # drawn first and the black outlines are redrawn on top.
  plot(0, 0,
       xlim = c(start[1, 1], start[2, 1]),
       ylim = c(start[1, 2], start[2, 2]),
       type = "n", xlab = "", ylab = "", ...)
  if (is.null(colors)) {
    for (cb in cubes) {
      this$addCube(cb$cubeB)
    }
  } else {
    for (i in seq_along(cubes)) {
      this$addCubeCol(cubes[[i]]$cubeB, colors[i])
    }
    for (cb in cubes) {
      this$addCube(cb$cubeB)
    }
  }
})
#########################################################################/**
# @RdocMethod addCube
#
# @title "Adds a 2D cube to the plot"
#
# \description{
# Internal function.
# }
#
# @synopsis
#
# \arguments{
# \item{cube}{ The cube under consideration which is a (2x2) matrix containing the bounds of the variables (columnwise) (bivariate case). }
# \item{col}{ Color of the lines. }
# \item{...}{Not used.}
# }
#
# \value{
# NULL
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @visibility "private"
#
#*/#########################################################################
setMethodS3("addCube", "Discretize", function(this, cube,col="black", ...) {
  # Trace the rectangle outline: lower-left -> upper-left -> upper-right
  # -> lower-right -> back to lower-left.
  xs <- c(cube[1, 1], cube[1, 1], cube[2, 1], cube[2, 1], cube[1, 1])
  ys <- c(cube[1, 2], cube[2, 2], cube[2, 2], cube[1, 2], cube[1, 2])
  lines(xs, ys, col = col)
})
#########################################################################/**
# @RdocMethod addCubeCol
#
# @title "Adds a 2D cube with color to the plot"
#
# \description{
# Internal function.
# }
#
# @synopsis
#
# \arguments{
# \item{cube}{ The cube under consideration which is a (2x2) matrix containing the bounds of the variables (columnwise) (bivariate case). }
# \item{color}{ Color of the cube. }
# \item{...}{Not used.}
# }
#
# \value{
# NULL
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @visibility "private"
#
#*/#########################################################################
setMethodS3("addCubeCol", "Discretize", function(this, cube,color=NULL, ...) {
  # Draw the cube as a filled rectangle with a black border; `color` is
  # passed straight to rect() (NULL means no fill).
  rect(xleft = cube[1, 1], ybottom = cube[1, 2],
       xright = cube[2, 1], ytop = cube[2, 2],
       col = color, border = "black")
})
#########################################################################/**
# @RdocMethod addPoints
#
# @title "Adds center points to the plot"
#
# \description{
# Internal function.
# }
#
# @synopsis
#
# \arguments{
# \item{cubes}{ The list of hypercubes. }
# \item{...}{Not used.}
# }
#
# \value{
# NULL
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @visibility "private"
#
#*/#########################################################################
setMethodS3("addPoints", "Discretize", function(this, cubes, ...) {
  # Adds the (precomputed) center point of every hypercube to the current
  # plot as a small dot.
  #
  # cubes - list of hypercubes; each element must carry a 2-dim `center`.
  #
  # Fix over the original: coordinates are extracted with vapply instead
  # of growing x/y with c() inside a loop (and an empty list no longer
  # errors on cubes[[1]]).
  x <- vapply(cubes, function(cb) cb$center[1], numeric(1))
  y <- vapply(cubes, function(cb) cb$center[2], numeric(1))
  points(x, y, pch = ".")
})
#########################################################################/**
# @RdocMethod addIdx
#
# @title "Add cube index to the plot"
#
# \description{
# Internal function.
# }
#
# @synopsis
#
# \arguments{
# \item{cubes}{ The list of hypercubes. }
# \item{...}{Not used.}
# }
#
# \value{
# NULL
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @visibility "private"
#
#*/#########################################################################
setMethodS3("addIdx", "Discretize", function(this, cubes, ...) {
  # Writes the zero-based index of each hypercube at its center point.
  #
  # cubes - list of hypercubes; each element must carry a 2-dim `center`.
  #
  # Fixes over the original: the `idx` vector that was accumulated but
  # never used is gone, and x/y are no longer grown with c() in a loop.
  # Labels are the same 0 .. length(cubes)-1 sequence as before.
  x <- vapply(cubes, function(cb) cb$center[1], numeric(1))
  y <- vapply(cubes, function(cb) cb$center[2], numeric(1))
  text(x, y, labels = seq_along(cubes) - 1)
})
#########################################################################/**
# @RdocMethod addText
#
# @title "Add text to the plot"
#
# \description{
# Internal function.
# }
#
# @synopsis
#
# \arguments{
# \item{cubes}{ The list of hypercubes. }
# \item{text}{ Text to be added to each hypercube.}
# \item{...}{Not used.}
# }
#
# \value{
# NULL
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @visibility "private"
#
#*/#########################################################################
setMethodS3("addText", "Discretize", function(this, cubes, text, ...) {
  # Writes `text` at the center point of each hypercube (one label per
  # cube; text() recycles if needed).
  #
  # cubes - list of hypercubes; each element must carry a 2-dim `center`.
  # text  - labels, normally of the same length as `cubes`.
  #
  # Fixes over the original: the unused `yield` variable was removed and
  # x/y are no longer grown with c() in a loop.
  x <- vapply(cubes, function(cb) cb$center[1], numeric(1))
  y <- vapply(cubes, function(cb) cb$center[2], numeric(1))
  text(x, y, labels = text)
})
#########################################################################/**
# @RdocMethod discretize1DUnifEqLth
#
# @title "Discretize a normal distribution such that intervals have equal length"
#
# \description{
# @get "title"
# }
#
# @synopsis
#
# \arguments{
# \item{mu}{ The mean. }
# \item{sigma2}{ The variance. }
# \item{n}{ Number of intervals. }
# \item{asDF}{ Return result as a data frame. If false return matrix. }
# \item{...}{Not used.}
# }
#
# \value{
# A list of intervals (data frame if \code{asDF = TRUE}).
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @examples "../RdocFiles/Discretize.Rex"
#
#*/#########################################################################
setMethodS3("discretize1DUnifEqLth", "Discretize", function(this, mu, sigma2,
  n, asDF=TRUE, ...)
{
  # Truncate the support at mu +/- 2.5 standard deviations and cut it into
  # n intervals of equal length; afterwards the first/last interval are
  # widened to -Inf/Inf so the whole real line is covered.  `idxA` is the
  # zero-based interval index.
  bounds <- c(mu - 2.5 * sqrt(sigma2), mu + 2.5 * sqrt(sigma2))
  step <- diff(bounds) / n  # common interval length
  dat <- data.frame(center = NA, min = NA, max = NA, idxA = 1:n - 1)
  lower <- bounds[1]
  for (i in seq_len(n)) {
    dat$min[i] <- lower
    dat$center[i] <- lower + step / 2
    dat$max[i] <- lower + step
    lower <- lower + step
  }
  # Open-ended outer intervals.
  dat$min[1] <- -Inf
  dat$max[nrow(dat)] <- Inf
  if (!asDF) return(as.matrix(dat))
  dat
})
#########################################################################/**
# @RdocMethod discretize1DUnifEqProb
#
# @title "Discretize a normal distribution such that intervals have equal probability"
#
# \description{
# @get "title"
# }
#
# @synopsis
#
# \arguments{
# \item{mu}{ The mean. }
# \item{sigma2}{ The variance. }
# \item{n}{ Number of intervals. }
# \item{asDF}{ Return result as a data frame. If false return matrix. }
# \item{...}{Not used.}
# }
#
# \value{
# A list of intervals (data frame if \code{asDF = TRUE}).
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @examples "../RdocFiles/Discretize.Rex"
#
#*/#########################################################################
setMethodS3("discretize1DUnifEqProb", "Discretize", function(this, mu, sigma2,
  n, asDF=TRUE, ...)
{
  # Discretize N(mu, sigma2) into n intervals of equal probability mass
  # 1/n.  The cut points are the i/n quantiles (outer bounds -Inf/Inf) and
  # each interval's `center` is its truncated-normal mean
  #   mu - sigma2 * (dnorm(b) - dnorm(a)) / (1/n).
  #
  # mu     - mean.
  # sigma2 - variance.
  # n      - number of intervals.
  # asDF   - if TRUE return a data frame, otherwise the raw element list
  #          (each element: center, cube (2x1 bounds), idxA).
  #
  # Fixes over the original: a duplicated element assignment and the
  # unused accumulator `q` were removed, idxA assignment is folded into
  # element construction, and n == 1 no longer iterates over c(1, 0).
  pX<-1/n       # probability mass per interval
  s<-sqrt(sigma2)
  x<- -Inf      # interval cut points, starting at -Inf
  meanX<-NULL   # truncated-normal mean of each interval
  for (i in seq_len(n-1)) {
    x<-c(x,qnorm(pX*i,mu,s))
    if (i==1) {
      # dnorm at the -Inf bound is 0, so only the upper bound contributes.
      meanX<-c(meanX,mu-sigma2*(dnorm(x[i+1],mu,s))/pX)
    } else {
      meanX<-c(meanX,mu-sigma2*(dnorm(x[i+1],mu,s)-dnorm(x[i],mu,s))/pX)
    }
  }
  x<-c(x,Inf)
  # Last interval: dnorm at the Inf bound is 0; x[n] is the last finite cut.
  meanX<-c(meanX,mu-sigma2*(0-dnorm(x[n],mu,s))/pX)
  # One element per interval.
  elems<-vector("list", n)
  for (i in seq_len(n)) {
    elems[[i]]<-list(center=meanX[i],
                     cube=matrix(c(x[i],x[i+1]),2,1),
                     idxA=i-1)
  }
  # Total KL bound of the approximation (reported only).
  KL<-0
  for (i in seq_len(n)) {
    KL<-KL+this$klBound1D(elems[[i]]$cube,mu,sigma2)
  }
  cat(" KL-bound:",KL,"\n")
  if (!asDF) return(elems)
  data.frame(center=meanX,
             min=x[seq_len(n)],
             max=x[seq_len(n)+1],
             idxA=seq_len(n)-1)
})
#########################################################################/**
# @RdocMethod discretize1DVec
#
# @title "Discretize the real numbers according to a set of center points"
#
# \description{
# @get "title". Create intervals with center points as given in the argument.
# }
#
# @synopsis
#
# \arguments{
# \item{v}{ A vector of center points. }
# \item{inf}{ Value used for infinity. }
# \item{mInf}{ Value used for minus infinity. }
# \item{asDF}{ Return result as a data frame. If false return matrix. }
# \item{...}{Not used.}
# }
#
# \value{
# A list of intervals (data frame if \code{asDF = TRUE}).
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @examples "../RdocFiles/Discretize.Rex"
#
#*/#########################################################################
setMethodS3("discretize1DVec", "Discretize", function(this, v, inf=Inf, mInf=-inf, asDF=TRUE, ...)
{
  # Partition the real line into intervals whose centers are the (sorted)
  # points of `v`: each interior bound is the midpoint between neighbouring
  # centers, and the outer bounds are mInf/inf.
  #
  # v    - vector of center points.
  # inf  - value used for +infinity.
  # mInf - value used for -infinity (defaults to -inf).
  # asDF - if TRUE return a data frame, otherwise a matrix.
  #
  # Improvement over the original: the per-element loop is replaced by
  # vectorized midpoint arithmetic using the same formulas
  # (center[i] - diff/2 and center[i] + diff/2).  A single center still
  # yields the interval (mInf, inf).
  v <- sort(v)
  half <- diff(v) / 2  # half-distances between neighbouring centers
  dat <- data.frame(center = v,
                    min = c(mInf, v[-1] - half),
                    max = c(v[-length(v)] + half, inf))
  if (!asDF) return(as.matrix(dat))
  dat
})
#########################################################################/**
# @RdocMethod discretize2DNonunif
#
# @title "Discretize a bivariate normal distribution using a non-uniform discretization "
#
# \description{
# Discretize a bivariate normal distribution into hypercubes (squares)
# such that the approximation have a certain Kulback Libler (KL) distance.
# }
#
# @synopsis
#
# \arguments{
# \item{mu}{ The mean (2-dim vector). }
# \item{sigma}{ The covariance (2x2 matrix). }
# \item{maxKL}{ Max KL distance. }
# \item{maxIte}{ Max number of iterations. }
# \item{modifyCenter}{ If no don't split the cubes around the mean center. If "split1" split the 4 cubes around the mean into 9 squares such that the mean is the center of a cube. If "split2" first add cubes such that the axis of the mean always in the center of the cubes. }
# \item{split}{ Only used if modifyCenter = "split2" to set the size of the nine cubes around the mean. }
# \item{...}{Not used.}
# }
#
# \value{
# A list where each element describe the cube and contains: KL - an upper bound on the KL-distance, cube - the bounds, center - the center, idxM - the index, cubeB - the fixed bounds (used for plotting).
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @examples "../RdocFiles/Discretize.Rex"
#
#*/#########################################################################
# Adaptive (non-uniform) discretization: starting from one cube (or nine,
# see modifyCenter="split2"), greedily split the cube with the largest KL
# bound until the total bound drops below maxKL or maxIte iterations are
# reached.  See the Rd header above for the argument semantics.
setMethodS3("discretize2DNonunif", "Discretize", function(this, mu, sigma,
  maxKL=0.5, maxIte=500, modifyCenter="no", split=0.25, ...)
{
  # Truncate the support at mu +/- 2.5 standard deviations per coordinate.
  xB<-c(mu[1]-2.5*sqrt(sigma[1,1]),mu[1]+2.5*sqrt(sigma[1,1]))
  yB<-c(mu[2]-2.5*sqrt(sigma[2,2]),mu[2]+2.5*sqrt(sigma[2,2]))
  cube0<-cube<-matrix(c(xB[1],xB[2],yB[1],yB[2]),2,2)
  if (modifyCenter!="split2") {
    # Start the queue with the whole truncated support as a single cube.
    KL<-this$klBound2D(cube,mu,sigma)
    element<-list(KL=KL,cube=cube) # the first element in the queue
    elements<-vector("list", 2) # empty list of 2 elements
    elements[[1]]<-element
    queue<-list(maxIdx=1, lastIdx=1, KL=KL,elements=elements)
  }
  if (modifyCenter=="split2") {
    # add nine cubes split around zero (numbered from topleft to bottomright)
    # x/y hold the bounds of the small center cube (mu +/- split*sd).
    x<-c(mu[1]-split*sqrt(sigma[1,1]),mu[1]+split*sqrt(sigma[1,1]))
    y<-c(mu[2]-split*sqrt(sigma[2,2]),mu[2]+split*sqrt(sigma[2,2]))
    cube<-list()
    cube[[1]]<-matrix(c(xB[1],x[1],y[2],yB[2]),2,2)
    cube[[2]]<-matrix(c(x[1],x[2],y[2],yB[2]),2,2)
    cube[[3]]<-matrix(c(x[2],xB[2],y[2],yB[2]),2,2)
    cube[[4]]<-matrix(c(xB[1],x[1],y[1],y[2]),2,2)
    cube[[5]]<-matrix(c(x[1],x[2],y[1],y[2]),2,2) # the center cube
    cube[[6]]<-matrix(c(x[2],xB[2],y[1],y[2]),2,2)
    cube[[7]]<-matrix(c(xB[1],x[1],yB[1],y[1]),2,2)
    cube[[8]]<-matrix(c(x[1],x[2],yB[1],y[1]),2,2)
    cube[[9]]<-matrix(c(x[2],xB[2],yB[1],y[1]),2,2)
    elements<-list() # empty list
    # Track the total KL bound and the index of the worst cube.
    KL<-maxI<-Max<-0
    for (i in 1:9) {
      cubeKL<-this$klBound2D(cube[[i]],mu,sigma)
      if (cubeKL>Max) {
        Max<-cubeKL
        maxI<-i
      }
      KL<-KL+cubeKL
      element<-list(KL=cubeKL,cube=cube[[i]])
      elements[[i]]<-element
    }
    queue<-list(maxIdx=maxI, lastIdx=9, KL=KL,elements=elements)
  }
  # Greedy refinement loop: repeatedly halve the cube with the largest KL
  # bound along the direction of largest density variation.
  ite<-1
  while (queue$KL>maxKL & ite<maxIte){
    maxIdx<-queue$maxIdx
    #cat("Total KL = ",queue$KL,"\n")
    # Total KL without the cube about to be split.
    KL<-queue$KL-queue$elements[[maxIdx]]$KL
    cube<-queue$elements[[maxIdx]]$cube
    #cat("Split cube:\n"); print(cube)
    splitIdx<-this$direc(cube,mu,sigma)
    #cat("Split variable number ",splitIdx,"\n")
    # Cut the cube at the midpoint of the chosen coordinate.
    split<-cube[1,splitIdx]+(cube[2,splitIdx]-cube[1,splitIdx])/2
    cube1<-cube2<-cube
    cube1[2,splitIdx]<-split
    cube2[1,splitIdx]<-split
    KL1<-this$klBound2D(cube1,mu,sigma)
    KL2<-this$klBound2D(cube2,mu,sigma)
    queue$KL<-KL+KL1+KL2
    element1<-list(KL=KL1,cube=cube1)
    element2<-list(KL=KL2,cube=cube2)
    # First half replaces the old cube, second half is appended.
    queue$elements[[maxIdx]]<-element1
    queue$lastIdx<-queue$lastIdx+1
    queue$elements[[queue$lastIdx]]<-element2
    #cat("The two new elements:\n"); print(element1); print(element2);
    # Linear scan for the new worst cube.
    maxVal<- -Inf;
    for (i in 1:queue$lastIdx) {
      if (queue$elements[[i]]$KL>maxVal) {
        maxIdx<-i; maxVal<-queue$elements[[i]]$KL
      }
    }
    queue$maxIdx<-maxIdx; ite<-ite+1
  }
  if (modifyCenter=="split1") {
    # split the 4 cubes close to mu such that mu becomes the center of a cube
    idx<-NULL
    for (i in 1:queue$lastIdx) { # first find cubes
      if (queue$elements[[i]]$cube[1,1]==mu[1] | queue$elements[[i]]$cube[2,1]==mu[1]) {
        if (queue$elements[[i]]$cube[1,2]==mu[2] | queue$elements[[i]]$cube[2,2]==mu[2]) {
          idx<-c(idx,i)
        }
      }
    }
    # Bounding box of the cubes touching mu; their KL is subtracted and
    # re-added for the nine replacement cubes below.
    maxY=maxX=-Inf
    minY=minX=Inf
    for (i in idx) {
      maxX=max(maxX,queue$elements[[i]]$cube[2,1])
      maxY=max(maxY,queue$elements[[i]]$cube[2,2])
      minX=min(minX,queue$elements[[i]]$cube[1,1])
      minY=min(minY,queue$elements[[i]]$cube[1,2])
      queue$KL<-queue$KL-queue$elements[[i]]$KL
    }
    # Replace the found cubes by a 3x3 grid over their bounding box; the
    # first replacements reuse the old slots, extras are appended.
    difX=(maxX-minX)/3
    difY=(maxY-minY)/3
    for (i in 0:2) {
      for (j in 0:2) {
        x=c(minX+i*difX,minX+(i+1)*difX)
        y=c(minY+j*difY,minY+(j+1)*difY)
        cube<-matrix(c(x[1],x[2],y[1],y[2]),2,2)
        KL<-this$klBound2D(cube,mu,sigma)
        element<-list(KL=KL,cube=cube)
        if (!is.null(idx)) { # if still some idx to change
          queue$elements[[idx[1]]]<-element
          if(length(idx)>1) {
            idx<-idx[2:length(idx)]
          } else {
            idx<-NULL
          }
        } else {
          queue$lastIdx<-queue$lastIdx+1
          queue$elements[[queue$lastIdx]]<-element
        }
        queue$KL<-queue$KL+KL
      }
    }
  }
  # find center
  # Geometric midpoint of each cube.
  for (i in 1:queue$lastIdx) {
    cube<-queue$elements[[i]]$cube
    x<-cube[1,1]+(cube[2,1]-cube[1,1])/2
    y<-cube[1,2]+(cube[2,2]-cube[1,2])/2
    queue$elements[[i]]$center<-c(x,y)
  }
  # set index
  # Zero-based index used by external consumers.
  for (i in 1:queue$lastIdx) {
    queue$elements[[i]]$idxM<- i-1
  }
  # remove borders (the one with borders saved in cubeB)
  # Keep the finite bounds in cubeB and push the outermost bounds of the
  # discretization to +/-Inf so the whole plane is covered.
  cubes<-queue$elements
  m<-matrix(c(Inf,-Inf,Inf,-Inf),nrow=2,ncol=2)
  for (i in 1:length(cubes)) { # min and max values, i.e. borders
    idx1<-cubes[[i]]$cube[1,]<m[1,]
    idx2<-cubes[[i]]$cube[2,]>m[2,]
    m[1,idx1]<-cubes[[i]]$cube[1,idx1]
    m[2,idx2]<-cubes[[i]]$cube[2,idx2]
  }
  for (i in 1:length(cubes)) {
    cubes[[i]]$cubeB<-cubes[[i]]$cube
    if (cubes[[i]]$cube[1,1]==m[1,1]) cubes[[i]]$cube[1,1]<- -Inf
    if (cubes[[i]]$cube[1,2]==m[1,2]) cubes[[i]]$cube[1,2]<- -Inf
    if (cubes[[i]]$cube[2,1]==m[2,1]) cubes[[i]]$cube[2,1]<- Inf
    if (cubes[[i]]$cube[2,2]==m[2,2]) cubes[[i]]$cube[2,2]<- Inf
  }
  cat("Total KL = ",queue$KL,"\n")
  return(cubes)
})
#########################################################################/**
# @RdocMethod discretize2DUnifEqInv
#
# @title "Discretize a bivariate normal distribution using a uniform discretization with intervals of equal length "
#
# \description{
# @get "title"
# }
#
# @synopsis
#
# \arguments{
# \item{mu}{ The mean (2-dim vector). }
# \item{sigma}{ The covariance (2x2 matrix). }
# \item{lgdX}{ Number for intervals of x coordinate. }
# \item{lgdY}{ Number for intervals of y coordinate. }
# \item{...}{Not used.}
# }
#
# \value{
# A list where each element describe the cube and contains: KL - an upper bound on the KL-distance, cube - the bounds, center - the center, idxM - the index, cubeB - the fixed bounds (used for plotting).
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @examples "../RdocFiles/Discretize.Rex"
#
#*/#########################################################################
setMethodS3("discretize2DUnifEqInv", "Discretize", function(this, mu, sigma, lgdX, lgdY, ...){
  # Uniform discretization of a bivariate normal: truncate the support at
  # mu +/- 2.5 standard deviations per coordinate and cut it into a
  # lgdX x lgdY grid of equal-length rectangles.  Outer rectangles are
  # afterwards extended to +/-Inf; the finite bounds are kept in `cubeB`
  # for plotting.
  #
  # mu    - mean (2-dim vector).
  # sigma - covariance (2x2 matrix).
  # lgdX  - number of intervals for the x coordinate.
  # lgdY  - number of intervals for the y coordinate.
  #
  # Returns a list of cubes (KL, cube, center, idxM, cubeB).
  #
  # Fix over the original: the unused density matrix `z` (a dmvnorm grid
  # evaluation) and the unused `cube0` were removed -- dead code only.
  xB<-c(mu[1]-2.5*sqrt(sigma[1,1]),mu[1]+2.5*sqrt(sigma[1,1]))
  yB<-c(mu[2]-2.5*sqrt(sigma[2,2]),mu[2]+2.5*sqrt(sigma[2,2]))
  x<-seq(xB[1],xB[2],length=lgdX+1)
  y<-seq(yB[1],yB[2],length=lgdY+1)
  queue<-list(maxIdx=NA, lastIdx=1, KL=0, elements=vector("list", 2))
  # One element per grid cell; accumulate the total KL bound.
  for (i in 1:(length(x)-1)) {
    for (j in 1:(length(y)-1)) {
      cube<-matrix(c(x[i],x[i+1],
        y[j],y[j+1]),2,2)
      KL<-this$klBound2D(cube,mu,sigma)
      queue$KL<-queue$KL+KL
      queue$elements[[queue$lastIdx]]<-list(KL=KL,cube=cube)
      queue$lastIdx<-queue$lastIdx+1
    }
  }
  queue$lastIdx<-queue$lastIdx-1
  # Geometric midpoint of each cell.
  for (i in 1:queue$lastIdx) {
    cube<-queue$elements[[i]]$cube
    queue$elements[[i]]$center<-c(cube[1,1]+(cube[2,1]-cube[1,1])/2,
                                  cube[1,2]+(cube[2,2]-cube[1,2])/2)
  }
  # Zero-based index used by external consumers.
  for (i in 1:queue$lastIdx) {
    queue$elements[[i]]$idxM <- i-1
  }
  # Keep the finite bounds in cubeB, then push the outermost bounds of the
  # discretization to +/-Inf so the whole plane is covered.
  cubes<-queue$elements
  m<-matrix(c(Inf,-Inf,Inf,-Inf),nrow=2,ncol=2)
  for (i in 1:length(cubes)) {
    idx1<-cubes[[i]]$cube[1,]<m[1,]
    idx2<-cubes[[i]]$cube[2,]>m[2,]
    m[1,idx1]<-cubes[[i]]$cube[1,idx1]
    m[2,idx2]<-cubes[[i]]$cube[2,idx2]
  }
  for (i in 1:length(cubes)) {
    cubes[[i]]$cubeB<-cubes[[i]]$cube
    if (cubes[[i]]$cube[1,1]==m[1,1]) cubes[[i]]$cube[1,1]<- -Inf
    if (cubes[[i]]$cube[1,2]==m[1,2]) cubes[[i]]$cube[1,2]<- -Inf
    if (cubes[[i]]$cube[2,1]==m[2,1]) cubes[[i]]$cube[2,1]<- Inf
    if (cubes[[i]]$cube[2,2]==m[2,2]) cubes[[i]]$cube[2,2]<- Inf
  }
  cat("Total KL = ",queue$KL,"\n")
  return(cubes)
})
#########################################################################/**
# @RdocMethod discretize2DUnifEqProb
#
# @title "Discretize a bivariate normal distribution using a uniform discretization with intervals of equal probability "
#
# \description{
# @get "title"
# }
#
# @synopsis
#
# \arguments{
# \item{mu}{ The mean (2-dim vector). }
# \item{sigma}{ The covariance (2x2 matrix). }
# \item{lgdX}{ Number for intervals of x coordinate. }
# \item{lgdY}{ Number for intervals of y coordinate. }
# \item{...}{Not used.}
# }
#
# \value{
# A list where each element describe the cube and contains: KL - an upper bound on the KL-distance, cube - the bounds, center - the center, idxM - the index, cubeB - the fixed bounds (used for plotting).
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @examples "../RdocFiles/Discretize.Rex"
#
#*/#########################################################################
setMethodS3("discretize2DUnifEqProb", "Discretize", function(this,mu,sigma,lgdX,lgdY, ...){
  # Uniform discretization of a bivariate normal where each coordinate is
  # cut at its i/lgd quantiles, so every row/column of cells carries equal
  # marginal probability.  Outer cells are afterwards extended to +/-Inf;
  # the finite bounds are kept in `cubeB` for plotting.
  #
  # mu    - mean (2-dim vector).
  # sigma - covariance (2x2 matrix).
  # lgdX  - number of intervals for the x coordinate.
  # lgdY  - number of intervals for the y coordinate.
  #
  # Returns a list of cubes (KL, cube, center, idxM, cubeB).
  #
  # Fix over the original: dead code was removed (unused density matrix
  # `z`, unused expand.grid results `g`/`m`, unused `q` accumulators, and
  # the unused `cube0`).  Behaviour is unchanged.
  pX<-1/lgdX # prob in each x interval
  pY<-1/lgdY # prob in each y interval
  xB<-c(mu[1]-2.5*sqrt(sigma[1,1]),mu[1]+2.5*sqrt(sigma[1,1])) # bounds used
  yB<-c(mu[2]-2.5*sqrt(sigma[2,2]),mu[2]+2.5*sqrt(sigma[2,2]))
  # Cut points (quantiles) and truncated-normal means for the x coordinate.
  meanX<-NULL
  x<-xB[1]
  for (i in 1:(lgdX-1)) {
    x<-c(x,qnorm(pX*i,mu[1],sqrt(sigma[1,1])))
    if (i==1) {
      meanX<-c(meanX,mu[1]-sqrt(sigma[1,1])^2*(dnorm(x[i+1],mu[1],sqrt(sigma[1,1])))/pX)
    } else {
      meanX<-c(meanX,mu[1]-sqrt(sigma[1,1])^2*(dnorm(x[i+1],mu[1],sqrt(sigma[1,1]))-dnorm(x[i],mu[1],sqrt(sigma[1,1])))/pX)
    }
  }
  x<-c(x,xB[2])
  i<-lgdX
  meanX<-c(meanX,mu[1]-sqrt(sigma[1,1])^2*(0-dnorm(x[i],mu[1],sqrt(sigma[1,1])))/pX)
  # Same for the y coordinate.
  meanY<-NULL
  y<-yB[1]
  for (i in 1:(lgdY-1)) {
    y<-c(y,qnorm(pY*i,mu[2],sqrt(sigma[2,2])))
    if (i==1) {
      meanY<-c(meanY,mu[2]-sqrt(sigma[2,2])^2*(dnorm(y[i+1],mu[2],sqrt(sigma[2,2])))/pY)
    } else {
      meanY<-c(meanY,mu[2]-sqrt(sigma[2,2])^2*(dnorm(y[i+1],mu[2],sqrt(sigma[2,2]))-dnorm(y[i],mu[2],sqrt(sigma[2,2])))/pY)
    }
  }
  y<-c(y,yB[2])
  i<-lgdY
  meanY<-c(meanY,mu[2]-sqrt(sigma[2,2])^2*(0-dnorm(y[i],mu[2],sqrt(sigma[2,2])))/pY)
  # One element per grid cell; accumulate the total KL bound.
  elements<-vector("list", 2)
  queue<-list(maxIdx=NA, lastIdx=1, KL=0,elements=elements)
  for (i in 1:(length(x)-1)) {
    for (j in 1:(length(y)-1)) {
      cube<-matrix(c(x[i],x[i+1],
        y[j],y[j+1]),2,2)
      KL<-this$klBound2D(cube,mu,sigma)
      center<-c(meanX[i],meanY[j])
      element<-list(KL=KL,cube=cube,center=center)
      queue$KL<-queue$KL+KL
      queue$elements[[queue$lastIdx]]<-element
      queue$lastIdx<-queue$lastIdx+1
    }
  }
  queue$lastIdx<-queue$lastIdx-1
  # NOTE(review): the truncated-normal centers set above are overwritten
  # here by geometric midpoints, so meanX/meanY do not affect the returned
  # centers.  Possibly unintended -- behaviour preserved as in the
  # original; confirm before changing.
  for (i in 1:queue$lastIdx) {
    cube<-queue$elements[[i]]$cube
    x<-cube[1,1]+(cube[2,1]-cube[1,1])/2
    y<-cube[1,2]+(cube[2,2]-cube[1,2])/2
    queue$elements[[i]]$center<-c(x,y)
  }
  # Zero-based index used by external consumers.
  for (i in 1:queue$lastIdx) {
    queue$elements[[i]]$idxM<-i-1
  }
  # Keep the finite bounds in cubeB, then push the outermost bounds of the
  # discretization to +/-Inf so the whole plane is covered.
  cubes<-queue$elements
  m<-matrix(c(Inf,-Inf,Inf,-Inf),nrow=2,ncol=2)
  for (i in 1:length(cubes)) {
    idx1<-cubes[[i]]$cube[1,]<m[1,]
    idx2<-cubes[[i]]$cube[2,]>m[2,]
    m[1,idx1]<-cubes[[i]]$cube[1,idx1]
    m[2,idx2]<-cubes[[i]]$cube[2,idx2]
  }
  for (i in 1:length(cubes)) {
    cubes[[i]]$cubeB<-cubes[[i]]$cube
    if (cubes[[i]]$cube[1,1]==m[1,1]) cubes[[i]]$cube[1,1]<- -Inf
    if (cubes[[i]]$cube[1,2]==m[1,2]) cubes[[i]]$cube[1,2]<- -Inf
    if (cubes[[i]]$cube[2,1]==m[2,1]) cubes[[i]]$cube[2,1]<- Inf
    if (cubes[[i]]$cube[2,2]==m[2,2]) cubes[[i]]$cube[2,2]<- Inf
  }
  cat("Total KL = ",queue$KL,"\n")
  return(cubes)
})
#########################################################################/**
# @RdocMethod plotHypercubes
#
# @title "Plotting the discretization of a bivariate random normal variable "
#
# \description{
# @get "title"
# }
#
# @synopsis
#
# \arguments{
# \item{cubes}{ The list of hypercubes. }
# \item{text}{ Text to be added to each hypercube. Value 'center'
#   shows the center point, 'index' shows the index of the cube, and if text is a vector of the same length as the number of cubes the text is plotted. }
# \item{borders}{ Show the border of the hypercubes if true.}
# \item{colors}{ A integer vector of same length as the number of cubes used to give the cubes colors. The color is set by the integer value. }
# \item{...}{Not used.}
# }
#
# \value{
# A plot is produced.
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @examples "../RdocFiles/Discretize.Rex"
#
#*/#########################################################################
setMethodS3("plotHypercubes", "Discretize", function(this, cubes, text="center", borders=FALSE, colors=NULL, ...){
  # Plots the discretization: draws every cube (optionally filled with
  # `colors`), hides the outermost border unless `borders` is TRUE, and
  # annotates each cube according to `text`:
  #   - a vector of length(cubes): plotted as labels,
  #   - "index": the zero-based cube index,
  #   - "center": a dot at the cube center.
  #
  # Fix over the original: when `text` was a vector whose length did not
  # match length(cubes), the comparisons text=="index"/"center" yielded a
  # vector condition inside if(); identical() keeps those comparisons
  # scalar.  Behaviour for valid inputs is unchanged.
  if (!is.null(colors)) {
    if (length(colors)!=length(cubes)) stop("Argument colors must have length equal to the number of cubes")
  }
  # Bounding box over all cubes (row 1 = minima, row 2 = maxima) from the
  # finite plotting bounds cubeB.
  m<-matrix(c(Inf,-Inf,Inf,-Inf),nrow=2,ncol=2)
  for (i in 1:length(cubes)) {
    idx1<-cubes[[i]]$cubeB[1,]<m[1,]
    idx2<-cubes[[i]]$cubeB[2,]>m[2,]
    m[1,idx1]<-cubes[[i]]$cubeB[1,idx1]
    m[2,idx2]<-cubes[[i]]$cubeB[2,idx2]
  }
  cube0<-m
  this$plotCubes(cubes,cube0,colors)
  # Overdraw the outer border in white to hide it unless requested.
  if (!borders) this$addCube(cube0,col="white")
  if (length(text)==length(cubes)) {
    this$addText(cubes,text)
  } else {
    if (identical(text,"index")) this$addText(cubes,1:length(cubes)-1)
    if (identical(text,"center")) this$addPoints(cubes)
  }
  title(xlab=expression(m[1]),ylab=expression(m[2]))
  cat(" Plotted", length(cubes), "cubes.\n")
  invisible(NULL)
})
#########################################################################/**
# @RdocMethod splitCube2D
#
# @title "Split a cube further up "
#
# \description{
# @get "title"
# }
#
# @synopsis
#
# \arguments{
# \item{cubes}{ The list of hypercubes. }
# \item{mu}{ The mean (2-dim vector). }
# \item{sigma}{ The covariance (2x2 matrix). }
# \item{iM}{ Index of the cube that we want to split. }
# \item{...}{Not used.}
# }
#
# \value{
# A list where each element describe the cube and contains: KL - an upper bound on the KL-distance, cube - the bounds, center - the center, idxM - the index, cubeB - the fixed bounds (used for plotting).
# }
#
# @author
#
# \seealso{
# @seeclass
# }
#
# @examples "../RdocFiles/Discretize.Rex"
#
#*/#########################################################################
setMethodS3("splitCube2D", "Discretize", function(this, cubes, mu, sigma, iM, ...) {
  # Splits cube number iM (zero-based, as stored in idxM) into two halves
  # along the coordinate chosen by direc(): the first half replaces the
  # old cube, the second half is appended at the end of the list.  Centers
  # are recomputed and the outer +/-Inf bounds are re-derived.
  #
  # cubes - list of hypercubes (KL, cube, center, idxM, cubeB).
  # mu    - mean (2-dim vector).
  # sigma - covariance (2x2 matrix).
  # iM    - zero-based index of the cube to split.
  #
  # Returns the updated cube list.
  #
  # Fix over the original: the unused xB/yB bound computations and stale
  # commented-out debug output were removed -- dead code only.
  cube<-cubes[[iM+1]]$cubeB
  # Choose the split coordinate and cut the cube at its midpoint.
  splitIdx<-this$direc(cube,mu,sigma)
  split<-cube[1,splitIdx]+(cube[2,splitIdx]-cube[1,splitIdx])/2
  cube1<-cube2<-cube
  cube1[2,splitIdx]<-split
  cube2[1,splitIdx]<-split
  KL1<-this$klBound2D(cube1,mu,sigma)
  KL2<-this$klBound2D(cube2,mu,sigma)
  # element2's idxM is the old length(cubes), i.e. the zero-based index of
  # the appended last element (evaluated before the append).
  element1<-list(KL=KL1,cube=cube1,center=NA,idxM=iM,cubeB=cube1)
  element2<-list(KL=KL2,cube=cube2,center=NA,idxM=length(cubes),cubeB=cube2)
  cubes[[iM+1]]<-element1
  cubes[[length(cubes)+1]]<-element2
  # Geometric midpoints of the two affected cubes.
  for (i in c(iM+1,length(cubes))) {
    cube<-cubes[[i]]$cube
    x<-cube[1,1]+(cube[2,1]-cube[1,1])/2
    y<-cube[1,2]+(cube[2,2]-cube[1,2])/2
    cubes[[i]]$center<-c(x,y)
  }
  # Global outer borders over all cubes, from the finite cubeB bounds.
  m<-matrix(c(Inf,-Inf,Inf,-Inf),nrow=2,ncol=2)
  for (i in 1:length(cubes)) { # min and max values, i.e. borders of the cube
    idx1<-cubes[[i]]$cubeB[1,]<m[1,]
    idx2<-cubes[[i]]$cubeB[2,]>m[2,]
    m[1,idx1]<-cubes[[i]]$cubeB[1,idx1]
    m[2,idx2]<-cubes[[i]]$cubeB[2,idx2]
  }
  # Push the bounds of the two new cubes to +/-Inf where they touch the
  # global border, so the discretization keeps covering the whole plane.
  for (i in c(iM+1,length(cubes))) {
    if (cubes[[i]]$cube[1,1]==m[1,1]) cubes[[i]]$cube[1,1]<- -Inf
    if (cubes[[i]]$cube[1,2]==m[1,2]) cubes[[i]]$cube[1,2]<- -Inf
    if (cubes[[i]]$cube[2,1]==m[2,1]) cubes[[i]]$cube[2,1]<- Inf
    if (cubes[[i]]$cube[2,2]==m[2,2]) cubes[[i]]$cube[2,2]<- Inf
  }
  return(cubes)
})
|
library(tidyverse)
library(readxl)
library(zoo)
##### Mapping to original file headers - ICS Report A
# NHSRegionSortOrder "region_index",
# NHSRegion "region_name",
# STPNameEngland "ics_name",
# FiscalYearQtrLabel "fiscal_year",
# Category "category",
# SocialPrescribingreferralSTD_Weeks "ref_std",
# SocialPrescribingreferralWeightedAverage "ref_rate",
# SocialPrescribingreferralLowerCI "ref_ci_low",
# SocialPrescribingreferralUpperCI "ref_ci_high",
# SocialPrescribingDeclinedSTD_Weeks "dec_std",
# SocialPrescribingDeclinedWeightedAverage "dec_rate",
# SocialPrescribingDeclinedLowerCI "dec_ci_low",
# SocialPrescribingDeclinedUpperCI "dec_ci_high",
##### Mapping to original file headers - ICS Report C and D
# NHSRegionSortOrder "region_index",
# NHSRegion "region_name",
# STPNameEngland "ics_name",
# FiscalYearQtrLabel "fiscal_year",
# Category "category",
# IssuesrelatingtomentalhealthSTD_Weeks "mh_std",
# IssuesrelatingtomentalhealthWeightedAverage "mh_rate",
# IssuesrelatingtomentalhealthLowerCI "mh_ci_low",
# IssuesrelatingtomentalhealthUpperCI "mh_ci_high",
# IssuesrelatingtosubstancemisuseSTD_Weeks "subs_std",
# IssuesrelatingtosubstancemisuseWeightedAverage "subst_rate",
# IssuesrelatingtosubstancemisuseLowerCI "subst_ci_low",
# IssuesrelatingtosubstancemisuseUpperCI "subst_ci_high",
# IssuesrelatingtoemploymentSTD_Weeks "empl_std",
# IssuesrelatingtoemploymentWeightedAverage "empl_rate",
# IssuesrelatingtoemploymentLowerCI "empl_ci_low",
# IssuesrelatingtoemploymentUpperCI "empl_ci_high",
# IssuesrelatingtomoneySTD_Weeks "money_std",
# IssuesrelatingtomoneyWeightedAverage "money_rate",
# IssuesrelatingtomoneyLowerCI "money_ci_low",
# IssuesrelatingtomoneyUpperCI "money_ci_high",
# IssuesrelatingtomanagingalongtermconditionSTD_Weeks "ltc_std",
# IssuesrelatingtomanagingalongtermconditionWeightedAverage "ltc_rate",
# IssuesrelatingtomanagingalongtermconditionLowerCI "ltc_ci_low",
# IssuesrelatingtomanagingalongtermconditionUpperCI "ltc_ci_high",
# IssuesrelatingtoabuseSTD_Weeks "abuse_std",
# IssuesrelatingtoabuseWeightedAverage "abuse_rate",
# IssuesrelatingtoabuseLowerCI "abuse_ci_low",
# IssuesrelatingtoabuseUpperCI "abuse_ci_high",
# IssuesrelatingtohousingSTD_Weeks "housing_std",
# IssuesrelatingtohousingWeightedAverage "housing_rate",
# IssuesrelatingtohousingLowerCI "housing_ci_low",
# IssuesrelatingtohousingUpperCI "housing_ci_high",
# IssuesrelatingtoparentingSTD_Weeks "parent_std",
# IssuesrelatingtoparentingWeightedAverage "parent_rate",
# IssuesrelatingtoparentingLowerCI "parent_ci_low",
# IssuesrelatingtoparentingUpperCI "parent_ci_high",
# ReferralToBenefitsAgencySTD_Weeks "benefit_std",
# ReferralToBenefitsAgencyWeightedAverage "benefit_rate",
# ReferralToBenefitsAgencyLowerCI "benefit_ci_low",
# ReferralToBenefitsAgencyUpperCI "benefit_ci_high",
# ReferralToPhysicalActivityProgrammeSTD_Weeks "physical_std",
# ReferralToPhysicalActivityProgrammeWeightedAverage "physical_rate",
# ReferralToPhysicalActivityProgrammeLowerCI "physical_ci_low",
# ReferralToPhysicalActivityProgrammeUpperCI "physical_ci_high",
# ReferralToArtsTherapyServicesSTD_Weeks "arts_std",
# ReferralToArtsTherapyServicesWeightedAverage "arts_rate",
# ReferralToArtsTherapyServicesLowerCI "arts_ci_low",
# ReferralToArtsTherapyServicesUpperCI "arts_ci_high",
# SocialPrescribingForMentalHealthSTD_Weeks "sp4mh_std",
# SocialPrescribingForMentalHealthWeightedAverage "sp4mh_rate",
# SocialPrescribingForMentalHealthLowerCI "sp4mh_ci_low",
# SocialPrescribingForMentalHealthUpperCI "sp4mh_ci_high",
# HealthEducationOfferedSTD_Weeks "heoffer_std",
# HealthEducationOfferedWeightedAverage "heoffer_rate",
# HealthEducationOfferedLowerCI "heoffer_ci_low",
# HealthEducationOfferedUpperCI "heoffer_ci_high",
##### Mapping to original file headers - NHSE_A report
# NHSRegionSortOrder region_index
# NHSRegion region_name
# STPNameEngland ics_name
# FiscalYearQtrLabel fiscal_year
# SocialPrescribingReferralAvWeeklyRate ref_rate
# SocialPrescribingReferralLowerCI ref_rate_ci_low
# SocialPrescribingReferralUpperCI ref_rate_ci_high
# SocialPrescribingDeclinedAvWeeklyRate dec_rate
# SocialPrescribingLowerCI dec_rate_ci_low
# SocialPrescribingUpperCI dec_rate_ci_high
# PersonalisedCareAndSupportPlanAgreedAvWeeklyRate pcsp_agreed_rate
# PersonalisedCareAndSupportPlanAgreedLowerCI pcsp_agree_ci_low
# PersonalisedCareAndSupportPlanAgreedUpperCI pcsp_agree_ci_high
# PersonalisedCareAndSupportPlanReviewedAvWeeklyRate pcsp_review_rate
# PersonalisedCareAndSupportPlanReviewedLowerCI pcsp_review_ci_low
# PersonalisedCareAndSupportPlanReviewedUpperCI pcsp_review_ci_high
##### Mapping to original file headers - NHSE_A2 report
# NHSRegionSortOrder "region_index",
# NHSRegion "region_name",
# FiscalYear "fiscal_year",
# Category "category",
# SocialPrescribingReferralSTD_Weeks "ref_std",
# SocialPrescribingReferralWeightedAverage "ref_rate",
# SocialPrescribingReferralLowerCI "ref_ci_low",
# SocialPrescribingReferralUpperCI "ref_ci_high",
# SocialPrescribingDeclinedSTD_Weeks "dec_std",
# SocialPrescribingDeclinedWeightedAverage "dec_rate",
# SocialPrescribingLowerCI "dec_ci_low",
# SocialPrescribingUpperCI "dec_ci_high",
# PersonalisedCareAndSupportPlanAgreedSTD_Weeks "pcsp_agree_std",
# PersonalisedCareAndSupportPlanAgreedWeightedAverage "pcsp_agree_rate",
# PersonalisedCareAndSupportPlanAgreedLowerCI "pcsp_agree_ci_low",
# PersonalisedCareAndSupportPlanAgreedUpperCI "pcsp_agree_ci_high",
# PersonalisedCareAndSupportPlanReviewedSTD_Weeks "pcsp_review_std",
# PersonalisedCareAndSupportPlanReviewedWeightedAverage "pcsp_review_rate",
# PersonalisedCareAndSupportPlanReviewedLowerCI "pcsp_review_ci_low",
# PersonalisedCareAndSupportPlanReviewedUpperCI "pcsp_review_ci_high",
##### Mapping to original file headers - NHSE_B report
# NHSRegion "region_name",
# FiscalYearQtrLabel "fiscal_year",
# SocialPrescribingOfferedSTD_Weeks "sp_offered_std",
# SocialPrescribingOfferedWeightedAverage "sp_offered_rate",
# SocialPrescribingOfferedLowerCI "sp_offered_ci_low",
# SocialPrescribingOfferedUpperCI "sp_offered_ci_high",
# SocialprescribingsignpostingSTD_Weeks "sp_signpost_std",
# SocialprescribingsignpostingWeightedAverage "sp_signpost_rate",
# SocialprescribingsignpostingLowerCI "sp_signpost_ci_low",
# SocialprescribingsignpostingUpperCI "sp_signpost_ci_high",
# SocialPrescribingForMentalHealthSTD_Weeks "sp4mh_std",
# SocialPrescribingForMentalHealthWeightedAverage "sp4mh_rate",
# SocialPrescribingForMentalHealthLowerCI "sp4mh_ci_low",
# SocialPrescribingForMentalHealthUpperCI "sp4mh_ci_high",
# HealthcoachingreferralSTD_Weeks "hc_ref_std",
# HealthcoachingreferralWeightedAverage "hc_ref_rate",
# HealthcoachingreferralLowerCI "hc_ref_ci_low",
# HealthcoachingreferralUpperCI "hc_ref_ci_high",
# SeenbyhealthcoachSTD_Weeks "seenbyhc_std",
# SeenbyhealthcoachWeightedAverage "seenbyhc_rate",
# SeenbyhealthcoachLowerCI "seenbyhc_ci_low",
# SeenbyhealthcoachUpperCI "seenbyhc_ci_high",
# SeenbyhealthandwellbeingcoachSTD_Weeks "seenbyhcwbcoach_std",
# SeenbyhealthandwellbeingcoachWeightedAverage "seenbyhcwbcoach_rate",
# SeenbyhealthandwellbeingcoachLowerCI "seenbyhcwbcoach_ci_low",
# SeenbyhealthandwellbeingcoachUpperCI "seenbyhcwbcoach_ci_high",
# SeenbycarecoordinatorSTD_Weeks "seenbycc_std",
# SeenbycarecoordinatorWeightedAverage "seenbycc_rate",
# SeenbycarecoordinatorLowerCI "seenbycc_ci_low",
# SeenbycarecoordinatorUpperCI "seenbycc_ci_high",
# ShareddecisionmakingSTD_Weeks "shareddm_std",
# ShareddecisionmakingWeightedAverage "shareddm_rate",
# ShareddecisionmakingLowerCI "shareddm_ci_low",
# ShareddecisionmakingUpperCI "shareddm_ci_high",
# ShareddecisionmakingwithdecisionsupportSTD_Weeks "shareddm_supp_std",
# ShareddecisionmakingwithdecisionsupportWeightedAverage "shareddm_supp_rate",
# ShareddecisionmakingwithdecisionsupportLowerCI "shareddm_supp_ci_low",
# ShareddecisionmakingwithdecisionsupportUpperCI "shareddm_supp_ci_high",
# ShareddecisionmakingwithoutdecisionsupportSTD_Weeks "shareddm_wo_supp_std",
# ShareddecisionmakingwithoutdecisionsupportWeightedAverage "shareddm_wo_supp_rate",
# ShareddecisionmakingwithoutdecisionsupportLowerCI "shareddm_wo_supp_ci_low",
# ShareddecisionmakingwithoutdecisionsupportUpperCI "shareddm_wo_supp_ci_high",
# ShareddecisionmakingwithpatientdecisionaidSTD_Weeks "shareddm_patdec_std",
# ShareddecisionmakingwithpatientdecisionaidWeightedAverage "shareddm_patdec_rate",
# ShareddecisionmakingwithpatientdecisionaidLowerCI "shareddm_patdec_ci_low",
# ShareddecisionmakingwithpatientdecisionaidUpperCI "shareddm_patdec_ci_high",
# ShareddecisionmakingwithoutpatientdecisionaidSTD_Weeks "shareddm_wo_patdec_std",
# ShareddecisionmakingwithoutpatientdecisionaidWeightedAverage "shareddm_wo_patdec_rate",
# ShareddecisionmakingwithoutpatientdecisionaidLowerCI "shareddm_wo_patdec_ci_low",
# ShareddecisionmakingwithoutpatientdecisionaidUpperCI "shareddm_wo_patdec_ci_high",
# HaspersonalhealthbudgetSTD_Weeks "phbudget_std",
# HaspersonalhealthbudgetWeightedAverage "phbudget_rate",
# HaspersonalhealthbudgetLowerCI "phbudget_ci_low",
# HaspersonalhealthbudgetUpperCI "phbudget_ci_high",
##### Mapping to original file headers - NHSE_CD report
# NHSRegionSortOrder "region_index",
# NHSRegion "region_name",
# FiscalYear "fiscal_year",
# Category "category",
# IssuesrelatingtomentalhealthSTD_Weeks "mh_std",
# IssuesrelatingtomentalhealthWeightedAverage "mh_rate",
# IssuesrelatingtomentalhealthLowerCI "mh_ci_low",
# IssuesrelatingtomentalhealthUpperCI "mh_ci_high",
# IssuesrelatingtosubstancemisuseSTD_Weeks "subst_std",
# IssuesrelatingtosubstancemisuseWeightedAverage "subst_rate",
# IssuesrelatingtosubstancemisuseLowerCI "subst_ci_low",
# IssuesrelatingtosubstancemisuseUpperCI "subst_ci_high",
# IssuesrelatingtoemploymentSTD_Weeks "empl_std",
# IssuesrelatingtoemploymentWeightedAverage "empl_rate",
# IssuesrelatingtoemploymentLowerCI "empl_ci_low",
# IssuesrelatingtoemploymentUpperCI "empl_ci_high",
# IssuesrelatingtomoneySTD_Weeks "money_std",
# IssuesrelatingtomoneyWeightedAverage "money_rate",
# IssuesrelatingtomoneyLowerCI "money_ci_low",
# IssuesrelatingtomoneyUpperCI "money_ci_high",
# IssuesrelatingtomanagingalongtermconditionSTD_Weeks "ltc_std",
# IssuesrelatingtomanagingalongtermconditionWeightedAverage "ltc_rate",
# IssuesrelatingtomanagingalongtermconditionLowerCI "ltc_ci_low",
# IssuesrelatingtomanagingalongtermconditionUpperCI "ltc_ci_high",
# IssuesrelatingtoabuseSTD_Weeks "abuse_std",
# IssuesrelatingtoabuseWeightedAverage "abuse_rate",
# IssuesrelatingtoabuseLowerCI "abuse_ci_low",
# IssuesrelatingtoabuseUpperCI "abuse_ci_high",
# IssuesrelatingtohousingSTD_Weeks "housing_std",
# IssuesrelatingtohousingWeightedAverage "housing_rate",
# IssuesrelatingtohousingLowerCI "housing_ci_low",
# IssuesrelatingtohousingUpperCI "housing_ci_high",
# IssuesrelatingtoparentingSTD_Weeks "parent_std",
# IssuesrelatingtoparentingWeightedAverage "parent_rate",
# IssuesrelatingtoparentingLowerCI "parent_ci_low",
# IssuesrelatingtoparentingUpperCI "parent_ci_high",
# ReferralToBenefitsAgencySTD_Weeks "benefit_std",
# ReferralToBenefitsAgencyWeightedAverage "benefit_rate",
# ReferralToBenefitsAgencyLowerCI "benefit_ci_low",
# ReferralToBenefitsAgencyUpperCI "benefit_ci_high",
# ReferralToPhysicalActivityProgrammeSTD_Weeks "physical_std",
# ReferralToPhysicalActivityProgrammeWeightedAverage "physical_rate",
# ReferralToPhysicalActivityProgrammeLowerCI "physical_ci_low",
# ReferralToPhysicalActivityProgrammeUpperCI "physical_ci_high",
# ReferralToArtsTherapyServicesSTD_Weeks "arts_std",
# ReferralToArtsTherapyServicesWeightedAverage "arts_rate",
# ReferralToArtsTherapyServicesLowerCI "arts_ci_low",
# ReferralToArtsTherapyServicesUpperCI "arts_ci_high",
# SocialPrescribingForMentalHealthSTD_Weeks "sp4mh_std",
# SocialPrescribingForMentalHealthWeightedAverage "sp4mh_rate",
# SocialPrescribingForMentalHealthLowerCI "sp4mh_ci_low",
# SocialPrescribingForMentalHealthUpperCI "sp4mh_ci_high",
# HealthEducationOfferedSTD_Weeks "heoffer_std",
# HealthEducationOfferedWeightedAverage "heoffer_rate",
# HealthEducationOfferedLowerCI "heoffer_ci_low",
# HealthEducationOfferedUpperCI "heoffer_ci_high"
#####Short column definitions and data types
# Short column names and matching readxl col_types for each report sheet.
# Every sheet follows the same layout: a handful of identifier columns,
# then one (std, rate, ci_low, ci_high) quadruple per metric -- so the
# type vectors are built with rep() instead of being written out longhand.

# ICS report - Group A: 5 id columns + 2 metrics x 4 = 13 columns.
colnames_ICS_A <- c(
  "region_index", "region_name", "ics_name", "fiscal_year", "category",
  "ref_std", "ref_rate", "ref_ci_low", "ref_ci_high",
  "dec_std", "dec_rate", "dec_ci_low", "dec_ci_high"
)
coltypes_ICS_A <- c("numeric", rep("text", 4), rep("numeric", 8))

# ICS report - Group C and D: 5 id columns + 13 metrics x 4 = 57 columns.
# NOTE(review): "subs_std" does not match its siblings "subst_rate" /
# "subst_ci_low" / "subst_ci_high"; kept as-is in case downstream code
# references the existing name -- verify before renaming.
colnames_ICS_CD <- c(
  "region_index", "region_name", "ics_name", "fiscal_year", "category",
  "mh_std", "mh_rate", "mh_ci_low", "mh_ci_high",
  "subs_std", "subst_rate", "subst_ci_low", "subst_ci_high",
  "empl_std", "empl_rate", "empl_ci_low", "empl_ci_high",
  "money_std", "money_rate", "money_ci_low", "money_ci_high",
  "ltc_std", "ltc_rate", "ltc_ci_low", "ltc_ci_high",
  "abuse_std", "abuse_rate", "abuse_ci_low", "abuse_ci_high",
  "housing_std", "housing_rate", "housing_ci_low", "housing_ci_high",
  "parent_std", "parent_rate", "parent_ci_low", "parent_ci_high",
  "benefit_std", "benefit_rate", "benefit_ci_low", "benefit_ci_high",
  "physical_std", "physical_rate", "physical_ci_low", "physical_ci_high",
  "arts_std", "arts_rate", "arts_ci_low", "arts_ci_high",
  "sp4mh_std", "sp4mh_rate", "sp4mh_ci_low", "sp4mh_ci_high",
  "heoffer_std", "heoffer_rate", "heoffer_ci_low", "heoffer_ci_high"
)
coltypes_ICS_CD <- c("numeric", rep("text", 4), rep("numeric", 52))

# NHSE report - Group A: 4 id columns + 4 metrics x 4 = 20 columns.
colnames_NHSE_A1 <- c(
  "region_index", "region_name", "ics_name", "fiscal_year",
  "ref_std", "ref_rate", "ref_ci_low", "ref_ci_high",
  "dec_std", "dec_rate", "dec_ci_low", "dec_ci_high",
  "pcsp_agree_std", "pcsp_agree_rate", "pcsp_agree_ci_low", "pcsp_agree_ci_high",
  "pcsp_review_std", "pcsp_review_rate", "pcsp_review_ci_low", "pcsp_review_ci_high"
)
coltypes_NHSE_A1 <- c("numeric", rep("text", 3), rep("numeric", 16))

# NHSE report - Group A ii: 4 id columns + 4 metrics x 4 = 20 columns.
colnames_NHSE_A2 <- c(
  "region_index", "region_name", "fiscal_year", "category",
  "ref_std", "ref_rate", "ref_ci_low", "ref_ci_high",
  "dec_std", "dec_rate", "dec_ci_low", "dec_ci_high",
  "pcsp_agree_std", "pcsp_agree_rate", "pcsp_agree_ci_low", "pcsp_agree_ci_high",
  "pcsp_review_std", "pcsp_review_rate", "pcsp_review_ci_low", "pcsp_review_ci_high"
)
coltypes_NHSE_A2 <- c("numeric", rep("text", 3), rep("numeric", 16))

# NHSE report - Group B: 2 id columns + 13 metrics x 4 = 54 columns.
colnames_NHSE_B <- c(
  "region_name", "fiscal_year",
  "sp_offered_std", "sp_offered_rate", "sp_offered_ci_low", "sp_offered_ci_high",
  "sp_signpost_std", "sp_signpost_rate", "sp_signpost_ci_low", "sp_signpost_ci_high",
  "sp4mh_std", "sp4mh_rate", "sp4mh_ci_low", "sp4mh_ci_high",
  "hc_ref_std", "hc_ref_rate", "hc_ref_ci_low", "hc_ref_ci_high",
  "seenbyhc_std", "seenbyhc_rate", "seenbyhc_ci_low", "seenbyhc_ci_high",
  "seenbyhcwbcoach_std", "seenbyhcwbcoach_rate", "seenbyhcwbcoach_ci_low", "seenbyhcwbcoach_ci_high",
  "seenbycc_std", "seenbycc_rate", "seenbycc_ci_low", "seenbycc_ci_high",
  "shareddm_std", "shareddm_rate", "shareddm_ci_low", "shareddm_ci_high",
  "shareddm_supp_std", "shareddm_supp_rate", "shareddm_supp_ci_low", "shareddm_supp_ci_high",
  "shareddm_wo_supp_std", "shareddm_wo_supp_rate", "shareddm_wo_supp_ci_low", "shareddm_wo_supp_ci_high",
  "shareddm_patdec_std", "shareddm_patdec_rate", "shareddm_patdec_ci_low", "shareddm_patdec_ci_high",
  "shareddm_wo_patdec_std", "shareddm_wo_patdec_rate", "shareddm_wo_patdec_ci_low", "shareddm_wo_patdec_ci_high",
  "phbudget_std", "phbudget_rate", "phbudget_ci_low", "phbudget_ci_high"
)
coltypes_NHSE_B <- c(rep("text", 2), rep("numeric", 52))

# NHSE report - Group C and D: 4 id columns + 13 metrics x 4 = 56 columns.
colnames_NHSE_CD <- c(
  "region_index", "region_name", "fiscal_year", "category",
  "mh_std", "mh_rate", "mh_ci_low", "mh_ci_high",
  "subst_std", "subst_rate", "subst_ci_low", "subst_ci_high",
  "empl_std", "empl_rate", "empl_ci_low", "empl_ci_high",
  "money_std", "money_rate", "money_ci_low", "money_ci_high",
  "ltc_std", "ltc_rate", "ltc_ci_low", "ltc_ci_high",
  "abuse_std", "abuse_rate", "abuse_ci_low", "abuse_ci_high",
  "housing_std", "housing_rate", "housing_ci_low", "housing_ci_high",
  "parent_std", "parent_rate", "parent_ci_low", "parent_ci_high",
  "benefit_std", "benefit_rate", "benefit_ci_low", "benefit_ci_high",
  "physical_std", "physical_rate", "physical_ci_low", "physical_ci_high",
  "arts_std", "arts_rate", "arts_ci_low", "arts_ci_high",
  "sp4mh_std", "sp4mh_rate", "sp4mh_ci_low", "sp4mh_ci_high",
  "heoffer_std", "heoffer_rate", "heoffer_ci_low", "heoffer_ci_high"
)
coltypes_NHSE_CD <- c("numeric", rep("text", 3), rep("numeric", 52))
#### 1. File import  2. Pre-processing: split "category" into cat / cat_val
file <- "Social Prescribing Report_1.1.xlsx"

# Each sheet is read with the short column names defined above (the original
# header row is skipped).  Category values are stored as "<cat>.<cat_val>",
# so sheets that carry a category column are split on the literal dot.
dfICSReport_A <- read_excel(file, "ICS report - Group A",
                            col_names = colnames_ICS_A,
                            col_types = coltypes_ICS_A, skip = 1) %>%
  separate("category", c("cat", "cat_val"), sep = "[.]")

dfICSReport_CD <- read_excel(file, "ICS report - Group C and D",
                             col_names = colnames_ICS_CD,
                             col_types = coltypes_ICS_CD, skip = 1) %>%
  separate("category", c("cat", "cat_val"), sep = "[.]")

# The NHSE Group A sheet has no category column, so no split here.
dfNHSEReport_A1 <- read_excel(file, "NHSE report - Group A",
                              col_names = colnames_NHSE_A1,
                              col_types = coltypes_NHSE_A1, skip = 1)

dfNHSEReport_A2 <- read_excel(file, "NHSE report - Group A ii",
                              col_names = colnames_NHSE_A2,
                              col_types = coltypes_NHSE_A2, skip = 1) %>%
  separate("category", c("cat", "cat_val"), sep = "[.]")

# Group B carries no category column either.
dfNHSEReport_B <- read_excel(file, "NHSE report - Group B",
                             col_names = colnames_NHSE_B,
                             col_types = coltypes_NHSE_B, skip = 1)

dfNHSEReport_CD <- read_excel(file, "NHSE report - Group C and D",
                              col_names = colnames_NHSE_CD,
                              col_types = coltypes_NHSE_CD, skip = 1) %>%
  separate("category", c("cat", "cat_val"), sep = "[.]")
# remove FY prefix from the annual report's fiscal-year column
# (gsub strips the first two characters; assumes labels start with "FY" --
# TODO confirm against the source workbook)
dfNHSEReport_A2$fiscal_year <- gsub("^.{0,2}", "", dfNHSEReport_A2$fiscal_year)

# The quarterly sheets label periods as "<qtr> <FYyear>" (e.g. "Q1 FY2021").
# Split the label, drop the "FY" prefix and build a sortable zoo::yearqtr
# column `qtr`.  Extracted into one helper because the identical three-step
# fix was previously copy-pasted for each of the three data frames.
add_quarter_column <- function(df) {
  df <- df %>%
    separate("fiscal_year", c("fiscal_year.qtr", "fiscal_year.year"))
  df$fiscal_year.year <- gsub("^.{0,2}", "", df$fiscal_year.year)
  # "2021 Q1" parses directly as a yearqtr value.
  df$qtr <- as.yearqtr(paste(df$fiscal_year.year, df$fiscal_year.qtr))
  df
}
dfNHSEReport_A1 <- add_quarter_column(dfNHSEReport_A1)
dfICSReport_A   <- add_quarter_column(dfICSReport_A)
dfICSReport_CD  <- add_quarter_column(dfICSReport_CD)
## Summary tables for the ICS reports.  Rows are restricted to the "Age"
## category breakdown, so each ICS/quarter contributes one row per age band;
## mean() therefore averages over age bands (and, for the national tables,
## over ICSs as well).  mean() is deliberately called without na.rm = TRUE,
## matching the original behaviour.

# ICS group C/D report: quarterly means of the social-need rates.
# Using across() replaces 13 hand-written mean() lines per table.
need_rates <- c("mh_rate", "subst_rate", "empl_rate", "money_rate",
                "ltc_rate", "abuse_rate", "housing_rate", "parent_rate",
                "benefit_rate", "physical_rate", "arts_rate",
                "sp4mh_rate", "heoffer_rate")

dfNationalMean_socialneed <- dfICSReport_CD %>%
  filter(cat == "Age") %>%
  select(region_name, qtr, all_of(need_rates)) %>%
  group_by(qtr) %>%
  summarise(across(all_of(need_rates), mean, .names = "nat_mean_{.col}"),
            .groups = 'keep')

dfICSMean_socialneed <- dfICSReport_CD %>%
  filter(cat == "Age") %>%
  select(ics_name, qtr, all_of(need_rates)) %>%
  group_by(ics_name, qtr) %>%
  summarise(across(all_of(need_rates), mean, .names = "mean_{.col}"),
            .groups = 'keep')

# The only column shared by the two tables is qtr; the join key is now
# explicit instead of relying on an implicit natural join.
# NOTE(review): "mergedSociaNeed" (missing "l") is kept unchanged so any
# downstream code referencing this name keeps working.
mergedSociaNeed <- dfICSMean_socialneed %>%
  inner_join(dfNationalMean_socialneed, by = "qtr")

## ICS group A report: quarterly means of referral / declined rates.
dfNationalMean_socialprescribing <- dfICSReport_A %>%
  filter(cat == "Age") %>%
  select(ics_name, qtr, ref_rate, dec_rate) %>%
  group_by(qtr) %>%
  summarise(nat_mean_ref_rate = mean(ref_rate),
            nat_mean_dec_rate = mean(dec_rate),
            .groups = 'keep')

dfICSMean_socialprescribing <- dfICSReport_A %>%
  filter(cat == "Age") %>%
  select(ics_name, qtr, ref_rate, dec_rate) %>%
  group_by(ics_name, qtr) %>%
  summarise(mean_ref_rate = mean(ref_rate),
            mean_dec_rate = mean(dec_rate),
            .groups = 'keep')

mergedSocialPrescribing <- dfICSMean_socialprescribing %>%
  inner_join(dfNationalMean_socialprescribing, by = "qtr")
|
/ics-data-v2.R
|
permissive
|
orchid-database/ics-reports
|
R
| false
| false
| 25,795
|
r
|
library(tidyverse)
library(readxl)
library(zoo)
##### Mapping to original file headers - ICS Report A
# NHSRegionSortOrder "region_index,",
# NHSRegion "region_name,",
# STPNameEngland "ics_name,",
# FiscalYearQtrLabel "fiscal_year,",
# Category "category,",
# SocialPrescribingreferralSTD_Weeks "ref_std,",
# SocialPrescribingreferralWeightedAverage "ref_rate,",
# SocialPrescribingreferralLowerCI "ref_ci_low,",
# SocialPrescribingreferralUpperCI "ref_ci_high,",
# SocialPrescribingDeclinedSTD_Weeks "dec_std",
# SocialPrescribingDeclinedWeightedAverage "dec_rate,",
# SocialPrescribingDeclinedLowerCI "dec_ci_low,",
# SocialPrescribingDeclinedUpperCI "dec_ci_high",
##### Mapping to original file headers - ICS Report C and D
# NHSRegionSortOrder "region_index",
# NHSRegion "region_name",
# STPNameEngland "ics_name",
# FiscalYearQtrLabel "fiscal_year",
# Category "category",
# IssuesrelatingtomentalhealthSTD_Weeks "mh_std",
# IssuesrelatingtomentalhealthWeightedAverage "mh_rate",
# IssuesrelatingtomentalhealthLowerCI "mh_ci_low",
# IssuesrelatingtomentalhealthUpperCI "mh_ci_high",
# IssuesrelatingtosubstancemisuseSTD_Weeks "subs_std",
# IssuesrelatingtosubstancemisuseWeightedAverage "subst_rate",
# IssuesrelatingtosubstancemisuseLowerCI "subst_ci_low",
# IssuesrelatingtosubstancemisuseUpperCI "subst_ci_high",
# IssuesrelatingtoemploymentSTD_Weeks "empl_std",
# IssuesrelatingtoemploymentWeightedAverage "empl_rate",
# IssuesrelatingtoemploymentLowerCI "empl_ci_low",
# IssuesrelatingtoemploymentUpperCI "empl_ci_high",
# IssuesrelatingtomoneySTD_Weeks "money_std",
# IssuesrelatingtomoneyWeightedAverage "money_rate",
# IssuesrelatingtomoneyLowerCI "money_ci_low",
# IssuesrelatingtomoneyUpperCI "money_ci_high",
# IssuesrelatingtomanagingalongtermconditionSTD_Weeks "ltc_std",
# IssuesrelatingtomanagingalongtermconditionWeightedAverage "ltc_rate",
# IssuesrelatingtomanagingalongtermconditionLowerCI "ltc_ci_low",
# IssuesrelatingtomanagingalongtermconditionUpperCI "ltc_ci_high",
# IssuesrelatingtoabuseSTD_Weeks "abuse_std",
# IssuesrelatingtoabuseWeightedAverage "abuse_rate",
# IssuesrelatingtoabuseLowerCI "abuse_ci_low",
# IssuesrelatingtoabuseUpperCI "abuse_ci_high",
# IssuesrelatingtohousingSTD_Weeks "housing_std",
# IssuesrelatingtohousingWeightedAverage "housing_rate",
# IssuesrelatingtohousingLowerCI "housing_ci_low",
# IssuesrelatingtohousingUpperCI "housing_ci_high",
# IssuesrelatingtoparentingSTD_Weeks "parent_std",
# IssuesrelatingtoparentingWeightedAverage "parent_rate",
# IssuesrelatingtoparentingLowerCI "parent_ci_low",
# IssuesrelatingtoparentingUpperCI "parent_ci_high",
# ReferralToBenefitsAgencySTD_Weeks "benefit_std",
# ReferralToBenefitsAgencyWeightedAverage "benefit_rate",
# ReferralToBenefitsAgencyLowerCI "benefit_ci_low",
# ReferralToBenefitsAgencyUpperCI "benefit_ci_high",
# ReferralToPhysicalActivityProgrammeSTD_Weeks "physical_std",
# ReferralToPhysicalActivityProgrammeWeightedAverage "physical_rate",
# ReferralToPhysicalActivityProgrammeLowerCI "physical_ci_low",
# ReferralToPhysicalActivityProgrammeUpperCI "physical_ci_high",
# ReferralToArtsTherapyServicesSTD_Weeks "arts_std",
# ReferralToArtsTherapyServicesWeightedAverage "arts_rate",
# ReferralToArtsTherapyServicesLowerCI "arts_ci_low",
# ReferralToArtsTherapyServicesUpperCI "arts_ci_high",
# SocialPrescribingForMentalHealthSTD_Weeks "sp4mh_std",
# SocialPrescribingForMentalHealthWeightedAverage "sp4mh_rate",
# SocialPrescribingForMentalHealthLowerCI "sp4mh_ci_low",
# SocialPrescribingForMentalHealthUpperCI "sp4mh_ci_high",
# HealthEducationOfferedSTD_Weeks "heoffer_std",
# HealthEducationOfferedWeightedAverage "heoffer_rate",
# HealthEducationOfferedLowerCI "heoffer_ci_low",
# HealthEducationOfferedUpperCI "heoffer_ci_high",
##### Mapping to original file headers - NHSE_A report
# NHSRegionSortOrder region_index
# NHSRegion region_name
# STPNameEngland ics_name
# FiscalYearQtrLabel fiscal_year
# SocialPrescribingReferralAvWeeklyRate ref_rate
# SocialPrescribingReferralLowerCI ref_rate_ci_low
# SocialPrescribingReferralUpperCI ref_rate_ci_high
# SocialPrescribingDeclinedAvWeeklyRate dec_rate
# SocialPrescribingLowerCI dec_rate_ci_low
# SocialPrescribingUpperCI dec_rate_ci_high
# PersonalisedCareAndSupportPlanAgreedAvWeeklyRate pcsp_agreed_rate
# PersonalisedCareAndSupportPlanAgreedLowerCI pcsp_agree_ci_low
# PersonalisedCareAndSupportPlanAgreedUpperCI pcsp_agree_ci_high
# PersonalisedCareAndSupportPlanReviewedAvWeeklyRate pcsp_review_rate
# PersonalisedCareAndSupportPlanReviewedLowerCI pcsp_review_ci_low
# PersonalisedCareAndSupportPlanReviewedUpperCI pcsp_review_ci_high
##### Mapping to original file headers - NHSE_A2 report
# NHSRegionSortOrder "region_index",
# NHSRegion "region_name",
# FiscalYear "fiscal_year",
# Category "category",
# SocialPrescribingReferralSTD_Weeks "ref_std",
# SocialPrescribingReferralWeightedAverage "ref_rate",
# SocialPrescribingReferralLowerCI "ref_ci_low",
# SocialPrescribingReferralUpperCI "ref_ci_high",
# SocialPrescribingDeclinedSTD_Weeks "dec_std",
# SocialPrescribingDeclinedWeightedAverage "dec_rate",
# SocialPrescribingLowerCI "dec_ci_low",
# SocialPrescribingUpperCI "dec_ci_high",
# PersonalisedCareAndSupportPlanAgreedSTD_Weeks "pcsp_agree_std",
# PersonalisedCareAndSupportPlanAgreedWeightedAverage "pcsp_agree_rate",
# PersonalisedCareAndSupportPlanAgreedLowerCI "pcsp_agree_ci_low",
# PersonalisedCareAndSupportPlanAgreedUpperCI "pcsp_agree_ci_high",
# PersonalisedCareAndSupportPlanReviewedSTD_Weeks "pcsp_review_std",
# PersonalisedCareAndSupportPlanReviewedWeightedAverage "pcsp_review_rate",
# PersonalisedCareAndSupportPlanReviewedLowerCI "pcsp_review_ci_low",
# PersonalisedCareAndSupportPlanReviewedUpperCI "pcsp_review_ci_high",
##### Mapping to original file headers - NHSE_B report
# NHSRegion "region_name",
# FiscalYearQtrLabel "fiscal_year",
# SocialPrescribingOfferedSTD_Weeks "sp_offered_std",
# SocialPrescribingOfferedWeightedAverage "sp_offered_rate",
# SocialPrescribingOfferedLowerCI "sp_offered_ci_low",
# SocialPrescribingOfferedUpperCI "sp_offered_ci_high",
# SocialprescribingsignpostingSTD_Weeks "sp_signpost_std",
# SocialprescribingsignpostingWeightedAverage "sp_signpost_rate",
# SocialprescribingsignpostingLowerCI "sp_signpost_ci_low",
# SocialprescribingsignpostingUpperCI "sp_signpost_ci_high",
# SocialPrescribingForMentalHealthSTD_Weeks "sp4mh_std",
# SocialPrescribingForMentalHealthWeightedAverage "sp4mh_rate",
# SocialPrescribingForMentalHealthLowerCI "sp4mh_ci_low",
# SocialPrescribingForMentalHealthUpperCI "sp4mh_ci_high",
# HealthcoachingreferralSTD_Weeks "hc_ref_std",
# HealthcoachingreferralWeightedAverage "hc_ref_rate",
# HealthcoachingreferralLowerCI "hc_ref_ci_low",
# HealthcoachingreferralUpperCI "hc_ref_ci_high",
# SeenbyhealthcoachSTD_Weeks "seenbyhc_std",
# SeenbyhealthcoachWeightedAverage "seenbyhc_rate",
# SeenbyhealthcoachLowerCI "seenbyhc_ci_low",
# SeenbyhealthcoachUpperCI "seenbyhc_ci_high",
# SeenbyhealthandwellbeingcoachSTD_Weeks "seenbyhcwbcoach_std",
# SeenbyhealthandwellbeingcoachWeightedAverage "seenbyhcwbcoach_rate",
# SeenbyhealthandwellbeingcoachLowerCI "seenbyhcwbcoach_ci_low",
# SeenbyhealthandwellbeingcoachUpperCI "seenbyhcwbcoach_ci_high",
# SeenbycarecoordinatorSTD_Weeks "seenbycc_std",
# SeenbycarecoordinatorWeightedAverage "seenbycc_rate",
# SeenbycarecoordinatorLowerCI "seenbycc_ci_low",
# SeenbycarecoordinatorUpperCI "seenbycc_ci_high",
# ShareddecisionmakingSTD_Weeks "shareddm_std",
# ShareddecisionmakingWeightedAverage "shareddm_rate",
# ShareddecisionmakingLowerCI "shareddm_ci_low",
# ShareddecisionmakingUpperCI "shareddm_ci_high",
# ShareddecisionmakingwithdecisionsupportSTD_Weeks "shareddm_supp_std",
# ShareddecisionmakingwithdecisionsupportWeightedAverage "shareddm_supp_rate",
# ShareddecisionmakingwithdecisionsupportLowerCI "shareddm_supp_ci_low",
# ShareddecisionmakingwithdecisionsupportUpperCI "shareddm_supp_ci_high",
# ShareddecisionmakingwithoutdecisionsupportSTD_Weeks "shareddm_wo_supp_std",
# ShareddecisionmakingwithoutdecisionsupportWeightedAverage "shareddm_wo_supp_rate",
# ShareddecisionmakingwithoutdecisionsupportLowerCI "shareddm_wo_supp_ci_low",
# ShareddecisionmakingwithoutdecisionsupportUpperCI "shareddm_wo_supp_ci_high",
# ShareddecisionmakingwithpatientdecisionaidSTD_Weeks "shareddm_patdec_std",
# ShareddecisionmakingwithpatientdecisionaidWeightedAverage "shareddm_patdec_rate",
# ShareddecisionmakingwithpatientdecisionaidLowerCI "shareddm_patdec_ci_low",
# ShareddecisionmakingwithpatientdecisionaidUpperCI "shareddm_patdec_ci_high",
# ShareddecisionmakingwithoutpatientdecisionaidSTD_Weeks "shareddm_wo_patdec_std",
# ShareddecisionmakingwithoutpatientdecisionaidWeightedAverage "shareddm_wo_patdec_rate",
# ShareddecisionmakingwithoutpatientdecisionaidLowerCI "shareddm_wo_patdec_ci_low",
# ShareddecisionmakingwithoutpatientdecisionaidUpperCI "shareddm_wo_patdec_ci_high",
# HaspersonalhealthbudgetSTD_Weeks "phbudget_std",
# HaspersonalhealthbudgetWeightedAverage "phbudget_rate",
# HaspersonalhealthbudgetLowerCI "phbudget_ci_low",
# HaspersonalhealthbudgetUpperCI "phbudget_ci_high",
##### Mapping to original file headers - NHSE_CD report
# NHSRegionSortOrder "region_index",
# NHSRegion "region_name",
# FiscalYear "fiscal_year",
# Category "category",
# IssuesrelatingtomentalhealthSTD_Weeks "mh_std",
# IssuesrelatingtomentalhealthWeightedAverage "mh_rate",
# IssuesrelatingtomentalhealthLowerCI "mh_ci_low",
# IssuesrelatingtomentalhealthUpperCI "mh_ci_high",
# IssuesrelatingtosubstancemisuseSTD_Weeks "subst_std",
# IssuesrelatingtosubstancemisuseWeightedAverage "subst_rate",
# IssuesrelatingtosubstancemisuseLowerCI "subst_ci_low",
# IssuesrelatingtosubstancemisuseUpperCI "subst_ci_high",
# IssuesrelatingtoemploymentSTD_Weeks "empl_std",
# IssuesrelatingtoemploymentWeightedAverage "empl_rate",
# IssuesrelatingtoemploymentLowerCI "empl_ci_low",
# IssuesrelatingtoemploymentUpperCI "empl_ci_high",
# IssuesrelatingtomoneySTD_Weeks "money_std",
# IssuesrelatingtomoneyWeightedAverage "money_rate",
# IssuesrelatingtomoneyLowerCI "money_ci_low",
# IssuesrelatingtomoneyUpperCI "money_ci_high",
# IssuesrelatingtomanagingalongtermconditionSTD_Weeks "ltc_std",
# IssuesrelatingtomanagingalongtermconditionWeightedAverage "ltc_rate",
# IssuesrelatingtomanagingalongtermconditionLowerCI "ltc_ci_low",
# IssuesrelatingtomanagingalongtermconditionUpperCI "ltc_ci_high",
# IssuesrelatingtoabuseSTD_Weeks "abuse_std",
# IssuesrelatingtoabuseWeightedAverage "abuse_rate",
# IssuesrelatingtoabuseLowerCI "abuse_ci_low",
# IssuesrelatingtoabuseUpperCI "abuse_ci_high",
# IssuesrelatingtohousingSTD_Weeks "housing_std",
# IssuesrelatingtohousingWeightedAverage "housing_rate",
# IssuesrelatingtohousingLowerCI "housing_ci_low",
# IssuesrelatingtohousingUpperCI "housing_ci_high",
# IssuesrelatingtoparentingSTD_Weeks "parent_std",
# IssuesrelatingtoparentingWeightedAverage "parent_rate",
# IssuesrelatingtoparentingLowerCI "parent_ci_low",
# IssuesrelatingtoparentingUpperCI "parent_ci_high",
# ReferralToBenefitsAgencySTD_Weeks "benefit_std",
# ReferralToBenefitsAgencyWeightedAverage "benefit_rate",
# ReferralToBenefitsAgencyLowerCI "benefit_ci_low",
# ReferralToBenefitsAgencyUpperCI "benefit_ci_high",
# ReferralToPhysicalActivityProgrammeSTD_Weeks "physical_std",
# ReferralToPhysicalActivityProgrammeWeightedAverage "physical_rate",
# ReferralToPhysicalActivityProgrammeLowerCI "physical_ci_low",
# ReferralToPhysicalActivityProgrammeUpperCI "physical_ci_high",
# ReferralToArtsTherapyServicesSTD_Weeks "arts_std",
# ReferralToArtsTherapyServicesWeightedAverage "arts_rate",
# ReferralToArtsTherapyServicesLowerCI "arts_ci_low",
# ReferralToArtsTherapyServicesUpperCI "arts_ci_high",
# SocialPrescribingForMentalHealthSTD_Weeks "sp4mh_std",
# SocialPrescribingForMentalHealthWeightedAverage "sp4mh_rate",
# SocialPrescribingForMentalHealthLowerCI "sp4mh_ci_low",
# SocialPrescribingForMentalHealthUpperCI "sp4mh_ci_high",
# HealthEducationOfferedSTD_Weeks "heoffer_std",
# HealthEducationOfferedWeightedAverage "heoffer_rate",
# HealthEducationOfferedLowerCI "heoffer_ci_low",
# HealthEducationOfferedUpperCI "heoffer_ci_high"
#####Short column definitions and data types
## Short column definitions and data types for the read_excel() imports below.
## Each measure expands to four columns: *_std (weighted SD in weeks),
## *_rate (weighted average), *_ci_low / *_ci_high (confidence interval).

## Expand each measure prefix into its four column names, preserving the
## prefix order (std, rate, ci_low, ci_high per measure).
measure_cols <- function(prefixes) {
  as.vector(vapply(prefixes,
                   function(p) paste0(p, c("_std", "_rate", "_ci_low", "_ci_high")),
                   character(4)))
}

colnames_ICS_A <- c("region_index", "region_name", "ics_name", "fiscal_year", "category",
                    measure_cols(c("ref", "dec")))
coltypes_ICS_A <- c("numeric", rep("text", 4), rep("numeric", 8))

## NOTE: "subs_std" in the original was a typo; fixed to "subst_std" to match
## subst_rate / subst_ci_low / subst_ci_high and the source-file header mapping.
cd_measures <- c("mh", "subst", "empl", "money", "ltc", "abuse", "housing",
                 "parent", "benefit", "physical", "arts", "sp4mh", "heoffer")
colnames_ICS_CD <- c("region_index", "region_name", "ics_name", "fiscal_year", "category",
                     measure_cols(cd_measures))
coltypes_ICS_CD <- c("numeric", rep("text", 4), rep("numeric", 52))

a_measures <- c("ref", "dec", "pcsp_agree", "pcsp_review")
colnames_NHSE_A1 <- c("region_index", "region_name", "ics_name", "fiscal_year",
                      measure_cols(a_measures))
coltypes_NHSE_A1 <- c("numeric", rep("text", 3), rep("numeric", 16))

colnames_NHSE_A2 <- c("region_index", "region_name", "fiscal_year", "category",
                      measure_cols(a_measures))
coltypes_NHSE_A2 <- c("numeric", rep("text", 3), rep("numeric", 16))

b_measures <- c("sp_offered", "sp_signpost", "sp4mh", "hc_ref", "seenbyhc",
                "seenbyhcwbcoach", "seenbycc", "shareddm", "shareddm_supp",
                "shareddm_wo_supp", "shareddm_patdec", "shareddm_wo_patdec",
                "phbudget")
colnames_NHSE_B <- c("region_name", "fiscal_year", measure_cols(b_measures))
coltypes_NHSE_B <- c(rep("text", 2), rep("numeric", 52))

colnames_NHSE_CD <- c("region_index", "region_name", "fiscal_year", "category",
                      measure_cols(cd_measures))
coltypes_NHSE_CD <- c("numeric", rep("text", 3), rep("numeric", 52))
#### 1. File import 2. Pre-processing: split category, normalise fiscal quarter
file <- "Social Prescribing Report_1.1.xlsx"

## Split the combined "category" column (e.g. "Age.18-24") into the category
## name ("cat") and its value ("cat_val").
split_category <- function(df) {
  df %>%
    separate("category", c("cat", "cat_val"), sep = "[.]")
}

## Convert the quarterly fiscal-year label into a zoo::yearqtr column "qtr".
## The intermediate fiscal_year.qtr / fiscal_year.year columns are kept
## because downstream code may rely on them. gsub("^.{0,2}", "", ...) drops
## the leading two characters of the year part (presumably an "FY" prefix --
## TODO confirm against the raw workbook).
fix_quarter <- function(df) {
  df <- df %>%
    separate("fiscal_year", c("fiscal_year.qtr", "fiscal_year.year"))
  df$fiscal_year.year <- gsub("^.{0,2}", "", df$fiscal_year.year)
  df$qtr <- as.yearqtr(paste(df$fiscal_year.year, df$fiscal_year.qtr))
  df
}

dfICSReport_A <- read_excel(file, "ICS report - Group A", col_names = colnames_ICS_A,
                            col_types = coltypes_ICS_A, skip = 1) %>%
  split_category() %>%
  fix_quarter()
dfICSReport_CD <- read_excel(file, "ICS report - Group C and D", col_names = colnames_ICS_CD,
                             col_types = coltypes_ICS_CD, skip = 1) %>%
  split_category() %>%
  fix_quarter()
# NHSE Group A sheet has no category column; only the quarter needs fixing.
dfNHSEReport_A1 <- read_excel(file, "NHSE report - Group A", col_names = colnames_NHSE_A1,
                              col_types = coltypes_NHSE_A1, skip = 1) %>%
  fix_quarter()
dfNHSEReport_A2 <- read_excel(file, "NHSE report - Group A ii", col_names = colnames_NHSE_A2,
                              col_types = coltypes_NHSE_A2, skip = 1) %>%
  split_category()
# Annual (not quarterly) labels here: just strip the 2-character prefix.
dfNHSEReport_A2$fiscal_year <- gsub("^.{0,2}", "", dfNHSEReport_A2$fiscal_year)
dfNHSEReport_B <- read_excel(file, "NHSE report - Group B", col_names = colnames_NHSE_B,
                             col_types = coltypes_NHSE_B, skip = 1)
dfNHSEReport_CD <- read_excel(file, "NHSE report - Group C and D", col_names = colnames_NHSE_CD,
                              col_types = coltypes_NHSE_CD, skip = 1) %>%
  split_category()
## Summarised dataframes for the ICS group CD report: mean social-need rates
## per quarter, nationally and per ICS. Rows are restricted to the "Age"
## category split so each region/quarter contributes one full set of rates
## (avoids double counting across demographic breakdowns).
rate_cols <- c("mh_rate", "subst_rate", "empl_rate", "money_rate", "ltc_rate",
               "abuse_rate", "housing_rate", "parent_rate", "benefit_rate",
               "physical_rate", "arts_rate", "sp4mh_rate", "heoffer_rate")
dfNationalMean_socialneed <- dfICSReport_CD %>%
  filter(cat == "Age") %>%
  select(region_name, qtr, all_of(rate_cols)) %>%
  group_by(qtr) %>%
  summarise(across(all_of(rate_cols), mean, .names = "nat_mean_{.col}"),
            .groups = "keep")
dfICSMean_socialneed <- dfICSReport_CD %>%
  filter(cat == "Age") %>%
  select(ics_name, qtr, all_of(rate_cols)) %>%
  group_by(ics_name, qtr) %>%
  summarise(across(all_of(rate_cols), mean, .names = "mean_{.col}"),
            .groups = "keep")
# NOTE: "SociaNeed" (sic) kept -- downstream code refers to this name.
# Join key made explicit: qtr is the only column shared by the two frames.
mergedSociaNeed <- dfICSMean_socialneed %>%
  inner_join(dfNationalMean_socialneed, by = "qtr")

## Same pattern for the ICS group A report (referral / declined rates).
dfNationalMean_socialprescribing <- dfICSReport_A %>%
  filter(cat == "Age") %>%
  select(ics_name, qtr, ref_rate, dec_rate) %>%
  group_by(qtr) %>%
  summarise(nat_mean_ref_rate = mean(ref_rate),
            nat_mean_dec_rate = mean(dec_rate),
            .groups = "keep")
dfICSMean_socialprescribing <- dfICSReport_A %>%
  filter(cat == "Age") %>%
  select(ics_name, qtr, ref_rate, dec_rate) %>%
  group_by(ics_name, qtr) %>%
  summarise(mean_ref_rate = mean(ref_rate),
            mean_dec_rate = mean(dec_rate),
            .groups = "keep")
mergedSocialPrescribing <- dfICSMean_socialprescribing %>%
  inner_join(dfNationalMean_socialprescribing, by = "qtr")
|
require(msm)
## Estimate the variance explained by a single SNP locus (Vq) from an
## animal-model (asreml) fit, via the single-locus variance formula
## Vq = 2pq * (a + d(q - p))^2, together with a delta-method standard error.
##
## Arguments:
##   asremlmodel - fitted asreml model with the SNP genotype as a 3-level
##                 fixed effect and a pedigree random effect whose variance
##                 component is named "ped(<animalid>, var = T)!ped"
##   dataframe   - data the model was fitted to (may have repeated records
##                 per animal)
##   animalid    - name of the individual-ID column
##   snpid       - name of the SNP genotype column (3 genotype classes)
## Returns a named list: Vq, Va (pedigree additive variance),
## VarExplained (= Vq / (Vq + Va)), and Vq.se.
estVQ <- function(asremlmodel, dataframe, animalid, snpid){
#~~ estimate allele frequencies
# unique() keeps one row per animal so repeated records don't bias counts;
# assumes the 3 genotype classes tabulate in order hom / het / hom -- confirm
freqs <- table(unique(dataframe[,c(animalid, snpid)])[,2])
if(length(freqs) == 3){
p <- (freqs[1] + 0.5*freqs[2])/sum(freqs)
q <- 1-p
}
if(length(freqs) != 3) stop("not enough genotypes")
#~~ estimate a and d
# fixed-effect estimates for the three genotype levels of this SNP
fixeftab <- summary(asremlmodel, all = T)$coef.fixed
fixeftab <- fixeftab[grep(snpid, row.names(fixeftab)),]
# additive effect: half the difference between the two homozygote estimates
a = (fixeftab[1,1] - fixeftab[3,1])/2
# if(a < 0) a <- a * -1
# dominance effect derived from the heterozygote estimate (derivation
# assumes effects are contrasts to a common baseline -- TODO confirm)
d = a + fixeftab[2,1]
Vq <- 2*p*q*(a + d*(q - p))^2
# additive genetic variance from the pedigree random effect
Va <- summary(asremlmodel, all = T)$varcomp[paste("ped(", animalid, ", var = T)!ped", sep = ""),]$component
VarExplained <- Vq/(Vq + Va)
# full covariance matrix of the fixed effects (lower triangle -> matrix)
C <- MCMCglmm::Tri2M(asremlmodel$Cfixed, FALSE)
# NOTE(review): the next two expressions are evaluated but their values are
# discarded inside a function; retained from interactive sanity-checking
diag(C)
sqrt(diag(C)) # match s.e. of models (have a look)
# keep the 2x2 covariance block of the het and hom SNP effect estimates
C<-C[2:3,2:3]
C
# NOTE(review): in the deltamethod() formula below, x1 and x2 are symbolic
# placeholders for the elements of beta; these local assignments appear to
# be shadowed by deltamethod's own substitution -- TODO confirm
x1 <- C[1,1] # sampling variance of the het effect
x2 <- C[2,2] # sampling variance of the hom effect
beta <- summary(asremlmodel, all = T)$coef.fixed[2:3, 1]
X <- 2*p*q
Y <- q^2
Vq.se <- deltamethod(~X*(-x2/2 + (-x2/2 + x1)*Y)^2, beta, C) # standard error of Vq
results <- list(Vq, Va, VarExplained, Vq.se)
names(results) <- c("Vq", "Va", "VarExplained", "Vq.se")
results
}
|
/MCMCglmm.QTLvariance.R
|
no_license
|
susjoh/r-functions
|
R
| false
| false
| 1,281
|
r
|
require(msm)
## Estimate the variance explained by a single SNP locus (Vq) from an
## animal-model (asreml) fit, via the single-locus variance formula
## Vq = 2pq * (a + d(q - p))^2, together with a delta-method standard error.
##
## Arguments:
##   asremlmodel - fitted asreml model with the SNP genotype as a 3-level
##                 fixed effect and a pedigree random effect whose variance
##                 component is named "ped(<animalid>, var = T)!ped"
##   dataframe   - data the model was fitted to (may have repeated records
##                 per animal)
##   animalid    - name of the individual-ID column
##   snpid       - name of the SNP genotype column (3 genotype classes)
## Returns a named list: Vq, Va (pedigree additive variance),
## VarExplained (= Vq / (Vq + Va)), and Vq.se.
estVQ <- function(asremlmodel, dataframe, animalid, snpid){
#~~ estimate allele frequencies
# unique() keeps one row per animal so repeated records don't bias counts;
# assumes the 3 genotype classes tabulate in order hom / het / hom -- confirm
freqs <- table(unique(dataframe[,c(animalid, snpid)])[,2])
if(length(freqs) == 3){
p <- (freqs[1] + 0.5*freqs[2])/sum(freqs)
q <- 1-p
}
if(length(freqs) != 3) stop("not enough genotypes")
#~~ estimate a and d
# fixed-effect estimates for the three genotype levels of this SNP
fixeftab <- summary(asremlmodel, all = T)$coef.fixed
fixeftab <- fixeftab[grep(snpid, row.names(fixeftab)),]
# additive effect: half the difference between the two homozygote estimates
a = (fixeftab[1,1] - fixeftab[3,1])/2
# if(a < 0) a <- a * -1
# dominance effect derived from the heterozygote estimate (derivation
# assumes effects are contrasts to a common baseline -- TODO confirm)
d = a + fixeftab[2,1]
Vq <- 2*p*q*(a + d*(q - p))^2
# additive genetic variance from the pedigree random effect
Va <- summary(asremlmodel, all = T)$varcomp[paste("ped(", animalid, ", var = T)!ped", sep = ""),]$component
VarExplained <- Vq/(Vq + Va)
# full covariance matrix of the fixed effects (lower triangle -> matrix)
C <- MCMCglmm::Tri2M(asremlmodel$Cfixed, FALSE)
# NOTE(review): the next two expressions are evaluated but their values are
# discarded inside a function; retained from interactive sanity-checking
diag(C)
sqrt(diag(C)) # match s.e. of models (have a look)
# keep the 2x2 covariance block of the het and hom SNP effect estimates
C<-C[2:3,2:3]
C
# NOTE(review): in the deltamethod() formula below, x1 and x2 are symbolic
# placeholders for the elements of beta; these local assignments appear to
# be shadowed by deltamethod's own substitution -- TODO confirm
x1 <- C[1,1] # sampling variance of the het effect
x2 <- C[2,2] # sampling variance of the hom effect
beta <- summary(asremlmodel, all = T)$coef.fixed[2:3, 1]
X <- 2*p*q
Y <- q^2
Vq.se <- deltamethod(~X*(-x2/2 + (-x2/2 + x1)*Y)^2, beta, C) # standard error of Vq
results <- list(Vq, Va, VarExplained, Vq.se)
names(results) <- c("Vq", "Va", "VarExplained", "Vq.se")
results
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/energybalance_functions.R
\name{Reynolds_number}
\alias{Reynolds_number}
\title{Calculate Reynolds Number}
\usage{
Reynolds_number(u, D, nu)
}
\arguments{
\item{u}{is wind speed in m/s}
\item{D}{is characteristic dimension (e.g., body diameter) (m)}
\item{nu}{is the kinematic viscosity, ratio of dynamic viscosity to density of the fluid (m^2 s^(-1)), can calculate from DRYAIR or WETAIR}
}
\value{
Reynolds number
}
\description{
Calculate Reynolds Number
}
\details{
This function allows you to estimate the Reynolds Number, which describes the dynamic properties of the fluid surrounding the animal as the ratio of inertial to viscous forces (Gates 1980 Biophysical Ecology).
}
\examples{
\dontrun{
Reynolds_number(u=1, D=0.001, nu=1.2)
}
}
\seealso{
Other biophysical models:
\code{\link{Free_or_forced_convection}()},
\code{\link{Grashof_number_Gates}()},
\code{\link{Grashof_number}()},
\code{\link{Nu_from_Gr}()},
\code{\link{Nu_from_Re}()},
\code{\link{Nusselt_number}()},
\code{\link{Prandtl_number}()},
\code{\link{Qconduction_animal}()},
\code{\link{Qconduction_substrate}()},
\code{\link{Qconvection}()},
\code{\link{Qemitted_thermal_radiation}()},
\code{\link{Qevaporation}()},
\code{\link{Qmetabolism_from_mass_temp}()},
\code{\link{Qmetabolism_from_mass}()},
\code{\link{Qnet_Gates}()},
\code{\link{Qradiation_absorbed}()},
\code{\link{Qthermal_radiation_absorbed}()},
\code{\link{Tb_CampbellNorman}()},
\code{\link{Tb_Fei}()},
\code{\link{Tb_Gates2}()},
\code{\link{Tb_Gates}()},
\code{\link{Tb_butterfly}()},
\code{\link{Tb_grasshopper}()},
\code{\link{Tb_limpetBH}()},
\code{\link{Tb_limpet}()},
\code{\link{Tb_lizard}()},
\code{\link{Tb_mussel}()},
\code{\link{Tb_salamander_humid}()},
\code{\link{Tb_snail}()},
\code{\link{Tbed_mussel}()},
\code{\link{Tsoil}()},
\code{\link{actual_vapor_pressure}()},
\code{\link{boundary_layer_resistance}()},
\code{\link{external_resistance_to_water_vapor_transfer}()},
\code{\link{heat_transfer_coefficient_approximation}()},
\code{\link{heat_transfer_coefficient_simple}()},
\code{\link{heat_transfer_coefficient}()},
\code{\link{saturation_vapor_pressure}()},
\code{\link{saturation_water_vapor_pressure}()}
}
\concept{biophysical models}
\keyword{Reynolds}
\keyword{number}
|
/man/Reynolds_number.Rd
|
permissive
|
ArchiYujie/TrenchR
|
R
| false
| true
| 2,314
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/energybalance_functions.R
\name{Reynolds_number}
\alias{Reynolds_number}
\title{Calculate Reynolds Number}
\usage{
Reynolds_number(u, D, nu)
}
\arguments{
\item{u}{is wind speed in m/s}
\item{D}{is characteristic dimension (e.g., body diameter) (m)}
\item{nu}{is the kinematic viscosity, ratio of dynamic viscosity to density of the fluid (m^2 s^(-1)), can calculate from DRYAIR or WETAIR}
}
\value{
Reynolds number
}
\description{
Calculate Reynolds Number
}
\details{
This function allows you to estimate the Reynolds Number, which describes the dynamic properties of the fluid surrounding the animal as the ratio of inertial to viscous forces (Gates 1980 Biophysical Ecology).
}
\examples{
\dontrun{
Reynolds_number(u=1, D=0.001, nu=1.2)
}
}
\seealso{
Other biophysical models:
\code{\link{Free_or_forced_convection}()},
\code{\link{Grashof_number_Gates}()},
\code{\link{Grashof_number}()},
\code{\link{Nu_from_Gr}()},
\code{\link{Nu_from_Re}()},
\code{\link{Nusselt_number}()},
\code{\link{Prandtl_number}()},
\code{\link{Qconduction_animal}()},
\code{\link{Qconduction_substrate}()},
\code{\link{Qconvection}()},
\code{\link{Qemitted_thermal_radiation}()},
\code{\link{Qevaporation}()},
\code{\link{Qmetabolism_from_mass_temp}()},
\code{\link{Qmetabolism_from_mass}()},
\code{\link{Qnet_Gates}()},
\code{\link{Qradiation_absorbed}()},
\code{\link{Qthermal_radiation_absorbed}()},
\code{\link{Tb_CampbellNorman}()},
\code{\link{Tb_Fei}()},
\code{\link{Tb_Gates2}()},
\code{\link{Tb_Gates}()},
\code{\link{Tb_butterfly}()},
\code{\link{Tb_grasshopper}()},
\code{\link{Tb_limpetBH}()},
\code{\link{Tb_limpet}()},
\code{\link{Tb_lizard}()},
\code{\link{Tb_mussel}()},
\code{\link{Tb_salamander_humid}()},
\code{\link{Tb_snail}()},
\code{\link{Tbed_mussel}()},
\code{\link{Tsoil}()},
\code{\link{actual_vapor_pressure}()},
\code{\link{boundary_layer_resistance}()},
\code{\link{external_resistance_to_water_vapor_transfer}()},
\code{\link{heat_transfer_coefficient_approximation}()},
\code{\link{heat_transfer_coefficient_simple}()},
\code{\link{heat_transfer_coefficient}()},
\code{\link{saturation_vapor_pressure}()},
\code{\link{saturation_water_vapor_pressure}()}
}
\concept{biophysical models}
\keyword{Reynolds}
\keyword{number}
|
## function to prepare predicted fits for plot_fitted_bayes
# modelfit <- surv_dfa[[2]]; names <- surv_tbl$names[[2]];
# years <- surv_tbl$years[[2]]
# descend_order = FALSE
## Build a long data frame of posterior-predicted fits per time series.
##
## Arguments:
##   modelfit        - DFA fit; modelfit$data is an n_ts x n_years matrix of
##                     observations, and predicted(modelfit) returns posterior
##                     draws indexed (iter, chain, time, series) -- presumably
##                     a bayesdfa fit; confirm against caller
##   names           - data frame with stock / stock_name columns indexed by
##                     series ID
##   years           - calendar years for the columns of modelfit$data
##                     (defaults to 1..n_years)
##   descend_order   - order the ID factor by descending recent-period mean?
##   subset          - if not NULL, randomly keep this many series
##   year1_last_mean - first year of the "recent" window passed to final_prob()
## Returns one row per (ID, Time): posterior mean and 5/95% quantiles,
## observed value obs_y, plus last_mean / prob columns from final_prob().
fitted_preds <- function(modelfit, names = NULL, years = NULL,
descend_order = FALSE, subset = NULL,
year1_last_mean = 2011) {
n_ts <- dim(modelfit$data)[1]
n_years <- dim(modelfit$data)[2]
if (is.null(years)) {
years <- seq_len(n_years)
}
pred <- predicted(modelfit)
# Summarise draws over (time, series), then transpose/flatten so rows run
# series-fastest within each year, matching the ID/Time columns built from
# rep() and sort(rep()). NOTE(review): alignment assumes `years` is sorted.
df_pred <- data.frame(ID = rep(seq_len(n_ts), n_years),
Time = sort(rep(years, n_ts)),
mean = c(t(apply(pred, c(3, 4), mean))),
lo = c(t(apply(pred, c(3, 4), quantile, 0.05))),
hi = c(t(apply(pred, c(3, 4), quantile, 0.95)))
) %>%
mutate(stock = names$stock[ID])
# Observations: modelfit$data flattens column-major (series-fastest), the
# same layout as df_pred above; drop missing observations.
df_obs <- data.frame(ID = rep(seq_len(n_ts), n_years),
Time = sort(rep(years,
n_ts)),
obs_y = c(modelfit$data)) %>%
filter(!is.na(obs_y))
# new categorical version: single `prob` = probability the recent mean is on
# the same side of zero as its point estimate
last_gen_mean <- final_prob(modelfit = modelfit, names = names,
years = years, year1_last_mean = year1_last_mean
) %>%
mutate(prob = ifelse(last_mean > 0, prob_above_0, prob_below_0)) %>%
select(-c(prob_above_0, prob_below_0))
out <- df_pred %>%
left_join(.,
last_gen_mean,
by = "stock") %>%
left_join(., df_obs, by = c("ID", "Time"))
# Optional random subset of series (note: uses the RNG; not reproducible
# unless the caller sets a seed)
if (!is.null(subset)) {
samp_seq <- sample(unique(df_pred$ID), size = subset)
out <- out %>%
filter(ID %in% samp_seq)
}
# Relabel ID with the human-readable stock name, ordered by recent mean
out %>%
mutate(ID = names$stock_name[ID] %>%
as.factor(.) %>%
fct_reorder(., last_mean, .desc = descend_order))
}
## function to plot fits in link space (based on bayesdfa::plot_fitted)
# df_pred <- surv_pred_list[[1]]
## Plot per-series fits: grey "historic" segment before year1_last_mean, and
## a recent segment coloured by the binned recent-period mean (last_mean).
##
## Arguments:
##   df_pred         - output of fitted_preds(): mean/lo/hi, obs_y, last_mean,
##                     prob, stock per (ID, Time)
##   print_x         - draw x-axis text and ticks?
##   col_ramp        - range binned into 8 colour classes for last_mean
##   col_ramp_direction, leg_name - not referenced in this function body;
##                     kept for call compatibility (TODO confirm / remove)
##   facet_row, facet_col, drop - passed to facet_wrap()
##   year1_last_mean - year splitting historic vs recent segments
## Returns a ggplot object (faceted by ID, sample size printed per panel).
plot_fitted_pred <- function(df_pred, #ylab = NULL,
print_x = TRUE,
col_ramp = c(-1, 1),
col_ramp_direction = -1,
facet_row = NULL, facet_col = NULL,
leg_name = NULL,
year1_last_mean = 2011,
drop = TRUE) {
#limits for y axis: symmetric about zero, spanning the largest |observation|
y_lims <- max(abs(df_pred$obs_y), na.rm = T) * c(-1, 1)
x_int <- year1_last_mean
#make palette for last five year mean based on bins and col_ramp values
breaks <- seq(min(col_ramp), max(col_ramp), length.out = 9)
df_pred$color_ids <- cut(df_pred$last_mean,
breaks=breaks,
include.lowest=TRUE,
right=FALSE)
# 8 diverging colours (red = low, blue = high) plus grey for "historic"
col_pal <- c("#a50f15", "#de2d26", "#fb6a4a", "#fc9272", "#9ecae1", "#6baed6",
"#3182bd", "#08519c", "grey60")
names(col_pal) <- c(levels(df_pred$color_ids), "historic")
# replace color ID label so that low probabilities are historic (i.e. grey)
df_pred$color_ids2 <- ifelse(df_pred$prob < 0.90,
"historic",
as.character(df_pred$color_ids))
dum <- df_pred %>%
group_by(stock) %>%
#calculate mean and SD of ts for horizontal reference lines
mutate(ts_mean = mean(mean),
ts_mean_sd = sd(mean)) %>%
ungroup()
# per-series count of non-missing observations, printed in each facet
labs <- df_pred %>%
filter(!is.na(obs_y)) %>%
group_by(ID) %>%
tally()
# recent segment (coloured ribbon/line) drawn first, historic grey segment
# layered over Time <= x_int
p <- ggplot(dum %>% filter(Time >= x_int),
aes_string(x = "Time", y = "mean")) +
geom_ribbon(aes_string(ymin = "lo", ymax = "hi", colour = "color_ids2",
fill = "color_ids2"), alpha = 0.6) +
geom_line(aes_string(colour = "color_ids2"),
size = 1.25) +
geom_ribbon(data = dum %>% filter(Time <= x_int),
aes_string(ymin = "lo", ymax = "hi"),
fill = "grey60", colour = "grey60", alpha = 0.6) +
geom_line(data = dum %>% filter(Time <= x_int),
size = 1) +
geom_hline(aes(yintercept = ts_mean), lty = 2) +
# geom_hline(aes(yintercept = ts_mean + ts_mean_sd), lty = 3) +
# geom_hline(aes(yintercept = ts_mean - ts_mean_sd), lty = 3) +
geom_vline(xintercept = x_int, lty = 1, alpha = 0.6) +
scale_fill_manual(values = col_pal) +
scale_colour_manual(values = col_pal) +
scale_x_continuous(limits = c(1972, 2018), expand = c(0, 0)) +
geom_point(data = dum %>% filter(!is.na(obs_y)),
aes_string(x = "Time", y = "obs_y"),
size = 1, alpha = 0.6, shape = 21, fill = "black") +
facet_wrap(~ID, nrow = facet_row, ncol = facet_col, drop = drop) +
ggsidekick::theme_sleek() +
coord_cartesian(y = y_lims) +
theme(axis.title.x = element_blank(),
axis.title.y.left = element_blank(),
legend.position = "none",
axis.text.y.right = element_blank(),
axis.ticks.y.right = element_blank()) +
geom_text(
data = labs, aes(x = -Inf, y = -Inf, label = n),
hjust = -0.2, vjust = -0.4
)
if (print_x == FALSE) {
p <- p +
theme(axis.text.x = element_blank(),
axis.ticks.x = element_blank())
}
return(p)
}
## function to plot fits in real space (based on bayesdfa::plot_fitted)
# df_pred <- real_surv_pred_list[[1]]
## As plot_fitted_pred() but in real (back-transformed) space: the recent
## segment is coloured categorically ("very low" .. "very high") depending on
## where the recent mean falls relative to the series mean +/- a logit-space
## confidence band.
##
## Arguments:
##   df_pred  - must contain uncent_mean, uncent_lo, uncent_hi,
##              uncent_mean_logit, last_mean, prob, obs_y, survival, ID, Time
##   y_lims   - not referenced in this function body (y range is fixed at
##              c(0, 0.2) below); kept for call compatibility -- TODO confirm
##   print_x  - draw x-axis text and ticks?
##   facet_row, facet_col - passed to facet_wrap()
##   year1_last_mean - year splitting historic vs recent segments
## Returns a ggplot object.
plot_fitted_pred_real <- function(df_pred, #ylab = NULL,
y_lims = NULL,
print_x = TRUE,
facet_row = NULL, facet_col = NULL,
year1_last_mean = 2011
) {
x_int <- year1_last_mean
# Per-series mean and a ~95% band computed in logit space, back-transformed
# with plogis() so the band is on the real scale
y_int <- df_pred %>%
group_by(ID) %>%
summarize(ts_mean_logit = mean(uncent_mean_logit),
sd_mean_logit = sd(uncent_mean_logit),
ts_mean_sd_lo = plogis(ts_mean_logit +
(qnorm(0.025) * sd_mean_logit)),
ts_mean_sd_hi = plogis(ts_mean_logit +
(qnorm(0.975) * sd_mean_logit)),
ts_uncent_mean = mean(uncent_mean),
.groups = "drop") %>%
distinct()
# specify that color greyed out if relatively uncertain (in logit space)
df_pred2 <- df_pred %>%
left_join(., y_int, by = c("ID")) %>%
mutate(
color_id = case_when(
prob < 0.9 ~ "historic",
last_mean < ts_mean_sd_lo ~ "very low",
ts_mean_sd_lo < last_mean & last_mean < ts_uncent_mean ~ "low",
ts_mean_sd_hi > last_mean & last_mean > ts_uncent_mean ~ "high",
last_mean > ts_mean_sd_hi ~ "very high"
),
color_id = fct_reorder(as.factor(color_id),
last_mean - ts_uncent_mean),
# necessary to order correctly: facets sorted by colour category
ID_key = fct_reorder(as.factor(ID), as.numeric(color_id))
) %>%
droplevels()
# carry the re-ordered facet key onto the reference-line data
y_int2 <- y_int %>%
left_join(., df_pred2 %>% select(ID, ID_key) %>% distinct(), by = "ID")
#categorical palette: red = low, blue = high, grey = historic/uncertain
col_pal <- c("#a50f15", "#fc9272", "#9ecae1", "#08519c", "grey60")
names(col_pal) <- c("very low", "low", "high", "very high", "historic")
# per-series count of non-missing observations, printed in each facet
labs <- df_pred2 %>%
filter(!is.na(obs_y)) %>%
group_by(ID_key) %>%
tally()
# coloured recent segment first, grey historic segment layered on top
p <- ggplot(df_pred2 %>% filter(Time >= x_int),
aes_string(x = "Time", y = "uncent_mean")) +
geom_ribbon(aes_string(ymin = "uncent_lo", ymax = "uncent_hi",
colour = "color_id",
fill = "color_id"), alpha = 0.6) +
geom_line(aes_string(colour = "color_id"),
size = 1.25) +
geom_ribbon(data = df_pred2 %>% filter(Time <= x_int),
aes_string(ymin = "uncent_lo", ymax = "uncent_hi"),
fill = "grey60", colour = "grey60", alpha = 0.6) +
geom_line(data = df_pred2 %>% filter(Time <= x_int),
size = 1) +
geom_hline(data = y_int2, aes(yintercept = ts_uncent_mean), lty = 2) +
# geom_hline(data = y_int2, aes(yintercept = ts_mean_sd_hi), lty = 3) +
# geom_hline(data = y_int2, aes(yintercept = ts_mean_sd_lo), lty = 3) +
geom_vline(xintercept = x_int, lty = 1, alpha = 0.6) +
scale_fill_manual(values = col_pal) +
scale_colour_manual(values = col_pal) +
geom_point(data = df_pred2 %>% filter(!is.na(obs_y)),
aes_string(x = "Time", y = "survival"),
size = 1, alpha = 0.6, shape = 21, fill = "black") +
facet_wrap(~ID_key,
nrow = facet_row, ncol = facet_col) +
ggsidekick::theme_sleek() +
# fixed y range on the real scale (y_lims argument is ignored here)
coord_cartesian(y = c(0, 0.2), expand = 0) +
scale_x_continuous(limits = c(1972, 2018), expand = c(0, 0)) +
theme(axis.title.x = element_blank(),
axis.title.y.left = element_blank(),
legend.position = "none",
axis.text.y.right = element_blank(),
axis.ticks.y.right = element_blank()) +
geom_text(
data = labs, aes(x = -Inf, y = Inf, label = n),
hjust = -0.2, vjust = 1.1
)
if (print_x == FALSE) {
p <- p +
theme(axis.text.x = element_blank(),
axis.ticks.x = element_blank())
}
return(p)
}
# as above but for uncentered data
## Variant of plot_fitted_pred() for uncentered series: the y range spans the
## observed data (not symmetric about zero) and the dashed reference line is
## the observed mean (obs_mean_age column, which must be present in df_pred).
##
## Arguments:
##   df_pred         - per (ID, Time) mean/lo/hi, obs_y, last_mean, prob,
##                     stock, and an obs_mean_age column for the reference line
##   print_x         - draw x-axis text and ticks?
##   col_ramp        - range binned into 8 colour classes for last_mean
##   col_ramp_direction, leg_name - not referenced in this function body;
##                     kept for call compatibility (TODO confirm / remove)
##   facet_row, facet_col, drop - passed to facet_wrap()
##   year1_last_mean - year splitting historic vs recent segments
## Returns a ggplot object.
plot_fitted_pred_uncent <- function(df_pred, #ylab = NULL,
print_x = TRUE,
col_ramp = c(-1, 1),
col_ramp_direction = -1,
facet_row = NULL, facet_col = NULL,
leg_name = NULL,
year1_last_mean = 2011,
drop = TRUE) {
#limits for y axis
# NOTE(review): min(abs(...)) rather than min(...) -- intent unclear if
# observations can be negative; confirm before reuse
y_lims <- c(min(abs(df_pred$obs_y), na.rm = T),
max(abs(df_pred$obs_y), na.rm = T))
x_int <- year1_last_mean
#make palette for last five year mean based on bins and col_ramp values
breaks <- seq(min(col_ramp), max(col_ramp), length.out = 9)
df_pred$color_ids <- cut(df_pred$last_mean,
breaks=breaks,
include.lowest=TRUE,
right=FALSE)
# 8 diverging colours (red = low, blue = high) plus grey for "historic"
col_pal <- c("#a50f15", "#de2d26", "#fb6a4a", "#fc9272", "#9ecae1", "#6baed6",
"#3182bd", "#08519c", "grey60")
names(col_pal) <- c(levels(df_pred$color_ids), "historic")
# replace color ID label so that low probabilities are historic (i.e. grey)
df_pred$color_ids2 <- ifelse(df_pred$prob < 0.90,
"historic",
as.character(df_pred$color_ids))
dum <- df_pred %>%
group_by(stock) %>%
#calculate SD of ts for horizontal line
mutate(ts_mean_sd = sd(mean)) %>%
ungroup()
# per-series count of non-missing observations, printed in each facet
labs <- dum %>%
filter(!is.na(obs_y)) %>%
group_by(ID) %>%
tally()
# coloured recent segment first, grey historic segment layered on top
p <- ggplot(dum %>% filter(Time >= x_int),
aes_string(x = "Time", y = "mean")) +
geom_ribbon(aes_string(ymin = "lo", ymax = "hi", colour = "color_ids2",
fill = "color_ids2"), alpha = 0.6) +
geom_line(aes_string(colour = "color_ids2"),
size = 1.25) +
geom_ribbon(data = dum %>% filter(Time <= x_int),
aes_string(ymin = "lo", ymax = "hi"),
fill = "grey60", colour = "grey60", alpha = 0.6) +
geom_line(data = dum %>% filter(Time <= x_int),
size = 1) +
geom_hline(aes(yintercept = obs_mean_age), lty = 2) +
# geom_hline(aes(yintercept = obs_mean_age + ts_mean_sd), lty = 3) +
# geom_hline(aes(yintercept = obs_mean_age - ts_mean_sd), lty = 3) +
geom_vline(xintercept = x_int, lty = 1, alpha = 0.6) +
scale_fill_manual(values = col_pal) +
scale_colour_manual(values = col_pal) +
scale_x_continuous(limits = c(1972, 2018), expand = c(0, 0)) +
geom_point(data = dum %>% filter(!is.na(obs_y)),
aes_string(x = "Time", y = "obs_y"),
size = 1, alpha = 0.6, shape = 21, fill = "black") +
facet_wrap(~ID, nrow = facet_row, ncol = facet_col, drop = drop) +
ggsidekick::theme_sleek() +
coord_cartesian(y = y_lims) +
theme(axis.title.x = element_blank(),
axis.title.y.left = element_blank(),
legend.position = "none",
axis.text.y.right = element_blank(),
axis.ticks.y.right = element_blank()) +
geom_text(
data = labs, aes(x = -Inf, y = -Inf, label = n),
hjust = -0.2, vjust = -0.4
)
if (print_x == FALSE) {
p <- p +
theme(axis.text.x = element_blank(),
axis.ticks.x = element_blank())
}
return(p)
}
## function to calculate probability that estimates below average in last
# n_years
# modelfit = surv_dfa[[2]]; names = surv_tbl$names[[2]]; years = surv_tbl$years[[2]]
## Posterior probability that each stock's recent mean prediction
## (years year1_last_mean..year2_last_mean) lies below/above its
## long-term mean. Returns one row per stock with the recent mean
## (`last_mean`) and `prob_below_0` / `prob_above_0`.
## NOTE(review): the default `years = years` is self-referential and
## will error if `years` is not supplied by the caller -- confirm.
final_prob <- function(modelfit, names,
                       years = years, year1_last_mean = 2010,
                       year2_last_mean = NULL
) {
  # Melt posterior predictions to long form (iter x chain x time x stock)
  # and attach calendar years to the integer time index.
  tt <- reshape2::melt(predicted(modelfit),
                       varnames = c("iter", "chain", "time", "stock")) %>%
    left_join(.,
              data.frame(year = years,
                         time = unique(.$time)),
              by = "time")
  tt$stock <- as.factor(names$stock[tt$stock])
  # yr_range <- seq(max(tt$year) - (n_years - 1), max(tt$year), by = 1)
  # Default the end of the "recent" window to the last available year.
  if (is.null(year2_last_mean)) {
    year2_last_mean <- max(tt$year)
  }
  yr_range <- seq(year1_last_mean, year2_last_mean, by = 1)
  tt %>%
    group_by(stock) %>%
    filter(!year > year2_last_mean) %>%
    # Long-term mean over all retained years and posterior draws.
    mutate(overall_mean = mean(value)) %>%
    filter(year %in% yr_range) %>%
    # Mean of the recent window within each posterior iteration.
    group_by(stock, iter) %>%
    mutate(mean_value = mean(value)) %>%
    group_by(stock) %>%
    summarize(
      last_mean = mean(mean_value),
      prob_below_0 = sum(mean_value < overall_mean) / length(mean_value),
      prob_above_0 = sum(mean_value > overall_mean) / length(mean_value)
    )
}
## function to prepare rotated model fit for plotting trends (based on
# bayesdfa::plot_trends)
## Reshape a rotated DFA model fit into a long data frame of trend
## estimates (posterior mean plus interval bounds) for plotting.
## Mirrors the data preparation inside bayesdfa::plot_trends.
prep_trends <- function(rotated_modelfit, years, group) {
  fit <- rotated_modelfit
  k_trends <- dim(fit$Z_rot)[3]          # number of estimated trends
  t_steps <- dim(fit$trends_mean)[2]     # time steps per trend
  # Matrices are trend x time; transpose + c() unrolls them trend-by-trend.
  out <- data.frame(
    x = c(t(fit$trends_mean)),
    lo = c(t(fit$trends_lower)),
    hi = c(t(fit$trends_upper)),
    trend = paste0("Trend ", rep(seq_len(k_trends), each = t_steps)),
    time = rep(years, times = k_trends),
    group = group
  )
  out
}
## function to plot trends (based on bayesdfa::plot_trends)
## Plot one set of estimated DFA trends (based on bayesdfa::plot_trends):
## ribbon (lo..hi) + line of the trend estimate over time, coloured by
## trend_dat$life_history, one facet per group. Returns a ggplot object.
plot_one_trend <- function(trend_dat, facet_var = FALSE) {
  p <- ggplot(trend_dat,
              aes_string(x = "time", y = "x")) +
    geom_ribbon(aes_string(ymin = "lo", ymax = "hi", colour = "life_history",
                           fill = "life_history"),
                alpha = 0.4) +
    geom_line(aes_string(colour = "life_history"), size = 1.2) +
    # scale_colour_brewer(type = "qual", name = "") +
    # scale_fill_brewer(type = "qual", name = "") +
    # Reference line at zero.
    geom_hline(yintercept = 0, lty = 2) +
    # xlab("Brood Year") +
    ylab("Estimated Trend") +
    scale_x_continuous(limits = c(1972, 2018), expand = c(0, 0)) +
    facet_wrap(~group, nrow = 1) +
    ggsidekick::theme_sleek() +
    theme(
      legend.position = "none",
      strip.background = element_blank(),
      strip.text.x = element_blank(),
      axis.title.x = element_blank())
  # Optionally facet by variable in addition to group.
  if (facet_var == TRUE) {
    p <- p +
      facet_grid(group~var)
  }
  return(p)
}
## function to prep regime model fit for plotting (based on
# bayesdfa::plot_regime_model)
## Prepare a hidden-Markov regime model fit for plotting (based on
## bayesdfa::plot_regime_model).
##
## Args:
##   regime_model: fitted regime model containing a Stan fit in $model
##   probs:        lower/upper quantiles for the interval (default 5/95%)
##   regime_prob_threshold: retained for backward compatibility; the
##                 "confident regime" computation that used it produced
##                 values never returned, so it is currently unused.
##   flip_regimes: flip state probabilities (p -> 1 - p) so regimes plot
##                 consistently across models
##   years:        calendar years to attach to the time index
##   group:        label added as a `group` column
##
## Returns: long data frame with one row per Time x State holding the
## posterior median and interval bounds (lwr/upr) of the regime
## probabilities, plus `time` (years) and `group`.
##
## Cleanup vs. original: removed dead code that was computed but never
## used or returned (mu_k extraction/quantiles, confident_regimes /
## regime_indexes, df_y / range01). Output is unchanged.
prep_regime <- function(regime_model, probs = c(0.05, 0.95),
                        regime_prob_threshold = 0.9, flip_regimes = FALSE,
                        years, group) {
  # Posterior draws of regime probabilities: iterations x time x state.
  gamma_tk <- rstan::extract(regime_model$model, pars = "gamma_tk")[[1]]
  # Pointwise quantiles across posterior draws.
  l <- apply(gamma_tk, 2:3, quantile, probs = probs[[1]])
  u <- apply(gamma_tk, 2:3, quantile, probs = probs[[2]])
  med <- apply(gamma_tk, 2:3, quantile, probs = 0.5)
  # should regimes be flipped for plotting
  if (flip_regimes) {
    u <- 1 - u
    l <- 1 - l
    med <- 1 - med
  }
  plot_prob_indices <- seq_len(ncol(med))
  df_l <- reshape2::melt(l, varnames = c("Time", "State"),
                         value.name = "lwr")
  df_u <- reshape2::melt(u, varnames = c("Time", "State"),
                         value.name = "upr")
  df_m <- reshape2::melt(med, varnames = c("Time", "State"),
                         value.name = "median")
  dplyr::inner_join(df_l, df_u, by = c("Time", "State")) %>%
    dplyr::inner_join(df_m, by = c("Time", "State")) %>%
    dplyr::filter(.data$State %in% plot_prob_indices) %>%
    dplyr::mutate(State = paste("State", .data$State),
                  time = rep(years, length(unique(State))),
                  group = group)
}
## function to plot regimes (based on bayesdfa::plot_trends/plot_regime_model)
## Plot regime (state) probabilities over time (based on
## bayesdfa::plot_trends/plot_regime_model): dashed ribbon + line of the
## posterior median and interval, coloured by life_history, one facet
## per group. Returns a ggplot object.
plot_one_regime <- function(regime_dat, facet_var = FALSE, y_lab = NULL) {
  p <- ggplot(regime_dat,
              aes_string(x = "time", y = "median")) +
    geom_ribbon(aes_string(ymin = "lwr", ymax = "upr", colour = "life_history",
                           fill = "life_history"),
                alpha = 0.4, lty = 6) +
    geom_line(aes_string(colour = "life_history"), size = 1.2, lty = 6) +
    # scale_colour_brewer(type = "qual", name = "") +
    # scale_fill_brewer(type = "qual", name = "") +
    # xlab("Brood Year") +
    ylab(y_lab) +
    scale_x_continuous(limits = c(1972, 2018), expand = c(0, 0)) +
    facet_wrap(~group, nrow = 1) +
    ggsidekick::theme_sleek() +
    theme(
      legend.position = "none",
      strip.background = element_blank(),
      strip.text.x = element_blank(),
      axis.title.x = element_blank()
    )
  # Optionally add a variable dimension to the facetting.
  if (facet_var == TRUE) {
    p <- p +
      facet_grid(group ~ var)
  }
  return(p)
}
## function to prepare rotated model fit for plotting loadings (based on
# bayesdfa::plot_loadings)
## Prepare rotated DFA loadings for plotting (based on
## bayesdfa::plot_loadings): melts posterior draws of Z_rot and computes,
## per series x trend, the posterior probability that the loading
## differs from zero (prob_diff0).
## NOTE(review): conf_level is accepted but never used -- confirm intent.
prep_loadings <- function (rotated_modelfit, names, group, conf_level = 0.95) {
  # Posterior draws of rotated loadings: iter x series x trend.
  v <- reshape2::melt(rotated_modelfit$Z_rot,
                      varnames = c("iter", "name", "trend"))
  v$name <- as.factor(names$stock[v$name])
  v %>%
    mutate(trend = as.factor(paste0("Trend ", trend))) %>%
    group_by(name, trend) %>%
    # q_lower/q_upper: posterior mass below/above zero within each
    # series x trend group; prob_diff0 is the larger of the two.
    mutate(q_lower = sum(value < 0) / length(value),
           q_upper = 1 - q_lower,
           prob_diff0 = max(q_lower, q_upper),
           group = group)
}
##function to plot loadings
## Violin plot of posterior loading distributions, one violin per time
## series (flipped to horizontal), filled by trend and alpha-scaled by
## the probability the loading differs from zero. `group` is annotated
## in the panel corner. Returns a ggplot object.
plot_load <- function(x, group = NULL, guides = FALSE, y_lims = c(-0.5, 0.5)) {
  p <- ggplot(x, aes_string(x = "name", y = "value", fill = "trend",
                            alpha = "prob_diff0")) +
    scale_alpha_continuous(name = "Probability\nDifferent") +
    scale_fill_brewer(name = "", palette = "Paired") +
    geom_violin(color = NA, position = position_dodge(0.3)) +
    # Reference line at zero loading.
    geom_hline(yintercept = 0, lty = 2) +
    coord_flip() +
    xlab("Time Series") +
    ylab("Loading") +
    scale_y_continuous(limits = y_lims, expand = c(0, 0)) +
    ggsidekick::theme_sleek() +
    guides(alpha = guide_legend(override.aes = list(fill = "grey"))) +
    theme(#axis.text.y = element_text(angle = 45, vjust = -1, size = 7),
          axis.title = element_blank()) +
    # Group label annotated inside the panel.
    annotate("text", x = Inf, y = -Inf, label = group, hjust = -0.05,
             vjust = 1.1, size = 3.5)
  # Optionally drop the legend.
  if (guides == FALSE) {
    p <- p +
      theme(legend.position = "none")
  }
  return(p)
}
|
/R/functions/plotting_functions.R
|
no_license
|
CamFreshwater/chinDyn
|
R
| false
| false
| 20,307
|
r
|
## function to prepare predicted fits for plot_fitted_bayes
# modelfit <- surv_dfa[[2]]; names <- surv_tbl$names[[2]];
# years <- surv_tbl$years[[2]]
# descend_order = FALSE
## Assemble a long data frame of posterior-predicted fits (mean and 90%
## interval per series x year), observed data, and the recent-window
## summary from final_prob(), ready for plot_fitted_pred(). Facet levels
## are ordered by the recent mean; optionally subsamples `subset` series.
fitted_preds <- function(modelfit, names = NULL, years = NULL,
                         descend_order = FALSE, subset = NULL,
                         year1_last_mean = 2011) {
  n_ts <- dim(modelfit$data)[1]
  n_years <- dim(modelfit$data)[2]
  # Fall back to integer time indices when calendar years not supplied.
  if (is.null(years)) {
    years <- seq_len(n_years)
  }
  pred <- predicted(modelfit)
  # Posterior summaries collapsed over draws (dims 3 and 4 of pred are
  # time x series).
  df_pred <- data.frame(ID = rep(seq_len(n_ts), n_years),
                        Time = sort(rep(years, n_ts)),
                        mean = c(t(apply(pred, c(3, 4), mean))),
                        lo = c(t(apply(pred, c(3, 4), quantile, 0.05))),
                        hi = c(t(apply(pred, c(3, 4), quantile, 0.95)))
  ) %>%
    mutate(stock = names$stock[ID])
  # Observed values in the same long layout, NAs dropped.
  df_obs <- data.frame(ID = rep(seq_len(n_ts), n_years),
                       Time = sort(rep(years,
                                       n_ts)),
                       obs_y = c(modelfit$data)) %>%
    filter(!is.na(obs_y))
  # new categorical version
  # Recent-window mean and probability it differs from long-term mean.
  last_gen_mean <- final_prob(modelfit = modelfit, names = names,
                              years = years, year1_last_mean = year1_last_mean
  ) %>%
    mutate(prob = ifelse(last_mean > 0, prob_above_0, prob_below_0)) %>%
    select(-c(prob_above_0, prob_below_0))
  out <- df_pred %>%
    left_join(.,
              last_gen_mean,
              by = "stock") %>%
    left_join(., df_obs, by = c("ID", "Time"))
  # Optionally keep a random subset of series (e.g. for quick plots).
  if (!is.null(subset)) {
    samp_seq <- sample(unique(df_pred$ID), size = subset)
    out <- out %>%
      filter(ID %in% samp_seq)
  }
  # Replace numeric ID with stock name, ordered by recent mean so facets
  # sort sensibly downstream.
  out %>%
    mutate(ID = names$stock_name[ID] %>%
             as.factor(.) %>%
             fct_reorder(., last_mean, .desc = descend_order))
}
## function to plot fits in link space (based on bayesdfa::plot_fitted)
# df_pred <- surv_pred_list[[1]]
## Plot DFA fits in link (standardized) space (based on
## bayesdfa::plot_fitted). Years >= year1_last_mean are coloured by the
## binned recent mean; earlier years are grey, as are series whose
## recent mean is uncertain (prob < 0.9). Returns a ggplot object.
plot_fitted_pred <- function(df_pred, #ylab = NULL,
                             print_x = TRUE,
                             col_ramp = c(-1, 1),
                             col_ramp_direction = -1,
                             facet_row = NULL, facet_col = NULL,
                             leg_name = NULL,
                             year1_last_mean = 2011,
                             drop = TRUE) {
  #limits for y axis (symmetric about zero)
  y_lims <- max(abs(df_pred$obs_y), na.rm = T) * c(-1, 1)
  x_int <- year1_last_mean
  #make palette for last five year mean based on bins and col_ramp values
  breaks <- seq(min(col_ramp), max(col_ramp), length.out = 9)
  df_pred$color_ids <- cut(df_pred$last_mean,
                           breaks=breaks,
                           include.lowest=TRUE,
                           right=FALSE)
  # Diverging red-to-blue palette (one per bin) plus grey for "historic".
  col_pal <- c("#a50f15", "#de2d26", "#fb6a4a", "#fc9272", "#9ecae1", "#6baed6",
               "#3182bd", "#08519c", "grey60")
  names(col_pal) <- c(levels(df_pred$color_ids), "historic")
  # replace color ID label so that low probabilities are historic (i.e. grey)
  df_pred$color_ids2 <- ifelse(df_pred$prob < 0.90,
                               "historic",
                               as.character(df_pred$color_ids))
  dum <- df_pred %>%
    group_by(stock) %>%
    #calculate SD of ts for horizontal line
    mutate(ts_mean = mean(mean),
           ts_mean_sd = sd(mean)) %>%
    ungroup()
  # Per-panel sample sizes (count of non-missing observations).
  labs <- df_pred %>%
    filter(!is.na(obs_y)) %>%
    group_by(ID) %>%
    tally()
  # Recent window: coloured ribbon/line; historic window: grey.
  p <- ggplot(dum %>% filter(Time >= x_int),
              aes_string(x = "Time", y = "mean")) +
    geom_ribbon(aes_string(ymin = "lo", ymax = "hi", colour = "color_ids2",
                           fill = "color_ids2"), alpha = 0.6) +
    geom_line(aes_string(colour = "color_ids2"),
              size = 1.25) +
    geom_ribbon(data = dum %>% filter(Time <= x_int),
                aes_string(ymin = "lo", ymax = "hi"),
                fill = "grey60", colour = "grey60", alpha = 0.6) +
    geom_line(data = dum %>% filter(Time <= x_int),
              size = 1) +
    # Dashed line at each series' long-term mean.
    geom_hline(aes(yintercept = ts_mean), lty = 2) +
    # geom_hline(aes(yintercept = ts_mean + ts_mean_sd), lty = 3) +
    # geom_hline(aes(yintercept = ts_mean - ts_mean_sd), lty = 3) +
    geom_vline(xintercept = x_int, lty = 1, alpha = 0.6) +
    scale_fill_manual(values = col_pal) +
    scale_colour_manual(values = col_pal) +
    scale_x_continuous(limits = c(1972, 2018), expand = c(0, 0)) +
    geom_point(data = dum %>% filter(!is.na(obs_y)),
               aes_string(x = "Time", y = "obs_y"),
               size = 1, alpha = 0.6, shape = 21, fill = "black") +
    facet_wrap(~ID, nrow = facet_row, ncol = facet_col, drop = drop) +
    ggsidekick::theme_sleek() +
    coord_cartesian(y = y_lims) +
    theme(axis.title.x = element_blank(),
          axis.title.y.left = element_blank(),
          legend.position = "none",
          axis.text.y.right = element_blank(),
          axis.ticks.y.right = element_blank()) +
    # Sample size annotated in the lower-left corner of each panel.
    geom_text(
      data = labs, aes(x = -Inf, y = -Inf, label = n),
      hjust = -0.2, vjust = -0.4
    )
  # Optionally suppress the x axis (e.g. for stacked multi-panel figures).
  if (print_x == FALSE) {
    p <- p +
      theme(axis.text.x = element_blank(),
            axis.ticks.x = element_blank())
  }
  return(p)
}
## function to plot fits in real space (based on bayesdfa::plot_fitted)
# df_pred <- real_surv_pred_list[[1]]
## Plot DFA fits back-transformed to real (survival) space (based on
## bayesdfa::plot_fitted). Each series is classified relative to its
## long-term mean survival (very low / low / high / very high) using a
## ~95% logit-scale interval; uncertain series (prob < 0.9) plot as grey
## "historic". Facets are ordered by that classification.
plot_fitted_pred_real <- function(df_pred, #ylab = NULL,
                                  y_lims = NULL,
                                  print_x = TRUE,
                                  facet_row = NULL, facet_col = NULL,
                                  year1_last_mean = 2011
) {
  x_int <- year1_last_mean
  # Per-series long-term mean and logit-scale interval around it,
  # back-transformed with plogis().
  y_int <- df_pred %>%
    group_by(ID) %>%
    summarize(ts_mean_logit = mean(uncent_mean_logit),
              sd_mean_logit = sd(uncent_mean_logit),
              ts_mean_sd_lo = plogis(ts_mean_logit +
                                       (qnorm(0.025) * sd_mean_logit)),
              ts_mean_sd_hi = plogis(ts_mean_logit +
                                       (qnorm(0.975) * sd_mean_logit)),
              ts_uncent_mean = mean(uncent_mean),
              .groups = "drop") %>%
    distinct()
  # specify that color greyed out if relatively uncertain (in logit space)
  df_pred2 <- df_pred %>%
    left_join(., y_int, by = c("ID")) %>%
    mutate(
      color_id = case_when(
        prob < 0.9 ~ "historic",
        last_mean < ts_mean_sd_lo ~ "very low",
        ts_mean_sd_lo < last_mean & last_mean < ts_uncent_mean ~ "low",
        ts_mean_sd_hi > last_mean & last_mean > ts_uncent_mean ~ "high",
        last_mean > ts_mean_sd_hi ~ "very high"
      ),
      color_id = fct_reorder(as.factor(color_id),
                             last_mean - ts_uncent_mean),
      # necessary to order correctly
      ID_key = fct_reorder(as.factor(ID), as.numeric(color_id))
    ) %>%
    droplevels()
  y_int2 <- y_int %>%
    left_join(., df_pred2 %>% select(ID, ID_key) %>% distinct(), by = "ID")
  #make palette for last five year mean based on bins and col_ramp values
  col_pal <- c("#a50f15", "#fc9272", "#9ecae1", "#08519c", "grey60")
  names(col_pal) <- c("very low", "low", "high", "very high", "historic")
  # Per-panel sample sizes (count of non-missing observations).
  labs <- df_pred2 %>%
    filter(!is.na(obs_y)) %>%
    group_by(ID_key) %>%
    tally()
  # Recent window coloured by classification; historic window grey.
  p <- ggplot(df_pred2 %>% filter(Time >= x_int),
              aes_string(x = "Time", y = "uncent_mean")) +
    geom_ribbon(aes_string(ymin = "uncent_lo", ymax = "uncent_hi",
                           colour = "color_id",
                           fill = "color_id"), alpha = 0.6) +
    geom_line(aes_string(colour = "color_id"),
              size = 1.25) +
    geom_ribbon(data = df_pred2 %>% filter(Time <= x_int),
                aes_string(ymin = "uncent_lo", ymax = "uncent_hi"),
                fill = "grey60", colour = "grey60", alpha = 0.6) +
    geom_line(data = df_pred2 %>% filter(Time <= x_int),
              size = 1) +
    geom_hline(data = y_int2, aes(yintercept = ts_uncent_mean), lty = 2) +
    # geom_hline(data = y_int2, aes(yintercept = ts_mean_sd_hi), lty = 3) +
    # geom_hline(data = y_int2, aes(yintercept = ts_mean_sd_lo), lty = 3) +
    geom_vline(xintercept = x_int, lty = 1, alpha = 0.6) +
    scale_fill_manual(values = col_pal) +
    scale_colour_manual(values = col_pal) +
    geom_point(data = df_pred2 %>% filter(!is.na(obs_y)),
               aes_string(x = "Time", y = "survival"),
               size = 1, alpha = 0.6, shape = 21, fill = "black") +
    facet_wrap(~ID_key,
               nrow = facet_row, ncol = facet_col) +
    ggsidekick::theme_sleek() +
    # NOTE(review): y axis hard-coded to [0, 0.2]; the y_lims argument is
    # accepted but unused -- confirm intended.
    coord_cartesian(y = c(0, 0.2), expand = 0) +
    scale_x_continuous(limits = c(1972, 2018), expand = c(0, 0)) +
    theme(axis.title.x = element_blank(),
          axis.title.y.left = element_blank(),
          legend.position = "none",
          axis.text.y.right = element_blank(),
          axis.ticks.y.right = element_blank()) +
    # Sample size annotated in the upper-left corner of each panel.
    geom_text(
      data = labs, aes(x = -Inf, y = Inf, label = n),
      hjust = -0.2, vjust = 1.1
    )
  # Optionally suppress the x axis (e.g. for stacked multi-panel figures).
  if (print_x == FALSE) {
    p <- p +
      theme(axis.text.x = element_blank(),
            axis.ticks.x = element_blank())
  }
  return(p)
}
# as above but for uncentered data
## Plot DFA fitted values for uncentered data (based on
## bayesdfa::plot_fitted). Years >= year1_last_mean are coloured by the
## binned recent mean (`last_mean`); earlier years are grey. Series whose
## recent mean is not clearly different from the long-term mean
## (prob < 0.9) are greyed out as "historic". Returns a ggplot object.
plot_fitted_pred_uncent <- function(df_pred, #ylab = NULL,
                                    print_x = TRUE,
                                    col_ramp = c(-1, 1),
                                    col_ramp_direction = -1,
                                    facet_row = NULL, facet_col = NULL,
                                    leg_name = NULL,
                                    year1_last_mean = 2011,
                                    drop = TRUE) {
  #limits for y axis (range of absolute observed values)
  y_lims <- c(min(abs(df_pred$obs_y), na.rm = T),
              max(abs(df_pred$obs_y), na.rm = T))
  x_int <- year1_last_mean
  #make palette for last five year mean based on bins and col_ramp values
  breaks <- seq(min(col_ramp), max(col_ramp), length.out = 9)
  df_pred$color_ids <- cut(df_pred$last_mean,
                           breaks=breaks,
                           include.lowest=TRUE,
                           right=FALSE)
  # Diverging red-to-blue palette (one colour per bin) plus grey for
  # the "historic" (uncertain) class.
  col_pal <- c("#a50f15", "#de2d26", "#fb6a4a", "#fc9272", "#9ecae1", "#6baed6",
               "#3182bd", "#08519c", "grey60")
  names(col_pal) <- c(levels(df_pred$color_ids), "historic")
  # replace color ID label so that low probabilities are historic (i.e. grey)
  df_pred$color_ids2 <- ifelse(df_pred$prob < 0.90,
                               "historic",
                               as.character(df_pred$color_ids))
  dum <- df_pred %>%
    group_by(stock) %>%
    #calculate SD of ts for horizontal line
    mutate(ts_mean_sd = sd(mean)) %>%
    ungroup()
  # Per-panel sample sizes (count of non-missing observations).
  labs <- dum %>%
    filter(!is.na(obs_y)) %>%
    group_by(ID) %>%
    tally()
  # Recent window: coloured ribbon/line; historic window: grey.
  p <- ggplot(dum %>% filter(Time >= x_int),
              aes_string(x = "Time", y = "mean")) +
    geom_ribbon(aes_string(ymin = "lo", ymax = "hi", colour = "color_ids2",
                           fill = "color_ids2"), alpha = 0.6) +
    geom_line(aes_string(colour = "color_ids2"),
              size = 1.25) +
    geom_ribbon(data = dum %>% filter(Time <= x_int),
                aes_string(ymin = "lo", ymax = "hi"),
                fill = "grey60", colour = "grey60", alpha = 0.6) +
    geom_line(data = dum %>% filter(Time <= x_int),
              size = 1) +
    # NOTE(review): obs_mean_age is assumed to be a column of df_pred
    # (it is not computed here) -- confirm upstream.
    geom_hline(aes(yintercept = obs_mean_age), lty = 2) +
    # geom_hline(aes(yintercept = obs_mean_age + ts_mean_sd), lty = 3) +
    # geom_hline(aes(yintercept = obs_mean_age - ts_mean_sd), lty = 3) +
    geom_vline(xintercept = x_int, lty = 1, alpha = 0.6) +
    scale_fill_manual(values = col_pal) +
    scale_colour_manual(values = col_pal) +
    scale_x_continuous(limits = c(1972, 2018), expand = c(0, 0)) +
    geom_point(data = dum %>% filter(!is.na(obs_y)),
               aes_string(x = "Time", y = "obs_y"),
               size = 1, alpha = 0.6, shape = 21, fill = "black") +
    facet_wrap(~ID, nrow = facet_row, ncol = facet_col, drop = drop) +
    ggsidekick::theme_sleek() +
    coord_cartesian(y = y_lims) +
    theme(axis.title.x = element_blank(),
          axis.title.y.left = element_blank(),
          legend.position = "none",
          axis.text.y.right = element_blank(),
          axis.ticks.y.right = element_blank()) +
    # Sample size annotated in the lower-left corner of each panel.
    geom_text(
      data = labs, aes(x = -Inf, y = -Inf, label = n),
      hjust = -0.2, vjust = -0.4
    )
  # Optionally suppress the x axis (e.g. for stacked multi-panel figures).
  if (print_x == FALSE) {
    p <- p +
      theme(axis.text.x = element_blank(),
            axis.ticks.x = element_blank())
  }
  return(p)
}
## function to calculate probability that estimates below average in last
# n_years
# modelfit = surv_dfa[[2]]; names = surv_tbl$names[[2]]; years = surv_tbl$years[[2]]
## Posterior probability that each stock's recent mean prediction
## (years year1_last_mean..year2_last_mean) lies below/above its
## long-term mean. Returns one row per stock with the recent mean
## (`last_mean`) and `prob_below_0` / `prob_above_0`.
## NOTE(review): the default `years = years` is self-referential and
## will error if `years` is not supplied by the caller -- confirm.
final_prob <- function(modelfit, names,
                       years = years, year1_last_mean = 2010,
                       year2_last_mean = NULL
) {
  # Melt posterior predictions to long form (iter x chain x time x stock)
  # and attach calendar years to the integer time index.
  tt <- reshape2::melt(predicted(modelfit),
                       varnames = c("iter", "chain", "time", "stock")) %>%
    left_join(.,
              data.frame(year = years,
                         time = unique(.$time)),
              by = "time")
  tt$stock <- as.factor(names$stock[tt$stock])
  # yr_range <- seq(max(tt$year) - (n_years - 1), max(tt$year), by = 1)
  # Default the end of the "recent" window to the last available year.
  if (is.null(year2_last_mean)) {
    year2_last_mean <- max(tt$year)
  }
  yr_range <- seq(year1_last_mean, year2_last_mean, by = 1)
  tt %>%
    group_by(stock) %>%
    filter(!year > year2_last_mean) %>%
    # Long-term mean over all retained years and posterior draws.
    mutate(overall_mean = mean(value)) %>%
    filter(year %in% yr_range) %>%
    # Mean of the recent window within each posterior iteration.
    group_by(stock, iter) %>%
    mutate(mean_value = mean(value)) %>%
    group_by(stock) %>%
    summarize(
      last_mean = mean(mean_value),
      prob_below_0 = sum(mean_value < overall_mean) / length(mean_value),
      prob_above_0 = sum(mean_value > overall_mean) / length(mean_value)
    )
}
## function to prepare rotated model fit for plotting trends (based on
# bayesdfa::plot_trends)
## Reshape a rotated DFA model fit into a long data frame of trend
## estimates (posterior mean plus interval bounds) for plotting.
## Mirrors the data preparation inside bayesdfa::plot_trends.
prep_trends <- function(rotated_modelfit, years, group) {
  fit <- rotated_modelfit
  k_trends <- dim(fit$Z_rot)[3]          # number of estimated trends
  t_steps <- dim(fit$trends_mean)[2]     # time steps per trend
  # Matrices are trend x time; transpose + c() unrolls them trend-by-trend.
  out <- data.frame(
    x = c(t(fit$trends_mean)),
    lo = c(t(fit$trends_lower)),
    hi = c(t(fit$trends_upper)),
    trend = paste0("Trend ", rep(seq_len(k_trends), each = t_steps)),
    time = rep(years, times = k_trends),
    group = group
  )
  out
}
## function to plot trends (based on bayesdfa::plot_trends)
## Plot one set of estimated DFA trends (based on bayesdfa::plot_trends):
## ribbon (lo..hi) + line of the trend estimate over time, coloured by
## trend_dat$life_history, one facet per group. Returns a ggplot object.
plot_one_trend <- function(trend_dat, facet_var = FALSE) {
  p <- ggplot(trend_dat,
              aes_string(x = "time", y = "x")) +
    geom_ribbon(aes_string(ymin = "lo", ymax = "hi", colour = "life_history",
                           fill = "life_history"),
                alpha = 0.4) +
    geom_line(aes_string(colour = "life_history"), size = 1.2) +
    # scale_colour_brewer(type = "qual", name = "") +
    # scale_fill_brewer(type = "qual", name = "") +
    # Reference line at zero.
    geom_hline(yintercept = 0, lty = 2) +
    # xlab("Brood Year") +
    ylab("Estimated Trend") +
    scale_x_continuous(limits = c(1972, 2018), expand = c(0, 0)) +
    facet_wrap(~group, nrow = 1) +
    ggsidekick::theme_sleek() +
    theme(
      legend.position = "none",
      strip.background = element_blank(),
      strip.text.x = element_blank(),
      axis.title.x = element_blank())
  # Optionally facet by variable in addition to group.
  if (facet_var == TRUE) {
    p <- p +
      facet_grid(group~var)
  }
  return(p)
}
## function to prep regime model fit for plotting (based on
# bayesdfa::plot_regime_model)
## Prepare a hidden-Markov regime model fit for plotting (based on
## bayesdfa::plot_regime_model).
##
## Args:
##   regime_model: fitted regime model containing a Stan fit in $model
##   probs:        lower/upper quantiles for the interval (default 5/95%)
##   regime_prob_threshold: retained for backward compatibility; the
##                 "confident regime" computation that used it produced
##                 values never returned, so it is currently unused.
##   flip_regimes: flip state probabilities (p -> 1 - p) so regimes plot
##                 consistently across models
##   years:        calendar years to attach to the time index
##   group:        label added as a `group` column
##
## Returns: long data frame with one row per Time x State holding the
## posterior median and interval bounds (lwr/upr) of the regime
## probabilities, plus `time` (years) and `group`.
##
## Cleanup vs. original: removed dead code that was computed but never
## used or returned (mu_k extraction/quantiles, confident_regimes /
## regime_indexes, df_y / range01). Output is unchanged.
prep_regime <- function(regime_model, probs = c(0.05, 0.95),
                        regime_prob_threshold = 0.9, flip_regimes = FALSE,
                        years, group) {
  # Posterior draws of regime probabilities: iterations x time x state.
  gamma_tk <- rstan::extract(regime_model$model, pars = "gamma_tk")[[1]]
  # Pointwise quantiles across posterior draws.
  l <- apply(gamma_tk, 2:3, quantile, probs = probs[[1]])
  u <- apply(gamma_tk, 2:3, quantile, probs = probs[[2]])
  med <- apply(gamma_tk, 2:3, quantile, probs = 0.5)
  # should regimes be flipped for plotting
  if (flip_regimes) {
    u <- 1 - u
    l <- 1 - l
    med <- 1 - med
  }
  plot_prob_indices <- seq_len(ncol(med))
  df_l <- reshape2::melt(l, varnames = c("Time", "State"),
                         value.name = "lwr")
  df_u <- reshape2::melt(u, varnames = c("Time", "State"),
                         value.name = "upr")
  df_m <- reshape2::melt(med, varnames = c("Time", "State"),
                         value.name = "median")
  dplyr::inner_join(df_l, df_u, by = c("Time", "State")) %>%
    dplyr::inner_join(df_m, by = c("Time", "State")) %>%
    dplyr::filter(.data$State %in% plot_prob_indices) %>%
    dplyr::mutate(State = paste("State", .data$State),
                  time = rep(years, length(unique(State))),
                  group = group)
}
## function to plot regimes (based on bayesdfa::plot_trends/plot_regime_model)
## Plot regime (state) probabilities over time (based on
## bayesdfa::plot_trends/plot_regime_model): dashed ribbon + line of the
## posterior median and interval, coloured by life_history, one facet
## per group. Returns a ggplot object.
plot_one_regime <- function(regime_dat, facet_var = FALSE, y_lab = NULL) {
  p <- ggplot(regime_dat,
              aes_string(x = "time", y = "median")) +
    geom_ribbon(aes_string(ymin = "lwr", ymax = "upr", colour = "life_history",
                           fill = "life_history"),
                alpha = 0.4, lty = 6) +
    geom_line(aes_string(colour = "life_history"), size = 1.2, lty = 6) +
    # scale_colour_brewer(type = "qual", name = "") +
    # scale_fill_brewer(type = "qual", name = "") +
    # xlab("Brood Year") +
    ylab(y_lab) +
    scale_x_continuous(limits = c(1972, 2018), expand = c(0, 0)) +
    facet_wrap(~group, nrow = 1) +
    ggsidekick::theme_sleek() +
    theme(
      legend.position = "none",
      strip.background = element_blank(),
      strip.text.x = element_blank(),
      axis.title.x = element_blank()
    )
  # Optionally add a variable dimension to the facetting.
  if (facet_var == TRUE) {
    p <- p +
      facet_grid(group ~ var)
  }
  return(p)
}
## function to prepare rotated model fit for plotting loadings (based on
# bayesdfa::plot_loadings)
## Prepare rotated DFA loadings for plotting (based on
## bayesdfa::plot_loadings): melts posterior draws of Z_rot and computes,
## per series x trend, the posterior probability that the loading
## differs from zero (prob_diff0).
## NOTE(review): conf_level is accepted but never used -- confirm intent.
prep_loadings <- function (rotated_modelfit, names, group, conf_level = 0.95) {
  # Posterior draws of rotated loadings: iter x series x trend.
  v <- reshape2::melt(rotated_modelfit$Z_rot,
                      varnames = c("iter", "name", "trend"))
  v$name <- as.factor(names$stock[v$name])
  v %>%
    mutate(trend = as.factor(paste0("Trend ", trend))) %>%
    group_by(name, trend) %>%
    # q_lower/q_upper: posterior mass below/above zero within each
    # series x trend group; prob_diff0 is the larger of the two.
    mutate(q_lower = sum(value < 0) / length(value),
           q_upper = 1 - q_lower,
           prob_diff0 = max(q_lower, q_upper),
           group = group)
}
##function to plot loadings
## Violin plot of posterior loading distributions, one violin per time
## series (flipped to horizontal), filled by trend and alpha-scaled by
## the probability the loading differs from zero. `group` is annotated
## in the panel corner. Returns a ggplot object.
plot_load <- function(x, group = NULL, guides = FALSE, y_lims = c(-0.5, 0.5)) {
  p <- ggplot(x, aes_string(x = "name", y = "value", fill = "trend",
                            alpha = "prob_diff0")) +
    scale_alpha_continuous(name = "Probability\nDifferent") +
    scale_fill_brewer(name = "", palette = "Paired") +
    geom_violin(color = NA, position = position_dodge(0.3)) +
    # Reference line at zero loading.
    geom_hline(yintercept = 0, lty = 2) +
    coord_flip() +
    xlab("Time Series") +
    ylab("Loading") +
    scale_y_continuous(limits = y_lims, expand = c(0, 0)) +
    ggsidekick::theme_sleek() +
    guides(alpha = guide_legend(override.aes = list(fill = "grey"))) +
    theme(#axis.text.y = element_text(angle = 45, vjust = -1, size = 7),
          axis.title = element_blank()) +
    # Group label annotated inside the panel.
    annotate("text", x = Inf, y = -Inf, label = group, hjust = -0.05,
             vjust = 1.1, size = 3.5)
  # Optionally drop the legend.
  if (guides == FALSE) {
    p <- p +
      theme(legend.position = "none")
  }
  return(p)
}
|
logisticModel <- function(n0, rd, K, timesteps) {
  # Iterate the discrete logistic map
  #   N[t+1] = N[t] * (1 + rd * (1 - N[t] / K))
  # for `timesteps` steps starting from abundance `n0`.
  #
  # Args:
  #   n0:        initial population size
  #   rd:        discrete growth rate
  #   K:         carrying capacity
  #   timesteps: number of iterations (non-negative integer)
  #
  # Returns: numeric vector of length timesteps + 1, including the
  # initial state in position 1.
  stopifnot(is.numeric(n0), is.numeric(rd), is.numeric(K),
            length(timesteps) == 1, timesteps >= 0)
  N <- numeric(timesteps + 1) # preallocate vector N (faster)
  N[1] <- n0                  # initialize first time point
  # seq_len() correctly gives an empty loop when timesteps == 0;
  # the original 1:timesteps would iterate over c(1, 0) and error.
  for (t in seq_len(timesteps)) {
    N[t + 1] <- N[t] * (1 + rd * (1 - N[t] / K))
  }
  N
}
|
/Lecture03/Lecture03_Ex02.R
|
no_license
|
luisfreitas07/introduction_to_R_ecology
|
R
| false
| false
| 344
|
r
|
logisticModel <- function(n0, rd, K, timesteps) {
  # Iterate the discrete logistic map
  #   N[t+1] = N[t] * (1 + rd * (1 - N[t] / K))
  # for `timesteps` steps starting from abundance `n0`.
  #
  # Args:
  #   n0:        initial population size
  #   rd:        discrete growth rate
  #   K:         carrying capacity
  #   timesteps: number of iterations (non-negative integer)
  #
  # Returns: numeric vector of length timesteps + 1, including the
  # initial state in position 1.
  stopifnot(is.numeric(n0), is.numeric(rd), is.numeric(K),
            length(timesteps) == 1, timesteps >= 0)
  N <- numeric(timesteps + 1) # preallocate vector N (faster)
  N[1] <- n0                  # initialize first time point
  # seq_len() correctly gives an empty loop when timesteps == 0;
  # the original 1:timesteps would iterate over c(1, 0) and error.
  for (t in seq_len(timesteps)) {
    N[t + 1] <- N[t] * (1 + rd * (1 - N[t] / K))
  }
  N
}
|
# Exploratory analysis of support for the "Cain theory" outcome.
# NOTE(review): relies on a `data` object plus ggplot2/dplyr already
# attached in the session -- neither is defined or loaded here.
# Smoothed relationships between Cain and scripture scores, age, and
# interactions.
ggplot(data, aes(y=Cain, x=BibScore))+geom_smooth()+geom_point()
ggplot(data, aes(y=Cain, x=BOMScore))+geom_smooth()+geom_point()
ggplot(data, aes(y=Cain, x=BibScore*BOMScore))+geom_smooth()+geom_point()
ggplot(data, aes(y=Cain, x=BibScore*Age))+geom_smooth()+geom_point()
ggplot(data, aes(y=Cain, x=Age*BOMScore))+geom_smooth()+geom_point()
ggplot(data, aes(y=Cain, x=Age))+geom_smooth()+geom_point()
ggplot(data, aes(y=Cain, x=Orthodoxy))+geom_smooth()+geom_point()
ggplot(data, aes(y=Cain, x=Priesthood))+geom_smooth()+geom_point()
# Cross-tabulations of Cain against categorical covariates.
table(data$Cain, data$College)
table(data$Cain, data$Gender)
table(as.logical(data$Cain), data$Priesthood)
table(data$Cain,data$Progressive)
table(data$Cain, data$Concerns)
table(data$Cain, data$Darwin)
# Mean support by group.
plot(aggregate(Cain~College, data=data, FUN=mean))
plot(aggregate(Cain~Orthodoxy, data=data, FUN=mean))
plot(aggregate(Cain~BOMScore, data=data, FUN=mean))
plot(aggregate(Cain~BibScore, data=data, FUN=mean))
table(data$BibScore)
plot(aggregate(Cain~as.numeric(Gender=="Male"), data=data, FUN=mean))
plot(aggregate(Cain~Darwin, data=data, FUN=mean))
plot(aggregate(Cain~Convert, data=data, FUN=mean))
aggregate(Cain~Concerns, data=data, FUN=mean)
aggregate(Cain~Progressive, data=data, FUN=mean)
table(data$Orthodoxy, data$BOMScore)
table(data$College)/sum(table(data$College))
require(reshape2)
# Proportion supporting the theory by knowledge score, overlaid for
# Book of Mormon vs. Bible knowledge.
bom=data%>%group_by(BOMScore)%>%summarise(Prop=mean(Cain))
bible=data%>%group_by(BibScore)%>%summarise(Prop=mean(Cain))
ggplot()+geom_line(data=bom, size=2, aes(x=BOMScore, y=Prop, color="BoM"))+
  geom_line(data=bible,size=2, aes(x=BibScore, y=Prop, color="Bible"))+ylab("% Supporting Cain Theory")+
  xlab("Knowledge Score")+ggtitle("Scriptural knowledge vs. Cain Theory") +
  theme(legend.title=element_blank(),text = element_text(size=15))
names(data)
table(data$BOMS)
|
/MormonsData/EDA.R
|
no_license
|
jntrcs/GLMClass
|
R
| false
| false
| 1,820
|
r
|
# Exploratory analysis of support for the "Cain theory" outcome.
# NOTE(review): relies on a `data` object plus ggplot2/dplyr already
# attached in the session -- neither is defined or loaded here.
# Smoothed relationships between Cain and scripture scores, age, and
# interactions.
ggplot(data, aes(y=Cain, x=BibScore))+geom_smooth()+geom_point()
ggplot(data, aes(y=Cain, x=BOMScore))+geom_smooth()+geom_point()
ggplot(data, aes(y=Cain, x=BibScore*BOMScore))+geom_smooth()+geom_point()
ggplot(data, aes(y=Cain, x=BibScore*Age))+geom_smooth()+geom_point()
ggplot(data, aes(y=Cain, x=Age*BOMScore))+geom_smooth()+geom_point()
ggplot(data, aes(y=Cain, x=Age))+geom_smooth()+geom_point()
ggplot(data, aes(y=Cain, x=Orthodoxy))+geom_smooth()+geom_point()
ggplot(data, aes(y=Cain, x=Priesthood))+geom_smooth()+geom_point()
# Cross-tabulations of Cain against categorical covariates.
table(data$Cain, data$College)
table(data$Cain, data$Gender)
table(as.logical(data$Cain), data$Priesthood)
table(data$Cain,data$Progressive)
table(data$Cain, data$Concerns)
table(data$Cain, data$Darwin)
# Mean support by group.
plot(aggregate(Cain~College, data=data, FUN=mean))
plot(aggregate(Cain~Orthodoxy, data=data, FUN=mean))
plot(aggregate(Cain~BOMScore, data=data, FUN=mean))
plot(aggregate(Cain~BibScore, data=data, FUN=mean))
table(data$BibScore)
plot(aggregate(Cain~as.numeric(Gender=="Male"), data=data, FUN=mean))
plot(aggregate(Cain~Darwin, data=data, FUN=mean))
plot(aggregate(Cain~Convert, data=data, FUN=mean))
aggregate(Cain~Concerns, data=data, FUN=mean)
aggregate(Cain~Progressive, data=data, FUN=mean)
table(data$Orthodoxy, data$BOMScore)
table(data$College)/sum(table(data$College))
require(reshape2)
# Proportion supporting the theory by knowledge score, overlaid for
# Book of Mormon vs. Bible knowledge.
bom=data%>%group_by(BOMScore)%>%summarise(Prop=mean(Cain))
bible=data%>%group_by(BibScore)%>%summarise(Prop=mean(Cain))
ggplot()+geom_line(data=bom, size=2, aes(x=BOMScore, y=Prop, color="BoM"))+
  geom_line(data=bible,size=2, aes(x=BibScore, y=Prop, color="Bible"))+ylab("% Supporting Cain Theory")+
  xlab("Knowledge Score")+ggtitle("Scriptural knowledge vs. Cain Theory") +
  theme(legend.title=element_blank(),text = element_text(size=15))
names(data)
table(data$BOMS)
|
#
# Copyright 2007-2018 The OpenMx Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require(OpenMx)
options(mxCondenseMatrixSlots=TRUE)
require(mvtnorm)
#Generate data:
# Two random 100x100 symmetric covariance-like matrices (A1, A2) with
# near-unit diagonals and small off-diagonals, then a phenotype y whose
# covariance combines 0.25*A1, 0.25*A2, and 0.5*I residual noise, plus
# an independent covariate x.
set.seed(476)
A1 <- matrix(0,100,100)
A1[lower.tri(A1)] <- runif(4950, -0.025, 0.025)
A1 <- A1 + t(A1)
diag(A1) <- runif(100,0.95,1.05)
A2 <- matrix(0,100,100)
A2[lower.tri(A2)] <- runif(4950, -0.025, 0.025)
A2 <- A2 + t(A2)
diag(A2) <- runif(100,0.95,1.05)
y <- t(rmvnorm(1,sigma=A1*0.25)+rmvnorm(1,sigma=A2*0.25))
y <- y + rnorm(100,sd=sqrt(0.5))
#y[100] <- NA
x <- rnorm(100)
dat <- cbind(y,x)
colnames(dat) <- c("y","x")
#Baseline model:
# GREML model estimating variance components va1, va2 (attached to A1,
# A2) and residual variance ve, with y regressed on x plus an intercept.
testmod <- mxModel(
  "GREMLtest",
  mxMatrix(type = "Full", nrow = 1, ncol=1, free=T, values =0.5, labels = "ve", lbound = 0.0001,
           name = "Ve"),
  mxMatrix(type = "Full", nrow = 1, ncol=1, free=T, values = 0.25, labels = "va1", name = "Va1"),
  mxMatrix(type = "Full", nrow = 1, ncol=1, free=T, values = 0.25, labels = "va2", name = "Va2"),
  mxData(observed = dat, type="raw", sort=FALSE),
  mxExpectationGREML(V="V",yvars="y", Xvars="x", addOnes=T),
  mxMatrix("Iden",nrow=100,name="I"),
  mxMatrix("Symm",nrow=100,free=F,values=A1,name="A1"),
  mxMatrix("Symm",nrow=100,free=F,values=A2,name="A2"),
  mxAlgebra((A1%x%Va1) + (A2%x%Va2) + (I%x%Ve), name="V"),
  mxFitFunctionGREML()
)
testrun <- mxRun(testmod)
#Pointless augmentation that adds a constant to the fitfunction:
# Identical to the baseline except for a fixed augmentation of 0.64,
# which should shift the -2lnL by exactly 2 * 0.64 = 1.28 (checked below).
testmod2 <- mxModel(
  "GREMLtest",
  mxMatrix(type = "Full", nrow = 1, ncol=1, free=T, values =0.5, labels = "ve", lbound = 0.0001,
           name = "Ve"),
  mxMatrix(type = "Full", nrow = 1, ncol=1, free=T, values = 0.25, labels = "va1", name = "Va1"),
  mxMatrix(type = "Full", nrow = 1, ncol=1, free=T, values = 0.25, labels = "va2", name = "Va2"),
  mxData(observed = dat, type="raw", sort=FALSE),
  mxExpectationGREML(V="V",yvars="y", Xvars="x", addOnes=T),
  mxMatrix("Iden",nrow=100,name="I"),
  mxMatrix("Symm",nrow=100,free=F,values=A1,name="A1"),
  mxMatrix("Symm",nrow=100,free=F,values=A2,name="A2"),
  mxAlgebra((A1%x%Va1) + (A2%x%Va2) + (I%x%Ve), name="V"),
  mxMatrix(type="Full",nrow=1,ncol=1,free=F,values=0.64,name="aug"),
  mxFitFunctionGREML(aug="aug")
)
testrun2 <- mxRun(testmod2)
# The augmentation contributes a flat 1.28 to the fit value.
omxCheckCloseEnough(a=testrun2$output$fit - testrun$output$fit, b=1.28, epsilon=1e-9)
#Baseline model using N-R:
# Same model fitted with Newton-Raphson, which requires analytic
# derivatives of V with respect to each free parameter (dV).
testmod3 <- mxModel(
  "GREMLtest",
  mxMatrix(type = "Full", nrow = 1, ncol=1, free=T, values =0.5, labels = "ve", lbound = 0.0001,
           name = "Ve"),
  mxMatrix(type = "Full", nrow = 1, ncol=1, free=T, values = 0.25, labels = "va1", name = "Va1"),
  mxMatrix(type = "Full", nrow = 1, ncol=1, free=T, values = 0.25, labels = "va2", name = "Va2"),
  mxData(observed = dat, type="raw", sort=FALSE),
  mxExpectationGREML(V="V",yvars="y", Xvars="x", addOnes=T),
  mxComputeSequence(steps=list(
    mxComputeNewtonRaphson(fitfunction="fitfunction"),
    mxComputeOnce('fitfunction', c('fit','gradient','hessian','ihessian')),
    mxComputeStandardError(),
    mxComputeReportDeriv(),
    mxComputeReportExpectation()
  )),
  mxMatrix("Iden",nrow=100,name="I"),
  mxMatrix("Symm",nrow=100,free=F,values=A1,name="A1"),
  mxMatrix("Symm",nrow=100,free=F,values=A2,name="A2"),
  mxAlgebra((A1%x%Va1) + (A2%x%Va2) + (I%x%Ve), name="V"),
  mxFitFunctionGREML(dV=c(va1="A1",va2="A2",ve="I"))
)
testrun3 <- mxRun(testmod3)
#Add augmentation that should nudge free parameters toward summing to 1.0:
testmod4 <- mxModel(
"GREMLtest",
mxMatrix(type = "Full", nrow = 1, ncol=1, free=T, values =0.5, labels = "ve", lbound = 0.0001,
name = "Ve"),
mxMatrix(type = "Full", nrow = 1, ncol=1, free=T, values = 0.25, labels = "va1", name = "Va1"),
mxMatrix(type = "Full", nrow = 1, ncol=1, free=T, values = 0.25, labels = "va2", name = "Va2"),
mxData(observed = dat, type="raw", sort=FALSE),
mxExpectationGREML(V="V",yvars="y", Xvars="x", addOnes=T),
mxComputeSequence(steps=list(
mxComputeNewtonRaphson(fitfunction="fitfunction"),
mxComputeOnce('fitfunction', c('fit','gradient','hessian','ihessian')),
mxComputeStandardError(),
mxComputeReportDeriv(),
mxComputeReportExpectation()
)),
mxMatrix("Iden",nrow=100,name="I"),
mxMatrix("Symm",nrow=100,free=F,values=A1,name="A1"),
mxMatrix("Symm",nrow=100,free=F,values=A2,name="A2"),
mxAlgebra((A1%x%Va1) + (A2%x%Va2) + (I%x%Ve), name="V"),
mxAlgebra( 3%x%(Va1+Va2+Ve-1)^2, name="aug"),
mxAlgebra( 3%x%rbind(
2*Va1 + 2*Va2 + 2*Ve - 2,
2*Va1 + 2*Va2 + 2*Ve - 2,
2*Va1 + 2*Va2 + 2*Ve - 2), name="daug1"),
mxMatrix(type="Full",nrow=3,ncol=3,free=F,values=6,name="daug2"),
mxFitFunctionGREML(dV=c(va1="A1",va2="A2",ve="I"),aug="aug",augGrad="daug1",augHess="daug2")
)
testrun4 <- mxRun(testmod4)
#The difference between 1.0 and the sum of the parameters should be smaller for model #4:
omxCheckTrue(abs(1-sum(testrun4$output$estimate)) < abs(1-sum(testrun3$output$estimate)))
|
/SilveR/R-3.5.1/library/OpenMx/models/passing/AugmentedGREMLfitfunction.R
|
permissive
|
kevinmiles/SilveR
|
R
| false
| false
| 5,347
|
r
|
#
#   Copyright 2007-2018 The OpenMx Project
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.

# Regression test for the `aug` (augmentation) feature of
# mxFitFunctionGREML(): a constant augmentation shifts the fit value but not
# the estimates, and a penalty augmentation with analytic gradient/Hessian
# steers the optimiser.
require(OpenMx)
options(mxCondenseMatrixSlots=TRUE)
require(mvtnorm)
#Generate data:
set.seed(476)
# Two random 100 x 100 symmetric covariance structures: fill the strictly
# lower triangle (4950 cells) with small values, symmetrise, diagonal ~1.
A1 <- matrix(0,100,100)
A1[lower.tri(A1)] <- runif(4950, -0.025, 0.025)
A1 <- A1 + t(A1)
diag(A1) <- runif(100,0.95,1.05)
A2 <- matrix(0,100,100)
A2[lower.tri(A2)] <- runif(4950, -0.025, 0.025)
A2 <- A2 + t(A2)
diag(A2) <- runif(100,0.95,1.05)
# Phenotype: one draw per covariance component (each scaled by 0.25) plus
# iid residual noise of variance 0.5.
y <- t(rmvnorm(1,sigma=A1*0.25)+rmvnorm(1,sigma=A2*0.25))
y <- y + rnorm(100,sd=sqrt(0.5))
#y[100] <- NA
# Single covariate, independent of y.
x <- rnorm(100)
dat <- cbind(y,x)
colnames(dat) <- c("y","x")
#Baseline model: free variance components Ve, Va1, Va2 and model covariance
#V = A1*Va1 + A2*Va2 + I*Ve, fitted with the GREML fitfunction.
testmod <- mxModel(
"GREMLtest",
mxMatrix(type = "Full", nrow = 1, ncol=1, free=T, values =0.5, labels = "ve", lbound = 0.0001,
name = "Ve"),
mxMatrix(type = "Full", nrow = 1, ncol=1, free=T, values = 0.25, labels = "va1", name = "Va1"),
mxMatrix(type = "Full", nrow = 1, ncol=1, free=T, values = 0.25, labels = "va2", name = "Va2"),
mxData(observed = dat, type="raw", sort=FALSE),
mxExpectationGREML(V="V",yvars="y", Xvars="x", addOnes=T),
mxMatrix("Iden",nrow=100,name="I"),
mxMatrix("Symm",nrow=100,free=F,values=A1,name="A1"),
mxMatrix("Symm",nrow=100,free=F,values=A2,name="A2"),
mxAlgebra((A1%x%Va1) + (A2%x%Va2) + (I%x%Ve), name="V"),
mxFitFunctionGREML()
)
testrun <- mxRun(testmod)
#Pointless augmentation that adds a constant (0.64) to the fitfunction:
testmod2 <- mxModel(
"GREMLtest",
mxMatrix(type = "Full", nrow = 1, ncol=1, free=T, values =0.5, labels = "ve", lbound = 0.0001,
name = "Ve"),
mxMatrix(type = "Full", nrow = 1, ncol=1, free=T, values = 0.25, labels = "va1", name = "Va1"),
mxMatrix(type = "Full", nrow = 1, ncol=1, free=T, values = 0.25, labels = "va2", name = "Va2"),
mxData(observed = dat, type="raw", sort=FALSE),
mxExpectationGREML(V="V",yvars="y", Xvars="x", addOnes=T),
mxMatrix("Iden",nrow=100,name="I"),
mxMatrix("Symm",nrow=100,free=F,values=A1,name="A1"),
mxMatrix("Symm",nrow=100,free=F,values=A2,name="A2"),
mxAlgebra((A1%x%Va1) + (A2%x%Va2) + (I%x%Ve), name="V"),
mxMatrix(type="Full",nrow=1,ncol=1,free=F,values=0.64,name="aug"),
mxFitFunctionGREML(aug="aug")
)
testrun2 <- mxRun(testmod2)
# Expected fit shift of 1.28 = 2 * 0.64: the augmentation enters the fit
# on the -2logL scale, i.e. doubled.
omxCheckCloseEnough(a=testrun2$output$fit - testrun$output$fit, b=1.28, epsilon=1e-9)
#Baseline model using N-R with analytic derivatives of V supplied via dV:
testmod3 <- mxModel(
"GREMLtest",
mxMatrix(type = "Full", nrow = 1, ncol=1, free=T, values =0.5, labels = "ve", lbound = 0.0001,
name = "Ve"),
mxMatrix(type = "Full", nrow = 1, ncol=1, free=T, values = 0.25, labels = "va1", name = "Va1"),
mxMatrix(type = "Full", nrow = 1, ncol=1, free=T, values = 0.25, labels = "va2", name = "Va2"),
mxData(observed = dat, type="raw", sort=FALSE),
mxExpectationGREML(V="V",yvars="y", Xvars="x", addOnes=T),
mxComputeSequence(steps=list(
mxComputeNewtonRaphson(fitfunction="fitfunction"),
mxComputeOnce('fitfunction', c('fit','gradient','hessian','ihessian')),
mxComputeStandardError(),
mxComputeReportDeriv(),
mxComputeReportExpectation()
)),
mxMatrix("Iden",nrow=100,name="I"),
mxMatrix("Symm",nrow=100,free=F,values=A1,name="A1"),
mxMatrix("Symm",nrow=100,free=F,values=A2,name="A2"),
mxAlgebra((A1%x%Va1) + (A2%x%Va2) + (I%x%Ve), name="V"),
mxFitFunctionGREML(dV=c(va1="A1",va2="A2",ve="I"))
)
testrun3 <- mxRun(testmod3)
#Add augmentation that should nudge free parameters toward summing to 1.0:
#aug = 3*(Va1+Va2+Ve-1)^2 is a quadratic penalty; daug1 is its gradient and
#daug2 its constant Hessian (every entry 3*2 = 6) w.r.t. (va1, va2, ve).
testmod4 <- mxModel(
"GREMLtest",
mxMatrix(type = "Full", nrow = 1, ncol=1, free=T, values =0.5, labels = "ve", lbound = 0.0001,
name = "Ve"),
mxMatrix(type = "Full", nrow = 1, ncol=1, free=T, values = 0.25, labels = "va1", name = "Va1"),
mxMatrix(type = "Full", nrow = 1, ncol=1, free=T, values = 0.25, labels = "va2", name = "Va2"),
mxData(observed = dat, type="raw", sort=FALSE),
mxExpectationGREML(V="V",yvars="y", Xvars="x", addOnes=T),
mxComputeSequence(steps=list(
mxComputeNewtonRaphson(fitfunction="fitfunction"),
mxComputeOnce('fitfunction', c('fit','gradient','hessian','ihessian')),
mxComputeStandardError(),
mxComputeReportDeriv(),
mxComputeReportExpectation()
)),
mxMatrix("Iden",nrow=100,name="I"),
mxMatrix("Symm",nrow=100,free=F,values=A1,name="A1"),
mxMatrix("Symm",nrow=100,free=F,values=A2,name="A2"),
mxAlgebra((A1%x%Va1) + (A2%x%Va2) + (I%x%Ve), name="V"),
mxAlgebra( 3%x%(Va1+Va2+Ve-1)^2, name="aug"),
mxAlgebra( 3%x%rbind(
2*Va1 + 2*Va2 + 2*Ve - 2,
2*Va1 + 2*Va2 + 2*Ve - 2,
2*Va1 + 2*Va2 + 2*Ve - 2), name="daug1"),
mxMatrix(type="Full",nrow=3,ncol=3,free=F,values=6,name="daug2"),
mxFitFunctionGREML(dV=c(va1="A1",va2="A2",ve="I"),aug="aug",augGrad="daug1",augHess="daug2")
)
testrun4 <- mxRun(testmod4)
#The difference between 1.0 and the sum of the parameters should be smaller
#for the penalised model #4 than for the unpenalised model #3:
omxCheckTrue(abs(1-sum(testrun4$output$estimate)) < abs(1-sum(testrun3$output$estimate)))
|
#' Import time series from river flow API.
#'
#' Fetches time series via the river flow/rainfall API for one or more
#' stations: a single date, a period of record, or the whole record.
#' Station metadata can optionally be returned alongside the data. All
#' requested series must share the same datatype, organisation and period.
#'
#' @param ids identifier for stations (not EA refs)
#' @param dat string indicating datatype, as written in metadata.
#' @param org organisation from whom the data is obtained.
#' @param startDate string of the form \code{YYYY-MM-DD} giving the start of
#'   the period desired, or a single date. Whole record returned if no
#'   startDate provided.
#' @param endDate string of the form \code{YYYY-MM-DD} giving the end of the
#'   period desired. Ignored when startDate is absent.
#' @param metadata if \code{TRUE}, returns metadata for each station selected.
#' @param datetime if \code{TRUE}, converts text datetime column into POSIXlt.
#'
#' @return a dataframe of dates and magnitudes for one station, or a named
#'   list of dataframes for several. With \code{metadata = TRUE} each station
#'   is a list holding \code{detail} and \code{data}. Stations that cannot be
#'   found yield NA.
#'
#' @examples
#' \dontrun{
#' importTimeSeries(ids=c("SX67F051", "SS50F007"), org="EA", dat="gdf",
#'                  startDate="2017-01-01", endDate="2017-02-01")
#' importTimeSeries(ids="SX67F051", org="EA", dat="gdf", metadata=T)
#' }
#'
#' @export
importTimeSeries <- function(ids, dat, org = c("NRFA", "EA", "SEPA", "COSMOS"),
                             startDate = NULL, endDate = NULL, metadata=FALSE,
                             datetime = TRUE){
  ids <- as.character(ids) #ids for respective dataset, not refs
  org <- match.arg(org)
  # Convert likely date strings/objects into Date objects; take the first
  # candidate when multiple formats could match.
  parse_input_date <- function(d) {
    lubridate::as_date(x = d,
                       format = lubridate::guess_formats(d, c("dmy", "ymd")),
                       tz = "UTC")[1]
  }
  if (!is.null(startDate)) startDate <- parse_input_date(startDate)
  if (!is.null(endDate)) endDate <- parse_input_date(endDate)
  n_stations <- length(ids)
  if (n_stations == 0) { stop("Enter valid id for station(s).") }
  # Make sure at least one requested id exists in the organisation's list.
  known_ids <- stationList(org)$id
  if (!any(ids %in% known_ids)) {
    stop("No supplied stations available in selected list.")
  }
  out <- ts_fetch_internal(ids, org, dat, startDate, endDate)
  names(out) <- ids
  if (datetime) {
    # Replace the text datetime column with proper date-time objects.
    out <- lapply(out, function(station) {
      station$data <- reformatTimeSeries(station$data)
      station
    })
  }
  if (!metadata) {
    # Keep only the data component when metadata was not requested.
    out <- lapply(out, function(station) station['data',drop=F])
  }
  if (n_stations == 1) { out <- out[[1]] }
  return(out)
}
#' Reformats a time series to have datetime objects.
#'
#' Replaces the string datetime column of a data.frame (or of each element
#' of a list of such data.frames) with parsed date-time objects.
#'
#' @param ts time series data.frame object of two columns: datetime
#'   (strings in form \code{YYYY-MM-DDTHH:MM:SSZ} or \code{YYYY-MM-DD})
#'   and data (numeric).
#'
#' @return data.frame with the datetime column replaced by equivalent
#'   POSIXlt objects.
#'
#' @export
reformatTimeSeries <- function(ts){
  # All-NA input (e.g. a failed fetch) is passed through untouched.
  if (all(is.na(ts))) return(ts)
  to_posix <- function(v) {
    lubridate::as_datetime(v, format = "%Y-%m-%dT%H:%M:%OSZ", tz = "UTC")
  }
  restore_name <- NULL
  if (is.data.frame(ts)) {
    # Temporarily name the first of the two columns "datetime" so it can be
    # addressed with $, remembering the original name for restoration.
    if (dim(ts)[2] == 2) {
      restore_name <- colnames(ts)[1]
      colnames(ts)[1] <- "datetime"
    }
    # Date-only strings (10 characters) gain a nominal time-of-day so they
    # parse under the full datetime format.
    if (nchar(ts$datetime[1]) == 10) {
      ts$datetime <- paste0(ts$datetime, "T00:00:01Z")
    }
    ts$datetime <- to_posix(ts$datetime)
  } else {
    # A list of data.frames: rebuild each element with parsed datetimes.
    ts <- lapply(ts, function(entry) {
      entry$datetime <- to_posix(entry$datetime)
      entry
    })
  }
  if (!is.null(restore_name)) colnames(ts)[1] <- restore_name
  return(ts)
}
#' Import metadata from river flow API.
#'
#' Fetches station information via the river flow/rainfall API for one or
#' more sites. All requested stations must share the same datatype and
#' organisation.
#'
#' @param ids identifier for stations (not EA refs)
#' @param dat string indicating datatype, as written in metadata.
#' @param org organisation from whom the data is obtained.
#'
#' @return a list, or list of lists, containing:
#' \itemize{
#'   \item id - measuring authority station identifier
#'   \item ref - API reference string
#'   \item name - station name
#'   \item organisation
#'   \item station aliases under different organisations
#'   \item datatype - list of descriptors of data
#'   \item startDate - character string of first record
#'   \item dataUrl - string of URL to obtain data from API directly.
#' }
#' If not found, returns NA for each such station.
#'
#' @examples
#' \dontrun{
#' importMetadata(ids=c("SX67F051", "SS50F007"), org="EA", dat="gdf")
#' importMetadata(ids="SX67F051", org="EA", dat="gdf")
#' }
#'
#' @export
importMetadata <- function(ids, dat, org = c("NRFA", "EA", "SEPA", "COSMOS")){
  ids <- as.character(ids) #ids for respective dataset, not refs
  org <- match.arg(org)
  n_stations <- length(ids)
  if (n_stations == 0) { stop("Enter valid id for station(s).") }
  # At least one requested id must appear in the organisation's station list.
  if (!any(ids %in% stationList(org)$id)) {
    stop("No supplied stations available in selected list.")
  }
  # Fetch with no date bounds; only the detail component is of interest.
  info <- ts_fetch_internal(ids, org, dat, startDate=NULL, endDate=NULL)
  names(info) <- ids
  info <- lapply(info, function(station) station['detail',drop=F])
  if (n_stations == 1) info <- info[[1]]
  return(info)
}
# Import time series directly from river flow API.
#
# Builds one API URL per station, fetches and parses the JSON payload, and
# reports (via message()) stations that could not be accessed or that
# returned no data for the requested period.
#
# @param ids identifier for stations (not EA refs)
# @param org organisation from whom the data is obtained.
# @param dat string indicating datatype, as written in metadata.
# @param startDate string to indicate start of period desired, or
#    single date. Whole record given if no startDate provided.
# @param endDate string to indicate end of period desired. If no
#    startDate provided, this is ignored.
#
# @return a list with one element per id: the parsed API response for
#    successes, list(detail=NA, data=NA) for failures.
#
# @examples
# \dontrun{
# startDate <- lubridate::as_datetime("1901-01-01")
# endDate <- lubridate::as_datetime("1901-02-01")
# ts_fetch_internal(ids=c("SX67F051", "SS50F007"), org="EA", dat="gdf",
#                  startDate=startDate, endDate=endDate)
# }
#
ts_fetch_internal <- function(ids, org, dat, startDate=NULL, endDate=NULL){
  # EA stations are addressed in the API by reference strings, not ids.
  if (org == "EA") {
    refs <- idToRef(ids)
  }else{
    refs <- ids
  }
  # Generate URL to relevant API page (vectorised over refs).
  txt <- paste0("https://gateway-staging.ceh.ac.uk/hydrology-ukscape/",
                "stations/",org,"/",dat,"/",refs)
  if (!is.null(startDate)) {
    startDate <- lubridate::as_date(startDate,
          format=lubridate::guess_formats(startDate, c("ymd", "dmy"))[1],
          tz="UTC")
    txt <- paste0(txt,"/",format(startDate, "%Y-%m-%d"))
    # If only one date is provided, the API returns just that date.
    if (!is.null(endDate)) {
      endDate <- lubridate::as_date(endDate,
            format=lubridate::guess_formats(endDate, c("ymd", "dmy"))[1],
            tz="UTC")
      txt <- paste0(txt,"/", format(endDate, "%Y-%m-%d"))
    }
  }
  txt <- as.list(txt)
  # Check that each address is reachable/parseable before fetching for real.
  accesstest <- sapply(txt, function(y){
    class(try(jsonlite::fromJSON(txt=y, simplifyDataFrame=T),
              silent=T)) != "try-error"
  })
  if (sum(!accesstest) > 0) {
    # Fix: paste(..., sep=", ") does not join a vector of ids into one
    # string; collapse=", " is required for a readable message.
    message(paste0("Not possible to access ", dat, " data for stations ",
                   paste(ids[!accesstest], collapse=", "), "."))
  }
  ts_fetch <- vector("list", length(ids))
  # Get data from successfully tested stations only.
  ts_fetch[accesstest] <- lapply(txt[accesstest],
                                 jsonlite::fromJSON, simplifyDataFrame=T)
  ts_fetch[!accesstest] <- NA
  # Detect fetches that worked but returned no data frame, e.g. when the
  # requested period falls outside the record.
  datatest <- sapply(ts_fetch,
                     function(y){is.list(y) && is.data.frame(y$data)})
  if (sum(!datatest & accesstest) > 0) {
    # Fix: collapse=", " here too (see above).
    message(paste0("No ", dat, " data for stations ",
                   paste(ids[!datatest & accesstest], collapse=", "),
                   ". Check period selected."))
  }
  # Give every failed station the same detail/data shape as successes.
  ts_fetch <- replace(ts_fetch,
                      which(!accesstest | !datatest),
                      list(list("detail"=NA, "data"=NA)))
  return(ts_fetch)
}
|
/rfInterface/R/import_ts.R
|
no_license
|
griffada/flowAPIpackage
|
R
| false
| false
| 9,156
|
r
|
#' Import time series from river flow API.
#'
#' Fetches time series via the river flow/rainfall API for one or more
#' stations: a single date, a period of record, or the whole record.
#' Station metadata can optionally be returned alongside the data. All
#' requested series must share the same datatype, organisation and period.
#'
#' @param ids identifier for stations (not EA refs)
#' @param dat string indicating datatype, as written in metadata.
#' @param org organisation from whom the data is obtained.
#' @param startDate string of the form \code{YYYY-MM-DD} giving the start of
#'   the period desired, or a single date. Whole record returned if no
#'   startDate provided.
#' @param endDate string of the form \code{YYYY-MM-DD} giving the end of the
#'   period desired. Ignored when startDate is absent.
#' @param metadata if \code{TRUE}, returns metadata for each station selected.
#' @param datetime if \code{TRUE}, converts text datetime column into POSIXlt.
#'
#' @return a dataframe of dates and magnitudes for one station, or a named
#'   list of dataframes for several. With \code{metadata = TRUE} each station
#'   is a list holding \code{detail} and \code{data}. Stations that cannot be
#'   found yield NA.
#'
#' @examples
#' \dontrun{
#' importTimeSeries(ids=c("SX67F051", "SS50F007"), org="EA", dat="gdf",
#'                  startDate="2017-01-01", endDate="2017-02-01")
#' importTimeSeries(ids="SX67F051", org="EA", dat="gdf", metadata=T)
#' }
#'
#' @export
importTimeSeries <- function(ids, dat, org = c("NRFA", "EA", "SEPA", "COSMOS"),
                             startDate = NULL, endDate = NULL, metadata=FALSE,
                             datetime = TRUE){
  ids <- as.character(ids) #ids for respective dataset, not refs
  org <- match.arg(org)
  # Convert likely date strings/objects into Date objects; take the first
  # candidate when several formats could match.
  parse_input_date <- function(d) {
    lubridate::as_date(x = d,
                       format = lubridate::guess_formats(d, c("dmy", "ymd")),
                       tz = "UTC")[1]
  }
  if (!is.null(startDate)) startDate <- parse_input_date(startDate)
  if (!is.null(endDate)) endDate <- parse_input_date(endDate)
  n_stations <- length(ids)
  if (n_stations == 0) { stop("Enter valid id for station(s).") }
  # Make sure at least one requested id exists in the organisation's list.
  known_ids <- stationList(org)$id
  if (!any(ids %in% known_ids)) {
    stop("No supplied stations available in selected list.")
  }
  out <- ts_fetch_internal(ids, org, dat, startDate, endDate)
  names(out) <- ids
  if (datetime) {
    # Replace the text datetime column with proper date-time objects.
    out <- lapply(out, function(station) {
      station$data <- reformatTimeSeries(station$data)
      station
    })
  }
  if (!metadata) {
    # Keep only the data component when metadata was not requested.
    out <- lapply(out, function(station) station['data',drop=F])
  }
  if (n_stations == 1) { out <- out[[1]] }
  return(out)
}
#' Reformats a time series to have datetime objects.
#'
#' Replaces the string datetime column of a data.frame (or of each element
#' of a list of such data.frames) with parsed date-time objects.
#'
#' @param ts time series data.frame object of two columns: datetime
#'   (strings in form \code{YYYY-MM-DDTHH:MM:SSZ} or \code{YYYY-MM-DD})
#'   and data (numeric).
#'
#' @return data.frame with the datetime column replaced by equivalent
#'   POSIXlt objects.
#'
#' @export
reformatTimeSeries <- function(ts){
  # All-NA input (e.g. a failed fetch) is passed through untouched.
  if (all(is.na(ts))) return(ts)
  to_posix <- function(v) {
    lubridate::as_datetime(v, format = "%Y-%m-%dT%H:%M:%OSZ", tz = "UTC")
  }
  restore_name <- NULL
  if (is.data.frame(ts)) {
    # Temporarily name the first of the two columns "datetime" so it can be
    # addressed with $, remembering the original name for restoration.
    if (dim(ts)[2] == 2) {
      restore_name <- colnames(ts)[1]
      colnames(ts)[1] <- "datetime"
    }
    # Date-only strings (10 characters) gain a nominal time-of-day so they
    # parse under the full datetime format.
    if (nchar(ts$datetime[1]) == 10) {
      ts$datetime <- paste0(ts$datetime, "T00:00:01Z")
    }
    ts$datetime <- to_posix(ts$datetime)
  } else {
    # A list of data.frames: rebuild each element with parsed datetimes.
    ts <- lapply(ts, function(entry) {
      entry$datetime <- to_posix(entry$datetime)
      entry
    })
  }
  if (!is.null(restore_name)) colnames(ts)[1] <- restore_name
  return(ts)
}
#' Import metadata from river flow API.
#'
#' Fetches station information via the river flow/rainfall API for one or
#' more sites. All requested stations must share the same datatype and
#' organisation.
#'
#' @param ids identifier for stations (not EA refs)
#' @param dat string indicating datatype, as written in metadata.
#' @param org organisation from whom the data is obtained.
#'
#' @return a list, or list of lists, containing:
#' \itemize{
#'   \item id - measuring authority station identifier
#'   \item ref - API reference string
#'   \item name - station name
#'   \item organisation
#'   \item station aliases under different organisations
#'   \item datatype - list of descriptors of data
#'   \item startDate - character string of first record
#'   \item dataUrl - string of URL to obtain data from API directly.
#' }
#' If not found, returns NA for each such station.
#'
#' @examples
#' \dontrun{
#' importMetadata(ids=c("SX67F051", "SS50F007"), org="EA", dat="gdf")
#' importMetadata(ids="SX67F051", org="EA", dat="gdf")
#' }
#'
#' @export
importMetadata <- function(ids, dat, org = c("NRFA", "EA", "SEPA", "COSMOS")){
  ids <- as.character(ids) #ids for respective dataset, not refs
  org <- match.arg(org)
  n_stations <- length(ids)
  if (n_stations == 0) { stop("Enter valid id for station(s).") }
  # At least one requested id must appear in the organisation's station list.
  if (!any(ids %in% stationList(org)$id)) {
    stop("No supplied stations available in selected list.")
  }
  # Fetch with no date bounds; only the detail component is of interest.
  info <- ts_fetch_internal(ids, org, dat, startDate=NULL, endDate=NULL)
  names(info) <- ids
  info <- lapply(info, function(station) station['detail',drop=F])
  if (n_stations == 1) info <- info[[1]]
  return(info)
}
# Import time series directly from river flow API.
#
# Builds one API URL per station, fetches and parses the JSON payload, and
# reports (via message()) stations that could not be accessed or that
# returned no data for the requested period.
#
# @param ids identifier for stations (not EA refs)
# @param org organisation from whom the data is obtained.
# @param dat string indicating datatype, as written in metadata.
# @param startDate string to indicate start of period desired, or
#    single date. Whole record given if no startDate provided.
# @param endDate string to indicate end of period desired. If no
#    startDate provided, this is ignored.
#
# @return a list with one element per id: the parsed API response for
#    successes, list(detail=NA, data=NA) for failures.
#
# @examples
# \dontrun{
# startDate <- lubridate::as_datetime("1901-01-01")
# endDate <- lubridate::as_datetime("1901-02-01")
# ts_fetch_internal(ids=c("SX67F051", "SS50F007"), org="EA", dat="gdf",
#                  startDate=startDate, endDate=endDate)
# }
#
ts_fetch_internal <- function(ids, org, dat, startDate=NULL, endDate=NULL){
  # EA stations are addressed in the API by reference strings, not ids.
  if (org == "EA") {
    refs <- idToRef(ids)
  }else{
    refs <- ids
  }
  # Generate URL to relevant API page (vectorised over refs).
  txt <- paste0("https://gateway-staging.ceh.ac.uk/hydrology-ukscape/",
                "stations/",org,"/",dat,"/",refs)
  if (!is.null(startDate)) {
    startDate <- lubridate::as_date(startDate,
          format=lubridate::guess_formats(startDate, c("ymd", "dmy"))[1],
          tz="UTC")
    txt <- paste0(txt,"/",format(startDate, "%Y-%m-%d"))
    # If only one date is provided, the API returns just that date.
    if (!is.null(endDate)) {
      endDate <- lubridate::as_date(endDate,
            format=lubridate::guess_formats(endDate, c("ymd", "dmy"))[1],
            tz="UTC")
      txt <- paste0(txt,"/", format(endDate, "%Y-%m-%d"))
    }
  }
  txt <- as.list(txt)
  # Check that each address is reachable/parseable before fetching for real.
  accesstest <- sapply(txt, function(y){
    class(try(jsonlite::fromJSON(txt=y, simplifyDataFrame=T),
              silent=T)) != "try-error"
  })
  if (sum(!accesstest) > 0) {
    # Fix: paste(..., sep=", ") does not join a vector of ids into one
    # string; collapse=", " is required for a readable message.
    message(paste0("Not possible to access ", dat, " data for stations ",
                   paste(ids[!accesstest], collapse=", "), "."))
  }
  ts_fetch <- vector("list", length(ids))
  # Get data from successfully tested stations only.
  ts_fetch[accesstest] <- lapply(txt[accesstest],
                                 jsonlite::fromJSON, simplifyDataFrame=T)
  ts_fetch[!accesstest] <- NA
  # Detect fetches that worked but returned no data frame, e.g. when the
  # requested period falls outside the record.
  datatest <- sapply(ts_fetch,
                     function(y){is.list(y) && is.data.frame(y$data)})
  if (sum(!datatest & accesstest) > 0) {
    # Fix: collapse=", " here too (see above).
    message(paste0("No ", dat, " data for stations ",
                   paste(ids[!datatest & accesstest], collapse=", "),
                   ". Check period selected."))
  }
  # Give every failed station the same detail/data shape as successes.
  ts_fetch <- replace(ts_fetch,
                      which(!accesstest | !datatest),
                      list(list("detail"=NA, "data"=NA)))
  return(ts_fetch)
}
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
library(shiny)

# Server: draws a random sample from the distribution chosen in the UI and
# renders its histogram.
shinyServer(function(input, output){
  output$plot1 <- renderPlot({
    n <- input$sample
    draws <- if (input$Distribution == 'Normal') {
      # Normal draws with user-supplied mean and standard deviation.
      rnorm(n, mean = as.numeric(input$Mean), sd = as.numeric(input$sd))
    } else {
      # Exponential draws; input$lambda is used as 1/rate.
      rexp(n, rate = 1 / as.numeric(input$lambda))
    }
    hist(draws, col = "blue")
  })
})
|
/My_DDP_Shiny/server.R
|
no_license
|
yogizhere10/my-DDP-Week-4-Project
|
R
| false
| false
| 668
|
r
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
library(shiny)

# Server: draws a random sample from the distribution chosen in the UI and
# renders its histogram.
shinyServer(function(input, output){
  output$plot1 <- renderPlot({
    sample_size <- input$sample
    chosen <- input$Distribution
    if (chosen == 'Normal') {
      # Normal draws with user-supplied mean and standard deviation.
      sample_vals <- rnorm(sample_size, mean = as.numeric(input$Mean),
                           sd = as.numeric(input$sd))
    } else {
      # Exponential draws; input$lambda is used as 1/rate.
      sample_vals <- rexp(sample_size, rate = 1 / as.numeric(input$lambda))
    }
    hist(sample_vals, col = "blue")
  })
})
|
# Bike-rental demand analysis: EDA, outlier treatment, feature selection and
# three regression models (decision tree, random forest, GBM).
# NOTE(review): rm(list=ls()) and an absolute setwd() make this script
# non-portable; kept as-is for this personal analysis script.
rm(list = ls())
setwd("C:/Users/user/Documents/Github/Bike-Rental")
# Load all required packages (require() returns FALSE rather than erroring
# when a package is missing).
libraries = c("data.table", "plyr","dplyr", "ggplot2","gridExtra","rpart","dplyr","gbm","DMwR","randomForest","usdm","corrgram","DataCombine")
lapply(libraries, require, character.only = TRUE)
# Read the daily rental data; as.is=T keeps strings as characters.
daily_data = read.csv('day.csv', header = T, as.is = T)
# Quick exploration: structure, names, summary stats, missing-value counts.
head(daily_data)
str(daily_data)
names(daily_data)
summary(daily_data)
sapply(daily_data, function(x) {sum(is.na(x))})
# Drop identifier/date columns and the casual/registered split of the target.
daily_data = subset(daily_data,select = -c(instant,dteday,casual,registered))
# Rename abbreviated columns to descriptive names.
setnames(daily_data, old = c("yr", "mnth", "weathersit", "cnt", 'hum'), new = c('year', 'month', 'weather_type', 'total_count', "humidity"))
categorical_features = c("season","year","month","holiday","weekday","workingday","weather_type")
numerical_features = c("temp","atemp","humidity","windspeed")
# Encode the categorical columns as factors.
daily_data[categorical_features] = lapply(daily_data[categorical_features], as.factor)
numeric_index = sapply(daily_data,is.numeric) #selecting only numeric
numeric_data = daily_data[,numeric_index]
cnames = colnames(numeric_data)
# Density plot (with dashed mean line) for each numerical predictor; plots
# are stashed as density1..density4 via assign().
for (i in 1:length(numerical_features))
{
assign(paste0("density", i), ggplot(aes_string(x = cnames[i]), data = numeric_data) +
geom_density(color="green", fill="#CCFFFF") +
ggtitle(paste("Density plot for", cnames[i])) +
theme(text=element_text(size=10, family="serif"), plot.title = element_text(hjust = 0.5)) + geom_vline(aes_string(xintercept = mean(numeric_data[,i])), color="blue", linetype="dashed", size=1))
}
grid.arrange(density1, density2, density3, density4,ncol=2)
# Univariate bar plots (with counts) for each categorical feature.
factor_data = daily_data[,categorical_features]
fcnames = colnames(factor_data)
for( i in 1:length(fcnames))
{
assign(paste0("bar_univarite_", i), ggplot(aes_string(x = fcnames[i]), data = factor_data) +
geom_bar(stat = 'count', position = 'dodge', fill = "#CCFFFF", col = 'black') +
geom_label(stat = 'count', aes(label = ..count..), col = 'black') +
ggtitle(paste("Univarite bar plot for", fcnames[i])) + theme(text=element_text(size=10, family="serif"), plot.title = element_text(hjust = 0.5) ) )
}
grid.arrange(bar_univarite_1, bar_univarite_2, bar_univarite_3, bar_univarite_4, bar_univarite_5, bar_univarite_6, bar_univarite_7,ncol=2)
# Boxplots for every numeric column (including the target) to spot outliers.
for(i in 1:ncol(numeric_data)) {
assign(paste0("box",i), ggplot(data = numeric_data, aes_string(y = numeric_data[,i])) +
stat_boxplot(geom = "errorbar", width = 0.5) +
geom_boxplot(outlier.colour = "red", fill = "grey", outlier.size = 1) +
labs(y = colnames(numeric_data[i])) +
ggtitle(paste("Boxplot: ",colnames(numeric_data[i]))))
}
gridExtra::grid.arrange(box1,box2,box3,box4,box5,ncol=2)
gridExtra::grid.arrange(box5)
# Replace boxplot-flagged outliers in each numeric column with NA, printing
# the count removed, then impute the NAs with kNN (k = 5).
for(i in cnames){
val = daily_data[,i][daily_data[,i] %in% boxplot.stats(daily_data[,i])$out]
print(paste(i,length(val)))
daily_data[,i][daily_data[,i] %in% val] = NA
}
daily_data = knnImputation(daily_data, k = 5)
sum(is.na(daily_data))
# Multicollinearity (VIF) and correlation structure of numeric predictors.
vif(numeric_data)
corrgram(numeric_data, order = F, upper.panel=panel.pie,
text.panel=panel.txt, main = "Correlation Plot")
# One-way ANOVA of the target against each categorical feature.
for(i in categorical_features){
print(i)
aov_summary = summary(aov(daily_data$total_count~daily_data[,i],data = daily_data))
print(aov_summary)
}
# Drop temp and weekday -- presumably based on the VIF/correlation and ANOVA
# results above (TODO confirm) -- then min-max scale the target to [0, 1].
daily_data = subset(daily_data,select = -c(temp,weekday))
daily_data$total_count = (daily_data$total_count-min(daily_data$total_count))/(max(daily_data$total_count)-min(daily_data$total_count))
# 80/20 train/test split; after the drops above, column 10 is total_count.
set.seed(123)
train_index = sample(1:nrow(daily_data), 0.8 * nrow(daily_data))
train = daily_data[train_index,]
test = daily_data[-train_index,]
#rpart for regression
dt_model = rpart(total_count ~ ., data = train, method = "anova")
#Predict the test cases
dt_predictions = predict(dt_model, test[,-10])
#Create dataframe for actual and predicted values
df = data.frame("actual"=test[,10], "pred"=dt_predictions)
head(df)
#Evaluate the decision tree: MAE, MSE, RMSE and MAPE.
regr.eval(trues = test[,10], preds = dt_predictions, stats = c("mae","mse","rmse","mape"))
#Mean absolute percentage error helper; prints (and returns) the value.
MAPE = function(actual, pred){
print(mean(abs((actual - pred)/actual)) * 100)
}
MAPE(test[,10], dt_predictions)
#### Random forest with 500 trees.
rf_model = randomForest(total_count~., data = train, ntree = 500)
#Predict the test cases
rf_predictions = predict(rf_model, test[,-10])
#Append predictions to the comparison dataframe
df = cbind(df,rf_predictions)
head(df)
#Calculate MAPE
regr.eval(trues = test[,10], preds = rf_predictions, stats = c("mae","mse","rmse","mape"))
MAPE(test[,10], rf_predictions)
#Calculate R Squared
1 - (sum((test[,10]-rf_predictions)^2)/sum((test[,10]-mean(test[,10]))^2))
# Gradient boosting (gaussian loss) with 10-fold cross-validation.
gbm_model = gbm(
formula = train$total_count ~ .,
distribution = "gaussian",
data = train,
n.trees = 100,
interaction.depth = 5,
shrinkage = 0.1,
cv.folds = 10,
n.cores = NULL, # will use all cores by default
verbose = TRUE
)
gbm_pred <- predict(gbm_model, newdata = test[,-10], type = "link")
regr.eval(trues = test[,10], preds = gbm_pred, stats = c("mae","mse","rmse","mape"))
MAPE(test[,10], gbm_pred)
#Calculate R Squared
1 - (sum((test[,10]-gbm_pred)^2)/sum((test[,10]-mean(test[,10]))^2))
|
/Bike_Rental.R
|
no_license
|
sauravjoshi/Bike-Rental
|
R
| false
| false
| 5,100
|
r
|
# ============================================================
# Bike_Rental.R — daily bike-rental count regression pipeline:
# load -> clean -> EDA -> outlier treatment -> feature selection -> models.
# NOTE(review): rm(list = ls()) and a hard-coded setwd() in a script are
# anti-patterns (wipes the user's session; machine-specific path).
# ============================================================
rm(list = ls())
setwd("C:/Users/user/Documents/Github/Bike-Rental")
libraries = c("data.table", "plyr","dplyr", "ggplot2","gridExtra","rpart","dplyr","gbm","DMwR","randomForest","usdm","corrgram","DataCombine")
lapply(libraries, require, character.only = TRUE)
# Load the daily data; as.is = TRUE keeps strings as character.
daily_data = read.csv('day.csv', header = T, as.is = T)
# Quick structural inspection.
head(daily_data)
str(daily_data)
names(daily_data)
summary(daily_data)
# Per-column missing-value counts.
sapply(daily_data, function(x) {sum(is.na(x))})
# Drop identifier/date columns and the casual/registered split.
daily_data = subset(daily_data,select = -c(instant,dteday,casual,registered))
# Rename abbreviated columns to readable names.
setnames(daily_data, old = c("yr", "mnth", "weathersit", "cnt", 'hum'), new = c('year', 'month', 'weather_type', 'total_count', "humidity"))
categorical_features = c("season","year","month","holiday","weekday","workingday","weather_type")
numerical_features = c("temp","atemp","humidity","windspeed")
daily_data[categorical_features] = lapply(daily_data[categorical_features], as.factor)
numeric_index = sapply(daily_data,is.numeric) #selecting only numeric
numeric_data = daily_data[,numeric_index]
cnames = colnames(numeric_data)
# Density plot per numeric feature; assign() creates density1..density4.
for (i in 1:length(numerical_features))
{
  assign(paste0("density", i), ggplot(aes_string(x = cnames[i]), data = numeric_data) +
    geom_density(color="green", fill="#CCFFFF") +
    ggtitle(paste("Density plot for", cnames[i])) +
    theme(text=element_text(size=10, family="serif"), plot.title = element_text(hjust = 0.5)) + geom_vline(aes_string(xintercept = mean(numeric_data[,i])), color="blue", linetype="dashed", size=1))
}
grid.arrange(density1, density2, density3, density4,ncol=2)
# Univariate bar plot per categorical feature.
factor_data = daily_data[,categorical_features]
fcnames = colnames(factor_data)
for( i in 1:length(fcnames))
{
  assign(paste0("bar_univarite_", i), ggplot(aes_string(x = fcnames[i]), data = factor_data) +
    geom_bar(stat = 'count', position = 'dodge', fill = "#CCFFFF", col = 'black') +
    geom_label(stat = 'count', aes(label = ..count..), col = 'black') +
    ggtitle(paste("Univarite bar plot for", fcnames[i])) + theme(text=element_text(size=10, family="serif"), plot.title = element_text(hjust = 0.5) ) )
}
grid.arrange(bar_univarite_1, bar_univarite_2, bar_univarite_3, bar_univarite_4, bar_univarite_5, bar_univarite_6, bar_univarite_7,ncol=2)
# Boxplots for outlier inspection (box1..box5; the grid call below shows
# numeric_data has 5 columns here).
for(i in 1:ncol(numeric_data)) {
  assign(paste0("box",i), ggplot(data = numeric_data, aes_string(y = numeric_data[,i])) +
    stat_boxplot(geom = "errorbar", width = 0.5) +
    geom_boxplot(outlier.colour = "red", fill = "grey", outlier.size = 1) +
    labs(y = colnames(numeric_data[i])) +
    ggtitle(paste("Boxplot: ",colnames(numeric_data[i]))))
}
gridExtra::grid.arrange(box1,box2,box3,box4,box5,ncol=2)
gridExtra::grid.arrange(box5)
# Outlier treatment: flag boxplot-rule outliers as NA, then kNN-impute them.
for(i in cnames){
  val = daily_data[,i][daily_data[,i] %in% boxplot.stats(daily_data[,i])$out]
  print(paste(i,length(val)))
  daily_data[,i][daily_data[,i] %in% val] = NA
}
daily_data = knnImputation(daily_data, k = 5)
sum(is.na(daily_data))
# Multicollinearity check and correlation plot.
# NOTE(review): numeric_data was captured before outlier imputation, so
# vif/corrgram run on pre-imputation values — confirm that is intended.
vif(numeric_data)
corrgram(numeric_data, order = F, upper.panel=panel.pie,
         text.panel=panel.txt, main = "Correlation Plot")
# ANOVA screen of each categorical feature against the target.
for(i in categorical_features){
  print(i)
  aov_summary = summary(aov(daily_data$total_count~daily_data[,i],data = daily_data))
  print(aov_summary)
}
daily_data = subset(daily_data,select = -c(temp,weekday))
# Min-max normalise the target to [0, 1].
# NOTE(review): the minimum maps to exactly 0, so the MAPE computed later
# divides by zero for that observation — confirm this is intended.
daily_data$total_count = (daily_data$total_count-min(daily_data$total_count))/(max(daily_data$total_count)-min(daily_data$total_count))
# Seeded 80/20 train/test split.
set.seed(123)
train_index = sample(1:nrow(daily_data), 0.8 * nrow(daily_data))
train = daily_data[train_index,]
test = daily_data[-train_index,]
#rpart for regression
dt_model = rpart(total_count ~ ., data = train, method = "anova")
#Predict the test cases (column 10 is total_count)
dt_predictions = predict(dt_model, test[,-10])
#Create dataframe for actual and predicted values
df = data.frame("actual"=test[,10], "pred"=dt_predictions)
head(df)
#calculate MAPE
regr.eval(trues = test[,10], preds = dt_predictions, stats = c("mae","mse","rmse","mape"))
#calculate MAPE
MAPE <- function(actual, pred){
  # Mean Absolute Percentage Error, expressed in percent; printed like the
  # original. Note: zeros in `actual` yield Inf, as before.
  rel_err <- abs((actual - pred) / actual)
  print(100 * mean(rel_err))
}
MAPE(test[,10], dt_predictions)
####
rf_model = randomForest(total_count~., data = train, ntree = 500)
#Predict the test cases
rf_predictions = predict(rf_model, test[,-10])
#Create dataframe for actual and predicted values
df = cbind(df,rf_predictions)
head(df)
#Calculate MAPE
regr.eval(trues = test[,10], preds = rf_predictions, stats = c("mae","mse","rmse","mape"))
MAPE(test[,10], rf_predictions)
#Calculate R Squared
1 - (sum((test[,10]-rf_predictions)^2)/sum((test[,10]-mean(test[,10]))^2))
gbm_model = gbm(
formula = train$total_count ~ .,
distribution = "gaussian",
data = train,
n.trees = 100,
interaction.depth = 5,
shrinkage = 0.1,
cv.folds = 10,
n.cores = NULL, # will use all cores by default
verbose = TRUE
)
gbm_pred <- predict(gbm_model, newdata = test[,-10], type = "link")
regr.eval(trues = test[,10], preds = gbm_pred, stats = c("mae","mse","rmse","mape"))
MAPE(test[,10], gbm_pred)
#Calculate R Squared
1 - (sum((test[,10]-gbm_pred)^2)/sum((test[,10]-mean(test[,10]))^2))
|
#' Funções auxiliares
#'
#' Funções auxiliares para manipulação de textos e números.
#'
#' A função \code{wrap.it} é usada para gerar os nomes em eixos de gráficos, quebrando a linha em
#' blocos de no máximo \code{len} caracteres.
#' A função \code{capitalize} transforma um vetor de texto para Iniciais Maiúsculas.
#' A função \code{trim} remove espaços no início e final de textos.
#' A função \code{split} separa cada item de um vetor usando um padrão regular, e devolve um vetor
#' contendo todos os elementos constituintes.
#' A função \code{to.p} formata um vetor numérico como porcentagem.
#' @param x Vetor de entrada; character para \code{wrap.it}, \code{trim} e \code{capitalize}, numeric para
#' \code{to.p}.
#' @param len Número máximo de caracteres para cada linha
#' @examples
#' wrap.it("Texto muito muito extremamente longo e desnecessariamente comprido", 10)
#' capitalize("texto em minúsculas")
#' trim(" espaços ")
#' split("Um item e outro item, finalmente/no entanto")
#' @export
#' @encoding utf-8
#' @rdname auxiliar
wrap.it <- function(x, len = 12)
{
  # Wrap each string at `len` characters and join the wrapped pieces with
  # newlines; vapply pins the result to exactly one string per input element.
  vapply(x, function(txt) paste(strwrap(txt, len), collapse = "\n"),
         character(1), USE.NAMES = FALSE)
}
#' @export
#' @rdname auxiliar
capitalize <- function(x) {
  # Uppercase the first letter of each space-separated word of one string.
  words <- strsplit(x, " ")[[1]]
  paste(paste0(toupper(substring(words, 1, 1)), substring(words, 2)),
        collapse = " ")
}
# Vectorised wrapper so character vectors are handled element-wise.
capitalize <- Vectorize(capitalize)
#' @export
#' @rdname auxiliar
trim <- function(x) gsub("^\\s+|\\s+$", "", x)  # strip leading/trailing whitespace
#' @export
#' @param pattern Padrão regular usado para separar os elementos
#' @rdname auxiliar
split <- function(x, pattern="(, )|( e )|/") {
  # Trim each element, split on `pattern`, and flatten all fragments into one
  # character vector. trim() and strsplit() are both vectorised, so the
  # original explicit loop (and its unused counter `j`) is unnecessary; this
  # version also handles zero-length input (the 1:length(x) loop errored).
  # NOTE(review): this masks base::split(); name kept for backward compatibility.
  unlist(strsplit(trim(x), pattern))
}
#' @export
#' @rdname auxiliar
to.p <- function(x) { round(100 * x / sum(x), 1) }  # share of total, in percent (1 dp)
#' @export
#' @rdname auxiliar
#' @import utils
rname <- function(x, wrap=12, dictionary="dictionary.txt") {
  # Resolve a display label: de-mangle dots to spaces, default empty answers,
  # substitute via the handle dictionary (two-column CSV, no header:
  # handle, replacement), then wrap to `wrap` characters.
  # NOTE(review): the dictionary file is re-read on every call — consider
  # caching; confirm the file is shipped alongside the package.
  dict <- read.csv(dictionary, header=FALSE, stringsAsFactors=FALSE)
  # Dots presumably come from make.names()-style mangling — confirm.
  x <- gsub("\\.", " ", x)
  if (x=="") x <- "não respondeu / nenhum"
  # Substitute handles found in the dictionary's first column
  if (x %in% dict[,1])
    x <- dict[which(x == dict[,1]) ,2]
  x <- wrap.it(x, wrap)
  return(x[[1]]) # BUGFIX: sometimes a list comes back (reason unknown); keep the first element
}
rname <- Vectorize(rname)
|
/R/auxiliar.R
|
no_license
|
pesquisaR/pesquisaR
|
R
| false
| false
| 2,331
|
r
|
#' Funções auxiliares
#'
#' Funções auxiliares para manipulação de textos e números.
#'
#' A função \code{wrap.it} é usada para gerar os nomes em eixos de gráficos, quebrando a linha em
#' blocos de no máximo \code{len} caracteres.
#' A função \code{capitalize} transforma um vetor de texto para Iniciais Maiúsculas.
#' A função \code{trim} remove espaços no início e final de textos.
#' A função \code{split} separa cada item de um vetor usando um padrão regular, e devolve um vetor
#' contendo todos os elementos constituintes.
#' A função \code{to.p} formata um vetor numérico como porcentagem.
#' @param x Vetor de entrada; character para \code{wrap.it}, \code{trim} e \code{capitalize}, numeric para
#' \code{to.p}.
#' @param len Número máximo de caracteres para cada linha
#' @examples
#' wrap.it("Texto muito muito extremamente longo e desnecessariamente comprido", 10)
#' capitalize("texto em minúsculas")
#' trim(" espaços ")
#' split("Um item e outro item, finalmente/no entanto")
#' @export
#' @encoding utf-8
#' @rdname auxiliar
wrap.it <- function(x, len = 12)
{
  # Wrap each string at `len` characters and join the wrapped pieces with
  # newlines; vapply pins the result to exactly one string per input element.
  vapply(x, function(txt) paste(strwrap(txt, len), collapse = "\n"),
         character(1), USE.NAMES = FALSE)
}
#' @export
#' @rdname auxiliar
capitalize <- function(x) {
  # Uppercase the first letter of each space-separated word of one string.
  words <- strsplit(x, " ")[[1]]
  paste(paste0(toupper(substring(words, 1, 1)), substring(words, 2)),
        collapse = " ")
}
# Vectorised wrapper so character vectors are handled element-wise.
capitalize <- Vectorize(capitalize)
#' @export
#' @rdname auxiliar
trim <- function(x) gsub("^\\s+|\\s+$", "", x)  # strip leading/trailing whitespace
#' @export
#' @param pattern Padrão regular usado para separar os elementos
#' @rdname auxiliar
split <- function(x, pattern="(, )|( e )|/") {
  # Trim each element, split on `pattern`, and flatten all fragments into one
  # character vector. trim() and strsplit() are both vectorised, so the
  # original explicit loop (and its unused counter `j`) is unnecessary; this
  # version also handles zero-length input (the 1:length(x) loop errored).
  # NOTE(review): this masks base::split(); name kept for backward compatibility.
  unlist(strsplit(trim(x), pattern))
}
#' @export
#' @rdname auxiliar
to.p <- function(x) { round(100 * x / sum(x), 1) }  # share of total, in percent (1 dp)
#' @export
#' @rdname auxiliar
#' @import utils
rname <- function(x, wrap=12, dictionary="dictionary.txt") {
  # Resolve a display label: de-mangle dots to spaces, default empty answers,
  # substitute via the handle dictionary (two-column CSV, no header:
  # handle, replacement), then wrap to `wrap` characters.
  # NOTE(review): the dictionary file is re-read on every call — consider
  # caching; confirm the file is shipped alongside the package.
  dict <- read.csv(dictionary, header=FALSE, stringsAsFactors=FALSE)
  # Dots presumably come from make.names()-style mangling — confirm.
  x <- gsub("\\.", " ", x)
  if (x=="") x <- "não respondeu / nenhum"
  # Substitute handles found in the dictionary's first column
  if (x %in% dict[,1])
    x <- dict[which(x == dict[,1]) ,2]
  x <- wrap.it(x, wrap)
  return(x[[1]]) # BUGFIX: sometimes a list comes back (reason unknown); keep the first element
}
rname <- Vectorize(rname)
|
library(shiny)
# Shiny server: infraction pie/table views plus data_viz time series and
# rankings. Relies on objects created elsewhere (ui.R / global.R):
# `infracciones`, `data_viz`, `cons1`, and the rAmCharts/DT/dplyr/tidyr
# functions used below. NOTE(review): none are defined in this file — confirm.
shinyServer(function(input, output) {
  # Placeholder reactive; the body is commented out so it returns NULL.
  tabla_1 <- reactive({
    #Positivos=Estado_V() %>% mutate(contar=1) %>%
    # group_by(Estado=PCR1) %>% summarise(PCR=sum(contar))
  })
  # Infractions ("Infracciones") ----
  # Pie of infraction values for the area chosen in input$area.
  output$pie_plot <- renderAmCharts({
    cons_pie=infracciones %>% filter(Responsable==input$area) %>% group_by(label=Valor) %>%
      summarise(value=as.numeric(length(Codigo))) %>% data.frame() %>% mutate(label=as.character(label))
    amPie(data = cons_pie, legend = TRUE, legendPosition = "left",depth = 20, export = TRUE)
  })
  output$infr_1 <- renderDataTable({
    datatable(cons1)
  })
  # Infraction counts per responsible area / code, sorted descending.
  output$infr_2 <- renderDataTable({
    cons2=infracciones %>% filter(Responsable==input$area) %>% group_by(Responsable,Codigo_M) %>%
      summarise(Conteo=length(Codigo))
    cons2 <- cons2[with(cons2, order(-cons2$Conteo)), ] # descending order
    datatable(cons2)
  })
  #data_viz----
  # Fines per identification date for the selected service, with a +/-2.5 band.
  output$serie_1 <- renderAmCharts({
    cons11=data_viz %>% filter(Servicio==input$servicio) %>% group_by(FechaDeIdent) %>%
      summarise(Multas=as.numeric(length(DateKey)))
    # NOTE(review): stray base-graphics plot() inside renderAmCharts (and
    # type="line" is not a valid base type; "l" is). It does not feed the
    # amTimeSeries output — confirm it can be removed.
    plot(cons11$FechaDeIdent,cons11$Multas,type="line")
    cons11$FechaDeIdent=as.POSIXct(cons11$FechaDeIdent)
    cons11$Multas_low <- cons11$Multas-2.5
    cons11$Multas_up <- cons11$Multas+2.5
    color_t=ifelse(input$servicio=="Troncal","red","blue")
    amTimeSeries(cons11, "FechaDeIdent", list(c("Multas_low", "Multas", "Multas_up")),
                 color = color_t, bullet = c("round"), export = TRUE)
  })
  # Counts by area and month, one column per area.
  output$vizu_1 <- renderDataTable({
    cons8=data_viz %>% filter(Servicio==input$servicio) %>% group_by(Area,mes) %>%
      summarise(conteo=as.numeric(length(DateKey))) %>% spread(Area,conteo)
    datatable(cons8)
  })
  # Counts by stage (Etapa) and area, one column per area.
  output$vizu_2 <- renderDataTable({
    cons10=data_viz %>% filter(Servicio==input$servicio) %>% group_by(Etapa,Area) %>%
      summarise(conteo=as.numeric(length(DateKey))) %>% spread(Area,conteo)
    datatable(cons10)
  })
  # Top-5 routes by count for the selected service.
  output$ranking_1 <- renderAmCharts({
    cons4=data_viz %>% filter(Servicio==input$servicio) %>% group_by(Ruta) %>%
      summarise(conteo=as.numeric(length(DateKey)))
    cons4 <- cons4[with(cons4, order(-cons4$conteo)), ] # descending order
    amBarplot(x = "Ruta", y = "conteo", data = cons4[1:5,], depth = 15, labelRotation = -90,
              show_values = TRUE, export = TRUE)
    #datatable(head(cons4))
  })
  # Top-5 infraction types.
  # NOTE(review): filters Servicio=="Zonal" (hard-coded) instead of
  # input$servicio like the other outputs — confirm whether intentional.
  output$ranking_2 <- renderAmCharts({
    cons6=data_viz %>% filter(Servicio=="Zonal") %>% group_by(Infraccion) %>%
      summarise(conteo=as.numeric(length(DateKey)))
    cons6 <- cons6[with(cons6, order(-cons6$conteo)), ] # descending order
    amBarplot(x = "Infraccion", y = "conteo", data = cons6[1:5,], depth = 15, labelRotation = -90,
              show_values = TRUE, export = TRUE)
    #datatable(head(cons4))
  })
})
|
/Visualización/server.R
|
no_license
|
Michaelmacm94/prueba
|
R
| false
| false
| 2,940
|
r
|
library(shiny)
# Shiny server: infraction pie/table views plus data_viz time series and
# rankings. Relies on objects created elsewhere (ui.R / global.R):
# `infracciones`, `data_viz`, `cons1`, and the rAmCharts/DT/dplyr/tidyr
# functions used below. NOTE(review): none are defined in this file — confirm.
shinyServer(function(input, output) {
  # Placeholder reactive; the body is commented out so it returns NULL.
  tabla_1 <- reactive({
    #Positivos=Estado_V() %>% mutate(contar=1) %>%
    # group_by(Estado=PCR1) %>% summarise(PCR=sum(contar))
  })
  # Infractions ("Infracciones") ----
  # Pie of infraction values for the area chosen in input$area.
  output$pie_plot <- renderAmCharts({
    cons_pie=infracciones %>% filter(Responsable==input$area) %>% group_by(label=Valor) %>%
      summarise(value=as.numeric(length(Codigo))) %>% data.frame() %>% mutate(label=as.character(label))
    amPie(data = cons_pie, legend = TRUE, legendPosition = "left",depth = 20, export = TRUE)
  })
  output$infr_1 <- renderDataTable({
    datatable(cons1)
  })
  # Infraction counts per responsible area / code, sorted descending.
  output$infr_2 <- renderDataTable({
    cons2=infracciones %>% filter(Responsable==input$area) %>% group_by(Responsable,Codigo_M) %>%
      summarise(Conteo=length(Codigo))
    cons2 <- cons2[with(cons2, order(-cons2$Conteo)), ] # descending order
    datatable(cons2)
  })
  #data_viz----
  # Fines per identification date for the selected service, with a +/-2.5 band.
  output$serie_1 <- renderAmCharts({
    cons11=data_viz %>% filter(Servicio==input$servicio) %>% group_by(FechaDeIdent) %>%
      summarise(Multas=as.numeric(length(DateKey)))
    # NOTE(review): stray base-graphics plot() inside renderAmCharts (and
    # type="line" is not a valid base type; "l" is). It does not feed the
    # amTimeSeries output — confirm it can be removed.
    plot(cons11$FechaDeIdent,cons11$Multas,type="line")
    cons11$FechaDeIdent=as.POSIXct(cons11$FechaDeIdent)
    cons11$Multas_low <- cons11$Multas-2.5
    cons11$Multas_up <- cons11$Multas+2.5
    color_t=ifelse(input$servicio=="Troncal","red","blue")
    amTimeSeries(cons11, "FechaDeIdent", list(c("Multas_low", "Multas", "Multas_up")),
                 color = color_t, bullet = c("round"), export = TRUE)
  })
  # Counts by area and month, one column per area.
  output$vizu_1 <- renderDataTable({
    cons8=data_viz %>% filter(Servicio==input$servicio) %>% group_by(Area,mes) %>%
      summarise(conteo=as.numeric(length(DateKey))) %>% spread(Area,conteo)
    datatable(cons8)
  })
  # Counts by stage (Etapa) and area, one column per area.
  output$vizu_2 <- renderDataTable({
    cons10=data_viz %>% filter(Servicio==input$servicio) %>% group_by(Etapa,Area) %>%
      summarise(conteo=as.numeric(length(DateKey))) %>% spread(Area,conteo)
    datatable(cons10)
  })
  # Top-5 routes by count for the selected service.
  output$ranking_1 <- renderAmCharts({
    cons4=data_viz %>% filter(Servicio==input$servicio) %>% group_by(Ruta) %>%
      summarise(conteo=as.numeric(length(DateKey)))
    cons4 <- cons4[with(cons4, order(-cons4$conteo)), ] # descending order
    amBarplot(x = "Ruta", y = "conteo", data = cons4[1:5,], depth = 15, labelRotation = -90,
              show_values = TRUE, export = TRUE)
    #datatable(head(cons4))
  })
  # Top-5 infraction types.
  # NOTE(review): filters Servicio=="Zonal" (hard-coded) instead of
  # input$servicio like the other outputs — confirm whether intentional.
  output$ranking_2 <- renderAmCharts({
    cons6=data_viz %>% filter(Servicio=="Zonal") %>% group_by(Infraccion) %>%
      summarise(conteo=as.numeric(length(DateKey)))
    cons6 <- cons6[with(cons6, order(-cons6$conteo)), ] # descending order
    amBarplot(x = "Infraccion", y = "conteo", data = cons6[1:5,], depth = 15, labelRotation = -90,
              show_values = TRUE, export = TRUE)
    #datatable(head(cons4))
  })
})
|
#' Run the requested estimation models (EMs) for a simulated case.
#'
#' Creates one output directory per EM under <casedir>/output, then dispatches
#' to run_amak/run_asap/run_bam/run_ss/run_mas for each model in `em_names`.
#'
#' @param em_names Character vector of EM names (any of "AMAK", "ASAP",
#'   "BAM", "SS", "MAS").
#' @param input_list List of run settings; must contain maindir, om_sim_num,
#'   case_name, em_bias_cor, and initial_equilibrium_F.
#' @param em_input_filenames List of per-EM input file names, keyed by EM name.
#' @export
run_em <- function(em_names=NULL, input_list=NULL, em_input_filenames=NULL){
  if (is.null(em_names)) stop ("Missing EM information!")
  if (is.null(input_list)) stop ("Missing run settings (input_list)!")
  # Unpack settings before any use. BUGFIX: the original evaluated
  # file.path(maindir, "em_input") before maindir was assigned from
  # input_list, so it silently resolved to a global variable of the same
  # name (or errored if none existed).
  maindir <- input_list$maindir
  om_sim_num <- input_list$om_sim_num
  case_name <- input_list$case_name
  casedir <- file.path(maindir, case_name)
  em_bias_cor <- input_list$em_bias_cor
  initial_equilibrium_F <- input_list$initial_equilibrium_F
  if (!file.exists(file.path(maindir, "em_input"))) stop ("Missing estimation model input file!")
  # One output subdirectory per requested EM.
  invisible(sapply(em_names, function(x) {
    if (!file.exists(file.path(casedir, "output", x))) dir.create(file.path(casedir, "output", x))
  }))
  if("AMAK" %in% em_names) run_amak(maindir=maindir, om_sim_num=om_sim_num, casedir=casedir, input_filename=em_input_filenames$AMAK)
  if("ASAP" %in% em_names) run_asap(maindir=maindir, om_sim_num=om_sim_num, casedir=casedir, input_filename=em_input_filenames$ASAP)
  if("BAM" %in% em_names) run_bam(maindir=maindir, om_sim_num=om_sim_num, casedir=casedir, em_bias_cor=em_bias_cor, input_filename=em_input_filenames$BAM)
  if("SS" %in% em_names) run_ss(maindir=maindir, om_sim_num=om_sim_num, casedir=casedir, em_bias_cor=em_bias_cor, input_filename=em_input_filenames$SS, initial_equilibrium_F=initial_equilibrium_F)
  if("MAS" %in% em_names) run_mas(maindir=maindir, om_sim_num=om_sim_num, casedir=casedir)
}
|
/R/run_em.R
|
no_license
|
JonBrodziak/Age_Structured_Stock_Assessment_Model_Comparison
|
R
| false
| false
| 1,356
|
r
|
#' Run the requested estimation models (EMs) for a simulated case.
#'
#' Creates one output directory per EM under <casedir>/output, then dispatches
#' to run_amak/run_asap/run_bam/run_ss/run_mas for each model in `em_names`.
#'
#' @param em_names Character vector of EM names (any of "AMAK", "ASAP",
#'   "BAM", "SS", "MAS").
#' @param input_list List of run settings; must contain maindir, om_sim_num,
#'   case_name, em_bias_cor, and initial_equilibrium_F.
#' @param em_input_filenames List of per-EM input file names, keyed by EM name.
#' @export
run_em <- function(em_names=NULL, input_list=NULL, em_input_filenames=NULL){
  if (is.null(em_names)) stop ("Missing EM information!")
  if (is.null(input_list)) stop ("Missing run settings (input_list)!")
  # Unpack settings before any use. BUGFIX: the original evaluated
  # file.path(maindir, "em_input") before maindir was assigned from
  # input_list, so it silently resolved to a global variable of the same
  # name (or errored if none existed).
  maindir <- input_list$maindir
  om_sim_num <- input_list$om_sim_num
  case_name <- input_list$case_name
  casedir <- file.path(maindir, case_name)
  em_bias_cor <- input_list$em_bias_cor
  initial_equilibrium_F <- input_list$initial_equilibrium_F
  if (!file.exists(file.path(maindir, "em_input"))) stop ("Missing estimation model input file!")
  # One output subdirectory per requested EM.
  invisible(sapply(em_names, function(x) {
    if (!file.exists(file.path(casedir, "output", x))) dir.create(file.path(casedir, "output", x))
  }))
  if("AMAK" %in% em_names) run_amak(maindir=maindir, om_sim_num=om_sim_num, casedir=casedir, input_filename=em_input_filenames$AMAK)
  if("ASAP" %in% em_names) run_asap(maindir=maindir, om_sim_num=om_sim_num, casedir=casedir, input_filename=em_input_filenames$ASAP)
  if("BAM" %in% em_names) run_bam(maindir=maindir, om_sim_num=om_sim_num, casedir=casedir, em_bias_cor=em_bias_cor, input_filename=em_input_filenames$BAM)
  if("SS" %in% em_names) run_ss(maindir=maindir, om_sim_num=om_sim_num, casedir=casedir, em_bias_cor=em_bias_cor, input_filename=em_input_filenames$SS, initial_equilibrium_F=initial_equilibrium_F)
  if("MAS" %in% em_names) run_mas(maindir=maindir, om_sim_num=om_sim_num, casedir=casedir)
}
|
library(dplyr)
library(ggplot2)
library(cowplot)
library(corrplot)
library("MASS")
library(car)
library(caret)
library(Information)
library(ROCR)
# read all data into R
# HR attrition case study: merge survey, general, and badge in/out-time data.
emp_survey <- read.csv("C:\\IIITB\\HR Analytics Case Study\\PA-I_Case_Study_HR_Analytics\\employee_survey_data.csv")
gen_data <- read.csv("C:\\IIITB\\HR Analytics Case Study\\PA-I_Case_Study_HR_Analytics\\general_data.csv")
in_time <- read.csv("C:\\IIITB\\HR Analytics Case Study\\PA-I_Case_Study_HR_Analytics\\in_time.csv", stringsAsFactors=F,header=F)
mgr_survey <- read.csv("C:\\IIITB\\HR Analytics Case Study\\PA-I_Case_Study_HR_Analytics\\manager_survey_data.csv")
out_time <- read.csv("C:\\IIITB\\HR Analytics Case Study\\PA-I_Case_Study_HR_Analytics\\out_time.csv", stringsAsFactors=F,header=F)
# add IN to dates of first row for in_time and OUT to first row of out_time
# (so the two tables get distinct column names after the header promotion below)
in_char <- "IN"
in_time[1,] <- sapply(in_time[1,], function(x) x <- paste(x,in_char,sep="_"))
out_char <- "OUT"
out_time[1,] <- sapply(out_time[1,], function(x) x <- paste(x,out_char,sep="_"))
# make first row as table columns for in_time and out_time
colnames(in_time) <- in_time[1,]
in_time <- in_time[-1,]
colnames(out_time) <- out_time[1,]
out_time <- out_time[-1,]
# in_time and out_time: assumption first column is EmployeeId
# assign coumnname 'EmployeeID' to first column for in_time and out_time dataframe
# number of unique values in 'EmployeeId column' for both dataframes is 4410
colnames(in_time)[1] <- 'EmployeeID'
colnames(out_time)[1] <- 'EmployeeID'
setdiff(in_time$EmployeeID,out_time$EmployeeID)
# find and remove all IN_TIME and OUT_TIME columns which have all values as NA
in_time_na <- as.data.frame(sapply(in_time, function(x) sum(is.na(x))))
na_cols_in_time <- which(in_time_na == 4410)
in_time <- in_time[,-na_cols_in_time]
out_time_na <- as.data.frame(sapply(out_time, function(x) sum(is.na(x))))
na_cols_out_time <- which(out_time_na == 4410)
out_time <- out_time[,-na_cols_out_time]
# Per-employee mean daily working hours: out - in for each date column.
diff_hours <- as.numeric(in_time$EmployeeID)
# NOTE(review): the bound 2:250 hard-codes 249 date columns remaining after
# the all-NA columns were dropped — verify against the actual data.
for (i in 2:250){
  act_workHours <- as.numeric(difftime(strptime(out_time[,i],"%Y-%m-%d %H:%M:%S"),
                                       strptime(in_time[,i],"%Y-%m-%d %H:%M:%S")))
  diff_hours <- cbind(diff_hours,act_workHours)
}
diff_hours <- as.data.frame(diff_hours)
colnames(diff_hours)[1] <- 'EmployeeID'
diff_hours$ActualWorkingHours <- apply(diff_hours[,-1],1,function(x) mean(x,na.rm=TRUE))
actual_workHours <- diff_hours[,c('EmployeeID','ActualWorkingHours')]
# notice number of rows in EmployeeID column for dataframes - 4410.
length(unique(emp_survey$EmployeeID)) # confirm EmployeeID can be a key to merge different dataframe
length(unique(gen_data$EmployeeID)) # confirm EmployeeID can be a key to merge different dataframe
length(unique(mgr_survey$EmployeeID)) # confirm EmployeeID can be a key to merge different dataframe
length(unique(in_time$EmployeeID)) # confirm EmployeeID can be a key to merge different dataframe
length(unique(out_time$EmployeeID)) # confirm EmployeeID can be a key to merge different dataframe
# check if all values of employeeID are same inall dataframes
setdiff(emp_survey$EmployeeID,gen_data$EmployeeID) # Identical EmployeeID across these datasets
setdiff(gen_data$EmployeeID,in_time$EmployeeID) # Identical customerID across these datasets
setdiff(in_time$EmployeeID,mgr_survey$EmployeeID) # Identical customerID across these datasets
setdiff(mgr_survey$EmployeeID,out_time$EmployeeID) # Identical customerID across these datasets
# merge into single dataframe, joined by EmployeeID values.
emp_ef <- merge(emp_survey,gen_data,by="EmployeeID", all = F)
emp_ef <- merge(emp_ef,mgr_survey,by="EmployeeID", all = F)
# emp_ef <- merge(emp_ef,in_time,by="EmployeeID", all = F)
# emp_ef <- merge(emp_ef,out_time,by="EmployeeID", all = F)
# remove EmployeeCount, Over18 and StandardHours column since they hold same value for all rows.
unique(emp_ef$EmployeeCount)
unique(emp_ef$Over18)
unique(emp_ef$StandardHours)
# NOTE(review): columns dropped by position (12, 19, 21) — fragile if merge
# order changes; confirm indices match the named columns above.
emp_ef <- emp_ef[,-c(12,19,21)]
# summary of emp_ef
summary(emp_ef)
# structure of emp_ef
str(emp_ef)
########################## Missing Value Imputation ##########################
# find columns containing NA with number of NA
sapply(emp_ef, function(x) sum(is.na(x)))
# number of rows removed .03 % of total observations (4410)
# NOTE(review): the percentage quoted above may be misstated — verify against
# the actual NA row count reported by the sapply() call.
emp_no_na <- na.omit(emp_ef)
# Recode the Attrition factor levels to 0/1, then convert to numeric for WOE/IV.
levels(emp_no_na$Attrition) <-c(0,1)
emp_no_na$Attrition <- as.numeric(levels(emp_no_na$Attrition))[emp_no_na$Attrition]
# Information Value table for all predictors vs Attrition (Information pkg).
IV <- create_infotables(emp_no_na[,-1], y="Attrition", bins=10, parallel=FALSE)
# custom function to compute WoE
# compute total_good for all '1' values and
# total_bad for '0' values
# Weight of Evidence for one bin: log-odds of the bin's event/non-event counts
# relative to the overall counts in the (module-level) emp_no_na data frame.
# local_good / local_bad: event (Attrition == 1) and non-event counts in the bin.
computeWoE <- function(local_good, local_bad){
  attr_flags <- emp_no_na$Attrition
  total_good <- sum(attr_flags == 1)
  total_bad <- sum(attr_flags == 0)
  log(local_good / total_good) - log(local_bad / total_bad)
}
######################### outliner treatment ##############################
# outliner check for MonthlyIncome
quantile(emp_no_na$MonthlyIncome,seq(0,1,.01))
# jump at 90% to 91%, replacing all greater than 137756.0 with 137756.0
emp_no_na$MonthlyIncome[which(emp_no_na$MonthlyIncome>137756.0)] <- 137756.0
# binning values of Totalworkingyears based on WOE
#TotalWorkingYears N Percent WOE IV
#1 [0,2] 363 0.08441860 1.3969494 0.2405392
#2 [3,4] 308 0.07162791 0.2880417 0.2470738
#3 [5,5] 255 0.05930233 0.1587747 0.2486502
#4 [6,7] 602 0.14000000 0.1811905 0.2535323
#5 [8,9] 577 0.13418605 -0.2703599 0.2624687
#6 [10,12] 837 0.19465116 -0.2422809 0.2729815
#7 [13,16] 423 0.09837209 -0.4820665 0.2923153
#8 [17,22] 487 0.11325581 -0.6384822 0.3292575
#9 [23,40] 448 0.10418605 -0.7039883 0.3696231
emp_no_na$TotalWorkingYears <- as.factor(emp_no_na$TotalWorkingYears)
# for coarse classing, compute WOE for 5,6 and 7 values
TotalWorkingYears_567 <- emp_no_na[which(emp_no_na$TotalWorkingYears==6 | emp_no_na$TotalWorkingYears==7 |
                                           emp_no_na$TotalWorkingYears==5 ),c(20,6)]
loc_good <- length(TotalWorkingYears_567$Attrition[which(TotalWorkingYears_567$Attrition==1)])
loc_bad <- length(TotalWorkingYears_567$Attrition[which(TotalWorkingYears_567$Attrition==0)])
combined_woe <- computeWoE(loc_good,loc_bad) # .18
# NOTE(review): as.numeric() on a factor returns the 1-based level codes, not
# the original year values, so the >= / <= comparisons below may be shifted
# by one relative to the WOE table — verify the bins line up.
emp_no_na$TotalWorkingYears <- as.numeric(emp_no_na$TotalWorkingYears)
emp_no_na$TotalWorkingYears[which(emp_no_na$TotalWorkingYears>=0 & emp_no_na$TotalWorkingYears<=2)] <- '0-2'
emp_no_na$TotalWorkingYears[which(emp_no_na$TotalWorkingYears>=3 & emp_no_na$TotalWorkingYears<=4)] <- '3-4'
emp_no_na$TotalWorkingYears[which(emp_no_na$TotalWorkingYears>=5 & emp_no_na$TotalWorkingYears<=7)] <- '5-7'
emp_no_na$TotalWorkingYears[which(emp_no_na$TotalWorkingYears>=8 & emp_no_na$TotalWorkingYears<=9)] <- '8-9'
emp_no_na$TotalWorkingYears[which(emp_no_na$TotalWorkingYears>=10 & emp_no_na$TotalWorkingYears<=12)] <- '10-12'
emp_no_na$TotalWorkingYears[which(emp_no_na$TotalWorkingYears>=13 & emp_no_na$TotalWorkingYears<=16)] <- '13-16'
emp_no_na$TotalWorkingYears[which(emp_no_na$TotalWorkingYears>=17 & emp_no_na$TotalWorkingYears<=22)] <- '17-22'
# replace all values greater than 23 years with 23+ years
emp_no_na$TotalWorkingYears[which(emp_no_na$TotalWorkingYears>=23)] <- '23+'
# WOE/IV table for YearsAtCompany (fine classing from create_infotables):
#YearsAtCompany N Percent WOE IV
#1 [0,0] 126 0.02930233 1.092779966 0.04807756
#2 [1,1] 499 0.11604651 1.030207662 0.21489228
#3 [2,2] 369 0.08581395 0.345732134 0.22637931
#4 [3,4] 700 0.16279070 0.009043863 0.22639267
#5 [5,6] 799 0.18581395 -0.468968920 0.26111506
#6 [7,8] 498 0.11581395 -0.360796765 0.27442241
#7 [9,9] 234 0.05441860 -0.570198713 0.28892615
#8 [10,14] 610 0.14186047 -0.380870490 0.30696276
#9 [15,40] 465 0.10813953 -0.663537357 0.34472160
# Coarse Classing: Category for 0,1 and 5,6,7 and 8 to be combined.
# Category 9,10,11,12,13,14 to be combined.
# After coarse classing, WOE trend is monotonic
emp_no_na$YearsAtCompany <- as.numeric(emp_no_na$YearsAtCompany)
# check quantile distribution for YearsAtCompany
quantile(emp_no_na$YearsAtCompany,seq(0,1,.01))
# for coarse classing, compute WOE 0,1
YearsAtCompany_01 <- emp_no_na[which(emp_no_na$YearsAtCompany==0 | emp_no_na$YearsAtCompany==1),c(22,6)]
loc_good <- length(YearsAtCompany_01$Attrition[which(YearsAtCompany_01$Attrition==1)])
loc_bad <- length(YearsAtCompany_01$Attrition[which(YearsAtCompany_01$Attrition==0)])
combined_woe <- computeWoE(loc_good,loc_bad) # 1.04
# for coarse classing, compute WOE 7 till 14
YearsAtCompany_5678 <- emp_no_na[which(emp_no_na$YearsAtCompany>=7 & emp_no_na$YearsAtCompany<=14 ),c(22,6)]
loc_good <- length(YearsAtCompany_5678$Attrition[which(YearsAtCompany_5678$Attrition==1)])
loc_bad <- length(YearsAtCompany_5678$Attrition[which(YearsAtCompany_5678$Attrition==0)])
combined_woe <- computeWoE(loc_good,loc_bad) # -.42
# Snapshot the numeric values first so later character assignments do not
# disturb the remaining range comparisons.
temp_yrs <- emp_no_na$YearsAtCompany
emp_no_na$YearsAtCompany[which(temp_yrs>=0 & temp_yrs<=1)] <- '0-1'
emp_no_na$YearsAtCompany[which(temp_yrs>=3 & temp_yrs<=4)] <- '3-4'
emp_no_na$YearsAtCompany[which(temp_yrs>=5 & temp_yrs<=6)] <- '5-6'
emp_no_na$YearsAtCompany[which(temp_yrs>=7 & temp_yrs<=14)] <- '7-14'
# replace all values greater than 15 years with 15+ years
emp_no_na$YearsAtCompany[which(temp_yrs>=15)] <- '15+'
# check quantile distribution for YearsSinceLastPromotion
emp_no_na$YearsSinceLastPromotion <- as.numeric((emp_no_na$YearsSinceLastPromotion))
quantile(emp_no_na$YearsSinceLastPromotion,seq(0,1,.01))
# binning values of YearsSinceLastPromotion
#YearsSinceLastPromotion N Percent WOE IV
#1 [0,0] 1697 0.39465116 0.186823701 0.01465859
#2 [1,1] 1050 0.24418605 -0.193060802 0.02317709
#3 [2,3] 618 0.14372093 0.071279673 0.02392502
#4 [4,6] 400 0.09302326 -0.579151108 0.04942133
#5 [7,15] 535 0.12441860 -0.006510387 0.04942660
# for coarse classing, compute WOE 1 to 3 for binning
YearsSinceLastPromotion_123 <- emp_no_na[which(emp_no_na$YearsSinceLastPromotion>=1 & emp_no_na$YearsSinceLastPromotion<=3),c(23,6)]
loc_good <- length(YearsSinceLastPromotion_123$Attrition[which(YearsSinceLastPromotion_123$Attrition==1)])
loc_bad <- length(YearsSinceLastPromotion_123$Attrition[which(YearsSinceLastPromotion_123$Attrition==0)])
combined_woe <- computeWoE(loc_good,loc_bad) # -.09
# for coarse classing, compute WOE 4 to 15 for binning
YearsSinceLastPromotion_4_15 <- emp_no_na[which(emp_no_na$YearsSinceLastPromotion>=4 & emp_no_na$YearsSinceLastPromotion<=15),c(23,6)]
loc_good <- length(YearsSinceLastPromotion_4_15$Attrition[which(YearsSinceLastPromotion_4_15$Attrition==1)])
loc_bad <- length(YearsSinceLastPromotion_4_15$Attrition[which(YearsSinceLastPromotion_4_15$Attrition==0)])
combined_woe <- computeWoE(loc_good,loc_bad) # -.22
# Snapshot numeric values before overwriting with bin labels.
temp_yrsPromotion <- emp_no_na$YearsSinceLastPromotion
emp_no_na$YearsSinceLastPromotion[which(temp_yrsPromotion>=1 & temp_yrsPromotion<=3)] <- '1-3'
# replace all values greater than 11 years with 4+ years
emp_no_na$YearsSinceLastPromotion[which(temp_yrsPromotion>=4)] <- '4+'
# check quantile distribution for YearsWithCurrManager
emp_no_na$YearsWithCurrManager <- as.numeric(emp_no_na$YearsWithCurrManager)
quantile(emp_no_na$YearsWithCurrManager,seq(0,1,.01))
# WOE/IV table for YearsWithCurrManager:
#YearsWithCurrManager N Percent WOE IV
#1 [0,0] 760 0.17674419 0.9272485 0.2007732
#2 [1,1] 222 0.05162791 -0.1351230 0.2016733
#3 [2,2] 1009 0.23465116 -0.1306429 0.2055035
#4 [3,3] 419 0.09744186 -0.2436555 0.2108235
#5 [4,6] 465 0.10813953 -0.3626588 0.2233694
#6 [7,8] 943 0.21930233 -0.2603369 0.2369589
#7 [9,17] 482 0.11209302 -0.8706348 0.2995737
# for coarse classing, combine 1 and 2 to make WOE trend monotonic
YearsWithCurrManager_12 <- emp_no_na[which(emp_no_na$YearsWithCurrManager==1 |
                                             emp_no_na$YearsWithCurrManager==2),c(24,6)]
loc_good <- length(YearsWithCurrManager_12$Attrition[which(YearsWithCurrManager_12$Attrition==1)])
loc_bad <- length(YearsWithCurrManager_12$Attrition[which(YearsWithCurrManager_12$Attrition==0)])
combined_woe <- computeWoE(loc_good,loc_bad) # -.13
YearsWithCurrManager_4_8 <- emp_no_na[which(emp_no_na$YearsWithCurrManager>=4 & emp_no_na$YearsWithCurrManager<=8),c(24,6)]
loc_good <- length(YearsWithCurrManager_4_8$Attrition[which(YearsWithCurrManager_4_8$Attrition==1)])
loc_bad <- length(YearsWithCurrManager_4_8$Attrition[which(YearsWithCurrManager_4_8$Attrition==0)])
combined_woe <- computeWoE(loc_good,loc_bad) # -.29
# binning values of YearsWithCurrManager as per WOE
# 1&2 to be combined and 4-8 to be combined
temp_yrsCurMgr <- emp_no_na$YearsWithCurrManager
emp_no_na$YearsWithCurrManager[which(temp_yrsCurMgr>=1 & temp_yrsCurMgr<=2)] <- '1-2'
emp_no_na$YearsWithCurrManager[which(temp_yrsCurMgr>=4 & temp_yrsCurMgr<=8)] <- '4-8'
# replace all values greater than 9 years with 9+ years
emp_no_na$YearsWithCurrManager[which(temp_yrsCurMgr>=9)] <- '9+'
# check quantile distribution for PercentSalaryHike
emp_no_na$PercentSalaryHike <- as.numeric(emp_no_na$PercentSalaryHike)
quantile(emp_no_na$PercentSalaryHike,seq(0,1,.01))
#PercentSalaryHike N Percent WOE IV
#1 [11,11] 616 0.14325581 -0.11932634 0.001958391
#2 [12,12] 577 0.13418605 -0.09593163 0.003153576
#3 [13,13] 616 0.14325581 -0.01884256 0.003204114
#4 [14,14] 583 0.13558140 -0.10807753 0.004730500
#5 [15,16] 526 0.12232558 0.08167868 0.005569300
#6 [17,18] 496 0.11534884 0.01233584 0.005586927
#7 [19,20] 382 0.08883721 0.09828518 0.006473875
#8 [21,25] 504 0.11720930 0.19924622 0.011445736
# for coarse classing, combine 13 and 14 to make WOE tend monotonic
PercentSalaryHike_13_14 <- emp_no_na[which(emp_no_na$PercentSalaryHike==13 |
emp_no_na$PercentSalaryHike==14),c(18,6)]
loc_good <- length(PercentSalaryHike_13_14$Attrition[which(PercentSalaryHike_13_14$Attrition==1)])
loc_bad <- length(PercentSalaryHike_13_14$Attrition[which(PercentSalaryHike_13_14$Attrition==0)])
combined_woe <- computeWoE(loc_good,loc_bad) # -.06
# for coarse classing, comvine 15 till 18 ro make WOE tend monotonic
PercentSalaryHike_15_18 <- emp_no_na[which(emp_no_na$PercentSalaryHike>=15 & emp_no_na$PercentSalaryHike<=18),c(18,6)]
loc_good <- length(PercentSalaryHike_15_18$Attrition[which(PercentSalaryHike_15_18$Attrition==1)])
loc_bad <- length(PercentSalaryHike_15_18$Attrition[which(PercentSalaryHike_15_18$Attrition==0)])
combined_woe <- computeWoE(loc_good,loc_bad) # .05
# binning values of PercentSalaryHike
temp_perHike <- emp_no_na$PercentSalaryHike
emp_no_na$PercentSalaryHike[which(temp_perHike>=13 & temp_perHike<=14)] <- '13-14'
emp_no_na$PercentSalaryHike[which(temp_perHike>=15 & temp_perHike<=18)] <- '15-18'
emp_no_na$PercentSalaryHike[which(temp_perHike>=19 & temp_perHike<=20)] <- '19-20'
# replace all values greater than 21 years with 21+
emp_no_na$PercentSalaryHike[which(temp_perHike>=21)] <- '21'
# Cast DistanceFromHome back to numeric and inspect its percentile distribution.
emp_no_na$DistanceFromHome <- as.numeric(emp_no_na$DistanceFromHome)
quantile(emp_no_na$DistanceFromHome,seq(0,1,.01))
# Fine-bin WOE/IV table for DistanceFromHome (pasted as a comment for reference).
#DistanceFromHome N Percent WOE IV
#1 [1,1] 612 0.14232558 -0.07313919 0.000742638
#2 [2,2] 614 0.14279070 0.15694692 0.004449140
#3 [3,4] 428 0.09953488 -0.18709400 0.007716877
#4 [5,6] 358 0.08325581 -0.14885691 0.009470145
#5 [7,8] 481 0.11186047 0.03423041 0.009602737
#6 [9,10] 507 0.11790698 0.15289872 0.012503600
#7 [11,16] 433 0.10069767 0.08312033 0.013219029
#8 [17,22] 382 0.08883721 0.13406852 0.014889083
#9 [23,29] 485 0.11279070 -0.27407259 0.022598307
# Coarse classing: combine distances 11 through 29 so the WOE trend becomes monotonic.
# NOTE(review): c(9,6) assumes column 9 = DistanceFromHome and column 6 =
# Attrition in emp_no_na -- confirm against the column order set up earlier.
DistanceFromHome_11_29 <- emp_no_na[which(emp_no_na$DistanceFromHome>=11 & emp_no_na$DistanceFromHome<=29),c(9,6)]
loc_good <- length(DistanceFromHome_11_29$Attrition[which(DistanceFromHome_11_29$Attrition==1)])
loc_bad <- length(DistanceFromHome_11_29$Attrition[which(DistanceFromHome_11_29$Attrition==0)])
combined_woe <- computeWoE(loc_good,loc_bad) # -.02
# Coarse classing: combine distances 3 through 10 so the WOE trend becomes monotonic.
DistanceFromHome_3_10 <- emp_no_na[which(emp_no_na$DistanceFromHome>=3 & emp_no_na$DistanceFromHome<=10),c(9,6)]
loc_good <- length(DistanceFromHome_3_10$Attrition[which(DistanceFromHome_3_10$Attrition==1)])
loc_bad <- length(DistanceFromHome_3_10$Attrition[which(DistanceFromHome_3_10$Attrition==0)])
combined_woe <- computeWoE(loc_good,loc_bad) # -.02
# Coarse classing: combine distances 1 and 2 so the WOE trend becomes monotonic.
DistanceFromHome_12 <- emp_no_na[which(emp_no_na$DistanceFromHome>=1 & emp_no_na$DistanceFromHome<=2),c(9,6)]
loc_good <- length(DistanceFromHome_12$Attrition[which(DistanceFromHome_12$Attrition==1)])
loc_bad <- length(DistanceFromHome_12$Attrition[which(DistanceFromHome_12$Attrition==0)])
combined_woe <- computeWoE(loc_good,loc_bad) # .05
# Overwrite DistanceFromHome with bin labels; temp_dist keeps the original values.
temp_dist <- emp_no_na$DistanceFromHome
emp_no_na$DistanceFromHome[which(temp_dist>=1 & temp_dist<=2)] <- '1-2'
emp_no_na$DistanceFromHome[which(temp_dist>=3 & temp_dist<=10)] <- '3-10'
# Collapse all distances of 11 or more into the open-ended '11+' bin
# (the original comment said "greater than 20", which did not match the code).
emp_no_na$DistanceFromHome[which(temp_dist>=11)] <- '11+'
# Cast Age back to numeric and inspect its distribution (the original comment
# mislabelled this section as DistanceFromHome).
emp_no_na$Age <- as.numeric(emp_no_na$Age)
quantile(emp_no_na$Age,seq(0,1,.01))
boxplot(emp_no_na$Age)
# Fine-bin WOE/IV table for Age (pasted as a comment for reference).
#Age N Percent WOE IV
#1 [18,25] 363 0.08441860 1.0626612 0.1300888
#2 [26,28] 393 0.09139535 0.2976112 0.1390172
#3 [29,30] 374 0.08697674 0.3286377 0.1494804
#4 [31,33] 551 0.12813953 0.3992264 0.1727371
#5 [34,35] 455 0.10581395 -0.3799950 0.1861330
#6 [36,37] 347 0.08069767 -0.5414899 0.2057278
#7 [38,40] 457 0.10627907 -0.7257546 0.2491533
#8 [41,44] 439 0.10209302 -0.4513413 0.2669342
#9 [45,49] 415 0.09651163 -0.6484938 0.2992945
#10 [50,60] 506 0.11767442 -0.1996615 0.3036751
# Coarse classing: combine ages 26 through 33 so the WOE trend becomes monotonic.
# NOTE(review): c(5,6) assumes column 5 = Age and column 6 = Attrition -- confirm.
Age_26_33 <- emp_no_na[which(emp_no_na$Age>=26 & emp_no_na$Age<=33),c(5,6)]
loc_good <- length(Age_26_33$Attrition[which(Age_26_33$Attrition==1)])
loc_bad <- length(Age_26_33$Attrition[which(Age_26_33$Attrition==0)])
combined_woe <- computeWoE(loc_good,loc_bad) # .35
# Coarse classing: combine ages 34 through 37 so the WOE trend becomes monotonic.
Age_ <- emp_no_na[which(emp_no_na$Age>=34 & emp_no_na$Age<=37),c(5,6)]
loc_good <- length(Age_$Attrition[which(Age_$Attrition==1)])
loc_bad <- length(Age_$Attrition[which(Age_$Attrition==0)])
combined_woe <- computeWoE(loc_good,loc_bad) # -.45
# Coarse classing: combine ages 38 through 60 (the original comment said
# "38 till 50", which did not match the code). The scratch variable Age_ is reused.
Age_ <- emp_no_na[which(emp_no_na$Age>=38 & emp_no_na$Age<=60),c(5,6)]
loc_good <- length(Age_$Attrition[which(Age_$Attrition==1)])
loc_bad <- length(Age_$Attrition[which(Age_$Attrition==0)])
combined_woe <- computeWoE(loc_good,loc_bad) # -.48
# Overwrite Age with bin labels; temp_age keeps the original numeric values.
temp_age <- emp_no_na$Age
emp_no_na$Age[which(temp_age>=18 & temp_age<=25)] <- '18-25'
emp_no_na$Age[which(temp_age>=26 & temp_age<=33)] <- '26-33'
emp_no_na$Age[which(temp_age>=34 & temp_age<=37)] <- '34-37'
# Collapse all ages of 38 or more into the open-ended '38+' bin.
emp_no_na$Age[which(temp_age>=38)] <- '38+'
########################## Dummy Variable Creation ############################
# Pattern used for each multi-level categorical below: cast to factor, build
# treatment-coded dummies with model.matrix(), drop the intercept column, then
# cbind the dummies onto emp_no_na while dropping the original column by
# position.  data.frame() applies make.names(), which is why level names like
# "Life Sciences" become EducationFieldLife.Sciences in the model formulas.
# NOTE(review): each negative index (e.g. [,-10]) is tied to the column order
# of emp_no_na at that exact moment, which shifts after every cbind -- verify
# these positions if any upstream column is added, removed, or reordered.
# Education -> dummies.
emp_no_na$Education <- as.factor(emp_no_na$Education)
dummy_education <- data.frame(model.matrix( ~Education, data = emp_no_na))
# Drop the intercept column of the dummy frame (avoids the dummy-variable trap).
dummy_education <- dummy_education[,-1]
# Attach the dummies; drop the original Education column (position 10).
emp_no_na <- cbind(emp_no_na[,-10], dummy_education)
# EnvironmentSatisfaction -> dummies.
emp_no_na$EnvironmentSatisfaction <- as.factor(emp_no_na$EnvironmentSatisfaction)
dummy_EnvironmentSatisfaction <- data.frame(model.matrix( ~EnvironmentSatisfaction, data = emp_no_na))
# Drop the intercept column.
dummy_EnvironmentSatisfaction <- dummy_EnvironmentSatisfaction[,-1]
# Attach the dummies; drop the original column (position 2).
emp_no_na <- cbind(emp_no_na[,-2], dummy_EnvironmentSatisfaction)
# JobSatisfaction -> dummies.
emp_no_na$JobSatisfaction <- as.factor(emp_no_na$JobSatisfaction)
dummy_JobSatisfaction <- data.frame(model.matrix( ~JobSatisfaction, data = emp_no_na))
# Drop the intercept column.
dummy_JobSatisfaction <- dummy_JobSatisfaction[,-1]
# Attach the dummies; drop the original column (position 2).
emp_no_na <- cbind(emp_no_na[,-2], dummy_JobSatisfaction)
# WorkLifeBalance -> dummies.
emp_no_na$WorkLifeBalance <- as.factor(emp_no_na$WorkLifeBalance)
dummy_WorkLifeBalance <- data.frame(model.matrix( ~WorkLifeBalance, data = emp_no_na))
# Drop the intercept column.
dummy_WorkLifeBalance <- dummy_WorkLifeBalance[,-1]
# Attach the dummies; drop the original column (position 2).
emp_no_na <- cbind(emp_no_na[,-2], dummy_WorkLifeBalance)
# BusinessTravel -> dummies.
emp_no_na$BusinessTravel <- as.factor(emp_no_na$BusinessTravel)
dummy_BusinessTravel <- data.frame(model.matrix( ~BusinessTravel, data = emp_no_na))
# Drop the intercept column.
dummy_BusinessTravel <- dummy_BusinessTravel[,-1]
# Attach the dummies; drop the original column (position 4).
emp_no_na <- cbind(emp_no_na[,-4], dummy_BusinessTravel)
# Department -> dummies.
emp_no_na$Department <- as.factor(emp_no_na$Department)
dummy_Department <- data.frame(model.matrix( ~Department, data = emp_no_na))
# Drop the intercept column.
dummy_Department <- dummy_Department[,-1]
# Attach the dummies; drop the original column (position 4).
emp_no_na <- cbind(emp_no_na[,-4], dummy_Department)
# EducationField -> dummies.
emp_no_na$EducationField <- as.factor(emp_no_na$EducationField)
dummy_EducationField <- data.frame(model.matrix( ~EducationField, data = emp_no_na))
# Drop the intercept column.
dummy_EducationField <- dummy_EducationField[,-1]
# Attach the dummies; drop the original column (position 5).
emp_no_na <- cbind(emp_no_na[,-5], dummy_EducationField)
# Two-level variables are binary-encoded directly instead of being dummied.
# Gender: the factor levels sort alphabetically to ("Female", "Male") and are
# relabelled c(1, 0), so Female -> 1 and Male -> 0.
gender_factor <- as.factor(emp_no_na$Gender)
levels(gender_factor) <- c(1, 0)
# as.numeric(as.character(f)) is the documented equivalent of the
# as.numeric(levels(f))[f] idiom used elsewhere in this file.
emp_no_na$Gender <- as.numeric(as.character(gender_factor))
# JobLevel -> dummies (same model.matrix pattern as above).
# NOTE(review): the negative drop indices below continue to track the shifting
# column order of emp_no_na -- verify if upstream columns change.
emp_no_na$JobLevel <- as.factor(emp_no_na$JobLevel)
dummy_JobLevel <- data.frame(model.matrix( ~JobLevel, data = emp_no_na))
# Drop the intercept column.
dummy_JobLevel <- dummy_JobLevel[,-1]
# Attach the dummies; drop the original column (position 6).
emp_no_na <- cbind(emp_no_na[,-6], dummy_JobLevel)
# JobRole -> dummies.
emp_no_na$JobRole <- as.factor(emp_no_na$JobRole)
dummy_JobRole <- data.frame(model.matrix( ~JobRole, data = emp_no_na))
# Drop the intercept column.
dummy_JobRole <- dummy_JobRole[,-1]
# Attach the dummies; drop the original column (position 6).
emp_no_na <- cbind(emp_no_na[,-6], dummy_JobRole)
# MaritalStatus -> dummies.
emp_no_na$MaritalStatus <- as.factor(emp_no_na$MaritalStatus)
dummy_MaritalStatus <- data.frame(model.matrix( ~MaritalStatus, data = emp_no_na))
# Drop the intercept column.
dummy_MaritalStatus <- dummy_MaritalStatus[,-1]
# Attach the dummies; drop the original column (position 6).
emp_no_na <- cbind(emp_no_na[,-6], dummy_MaritalStatus)
# NumCompaniesWorked -> dummies.
emp_no_na$NumCompaniesWorked <- as.factor(emp_no_na$NumCompaniesWorked)
dummy_NumCompaniesWorked <- data.frame(model.matrix( ~NumCompaniesWorked, data = emp_no_na))
# Drop the intercept column.
dummy_NumCompaniesWorked <- dummy_NumCompaniesWorked[,-1]
# Attach the dummies; drop the original column (position 7).
emp_no_na <- cbind(emp_no_na[,-7], dummy_NumCompaniesWorked)
# StockOptionLevel -> dummies.
emp_no_na$StockOptionLevel <- as.factor(emp_no_na$StockOptionLevel)
dummy_StockOptionLevel <- data.frame(model.matrix( ~StockOptionLevel, data = emp_no_na))
# Drop the intercept column.
dummy_StockOptionLevel <- dummy_StockOptionLevel[,-1]
# Attach the dummies; drop the original column (position 8).
emp_no_na <- cbind(emp_no_na[,-8], dummy_StockOptionLevel)
# TrainingTimesLastYear -> dummies.
emp_no_na$TrainingTimesLastYear <- as.factor(emp_no_na$TrainingTimesLastYear)
dummy_TrainingTimesLastYear <- data.frame(model.matrix( ~TrainingTimesLastYear, data = emp_no_na))
# Drop the intercept column.
dummy_TrainingTimesLastYear <- dummy_TrainingTimesLastYear[,-1]
# Attach the dummies; drop the original column (position 9).
emp_no_na <- cbind(emp_no_na[,-9], dummy_TrainingTimesLastYear)
# JobInvolvement -> dummies.
emp_no_na$JobInvolvement <- as.factor(emp_no_na$JobInvolvement)
dummy_JobInvolvement <- data.frame(model.matrix( ~JobInvolvement, data = emp_no_na))
# Drop the intercept column.
dummy_JobInvolvement <- dummy_JobInvolvement[,-1]
# Attach the dummies; drop the original column (position 12).
emp_no_na <- cbind(emp_no_na[,-12], dummy_JobInvolvement)
# PerformanceRating is binary-encoded rather than dummied: per the original
# note it takes only the values 3 and 4.  The factor levels sort to ("3","4")
# and are relabelled c(1,0), i.e. rating 3 -> 1 and rating 4 -> 0.
# NOTE(review): this relies on exactly two distinct ratings being present --
# confirm against the raw data.
emp_no_na$PerformanceRating <- as.factor(emp_no_na$PerformanceRating)
levels(emp_no_na$PerformanceRating) <-c(1,0)
emp_no_na$PerformanceRating<- as.numeric(levels(emp_no_na$PerformanceRating))[emp_no_na$PerformanceRating]
# PercentSalaryHike (now a binned character column) -> dummies.
# NOTE(review): the negative drop indices below continue to track the shifting
# column order of emp_no_na -- verify if upstream columns change.
emp_no_na$PercentSalaryHike <- as.factor(emp_no_na$PercentSalaryHike)
dummy_PercentSalaryHike <- data.frame(model.matrix( ~PercentSalaryHike, data = emp_no_na))
# Drop the intercept column.
dummy_PercentSalaryHike <- dummy_PercentSalaryHike[,-1]
# Attach the dummies; drop the original column (position 7).
emp_no_na <- cbind(emp_no_na[,-7], dummy_PercentSalaryHike)
# TotalWorkingYears (binned) -> dummies.
emp_no_na$TotalWorkingYears <- as.factor(emp_no_na$TotalWorkingYears)
dummy_TotalWorkingYears <- data.frame(model.matrix( ~TotalWorkingYears, data = emp_no_na))
# Drop the intercept column.
dummy_TotalWorkingYears <- dummy_TotalWorkingYears[,-1]
# Attach the dummies; drop the original column (position 7).
emp_no_na <- cbind(emp_no_na[,-7], dummy_TotalWorkingYears)
# YearsAtCompany (binned) -> dummies.
emp_no_na$YearsAtCompany <- as.factor(emp_no_na$YearsAtCompany)
dummy_YearsAtCompany <- data.frame(model.matrix( ~YearsAtCompany, data = emp_no_na))
# Drop the intercept column.
dummy_YearsAtCompany <- dummy_YearsAtCompany[,-1]
# Attach the dummies; drop the original column (position 7).
emp_no_na <- cbind(emp_no_na[,-7], dummy_YearsAtCompany)
# YearsSinceLastPromotion (binned) -> dummies.
emp_no_na$YearsSinceLastPromotion <- as.factor(emp_no_na$YearsSinceLastPromotion)
dummy_YearsSinceLastPromotion <- data.frame(model.matrix( ~YearsSinceLastPromotion, data = emp_no_na))
# Drop the intercept column.
dummy_YearsSinceLastPromotion <- dummy_YearsSinceLastPromotion[,-1]
# Attach the dummies; drop the original column (position 7).
emp_no_na <- cbind(emp_no_na[,-7], dummy_YearsSinceLastPromotion)
# YearsWithCurrManager (binned) -> dummies.
emp_no_na$YearsWithCurrManager <- as.factor(emp_no_na$YearsWithCurrManager)
dummy_YearsWithCurrManager <- data.frame(model.matrix( ~YearsWithCurrManager, data = emp_no_na))
# Drop the intercept column.
dummy_YearsWithCurrManager <- dummy_YearsWithCurrManager[,-1]
# Attach the dummies; drop the original column (position 7).
emp_no_na <- cbind(emp_no_na[,-7], dummy_YearsWithCurrManager)
# DistanceFromHome (binned) -> dummies.
emp_no_na$DistanceFromHome <- as.factor(emp_no_na$DistanceFromHome)
dummy_DistanceFromHome <- data.frame(model.matrix( ~DistanceFromHome, data = emp_no_na))
# Drop the intercept column.
dummy_DistanceFromHome <- dummy_DistanceFromHome[,-1]
# Attach the dummies; drop the original column (position 4).
emp_no_na <- cbind(emp_no_na[,-4], dummy_DistanceFromHome)
# Age (binned) -> dummies.
emp_no_na$Age <- as.factor(emp_no_na$Age)
dummy_Age <- data.frame(model.matrix( ~Age, data = emp_no_na))
# Drop the intercept column.
dummy_Age <- dummy_Age[,-1]
# Attach the dummies; drop the original column (position 2).
emp_no_na <- cbind(emp_no_na[,-2], dummy_Age)
###################### Dummy Variable Creation - End ##########################
# Derive a binary overwork flag: 1 when average actual working hours exceed
# 8.5, otherwise 0 (NA rows stay NA, matching the original which() behaviour).
# Column 2 of actual_workHours is assumed to be ActualWorkingHours, as the
# original in-place two-step recode implied -- TODO confirm upstream.
# FIX: the previous recode (set >8.5 to 1, then set !=1.0 to 0) incorrectly
# left any row whose raw hours were exactly 1.0 flagged as 1; the direct
# vectorized comparison below has no such edge case.
actual_workHours[,2] <- as.numeric(actual_workHours$ActualWorkingHours > 8.5)
# Standardize MonthlyIncome.  Note scale() returns a one-column matrix, which
# R stores as a matrix-valued data frame column.
emp_no_na$MonthlyIncome <- scale(emp_no_na$MonthlyIncome)
# Final modelling frame: inner join (all = F) of the engineered features with
# the actual-working-hours flag on EmployeeID.
emp_final <- merge(emp_no_na,actual_workHours,by="EmployeeID", all = F)
# Drop the EmployeeID key column.
emp_final <- emp_final[,-1]
# Correlation matrix of the predictors.
# NOTE(review): emp_final[,-1] drops the first remaining column here -- if that
# column is Attrition, the $Attrition assignment on the next line would
# reference a missing column; confirm the intended column order.
cor_matrix_dataframe <- emp_final[,-1]
cor_matrix_dataframe$Attrition <- as.numeric(cor_matrix_dataframe$Attrition)
cor_df <- cor(cor_matrix_dataframe)
###################### Logistic Regression ############################
# Reproducible 70/30 train/test split on the final modelling frame.
set.seed(100)
indices= sample(1:nrow(emp_final), 0.7*nrow(emp_final))
train = emp_final[indices,]
test = emp_final[-(indices),]
# Model 1: full logistic regression with every predictor as the starting point.
model_1 = glm(Attrition ~ ., data = train, family = "binomial")
summary(model_1)
# Model 2: both-direction stepwise AIC selection to prune the full model.
# NOTE(review): stepAIC() is from MASS and vif() from car -- both are assumed
# to have been loaded earlier in the file.
model_2<- stepAIC(model_1, direction="both")
summary(model_2)
vif(model_2)
# Model 3: start of manual backward elimination from the stepAIC result --
# drop MaritalStatusMarried (high p-value in model_2), keeping all other terms.
model_3 <- glm(Attrition ~ PerformanceRating + Education3 + Education4 +
                 EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
                 JobSatisfaction2 + JobSatisfaction3 + JobSatisfaction4 +
                 WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
                 BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
                 DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
                 EducationFieldMedical + JobLevel2 + JobLevel5 + JobRoleManager +
                 JobRoleManufacturing.Director + JobRoleResearch.Director +
                 JobRoleSales.Representative + MaritalStatusSingle +
                 NumCompaniesWorked1 + NumCompaniesWorked4 + NumCompaniesWorked5 +
                 NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked8 +
                 NumCompaniesWorked9 + StockOptionLevel1 + TrainingTimesLastYear4 +
                 TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
                 TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
                 YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 +
                 YearsSinceLastPromotion1.3 + YearsSinceLastPromotion4. +
                 YearsWithCurrManager1.2 + YearsWithCurrManager3 + YearsWithCurrManager4.8 +
                 YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
               family = "binomial", data = train)
summary(model_3)
vif(model_3)
# Model 4: drop Education4 (high p-value in model_3).
model_4 <- glm(Attrition ~ PerformanceRating + Education3 +
                 EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
                 JobSatisfaction2 + JobSatisfaction3 + JobSatisfaction4 +
                 WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
                 BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
                 DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
                 EducationFieldMedical + JobLevel2 + JobLevel5 + JobRoleManager +
                 JobRoleManufacturing.Director + JobRoleResearch.Director +
                 JobRoleSales.Representative + MaritalStatusSingle +
                 NumCompaniesWorked1 + NumCompaniesWorked4 + NumCompaniesWorked5 +
                 NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked8 +
                 NumCompaniesWorked9 + StockOptionLevel1 + TrainingTimesLastYear4 +
                 TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
                 TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
                 YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 +
                 YearsSinceLastPromotion1.3 + YearsSinceLastPromotion4. +
                 YearsWithCurrManager1.2 + YearsWithCurrManager3 + YearsWithCurrManager4.8 +
                 YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
               family = "binomial", data = train)
summary(model_4)
vif(model_4)
# Model 5: drop Education3 (high p-value in model_4).
model_5 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
                 JobSatisfaction2 + JobSatisfaction3 + JobSatisfaction4 +
                 WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
                 BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
                 DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
                 EducationFieldMedical + JobLevel2 + JobLevel5 + JobRoleManager +
                 JobRoleManufacturing.Director + JobRoleResearch.Director +
                 JobRoleSales.Representative + MaritalStatusSingle +
                 NumCompaniesWorked1 + NumCompaniesWorked4 + NumCompaniesWorked5 +
                 NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked8 +
                 NumCompaniesWorked9 + StockOptionLevel1 + TrainingTimesLastYear4 +
                 TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
                 TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
                 YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 +
                 YearsSinceLastPromotion1.3 + YearsSinceLastPromotion4. +
                 YearsWithCurrManager1.2 + YearsWithCurrManager3 + YearsWithCurrManager4.8 +
                 YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
               family = "binomial", data = train)
summary(model_5)
vif(model_5)
# Model 6: drop YearsWithCurrManager4.8 (high p-value in model_5).
model_6 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
                 JobSatisfaction2 + JobSatisfaction3 + JobSatisfaction4 +
                 WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
                 BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
                 DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
                 EducationFieldMedical + JobLevel2 + JobLevel5 + JobRoleManager +
                 JobRoleManufacturing.Director + JobRoleResearch.Director +
                 JobRoleSales.Representative + MaritalStatusSingle +
                 NumCompaniesWorked1 + NumCompaniesWorked4 + NumCompaniesWorked5 +
                 NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked8 +
                 NumCompaniesWorked9 + StockOptionLevel1 + TrainingTimesLastYear4 +
                 TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
                 TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
                 YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 +
                 YearsSinceLastPromotion1.3 + YearsSinceLastPromotion4. +
                 YearsWithCurrManager1.2 + YearsWithCurrManager3 +
                 YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
               family = "binomial", data = train)
summary(model_6)
vif(model_6)
# Model 7: drop YearsWithCurrManager3 (high p-value in model_6).
model_7 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
                 JobSatisfaction2 + JobSatisfaction3 + JobSatisfaction4 +
                 WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
                 BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
                 DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
                 EducationFieldMedical + JobLevel2 + JobLevel5 + JobRoleManager +
                 JobRoleManufacturing.Director + JobRoleResearch.Director +
                 JobRoleSales.Representative + MaritalStatusSingle +
                 NumCompaniesWorked1 + NumCompaniesWorked4 + NumCompaniesWorked5 +
                 NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked8 +
                 NumCompaniesWorked9 + StockOptionLevel1 + TrainingTimesLastYear4 +
                 TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
                 TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
                 YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 +
                 YearsSinceLastPromotion1.3 + YearsSinceLastPromotion4. +
                 YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
               family = "binomial", data = train)
summary(model_7)
vif(model_7)
# Model 8: drop YearsSinceLastPromotion1.3 (high p-value in model_7).
model_8 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
                 JobSatisfaction2 + JobSatisfaction3 + JobSatisfaction4 +
                 WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
                 BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
                 DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
                 EducationFieldMedical + JobLevel2 + JobLevel5 + JobRoleManager +
                 JobRoleManufacturing.Director + JobRoleResearch.Director +
                 JobRoleSales.Representative + MaritalStatusSingle +
                 NumCompaniesWorked1 + NumCompaniesWorked4 + NumCompaniesWorked5 +
                 NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked8 +
                 NumCompaniesWorked9 + StockOptionLevel1 + TrainingTimesLastYear4 +
                 TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
                 TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
                 YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
                 YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
               family = "binomial", data = train)
summary(model_8)
vif(model_8)
# Model 9: drop NumCompaniesWorked8 (high p-value in model_8).
model_9 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
                 JobSatisfaction2 + JobSatisfaction3 + JobSatisfaction4 +
                 WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
                 BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
                 DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
                 EducationFieldMedical + JobLevel2 + JobLevel5 + JobRoleManager +
                 JobRoleManufacturing.Director + JobRoleResearch.Director +
                 JobRoleSales.Representative + MaritalStatusSingle +
                 NumCompaniesWorked1 + NumCompaniesWorked4 + NumCompaniesWorked5 +
                 NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 + TrainingTimesLastYear4 +
                 TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
                 TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
                 YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
                 YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
               family = "binomial", data = train)
summary(model_9)
vif(model_9)
# Model 10: drop NumCompaniesWorked4 (high p-value in model_9).
model_10 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
                  JobSatisfaction2 + JobSatisfaction3 + JobSatisfaction4 +
                  WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
                  BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
                  DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
                  EducationFieldMedical + JobLevel2 + JobLevel5 + JobRoleManager +
                  JobRoleManufacturing.Director + JobRoleResearch.Director +
                  JobRoleSales.Representative + MaritalStatusSingle +
                  NumCompaniesWorked1 + NumCompaniesWorked5 +
                  NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 + TrainingTimesLastYear4 +
                  TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
                  TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
                  YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
                  YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
                family = "binomial", data = train)
summary(model_10)
vif(model_10)
# Model 11: drop TrainingTimesLastYear4 (high p-value in model_10).
model_11 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
                  JobSatisfaction2 + JobSatisfaction3 + JobSatisfaction4 +
                  WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
                  BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
                  DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
                  EducationFieldMedical + JobLevel2 + JobLevel5 + JobRoleManager +
                  JobRoleManufacturing.Director + JobRoleResearch.Director +
                  JobRoleSales.Representative + MaritalStatusSingle +
                  NumCompaniesWorked1 + NumCompaniesWorked5 +
                  NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
                  TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
                  TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
                  YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
                  YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
                family = "binomial", data = train)
summary(model_11)
vif(model_11)
# Model 12: drop JobRoleSales.Representative (high p-value in model_11).
model_12 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
                  JobSatisfaction2 + JobSatisfaction3 + JobSatisfaction4 +
                  WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
                  BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
                  DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
                  EducationFieldMedical + JobLevel2 + JobLevel5 + JobRoleManager +
                  JobRoleManufacturing.Director + JobRoleResearch.Director + MaritalStatusSingle +
                  NumCompaniesWorked1 + NumCompaniesWorked5 +
                  NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
                  TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
                  TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
                  YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
                  YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
                family = "binomial", data = train)
summary(model_12)
vif(model_12)
# Model 13: drop JobSatisfaction2 (high p-value in model_12).
model_13 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
                  JobSatisfaction3 + JobSatisfaction4 + WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
                  BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
                  DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
                  EducationFieldMedical + JobLevel2 + JobLevel5 + JobRoleManager +
                  JobRoleManufacturing.Director + JobRoleResearch.Director + MaritalStatusSingle +
                  NumCompaniesWorked1 + NumCompaniesWorked5 +
                  NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
                  TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
                  TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
                  YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
                  YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
                family = "binomial", data = train)
summary(model_13)
vif(model_13)
# Model 14: drop JobSatisfaction3 (high p-value in model_13).
model_14 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
                  JobSatisfaction4 + WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
                  BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
                  DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
                  EducationFieldMedical + JobLevel2 + JobLevel5 + JobRoleManager +
                  JobRoleManufacturing.Director + JobRoleResearch.Director + MaritalStatusSingle +
                  NumCompaniesWorked1 + NumCompaniesWorked5 +
                  NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
                  TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
                  TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
                  YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
                  YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
                family = "binomial", data = train)
summary(model_14)
vif(model_14)
# Model 15: drop NumCompaniesWorked1 (high p-value in model_14).
model_15 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
                  JobSatisfaction4 + WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
                  BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
                  DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
                  EducationFieldMedical + JobLevel2 + JobLevel5 + JobRoleManager +
                  JobRoleManufacturing.Director + JobRoleResearch.Director + MaritalStatusSingle +
                  NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
                  TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
                  TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
                  YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
                  YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
                family = "binomial", data = train)
summary(model_15)
vif(model_15)
# Model 16: drop JobLevel2 (high p-value in model_15).
model_16 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
                  JobSatisfaction4 + WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
                  BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
                  DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
                  EducationFieldMedical + JobLevel5 + JobRoleManager +
                  JobRoleManufacturing.Director + JobRoleResearch.Director + MaritalStatusSingle +
                  NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
                  TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
                  TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
                  YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
                  YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
                family = "binomial", data = train)
summary(model_16)
vif(model_16)
# remove JobRoleManager it has high p-value
model_17 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel5 + JobRoleManufacturing.Director + JobRoleResearch.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_17)
vif(model_17)
# remove EnvironmentSatisfaction3 since it is related to EnvironmentSatisfaction4
model_18 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel5 + JobRoleManufacturing.Director + JobRoleResearch.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_18)
vif(model_18)
# remove EnvironmentSatisfaction2 since it is insignificant
model_19 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel5 + JobRoleManufacturing.Director + JobRoleResearch.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_19)
vif(model_19)
# remove JobInvolvement3 since it is insignificant
model_20 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel5 + JobRoleManufacturing.Director + JobRoleResearch.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_20)
vif(model_20)
# remove WorkLifeBalance2 since it is related to WorkLifeBalance3
model_21 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + WorkLifeBalance4 +
BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel5 + JobRoleManufacturing.Director + JobRoleResearch.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_21)
vif(model_21)
# remove WorkLifeBalance4 since it is insignificant
model_22 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel5 + JobRoleManufacturing.Director + JobRoleResearch.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_22)
vif(model_22)
# remove BusinessTravelTravel_Rarelysince it is related to BusinessTravelTravel_Frequently
model_23 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel5 + JobRoleManufacturing.Director + JobRoleResearch.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_23)
vif(model_23)
# remove DepartmentSales it is related to DepartmentResearch...Development
model_24 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel5 + JobRoleManufacturing.Director + JobRoleResearch.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_24)
vif(model_24)
# remove JobRoleResearch.Director sincie it is insignificant
model_25 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_25)
vif(model_25)
# ---- Backward elimination, models 26-41 ----
# remove EducationFieldLife.Sciences since it is related to EducationFieldMedical
model_26 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +
EducationFieldMedical + JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_26)
vif(model_26)
# remove EducationFieldMedical since it became insignificant
model_27 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_27)
vif(model_27)
# remove TotalWorkingYears23. since it is related to TotalWorkingYears10.12
model_28 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_28)
vif(model_28)
# remove TotalWorkingYears13.16 since it is insignificant
model_29 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + TotalWorkingYears10.12 + TotalWorkingYears17.22 +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_29)
vif(model_29)
# remove TotalWorkingYears10.12 since it is insignificant
model_30 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + TotalWorkingYears17.22 +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_30)
vif(model_30)
# remove TotalWorkingYears17.22 since it is related to YearsAtCompany15.
model_31 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_31)
vif(model_31)
# remove StockOptionLevel1 since it is insignificant
model_32 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 +
TrainingTimesLastYear6 + YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_32)
vif(model_32)
# remove YearsWithCurrManager9. since it is related to YearsSinceLastPromotion4.
model_33 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 +
TrainingTimesLastYear6 + YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_33)
vif(model_33)
# remove YearsSinceLastPromotion4. since it is related to YearsAtCompany15. and YearsAtCompany7.14
model_34 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 +
TrainingTimesLastYear6 + YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 +
YearsWithCurrManager1.2 + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_34)
vif(model_34)
# remove YearsAtCompany7.14 since it is related to YearsAtCompany15. and YearsAtCompany5.6
model_35 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 +
TrainingTimesLastYear6 + YearsAtCompany15. + YearsAtCompany5.6 +
YearsWithCurrManager1.2 + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_35)
vif(model_35)
# remove PerformanceRating — NOTE(review): the original comment claimed a
# relation to YearsAtCompany bins (likely a copy/paste slip); presumably the
# variable was dropped as insignificant — confirm against summary(model_35)
model_36 <- glm(Attrition ~ EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 +
TrainingTimesLastYear6 + YearsAtCompany15. + YearsAtCompany5.6 +
YearsWithCurrManager1.2 + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_36)
vif(model_36)
# remove YearsAtCompany15. since it is related to Age38.
model_37 <- glm(Attrition ~ EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 +
TrainingTimesLastYear6 + YearsAtCompany5.6 +
YearsWithCurrManager1.2 + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_37)
vif(model_37)
# remove Age26.33 since it is related to Age38.
model_38 <- glm(Attrition ~ EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 +
TrainingTimesLastYear6 + YearsAtCompany5.6 +
YearsWithCurrManager1.2 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_38)
vif(model_38)
# remove Age34.37 since it is related to Age38.
model_39 <- glm(Attrition ~ EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 +
TrainingTimesLastYear6 + YearsAtCompany5.6 +
YearsWithCurrManager1.2 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_39)
vif(model_39)
# remove JobLevel5 since it is insignificant
model_40 <- glm(Attrition ~ EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 +
TrainingTimesLastYear6 + YearsAtCompany5.6 +
YearsWithCurrManager1.2 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_40)
vif(model_40)
# remove DepartmentResearch...Development since it is insignificant
model_41 <- glm(Attrition ~ EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 +
TrainingTimesLastYear6 + YearsAtCompany5.6 +
YearsWithCurrManager1.2 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_41)
vif(model_41)
# model_41: all remaining terms significant with acceptable VIFs
final_model <- model_41
########################## Model Evaluation ###########################
# predicted probabilities of Churn 1 for test data
# NOTE(review): test[,-1] assumes the first column of test is the response — confirm
test_pred = predict(final_model, test[,-1],type = "response")
summary(test_pred)
test$prob <- test_pred
# probability greater than .5 is classified as 1 (employee will leave)
test_pred_attrition_50 <- factor(ifelse(test_pred >= 0.50, 1,0))
# confusion matrix at the naive 0.50 cutoff
test_conf <- confusionMatrix(test_pred_attrition_50, test$Attrition, positive = "1")
#Sensitivity : 0.12273
#Specificity : 0.98411
#Accuracy : 0.8372
test_conf
# compute optimal probability cutoff for better model reliability
# Evaluate classification quality at a given probability cutoff.
#
# cutoff: threshold at/above which an employee is predicted to leave (class 1).
# pred:   predicted attrition probabilities (defaults to the global test_pred,
#         preserving the original call sites perform_fn(cutoff)).
# actual: observed attrition labels (defaults to the global test$Attrition).
# Returns a 1 x 3 matrix with columns sensitivity, specificity, accuracy.
perform_fn <- function(cutoff, pred = test_pred, actual = test$Attrition)
{
  # Fix: pin both factor levels so extreme cutoffs (all-0 or all-1
  # predictions) no longer break confusionMatrix() with mismatched levels.
  pred_attrition <- factor(ifelse(pred >= cutoff, 1, 0), levels = c(0, 1))
  conf <- confusionMatrix(pred_attrition, actual, positive = "1")
  acc <- conf$overall[1]
  sens <- conf$byClass[1]
  spec <- conf$byClass[2]
  out <- matrix(c(sens, spec, acc), nrow = 1,
                dimnames = list(NULL, c("sensitivity", "specificity", "accuracy")))
  return(out)
}
# Creating 100 cutoff values from 0.006 to 0.82 for plotting, and
# initializing a 100 x 3 result matrix (sensitivity, specificity, accuracy).
prob_seq = seq(.006,.82,length=100)
OUT = matrix(0,100,3)
for(i in 1:100)
{
OUT[i,] = perform_fn(prob_seq[i])
}
# plot sensitivity, specificity and accuracy against the candidate cutoffs
plot(prob_seq, OUT[,1],xlab="Cutoff",ylab="Value",cex.lab=1.5,cex.axis=1.5,ylim=c(0,1),type="l",lwd=2,axes=FALSE,col=2)
axis(1,seq(0,1,length=5),seq(0,1,length=5),cex.lab=1.5)
axis(2,seq(0,1,length=5),seq(0,1,length=5),cex.lab=1.5)
lines(prob_seq,OUT[,2],col="darkgreen",lwd=2)
lines(prob_seq,OUT[,3],col=4,lwd=2)
box()
# NOTE(review): legend() supplies 4 colours/line-widths but only 3 labels;
# the trailing "darkred" entry is unused
legend(0,.50,col=c(2,"darkgreen",4,"darkred"),lwd=c(2,2,2,2),c("Sensitivity","Specificity","Accuracy"))
# find cutoff probability where sensitivity and specificity cross
# (threshold above which the employee is predicted to leave); comment in the
# original reports ~.19
cutoff <- prob_seq[which(abs(OUT[,1]-OUT[,2])<0.01)]
# probability greater than .16 is classified as 1 (employee will leave)
# NOTE(review): the computed `cutoff` above is never used — 0.16 is
# hard-coded here and differs from the .19 mentioned above; confirm which
# threshold is intended
test_pred_attrition <- factor(ifelse(test_pred >= 0.16, 1,0))
# confusion matrix at the chosen cutoff
test_conf <- confusionMatrix(test_pred_attrition, test$Attrition, positive = "1")
#Accuracy : 0.7335
#Sensitivity : 0.7512
# Specificity : 0.7303
test_conf
########################## KS -statistic ######################
# NOTE(review): KS is computed here on the hard class predictions
# (test_pred_attrition), not on the probabilities; using test_pred would be
# the more conventional choice — confirm intent.
ks_stat(test$Attrition,test_pred_attrition)
ks_plot(test$Attrition,test_pred_attrition)
k_stat_prd <- as.vector(as.numeric(test_pred_attrition))
k_stat_act <- as.vector(as.numeric(test$Attrition))
# ROCR: KS = max over thresholds of (TPR - FPR)
pred_object_test<- prediction(k_stat_prd, k_stat_act)
performance_measures_test<- performance(pred_object_test, "tpr", "fpr")
ks_table_test <- attr(performance_measures_test, "y.values")[[1]] -
(attr(performance_measures_test, "x.values")[[1]])
max(ks_table_test)
############################ Lift & Gain Chart ########################################
# Build a decile-wise gain table for the lift chart.
#
# labels:         observed outcomes (factor or 0/1 vector).
# predicted_prob: scores used for ranking (probabilities or predicted classes).
# groups:         number of buckets (default 10 -> deciles).
# Returns one row per bucket with total, totalresp, Cumresp, Gain and Cumlift.
lift <- function(labels , predicted_prob, groups=10) {
  if(is.factor(labels)) labels <- as.integer(as.character(labels ))
  if(is.factor(predicted_prob)) predicted_prob <- as.integer(as.character(predicted_prob))
  # Fix: build the frame directly instead of data.frame(cbind(...)), which
  # needlessly coerces through a matrix.
  helper <- data.frame(labels = labels, predicted_prob = predicted_prob)
  # rank by descending score into `groups` equal-sized buckets
  helper[,"bucket"] = ntile(-helper[,"predicted_prob"], groups)
  # Fix: funs() is defunct in current dplyr; plain summarise() produces the
  # same total / totalresp columns as the old summarise_at(vars(), funs()).
  gaintable = helper %>% group_by(bucket) %>%
    summarise(total = n(),
              totalresp = sum(labels, na.rm = TRUE)) %>%
    mutate(Cumresp = cumsum(totalresp),
           Gain = Cumresp/sum(totalresp)*100,
           Cumlift = Gain/(bucket*(100/groups)))
  return(gaintable)
}
# NOTE(review): gain is computed from the hard class predictions; ranking by
# test_pred (probabilities) would give a more informative chart — confirm intent.
attrition_decile = lift(test$Attrition, test_pred_attrition, groups = 10)
|
/HR Analytics Case Study/PA-I_Case_Study_HR_Analytics/hr_Analytics_missingRemoved_WOE.R
|
no_license
|
nitinsriv/R
|
R
| false
| false
| 79,144
|
r
|
library(dplyr)
library(ggplot2)
library(cowplot)
library(corrplot)
library("MASS")
library(car)
library(caret)
library(Information)
library(ROCR)
# read all data into R
# NOTE(review): absolute Windows paths — consider file.path()/relative paths
# for portability.
emp_survey <- read.csv("C:\\IIITB\\HR Analytics Case Study\\PA-I_Case_Study_HR_Analytics\\employee_survey_data.csv")
gen_data <- read.csv("C:\\IIITB\\HR Analytics Case Study\\PA-I_Case_Study_HR_Analytics\\general_data.csv")
# in_time/out_time are read with header=F because their first row holds the
# date stamps, which are converted into column names below
in_time <- read.csv("C:\\IIITB\\HR Analytics Case Study\\PA-I_Case_Study_HR_Analytics\\in_time.csv", stringsAsFactors=F,header=F)
mgr_survey <- read.csv("C:\\IIITB\\HR Analytics Case Study\\PA-I_Case_Study_HR_Analytics\\manager_survey_data.csv")
out_time <- read.csv("C:\\IIITB\\HR Analytics Case Study\\PA-I_Case_Study_HR_Analytics\\out_time.csv", stringsAsFactors=F,header=F)
# add _IN suffix to the date stamps in the first row of in_time and _OUT to
# the first row of out_time, so the two tables get distinct column names
in_char <- "IN"
in_time[1,] <- sapply(in_time[1,], function(x) x <- paste(x,in_char,sep="_"))
out_char <- "OUT"
out_time[1,] <- sapply(out_time[1,], function(x) x <- paste(x,out_char,sep="_"))
# promote the first row to column names, then drop it from the data
colnames(in_time) <- in_time[1,]
in_time <- in_time[-1,]
colnames(out_time) <- out_time[1,]
out_time <- out_time[-1,]
# in_time and out_time: assumption first column is EmployeeId
# assign column name 'EmployeeID' to first column for in_time and out_time dataframe
# number of unique values in 'EmployeeId column' for both dataframes is 4410
colnames(in_time)[1] <- 'EmployeeID'
colnames(out_time)[1] <- 'EmployeeID'
# empty result confirms both logs cover the same employees
setdiff(in_time$EmployeeID,out_time$EmployeeID)
# find and remove all IN_TIME and OUT_TIME columns which have all values as NA
# (days with no punches for any employee, e.g. company holidays)
# Fix: compare NA counts against nrow() instead of the magic number 4410, and
# only apply negative indexing when some columns matched — df[, -integer(0)]
# would silently drop every column.
in_time_na <- as.data.frame(sapply(in_time, function(x) sum(is.na(x))))
na_cols_in_time <- which(in_time_na == nrow(in_time))
if (length(na_cols_in_time) > 0) in_time <- in_time[,-na_cols_in_time]
out_time_na <- as.data.frame(sapply(out_time, function(x) sum(is.na(x))))
na_cols_out_time <- which(out_time_na == nrow(out_time))
if (length(na_cols_out_time) > 0) out_time <- out_time[,-na_cols_out_time]
# Actual working hours per day = clock-out minus clock-in for every retained
# date column, then averaged per employee across days.
# Fix: build the matrix with one vapply() instead of growing it with cbind()
# in a loop (quadratic copying), and derive the column range from ncol()
# instead of the hard-coded 2:250.
date_cols <- 2:ncol(in_time)
hours_by_day <- vapply(date_cols, function(j) {
  # difftime() without units= picks them automatically; same-day in/out
  # stamps yield hours — TODO confirm all punches are same-day
  as.numeric(difftime(strptime(out_time[, j], "%Y-%m-%d %H:%M:%S"),
                      strptime(in_time[, j], "%Y-%m-%d %H:%M:%S")))
}, numeric(nrow(in_time)))
diff_hours <- data.frame(EmployeeID = as.numeric(in_time$EmployeeID), hours_by_day)
# mean daily hours per employee, skipping days with a missing punch
diff_hours$ActualWorkingHours <- rowMeans(diff_hours[, -1], na.rm = TRUE)
actual_workHours <- diff_hours[,c('EmployeeID','ActualWorkingHours')]
# notice number of rows in EmployeeID column for dataframes - 4410.
length(unique(emp_survey$EmployeeID)) # confirm EmployeeID can be a key to merge different dataframe
length(unique(gen_data$EmployeeID)) # confirm EmployeeID can be a key to merge different dataframe
length(unique(mgr_survey$EmployeeID)) # confirm EmployeeID can be a key to merge different dataframe
length(unique(in_time$EmployeeID)) # confirm EmployeeID can be a key to merge different dataframe
length(unique(out_time$EmployeeID)) # confirm EmployeeID can be a key to merge different dataframe
# check that the EmployeeID sets are identical across all dataframes
# (each setdiff should be empty)
setdiff(emp_survey$EmployeeID,gen_data$EmployeeID) # Identical EmployeeID across these datasets
setdiff(gen_data$EmployeeID,in_time$EmployeeID) # Identical EmployeeID across these datasets
setdiff(in_time$EmployeeID,mgr_survey$EmployeeID) # Identical EmployeeID across these datasets
setdiff(mgr_survey$EmployeeID,out_time$EmployeeID) # Identical EmployeeID across these datasets
# merge into single dataframe, joined by EmployeeID values.
emp_ef <- merge(emp_survey,gen_data,by="EmployeeID", all = F)
emp_ef <- merge(emp_ef,mgr_survey,by="EmployeeID", all = F)
# emp_ef <- merge(emp_ef,in_time,by="EmployeeID", all = F)
# emp_ef <- merge(emp_ef,out_time,by="EmployeeID", all = F)
# remove EmployeeCount, Over18 and StandardHours columns since they hold the
# same value for all rows (verified by the unique() calls below)
unique(emp_ef$EmployeeCount)
unique(emp_ef$Over18)
unique(emp_ef$StandardHours)
# Fix: drop the constant columns by name instead of the hard-coded positions
# -c(12, 19, 21), which would silently remove the wrong columns if the merge
# order ever changed.
emp_ef <- emp_ef[, setdiff(names(emp_ef), c("EmployeeCount", "Over18", "StandardHours"))]
# summary of emp_ef
summary(emp_ef)
# structure of emp_ef
str(emp_ef)
########################## Missing Value Imputation ##########################
# find columns containing NA with number of NA
sapply(emp_ef, function(x) sum(is.na(x)))
# drop incomplete rows; original comment says .03 % of the 4410 observations
# are removed — TODO confirm against the sapply() counts above
emp_no_na <- na.omit(emp_ef)
# recode Attrition factor labels to 0/1 and convert to numeric via its levels
levels(emp_no_na$Attrition) <-c(0,1)
emp_no_na$Attrition <- as.numeric(levels(emp_no_na$Attrition))[emp_no_na$Attrition]
# information value of each predictor against Attrition (Information package)
IV <- create_infotables(emp_no_na[,-1], y="Attrition", bins=10, parallel=FALSE)
# Weight of Evidence for one attribute bin.
#
# local_good: number of attriters (outcome == 1) inside the bin.
# local_bad:  number of non-attriters (outcome == 0) inside the bin.
# outcome:    full outcome vector used for the population totals; defaults to
#             the global emp_no_na$Attrition, preserving the original
#             call sites computeWoE(good, bad).
# Returns log(local_good/total_good) - log(local_bad/total_bad); note the
# result is -Inf/Inf when a bin is empty on one side.
computeWoE <- function(local_good, local_bad, outcome = emp_no_na$Attrition){
  total_good <- sum(outcome == 1)
  total_bad <- sum(outcome == 0)
  woe = log(local_good/total_good) - log(local_bad/total_bad)
  return(woe)
}
######################### outlier treatment ##############################
# outlier check for MonthlyIncome
quantile(emp_no_na$MonthlyIncome,seq(0,1,.01))
# jump at 90% to 91%; cap (winsorise) all values greater than 137756.0
emp_no_na$MonthlyIncome[which(emp_no_na$MonthlyIncome>137756.0)] <- 137756.0
# binning values of Totalworkingyears based on WOE (table from create_infotables)
#TotalWorkingYears N Percent WOE IV
#1 [0,2] 363 0.08441860 1.3969494 0.2405392
#2 [3,4] 308 0.07162791 0.2880417 0.2470738
#3 [5,5] 255 0.05930233 0.1587747 0.2486502
#4 [6,7] 602 0.14000000 0.1811905 0.2535323
#5 [8,9] 577 0.13418605 -0.2703599 0.2624687
#6 [10,12] 837 0.19465116 -0.2422809 0.2729815
#7 [13,16] 423 0.09837209 -0.4820665 0.2923153
#8 [17,22] 487 0.11325581 -0.6384822 0.3292575
#9 [23,40] 448 0.10418605 -0.7039883 0.3696231
# temporarily treat TotalWorkingYears as a factor for the coarse-classing
# comparisons below (factor == number compares by label)
emp_no_na$TotalWorkingYears <- as.factor(emp_no_na$TotalWorkingYears)
# for coarse classing, compute combined WOE for the values 5, 6 and 7
TotalWorkingYears_567 <- emp_no_na[which(emp_no_na$TotalWorkingYears==6 | emp_no_na$TotalWorkingYears==7 |
emp_no_na$TotalWorkingYears==5 ),c(20,6)]
loc_good <- length(TotalWorkingYears_567$Attrition[which(TotalWorkingYears_567$Attrition==1)])
loc_bad <- length(TotalWorkingYears_567$Attrition[which(TotalWorkingYears_567$Attrition==0)])
combined_woe <- computeWoE(loc_good,loc_bad) # .18
# Fix: the original did as.numeric(factor), which yields level codes (1..k)
# rather than the actual year values, and the first '0-2' assignment coerced
# the column to character so later >=/<= checks became lexicographic string
# comparisons (e.g. "35" <= "4" is TRUE). Bin from a numeric snapshot
# instead — the same temp-variable pattern this file uses for YearsAtCompany.
tw_yrs <- as.numeric(as.character(emp_no_na$TotalWorkingYears))
emp_no_na$TotalWorkingYears <- as.character(emp_no_na$TotalWorkingYears)
emp_no_na$TotalWorkingYears[which(tw_yrs >= 0 & tw_yrs <= 2)] <- '0-2'
emp_no_na$TotalWorkingYears[which(tw_yrs >= 3 & tw_yrs <= 4)] <- '3-4'
emp_no_na$TotalWorkingYears[which(tw_yrs >= 5 & tw_yrs <= 7)] <- '5-7'
emp_no_na$TotalWorkingYears[which(tw_yrs >= 8 & tw_yrs <= 9)] <- '8-9'
emp_no_na$TotalWorkingYears[which(tw_yrs >= 10 & tw_yrs <= 12)] <- '10-12'
emp_no_na$TotalWorkingYears[which(tw_yrs >= 13 & tw_yrs <= 16)] <- '13-16'
emp_no_na$TotalWorkingYears[which(tw_yrs >= 17 & tw_yrs <= 22)] <- '17-22'
# replace all values of 23 years or more with 23+
emp_no_na$TotalWorkingYears[which(tw_yrs >= 23)] <- '23+'
# WOE table for YearsAtCompany (from create_infotables)
#YearsAtCompany N Percent WOE IV
#1 [0,0] 126 0.02930233 1.092779966 0.04807756
#2 [1,1] 499 0.11604651 1.030207662 0.21489228
#3 [2,2] 369 0.08581395 0.345732134 0.22637931
#4 [3,4] 700 0.16279070 0.009043863 0.22639267
#5 [5,6] 799 0.18581395 -0.468968920 0.26111506
#6 [7,8] 498 0.11581395 -0.360796765 0.27442241
#7 [9,9] 234 0.05441860 -0.570198713 0.28892615
#8 [10,14] 610 0.14186047 -0.380870490 0.30696276
#9 [15,40] 465 0.10813953 -0.663537357 0.34472160
# Coarse Classing: Category for 0,1 and 5,6,7 and 8 to be combined.
# Category 9,10,11,12,13,14 to be combined.
# After coarse classing, WOE trend is monotonic
# no-op if YearsAtCompany is already numeric; if it were a factor this would
# yield level codes — TODO confirm it comes out of read.csv as numeric
emp_no_na$YearsAtCompany <- as.numeric(emp_no_na$YearsAtCompany)
# check quantile distribution for YearsAtCompany
quantile(emp_no_na$YearsAtCompany,seq(0,1,.01))
# for coarse classing, compute combined WOE for values 0 and 1
YearsAtCompany_01 <- emp_no_na[which(emp_no_na$YearsAtCompany==0 | emp_no_na$YearsAtCompany==1),c(22,6)]
loc_good <- length(YearsAtCompany_01$Attrition[which(YearsAtCompany_01$Attrition==1)])
loc_bad <- length(YearsAtCompany_01$Attrition[which(YearsAtCompany_01$Attrition==0)])
combined_woe <- computeWoE(loc_good,loc_bad) # 1.04
# for coarse classing, compute combined WOE for values 7 through 14
YearsAtCompany_5678 <- emp_no_na[which(emp_no_na$YearsAtCompany>=7 & emp_no_na$YearsAtCompany<=14 ),c(22,6)]
loc_good <- length(YearsAtCompany_5678$Attrition[which(YearsAtCompany_5678$Attrition==1)])
loc_bad <- length(YearsAtCompany_5678$Attrition[which(YearsAtCompany_5678$Attrition==0)])
combined_woe <- computeWoE(loc_good,loc_bad) # -.42
# numeric snapshot: the first label assignment below turns the column into
# character, so all range checks are done against temp_yrs instead
temp_yrs <- emp_no_na$YearsAtCompany
emp_no_na$YearsAtCompany[which(temp_yrs>=0 & temp_yrs<=1)] <- '0-1'
# value 2 is intentionally left as its own bin (see WOE table row [2,2])
emp_no_na$YearsAtCompany[which(temp_yrs>=3 & temp_yrs<=4)] <- '3-4'
emp_no_na$YearsAtCompany[which(temp_yrs>=5 & temp_yrs<=6)] <- '5-6'
emp_no_na$YearsAtCompany[which(temp_yrs>=7 & temp_yrs<=14)] <- '7-14'
# replace all values of 15 years or more with 15+
emp_no_na$YearsAtCompany[which(temp_yrs>=15)] <- '15+'
# YearsSinceLastPromotion: WOE-guided binning.
emp_no_na$YearsSinceLastPromotion <- as.numeric(emp_no_na$YearsSinceLastPromotion)
# inspect the percentile spread before fixing cut points
quantile(emp_no_na$YearsSinceLastPromotion, seq(0, 1, .01))
# Fine-classed WOE/IV for reference:
#   [0,0]=0.19  [1,1]=-0.19  [2,3]=0.07  [4,6]=-0.58  [7,15]=-0.01
# combined WOE for the {1..3} bucket
promo_1_3 <- emp_no_na[which(emp_no_na$YearsSinceLastPromotion >= 1 & emp_no_na$YearsSinceLastPromotion <= 3), c(23, 6)]
loc_good <- sum(promo_1_3$Attrition == 1)
loc_bad <- sum(promo_1_3$Attrition == 0)
combined_woe <- computeWoE(loc_good, loc_bad) # -.09
# combined WOE for the {4..15} bucket
promo_4_15 <- emp_no_na[which(emp_no_na$YearsSinceLastPromotion >= 4 & emp_no_na$YearsSinceLastPromotion <= 15), c(23, 6)]
loc_good <- sum(promo_4_15$Attrition == 1)
loc_bad <- sum(promo_4_15$Attrition == 0)
combined_woe <- computeWoE(loc_good, loc_bad) # -.22
# relabel from a frozen copy; 0 keeps its own bucket
promo_years <- emp_no_na$YearsSinceLastPromotion
emp_no_na$YearsSinceLastPromotion[which(promo_years >= 1 & promo_years <= 3)] <- '1-3'
# everything from 4 years up becomes the open-ended '4+' bucket
emp_no_na$YearsSinceLastPromotion[which(promo_years >= 4)] <- '4+'
# YearsWithCurrManager: WOE-guided binning.
emp_no_na$YearsWithCurrManager <- as.numeric(emp_no_na$YearsWithCurrManager)
# inspect the percentile spread before fixing cut points
quantile(emp_no_na$YearsWithCurrManager, seq(0, 1, .01))
# Fine-classed WOE/IV for reference:
#   [0,0]=0.93  [1,1]=-0.14  [2,2]=-0.13  [3,3]=-0.24
#   [4,6]=-0.36 [7,8]=-0.26  [9,17]=-0.87
# combined WOE for the merged {1,2} bucket (merged to keep the trend monotonic)
mgr_1_2 <- emp_no_na[which(emp_no_na$YearsWithCurrManager == 1 | emp_no_na$YearsWithCurrManager == 2), c(24, 6)]
loc_good <- sum(mgr_1_2$Attrition == 1)
loc_bad <- sum(mgr_1_2$Attrition == 0)
combined_woe <- computeWoE(loc_good, loc_bad) # -.13
# combined WOE for the merged {4..8} bucket
mgr_4_8 <- emp_no_na[which(emp_no_na$YearsWithCurrManager >= 4 & emp_no_na$YearsWithCurrManager <= 8), c(24, 6)]
loc_good <- sum(mgr_4_8$Attrition == 1)
loc_bad <- sum(mgr_4_8$Attrition == 0)
combined_woe <- computeWoE(loc_good, loc_bad) # -.29
# relabel from a frozen copy; 0 and 3 keep their own buckets
mgr_years <- emp_no_na$YearsWithCurrManager
emp_no_na$YearsWithCurrManager[which(mgr_years >= 1 & mgr_years <= 2)] <- '1-2'
emp_no_na$YearsWithCurrManager[which(mgr_years >= 4 & mgr_years <= 8)] <- '4-8'
# everything from 9 years up becomes the open-ended '9+' bucket
emp_no_na$YearsWithCurrManager[which(mgr_years >= 9)] <- '9+'
# PercentSalaryHike: WOE-guided binning.
# inspect the percentile spread before fixing cut points
emp_no_na$PercentSalaryHike <- as.numeric(emp_no_na$PercentSalaryHike)
quantile(emp_no_na$PercentSalaryHike,seq(0,1,.01))
# Fine-classed WOE/IV table for reference:
#PercentSalaryHike N Percent WOE IV
#1 [11,11] 616 0.14325581 -0.11932634 0.001958391
#2 [12,12] 577 0.13418605 -0.09593163 0.003153576
#3 [13,13] 616 0.14325581 -0.01884256 0.003204114
#4 [14,14] 583 0.13558140 -0.10807753 0.004730500
#5 [15,16] 526 0.12232558 0.08167868 0.005569300
#6 [17,18] 496 0.11534884 0.01233584 0.005586927
#7 [19,20] 382 0.08883721 0.09828518 0.006473875
#8 [21,25] 504 0.11720930 0.19924622 0.011445736
# for coarse classing, combine 13 and 14 to make the WOE trend monotonic
PercentSalaryHike_13_14 <- emp_no_na[which(emp_no_na$PercentSalaryHike==13 |
emp_no_na$PercentSalaryHike==14),c(18,6)]
loc_good <- length(PercentSalaryHike_13_14$Attrition[which(PercentSalaryHike_13_14$Attrition==1)])
loc_bad <- length(PercentSalaryHike_13_14$Attrition[which(PercentSalaryHike_13_14$Attrition==0)])
combined_woe <- computeWoE(loc_good,loc_bad) # -.06
# for coarse classing, combine 15 through 18 to make the WOE trend monotonic
PercentSalaryHike_15_18 <- emp_no_na[which(emp_no_na$PercentSalaryHike>=15 & emp_no_na$PercentSalaryHike<=18),c(18,6)]
loc_good <- length(PercentSalaryHike_15_18$Attrition[which(PercentSalaryHike_15_18$Attrition==1)])
loc_bad <- length(PercentSalaryHike_15_18$Attrition[which(PercentSalaryHike_15_18$Attrition==0)])
combined_woe <- computeWoE(loc_good,loc_bad) # .05
# binning values of PercentSalaryHike from a frozen copy of the column;
# 11 and 12 keep their own buckets
temp_perHike <- emp_no_na$PercentSalaryHike
emp_no_na$PercentSalaryHike[which(temp_perHike>=13 & temp_perHike<=14)] <- '13-14'
emp_no_na$PercentSalaryHike[which(temp_perHike>=15 & temp_perHike<=18)] <- '15-18'
emp_no_na$PercentSalaryHike[which(temp_perHike>=19 & temp_perHike<=20)] <- '19-20'
# everything from 21 up lands in one open-ended bucket.
# NOTE(review): the label is '21' although every other open-ended bucket in
# this file uses a '+' suffix (e.g. '23+', '15+'); renaming it to '21+' would
# rename the dummy column created later, so confirm nothing references
# PercentSalaryHike21. before changing it.
emp_no_na$PercentSalaryHike[which(temp_perHike>=21)] <- '21'
# DistanceFromHome: WOE-guided binning.
emp_no_na$DistanceFromHome <- as.numeric(emp_no_na$DistanceFromHome)
# inspect the percentile spread before fixing cut points
quantile(emp_no_na$DistanceFromHome, seq(0, 1, .01))
# Fine-classed WOE/IV for reference:
#   [1,1]=-0.07  [2,2]=0.16  [3,4]=-0.19  [5,6]=-0.15  [7,8]=0.03
#   [9,10]=0.15  [11,16]=0.08 [17,22]=0.13 [23,29]=-0.27
# combined WOE for the merged {11..29} bucket
dist_11_29 <- emp_no_na[which(emp_no_na$DistanceFromHome >= 11 & emp_no_na$DistanceFromHome <= 29), c(9, 6)]
loc_good <- sum(dist_11_29$Attrition == 1)
loc_bad <- sum(dist_11_29$Attrition == 0)
combined_woe <- computeWoE(loc_good, loc_bad) # -.02
# combined WOE for the merged {3..10} bucket
dist_3_10 <- emp_no_na[which(emp_no_na$DistanceFromHome >= 3 & emp_no_na$DistanceFromHome <= 10), c(9, 6)]
loc_good <- sum(dist_3_10$Attrition == 1)
loc_bad <- sum(dist_3_10$Attrition == 0)
combined_woe <- computeWoE(loc_good, loc_bad) # -.02
# combined WOE for the merged {1,2} bucket
dist_1_2 <- emp_no_na[which(emp_no_na$DistanceFromHome >= 1 & emp_no_na$DistanceFromHome <= 2), c(9, 6)]
loc_good <- sum(dist_1_2$Attrition == 1)
loc_bad <- sum(dist_1_2$Attrition == 0)
combined_woe <- computeWoE(loc_good, loc_bad) # .05
# relabel from a frozen copy of the numeric distances
dist_home <- emp_no_na$DistanceFromHome
emp_no_na$DistanceFromHome[which(dist_home >= 1 & dist_home <= 2)] <- '1-2'
emp_no_na$DistanceFromHome[which(dist_home >= 3 & dist_home <= 10)] <- '3-10'
# everything from 11 up becomes the open-ended '11+' bucket
# (an earlier comment said "greater than 20"; the cut has always been 11)
emp_no_na$DistanceFromHome[which(dist_home >= 11)] <- '11+'
# Age: WOE-guided binning.
emp_no_na$Age <- as.numeric(emp_no_na$Age)
# inspect the percentile spread and outliers before fixing cut points
quantile(emp_no_na$Age, seq(0, 1, .01))
boxplot(emp_no_na$Age)
# Fine-classed WOE/IV for reference:
#   [18,25]=1.06  [26,28]=0.30  [29,30]=0.33  [31,33]=0.40  [34,35]=-0.38
#   [36,37]=-0.54 [38,40]=-0.73 [41,44]=-0.45 [45,49]=-0.65 [50,60]=-0.20
# combined WOE for the merged {26..33} bucket
age_26_33 <- emp_no_na[which(emp_no_na$Age >= 26 & emp_no_na$Age <= 33), c(5, 6)]
loc_good <- sum(age_26_33$Attrition == 1)
loc_bad <- sum(age_26_33$Attrition == 0)
combined_woe <- computeWoE(loc_good, loc_bad) # .35
# combined WOE for the merged {34..37} bucket
age_34_37 <- emp_no_na[which(emp_no_na$Age >= 34 & emp_no_na$Age <= 37), c(5, 6)]
loc_good <- sum(age_34_37$Attrition == 1)
loc_bad <- sum(age_34_37$Attrition == 0)
combined_woe <- computeWoE(loc_good, loc_bad) # -.45
# combined WOE for the merged {38..60} bucket
# (an earlier comment said "38 till 50"; the code pools through 60)
age_38_60 <- emp_no_na[which(emp_no_na$Age >= 38 & emp_no_na$Age <= 60), c(5, 6)]
loc_good <- sum(age_38_60$Attrition == 1)
loc_bad <- sum(age_38_60$Attrition == 0)
combined_woe <- computeWoE(loc_good, loc_bad) # -.48
# relabel from a frozen copy of the numeric ages
age_num <- emp_no_na$Age
emp_no_na$Age[which(age_num >= 18 & age_num <= 25)] <- '18-25'
emp_no_na$Age[which(age_num >= 26 & age_num <= 33)] <- '26-33'
emp_no_na$Age[which(age_num >= 34 & age_num <= 37)] <- '34-37'
# everything from 38 up becomes the open-ended '38+' bucket
emp_no_na$Age[which(age_num >= 38)] <- '38+'
########################## Dummy Variable Creation ############################
# Each k-level categorical below is converted to a factor and expanded into
# k-1 treatment-coded dummy columns via model.matrix(); the intercept column
# is dropped and the original column is removed BY POSITION when the dummies
# are cbind()-ed back on.
# NOTE(review): the negative column indices ([,-10], [,-2], ...) are only
# valid for this exact sequence of operations — inserting, removing or
# reordering any step shifts every later index. Re-verify the indices if this
# section is ever edited.
# Education -> dummies
emp_no_na$Education <- as.factor(emp_no_na$Education)
dummy_education <- data.frame(model.matrix( ~Education, data = emp_no_na))
# drop the intercept column produced by model.matrix()
dummy_education <- dummy_education[,-1]
# append dummies and drop the original Education column (position 10)
emp_no_na <- cbind(emp_no_na[,-10], dummy_education)
# EnvironmentSatisfaction -> dummies
emp_no_na$EnvironmentSatisfaction <- as.factor(emp_no_na$EnvironmentSatisfaction)
dummy_EnvironmentSatisfaction <- data.frame(model.matrix( ~EnvironmentSatisfaction, data = emp_no_na))
# drop the intercept column
dummy_EnvironmentSatisfaction <- dummy_EnvironmentSatisfaction[,-1]
# append dummies and drop the original column (position 2)
emp_no_na <- cbind(emp_no_na[,-2], dummy_EnvironmentSatisfaction)
# JobSatisfaction -> dummies
emp_no_na$JobSatisfaction <- as.factor(emp_no_na$JobSatisfaction)
dummy_JobSatisfaction <- data.frame(model.matrix( ~JobSatisfaction, data = emp_no_na))
# drop the intercept column
dummy_JobSatisfaction <- dummy_JobSatisfaction[,-1]
# append dummies and drop the original column (position 2)
emp_no_na <- cbind(emp_no_na[,-2], dummy_JobSatisfaction)
# WorkLifeBalance -> dummies
emp_no_na$WorkLifeBalance <- as.factor(emp_no_na$WorkLifeBalance)
dummy_WorkLifeBalance <- data.frame(model.matrix( ~WorkLifeBalance, data = emp_no_na))
# drop the intercept column
dummy_WorkLifeBalance <- dummy_WorkLifeBalance[,-1]
# append dummies and drop the original column (position 2)
emp_no_na <- cbind(emp_no_na[,-2], dummy_WorkLifeBalance)
# BusinessTravel -> dummies
emp_no_na$BusinessTravel <- as.factor(emp_no_na$BusinessTravel)
dummy_BusinessTravel <- data.frame(model.matrix( ~BusinessTravel, data = emp_no_na))
# drop the intercept column
dummy_BusinessTravel <- dummy_BusinessTravel[,-1]
# append dummies and drop the original column (position 4)
emp_no_na <- cbind(emp_no_na[,-4], dummy_BusinessTravel)
# Department -> dummies
emp_no_na$Department <- as.factor(emp_no_na$Department)
dummy_Department <- data.frame(model.matrix( ~Department, data = emp_no_na))
# drop the intercept column
dummy_Department <- dummy_Department[,-1]
# append dummies and drop the original column (position 4)
emp_no_na <- cbind(emp_no_na[,-4], dummy_Department)
# EducationField -> dummies
emp_no_na$EducationField <- as.factor(emp_no_na$EducationField)
dummy_EducationField <- data.frame(model.matrix( ~EducationField, data = emp_no_na))
# drop the intercept column
dummy_EducationField <- dummy_EducationField[,-1]
# append dummies and drop the original column (position 5)
emp_no_na <- cbind(emp_no_na[,-5], dummy_EducationField)
# Two-level variables are mapped straight to 1/0 instead of dummies.
# Gender: factor levels sort alphabetically ("Female","Male") and are
# relabeled c(1, 0), i.e. Female = 1, Male = 0
emp_no_na$Gender <- as.factor(emp_no_na$Gender)
levels(emp_no_na$Gender) <-c(1,0)
emp_no_na$Gender<- as.numeric(levels(emp_no_na$Gender))[emp_no_na$Gender]
# JobLevel -> dummies
emp_no_na$JobLevel <- as.factor(emp_no_na$JobLevel)
dummy_JobLevel <- data.frame(model.matrix( ~JobLevel, data = emp_no_na))
# drop the intercept column
dummy_JobLevel <- dummy_JobLevel[,-1]
# append dummies and drop the original column (position 6)
emp_no_na <- cbind(emp_no_na[,-6], dummy_JobLevel)
# JobRole -> dummies
emp_no_na$JobRole <- as.factor(emp_no_na$JobRole)
dummy_JobRole <- data.frame(model.matrix( ~JobRole, data = emp_no_na))
# drop the intercept column
dummy_JobRole <- dummy_JobRole[,-1]
# append dummies and drop the original column (position 6)
emp_no_na <- cbind(emp_no_na[,-6], dummy_JobRole)
# MaritalStatus -> dummies
emp_no_na$MaritalStatus <- as.factor(emp_no_na$MaritalStatus)
dummy_MaritalStatus <- data.frame(model.matrix( ~MaritalStatus, data = emp_no_na))
# drop the intercept column
dummy_MaritalStatus <- dummy_MaritalStatus[,-1]
# append dummies and drop the original column (position 6)
emp_no_na <- cbind(emp_no_na[,-6], dummy_MaritalStatus)
# NumCompaniesWorked -> dummies
emp_no_na$NumCompaniesWorked <- as.factor(emp_no_na$NumCompaniesWorked)
dummy_NumCompaniesWorked <- data.frame(model.matrix( ~NumCompaniesWorked, data = emp_no_na))
# drop the intercept column
dummy_NumCompaniesWorked <- dummy_NumCompaniesWorked[,-1]
# append dummies and drop the original column (position 7)
emp_no_na <- cbind(emp_no_na[,-7], dummy_NumCompaniesWorked)
# StockOptionLevel -> dummies
emp_no_na$StockOptionLevel <- as.factor(emp_no_na$StockOptionLevel)
dummy_StockOptionLevel <- data.frame(model.matrix( ~StockOptionLevel, data = emp_no_na))
# drop the intercept column
dummy_StockOptionLevel <- dummy_StockOptionLevel[,-1]
# append dummies and drop the original column (position 8)
emp_no_na <- cbind(emp_no_na[,-8], dummy_StockOptionLevel)
# TrainingTimesLastYear -> dummies
emp_no_na$TrainingTimesLastYear <- as.factor(emp_no_na$TrainingTimesLastYear)
dummy_TrainingTimesLastYear <- data.frame(model.matrix( ~TrainingTimesLastYear, data = emp_no_na))
# drop the intercept column
dummy_TrainingTimesLastYear <- dummy_TrainingTimesLastYear[,-1]
# append dummies and drop the original column (position 9)
emp_no_na <- cbind(emp_no_na[,-9], dummy_TrainingTimesLastYear)
# JobInvolvement -> dummies
emp_no_na$JobInvolvement <- as.factor(emp_no_na$JobInvolvement)
dummy_JobInvolvement <- data.frame(model.matrix( ~JobInvolvement, data = emp_no_na))
# drop the intercept column
dummy_JobInvolvement <- dummy_JobInvolvement[,-1]
# append dummies and drop the original column (position 12)
emp_no_na <- cbind(emp_no_na[,-12], dummy_JobInvolvement)
# PerformanceRating appears to contain only the values 3 and 4 (per the
# original note), so it is recoded directly: levels sort as ("3","4") and
# are relabeled c(1, 0), i.e. rating 3 -> 1, rating 4 -> 0
emp_no_na$PerformanceRating <- as.factor(emp_no_na$PerformanceRating)
levels(emp_no_na$PerformanceRating) <-c(1,0)
emp_no_na$PerformanceRating<- as.numeric(levels(emp_no_na$PerformanceRating))[emp_no_na$PerformanceRating]
# PercentSalaryHike (already binned above) -> dummies
emp_no_na$PercentSalaryHike <- as.factor(emp_no_na$PercentSalaryHike)
dummy_PercentSalaryHike <- data.frame(model.matrix( ~PercentSalaryHike, data = emp_no_na))
# drop the intercept column
dummy_PercentSalaryHike <- dummy_PercentSalaryHike[,-1]
# append dummies and drop the original column (position 7)
emp_no_na <- cbind(emp_no_na[,-7], dummy_PercentSalaryHike)
# TotalWorkingYears (already binned above) -> dummies
emp_no_na$TotalWorkingYears <- as.factor(emp_no_na$TotalWorkingYears)
dummy_TotalWorkingYears <- data.frame(model.matrix( ~TotalWorkingYears, data = emp_no_na))
# drop the intercept column
dummy_TotalWorkingYears <- dummy_TotalWorkingYears[,-1]
# append dummies and drop the original column (position 7)
emp_no_na <- cbind(emp_no_na[,-7], dummy_TotalWorkingYears)
# YearsAtCompany (already binned above) -> dummies
emp_no_na$YearsAtCompany <- as.factor(emp_no_na$YearsAtCompany)
dummy_YearsAtCompany <- data.frame(model.matrix( ~YearsAtCompany, data = emp_no_na))
# drop the intercept column
dummy_YearsAtCompany <- dummy_YearsAtCompany[,-1]
# append dummies and drop the original column (position 7)
emp_no_na <- cbind(emp_no_na[,-7], dummy_YearsAtCompany)
# YearsSinceLastPromotion (already binned above) -> dummies
emp_no_na$YearsSinceLastPromotion <- as.factor(emp_no_na$YearsSinceLastPromotion)
dummy_YearsSinceLastPromotion <- data.frame(model.matrix( ~YearsSinceLastPromotion, data = emp_no_na))
# drop the intercept column
dummy_YearsSinceLastPromotion <- dummy_YearsSinceLastPromotion[,-1]
# append dummies and drop the original column (position 7)
emp_no_na <- cbind(emp_no_na[,-7], dummy_YearsSinceLastPromotion)
# YearsWithCurrManager (already binned above) -> dummies
emp_no_na$YearsWithCurrManager <- as.factor(emp_no_na$YearsWithCurrManager)
dummy_YearsWithCurrManager <- data.frame(model.matrix( ~YearsWithCurrManager, data = emp_no_na))
# drop the intercept column
dummy_YearsWithCurrManager <- dummy_YearsWithCurrManager[,-1]
# append dummies and drop the original column (position 7)
emp_no_na <- cbind(emp_no_na[,-7], dummy_YearsWithCurrManager)
# DistanceFromHome (already binned above) -> dummies
emp_no_na$DistanceFromHome <- as.factor(emp_no_na$DistanceFromHome)
dummy_DistanceFromHome <- data.frame(model.matrix( ~DistanceFromHome, data = emp_no_na))
# drop the intercept column
dummy_DistanceFromHome <- dummy_DistanceFromHome[,-1]
# append dummies and drop the original column (position 4)
emp_no_na <- cbind(emp_no_na[,-4], dummy_DistanceFromHome)
# Age (already binned above) -> dummies
emp_no_na$Age <- as.factor(emp_no_na$Age)
dummy_Age <- data.frame(model.matrix( ~Age, data = emp_no_na))
# drop the intercept column
dummy_Age <- dummy_Age[,-1]
# append dummies and drop the original column (position 2)
emp_no_na <- cbind(emp_no_na[,-2], dummy_Age)
###################### Dummy Variable Creation - End ##########################
# Derive a binary overtime flag: 1 when average working hours exceed 8.5,
# 0 otherwise, in a single vectorized assignment. The previous two-step
# in-place version ('>8.5 -> 1' then '!=1.0 -> 0') read the column it had
# just mutated and would leave a row flagged 1 if its original
# ActualWorkingHours happened to be exactly 1.0.
# (Assumes column 2 is the ActualWorkingHours column, as the original
# index-based update did — confirm against the actual_workHours build step.)
actual_workHours[, 2] <- as.numeric(actual_workHours$ActualWorkingHours > 8.5)
# standardize MonthlyIncome; scale() returns a 1-column matrix, so flatten
# it back to a plain numeric vector to keep the data frame column tidy
emp_no_na$MonthlyIncome <- as.numeric(scale(emp_no_na$MonthlyIncome))
# final modelling dataframe: inner join of features and the overtime flag
emp_final <- merge(emp_no_na, actual_workHours, by = "EmployeeID", all = F)
# EmployeeID is an identifier, not a predictor — drop it
emp_final <- emp_final[, -1]
# Correlation matrix over the remaining (numeric) columns;
# Attrition is coerced to numeric so cor() accepts it
cor_matrix_dataframe <- emp_final[, -1]
cor_matrix_dataframe$Attrition <- as.numeric(cor_matrix_dataframe$Attrition)
cor_df <- cor(cor_matrix_dataframe)
###################### Logistic Regression ############################
# Hold out 30% of rows for evaluation; the fixed seed keeps the split
# reproducible across runs.
set.seed(100)
indices <- sample(1:nrow(emp_final), 0.7*nrow(emp_final))
train <- emp_final[indices, ]
test <- emp_final[-indices, ]
# Baseline: full logistic model with every predictor.
model_1 <- glm(Attrition ~ ., data = train, family = "binomial")
summary(model_1)
# Prune predictors with AIC-driven stepwise search (both directions).
model_2 <- stepAIC(model_1, direction = "both")
summary(model_2)
# Check multicollinearity among the surviving terms.
vif(model_2)
# Model 3: the stepAIC result (model_2) with MaritalStatusMarried dropped —
# it had a high p-value. From here on, one term is removed per iteration
# based on p-value (and VIF is rechecked after each refit).
model_3 <- glm(Attrition ~ PerformanceRating + Education3 + Education4 +
EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
JobSatisfaction2 + JobSatisfaction3 + JobSatisfaction4 +
WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel2 + JobLevel5 + JobRoleManager +
JobRoleManufacturing.Director + JobRoleResearch.Director +
JobRoleSales.Representative + MaritalStatusSingle +
NumCompaniesWorked1 + NumCompaniesWorked4 + NumCompaniesWorked5 +
NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked8 +
NumCompaniesWorked9 + StockOptionLevel1 + TrainingTimesLastYear4 +
TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 +
YearsSinceLastPromotion1.3 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager3 + YearsWithCurrManager4.8 +
YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
# inspect coefficients/p-values and multicollinearity for the next pruning step
summary(model_3)
vif(model_3)
# Model 4: model_3 with Education4 dropped — it had a high p-value.
model_4 <- glm(Attrition ~ PerformanceRating + Education3 +
EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
JobSatisfaction2 + JobSatisfaction3 + JobSatisfaction4 +
WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel2 + JobLevel5 + JobRoleManager +
JobRoleManufacturing.Director + JobRoleResearch.Director +
JobRoleSales.Representative + MaritalStatusSingle +
NumCompaniesWorked1 + NumCompaniesWorked4 + NumCompaniesWorked5 +
NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked8 +
NumCompaniesWorked9 + StockOptionLevel1 + TrainingTimesLastYear4 +
TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 +
YearsSinceLastPromotion1.3 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager3 + YearsWithCurrManager4.8 +
YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
# inspect coefficients/p-values and multicollinearity for the next pruning step
summary(model_4)
vif(model_4)
# Model 5: model_4 with Education3 dropped — it had a high p-value
# (no Education terms remain after this step).
model_5 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
JobSatisfaction2 + JobSatisfaction3 + JobSatisfaction4 +
WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel2 + JobLevel5 + JobRoleManager +
JobRoleManufacturing.Director + JobRoleResearch.Director +
JobRoleSales.Representative + MaritalStatusSingle +
NumCompaniesWorked1 + NumCompaniesWorked4 + NumCompaniesWorked5 +
NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked8 +
NumCompaniesWorked9 + StockOptionLevel1 + TrainingTimesLastYear4 +
TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 +
YearsSinceLastPromotion1.3 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager3 + YearsWithCurrManager4.8 +
YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
# inspect coefficients/p-values and multicollinearity for the next pruning step
summary(model_5)
vif(model_5)
#remove YearsWithCurrManager4.8 it has high p-value
model_6 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
JobSatisfaction2 + JobSatisfaction3 + JobSatisfaction4 +
WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel2 + JobLevel5 + JobRoleManager +
JobRoleManufacturing.Director + JobRoleResearch.Director +
JobRoleSales.Representative + MaritalStatusSingle +
NumCompaniesWorked1 + NumCompaniesWorked4 + NumCompaniesWorked5 +
NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked8 +
NumCompaniesWorked9 + StockOptionLevel1 + TrainingTimesLastYear4 +
TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 +
YearsSinceLastPromotion1.3 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager3 +
YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_6)
vif(model_6)
# remove YearsWithCurrManager3 it has high p-value
model_7 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
JobSatisfaction2 + JobSatisfaction3 + JobSatisfaction4 +
WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel2 + JobLevel5 + JobRoleManager +
JobRoleManufacturing.Director + JobRoleResearch.Director +
JobRoleSales.Representative + MaritalStatusSingle +
NumCompaniesWorked1 + NumCompaniesWorked4 + NumCompaniesWorked5 +
NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked8 +
NumCompaniesWorked9 + StockOptionLevel1 + TrainingTimesLastYear4 +
TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 +
YearsSinceLastPromotion1.3 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_7)
vif(model_7)
# remove YearsSinceLastPromotion1.3 it has high p-value
model_8 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
JobSatisfaction2 + JobSatisfaction3 + JobSatisfaction4 +
WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel2 + JobLevel5 + JobRoleManager +
JobRoleManufacturing.Director + JobRoleResearch.Director +
JobRoleSales.Representative + MaritalStatusSingle +
NumCompaniesWorked1 + NumCompaniesWorked4 + NumCompaniesWorked5 +
NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked8 +
NumCompaniesWorked9 + StockOptionLevel1 + TrainingTimesLastYear4 +
TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_8)
vif(model_8)
# remove NumCompaniesWorked8 it has high p-value
model_9 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
JobSatisfaction2 + JobSatisfaction3 + JobSatisfaction4 +
WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel2 + JobLevel5 + JobRoleManager +
JobRoleManufacturing.Director + JobRoleResearch.Director +
JobRoleSales.Representative + MaritalStatusSingle +
NumCompaniesWorked1 + NumCompaniesWorked4 + NumCompaniesWorked5 +
NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 + TrainingTimesLastYear4 +
TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_9)
vif(model_9)
# remove NumCompaniesWorked4 it has high p-value
model_10 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
JobSatisfaction2 + JobSatisfaction3 + JobSatisfaction4 +
WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel2 + JobLevel5 + JobRoleManager +
JobRoleManufacturing.Director + JobRoleResearch.Director +
JobRoleSales.Representative + MaritalStatusSingle +
NumCompaniesWorked1 + NumCompaniesWorked5 +
NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 + TrainingTimesLastYear4 +
TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_10)
vif(model_10)
# remove TrainingTimesLastYear4 it has high p-value
model_11 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
JobSatisfaction2 + JobSatisfaction3 + JobSatisfaction4 +
WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel2 + JobLevel5 + JobRoleManager +
JobRoleManufacturing.Director + JobRoleResearch.Director +
JobRoleSales.Representative + MaritalStatusSingle +
NumCompaniesWorked1 + NumCompaniesWorked5 +
NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_11)
vif(model_11)
# remove JobRoleSales.Representative it has high p-value
model_12 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
JobSatisfaction2 + JobSatisfaction3 + JobSatisfaction4 +
WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel2 + JobLevel5 + JobRoleManager +
JobRoleManufacturing.Director + JobRoleResearch.Director + MaritalStatusSingle +
NumCompaniesWorked1 + NumCompaniesWorked5 +
NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_12)
vif(model_12)
# remove JobSatisfaction2 it has high p-value
model_13 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
JobSatisfaction3 + JobSatisfaction4 + WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel2 + JobLevel5 + JobRoleManager +
JobRoleManufacturing.Director + JobRoleResearch.Director + MaritalStatusSingle +
NumCompaniesWorked1 + NumCompaniesWorked5 +
NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_13)
vif(model_13)
# remove JobSatisfaction3 it has high p-value
model_14 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel2 + JobLevel5 + JobRoleManager +
JobRoleManufacturing.Director + JobRoleResearch.Director + MaritalStatusSingle +
NumCompaniesWorked1 + NumCompaniesWorked5 +
NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_14)
vif(model_14)
# remove NumCompaniesWorked1 it has high p-value
model_15 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel2 + JobLevel5 + JobRoleManager +
JobRoleManufacturing.Director + JobRoleResearch.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_15)
vif(model_15)
# remove JobLevel2 it has high p-value
model_16 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel5 + JobRoleManager +
JobRoleManufacturing.Director + JobRoleResearch.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_16)
vif(model_16)
# remove JobRoleManager it has high p-value
model_17 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction3 + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel5 + JobRoleManufacturing.Director + JobRoleResearch.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_17)
vif(model_17)
# remove EnvironmentSatisfaction3 since it is related to EnvironmentSatisfaction4
model_18 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction2 + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel5 + JobRoleManufacturing.Director + JobRoleResearch.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_18)
vif(model_18)
# remove EnvironmentSatisfaction2 since it is insignificant
model_19 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel5 + JobRoleManufacturing.Director + JobRoleResearch.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + JobInvolvement3 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_19)
vif(model_19)
# remove JobInvolvement3 since it is insignificant
model_20 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance2 + WorkLifeBalance3 + WorkLifeBalance4 +
BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel5 + JobRoleManufacturing.Director + JobRoleResearch.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_20)
vif(model_20)
# remove WorkLifeBalance2 since it is related to WorkLifeBalance3
model_21 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + WorkLifeBalance4 +
BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel5 + JobRoleManufacturing.Director + JobRoleResearch.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_21)
vif(model_21)
# remove WorkLifeBalance4 since it is insignificant
model_22 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently + BusinessTravelTravel_Rarely +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel5 + JobRoleManufacturing.Director + JobRoleResearch.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_22)
vif(model_22)
# remove BusinessTravelTravel_Rarelysince it is related to BusinessTravelTravel_Frequently
model_23 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development + DepartmentSales + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel5 + JobRoleManufacturing.Director + JobRoleResearch.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_23)
vif(model_23)
# remove DepartmentSales it is related to DepartmentResearch...Development
model_24 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel5 + JobRoleManufacturing.Director + JobRoleResearch.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_24)
vif(model_24)
# remove JobRoleResearch.Director sincie it is insignificant
model_25 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development + EducationFieldLife.Sciences +
EducationFieldMedical + JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_25)
vif(model_25)
# remove EducationFieldLife.Sciences sincie it is related to EducationFieldMedical
model_26 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +
EducationFieldMedical + JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_26)
vif(model_26)
# remove EducationFieldMedical since it became insignificant
model_27 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 + TotalWorkingYears23. +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_27)
vif(model_27)
# remove TotalWorkingYears23. since it is related to TotalWorkingYears10.12
model_28 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + TotalWorkingYears10.12 +
TotalWorkingYears13.16 + TotalWorkingYears17.22 +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_28)
vif(model_28)
# remove TotalWorkingYears13.16 it is insignificant
model_29 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + TotalWorkingYears10.12 + TotalWorkingYears17.22 +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_29)
vif(model_29)
# remove TotalWorkingYears10.12 since it is insignificant
model_30 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + TotalWorkingYears17.22 +
YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_30)
vif(model_30)
# remove TotalWorkingYears17.22 since it is related to YearsAtCompany15.
model_31 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 + StockOptionLevel1 +
TrainingTimesLastYear6 + YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_31)
vif(model_31)
#remove StockOptionLevel1 since it is insignificant.
model_32 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 +
TrainingTimesLastYear6 + YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + YearsWithCurrManager9. + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_32)
vif(model_32)
# remove YearsWithCurrManager9. since it is related to YearsSinceLastPromotion4.
model_33 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 +
TrainingTimesLastYear6 + YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 + YearsSinceLastPromotion4. +
YearsWithCurrManager1.2 + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_33)
vif(model_33)
# remove YearsSinceLastPromotion4. since it is related to YearsAtCompany15. and YearsAtCompany7.14
model_34 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 +
TrainingTimesLastYear6 + YearsAtCompany15. + YearsAtCompany5.6 + YearsAtCompany7.14 +
YearsWithCurrManager1.2 + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_34)
vif(model_34)
# remove YearsAtCompany7.14 since it is related to YearsAtCompany15. and YearsAtCompany5.6
model_35 <- glm(Attrition ~ PerformanceRating + EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 +
TrainingTimesLastYear6 + YearsAtCompany15. + YearsAtCompany5.6 +
YearsWithCurrManager1.2 + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_35)
vif(model_35)
# remove PerformanceRating since it is related to YearsAtCompany15. and YearsAtCompany5.6
model_36 <- glm(Attrition ~ EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 +
TrainingTimesLastYear6 + YearsAtCompany15. + YearsAtCompany5.6 +
YearsWithCurrManager1.2 + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_36)
vif(model_36)
# remove YearsAtCompany15. since it is related to Age38.
model_37 <- glm(Attrition ~ EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 +
TrainingTimesLastYear6 + YearsAtCompany5.6 +
YearsWithCurrManager1.2 + Age26.33 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_37)
vif(model_37)
# remove Age26.33 since it is related to Age38.
model_38 <- glm(Attrition ~ EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 +
TrainingTimesLastYear6 + YearsAtCompany5.6 +
YearsWithCurrManager1.2 + Age34.37 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_38)
vif(model_38)
# remove Age34.37 since it is related to Age38.
model_39 <- glm(Attrition ~ EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development +JobLevel5 + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 +
TrainingTimesLastYear6 + YearsAtCompany5.6 +
YearsWithCurrManager1.2 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_39)
vif(model_39)
# remove JobLevel5 since it is insignificant
model_40 <- glm(Attrition ~ EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
DepartmentResearch...Development + JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 +
TrainingTimesLastYear6 + YearsAtCompany5.6 +
YearsWithCurrManager1.2 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_40)
vif(model_40)
#remove DepartmentResearch...Development since it is insignificant
model_41 <- glm(Attrition ~ EnvironmentSatisfaction4 +
JobSatisfaction4 + WorkLifeBalance3 + BusinessTravelTravel_Frequently +
JobRoleManufacturing.Director + MaritalStatusSingle +
NumCompaniesWorked5 + NumCompaniesWorked6 + NumCompaniesWorked7 + NumCompaniesWorked9 +
TrainingTimesLastYear6 + YearsAtCompany5.6 +
YearsWithCurrManager1.2 + Age38. + ActualWorkingHours,
family = "binomial", data = train)
summary(model_41)
vif(model_41)
final_model <- model_41
########################## Model Evaluation ###########################
# Predicted probabilities of attrition (class "1") on the held-out test set.
# test[, -1] drops the first column — presumably the Attrition label — before
# prediction (TODO confirm column order against the data-prep code above).
test_pred <- predict(final_model, test[, -1], type = "response")
summary(test_pred)
test$prob <- test_pred

# Naive classification rule: probability >= 0.50 is 1 (employee will leave).
test_pred_attrition_50 <- factor(ifelse(test_pred >= 0.50, 1, 0))

# Confusion matrix at the 0.5 cutoff (caret::confusionMatrix).
# Results observed with the original model chain:
# Sensitivity : 0.12273
# Specificity : 0.98411
# Accuracy    : 0.8372
test_conf <- confusionMatrix(test_pred_attrition_50, test$Attrition, positive = "1")
test_conf
# For a given probability cutoff, classify the test-set predictions and return
# a 1x3 matrix of sensitivity, specificity and accuracy. Reads the globals
# `test_pred` (predicted probabilities) and `test` (held-out data).
perform_fn <- function(cutoff)
{
  predicted <- factor(ifelse(test_pred >= cutoff, 1, 0))
  cm <- confusionMatrix(predicted, test$Attrition, positive = "1")
  metrics <- t(as.matrix(c(cm$byClass[1], cm$byClass[2], cm$overall[1])))
  colnames(metrics) <- c("sensitivity", "specificity", "accuracy")
  return(metrics)
}
# Sweep 100 candidate probability cutoffs and record sensitivity, specificity
# and accuracy at each one, then pick the cutoff where sensitivity and
# specificity meet.
prob_seq <- seq(.006, .82, length = 100)
OUT <- matrix(0, nrow = length(prob_seq), ncol = 3)
for (i in seq_along(prob_seq)) {
  OUT[i, ] <- perform_fn(prob_seq[i])
}
# plot sensitivity, specificity and accuracy against the cutoff value
plot(prob_seq, OUT[,1], xlab="Cutoff", ylab="Value", cex.lab=1.5, cex.axis=1.5,
     ylim=c(0,1), type="l", lwd=2, axes=FALSE, col=2)
axis(1, seq(0,1,length=5), seq(0,1,length=5), cex.lab=1.5)
axis(2, seq(0,1,length=5), seq(0,1,length=5), cex.lab=1.5)
lines(prob_seq, OUT[,2], col="darkgreen", lwd=2)
lines(prob_seq, OUT[,3], col=4, lwd=2)
box()
# three plotted series, so exactly three colours / line widths
# (the original legend listed a spare fourth colour "darkred")
legend(0, .50, col = c(2, "darkgreen", 4), lwd = c(2, 2, 2),
       c("Sensitivity", "Specificity", "Accuracy"))
# Optimal cutoff = the probability where sensitivity and specificity cross.
# which.min() always yields exactly one index, unlike the original
# tolerance-based which(abs(...) < 0.01), which can match zero or several.
cutoff <- prob_seq[which.min(abs(OUT[, 1] - OUT[, 2]))]
# classify as 1 (employee will leave) when predicted probability >= cutoff;
# the original computed `cutoff` but then hard-coded 0.16 instead of using it
test_pred_attrition <- factor(ifelse(test_pred >= cutoff, 1, 0))
# confusion matrix at the chosen cutoff
test_conf <- confusionMatrix(test_pred_attrition, test$Attrition, positive = "1")
# Metrics reported by the original run at its 0.16 cutoff:
#Accuracy : 0.7335
#Sensitivity : 0.7512
# Specificity : 0.7303
test_conf
########################## KS -statistic ######################
# Kolmogorov-Smirnov statistic: maximum separation between the cumulative
# true-positive and false-positive rates.
# NOTE(review): both calls below use the hard 0/1 class labels
# (test_pred_attrition) rather than the predicted probabilities (test_pred);
# KS is conventionally computed on the scores -- confirm this is intended.
ks_stat(test$Attrition,test_pred_attrition)
ks_plot(test$Attrition,test_pred_attrition)
# as.numeric() on a factor returns the level codes (1/2), not 0/1; the
# prediction() object only needs a monotone score, so ordering is preserved
k_stat_prd <- as.vector(as.numeric(test_pred_attrition))
k_stat_act <- as.vector(as.numeric(test$Attrition))
pred_object_test<- prediction(k_stat_prd, k_stat_act)
performance_measures_test<- performance(pred_object_test, "tpr", "fpr")
# KS = max over cutoffs of (TPR - FPR)
ks_table_test <- attr(performance_measures_test, "y.values")[[1]] -
(attr(performance_measures_test, "x.values")[[1]])
max(ks_table_test)
############################ Lift & Gain Chart ########################################
# Decile-wise lift/gain table: bucket observations into `groups` bins by
# descending predicted value, then accumulate responses per bucket.
#
# labels         : actual outcomes (0/1, or a factor coercible to integers)
# predicted_prob : predicted scores used for ranking
# groups         : number of buckets (default 10 = deciles)
# Returns a tibble with per-bucket totals, cumulative response, Gain (%) and
# cumulative lift over a random model.
lift <- function(labels, predicted_prob, groups = 10) {
  # factors arrive as level labels; go via character to recover the numbers
  if (is.factor(labels)) labels <- as.integer(as.character(labels))
  if (is.factor(predicted_prob)) predicted_prob <- as.integer(as.character(predicted_prob))
  helper <- data.frame(labels = labels, predicted_prob = predicted_prob)
  # bucket 1 = highest predicted values (note the negation)
  helper[, "bucket"] <- ntile(-helper[, "predicted_prob"], groups)
  gaintable <- helper %>%
    group_by(bucket) %>%
    # plain summarise() replaces the deprecated summarise_at(vars(...), funs(...))
    summarise(total = n(),
              totalresp = sum(labels, na.rm = TRUE)) %>%
    mutate(Cumresp = cumsum(totalresp),
           # Gain: cumulative % of all responders captured up to this bucket
           Gain = Cumresp / sum(totalresp) * 100,
           # Cumlift: gain relative to a random model (bucket share of population)
           Cumlift = Gain / (bucket * (100 / groups)))
  return(gaintable)
}
attrition_decile = lift(test$Attrition, test_pred_attrition, groups = 10)
|
# Auto-generated valgrind/fuzz harness: calls the internal (unexported)
# meteor:::ET0_Makkink with degenerate inputs (zero-length Rs/atmp, denormal
# and huge-magnitude numbers) to probe for crashes, not for a meaningful
# numeric result.
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = c(-1.72131968218895e+83, -7.88781071482504e+93, 1.0823131123826e-105, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
|
/meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615854584-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 757
|
r
|
# Auto-generated valgrind/fuzz harness: calls the internal (unexported)
# meteor:::ET0_Makkink with degenerate inputs (zero-length Rs/atmp, denormal
# and huge-magnitude numbers) to probe for crashes, not for a meaningful
# numeric result.
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = c(-1.72131968218895e+83, -7.88781071482504e+93, 1.0823131123826e-105, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
|
# Advent of Code 2015, day 23: interpret a tiny assembly language with two
# registers (a, b) and six instructions, one instruction per line of day23.txt.
program <- readLines("day23.txt")
# strip the comma separating the two operands, then tokenize:
# each element becomes c(opcode, arg1[, arg2])
program <- gsub(",", "", program)
program <- strsplit(program, " ")
N <- length(program)
reg <- c(a=0, b=0)   # machine registers
address <- 1         # instruction pointer (1-based)
# Each opcode handler mutates `reg` in the enclosing (global) environment via
# <<- and returns the address of the NEXT instruction to execute.
# hlf r: halve register r. round() equals integer halving only for even
# values -- NOTE(review): assumes hlf is only ever applied to even registers.
hlf <- function(r){
reg[r] <<- max(0, round(reg[r] / 2))
address + 1
}
# tpl r: triple register r
tpl <- function(r){
reg[r] <<- reg[r] * 3
address + 1
}
# inc r: increment register r
inc <- function(r){
reg[r] <<- reg[r] + 1
address + 1
}
# jmp offset: unconditional relative jump
jmp <- function(r) {
address + as.numeric(r)
}
# jie r, offset: jump if register r is even (reg %% 2 == 0 makes the ! test TRUE)
jie <- function(r) {
if(! reg[r[1]] %% 2 ){
address + as.numeric(r[2])
} else {
address + 1
}
}
# jio r, offset: jump if register r equals ONE (not "odd" -- the puzzle's gotcha)
jio <- function(r) {
if( reg[r[1]] == 1 ){
address + as.numeric(r[2])
} else {
address + 1
}
}
# dispatch table: opcode string -> handler function
instr <- list(hlf=hlf, tpl=tpl, inc=inc, jmp=jmp, jie=jie, jio=jio)
# Part 1: run with a = 0 until the instruction pointer leaves [1, N]
while(TRUE){
x <- program[[address]]
address <- instr[[x[1]]](x[-1])   # dispatch on opcode, pass remaining tokens
if(address < 1 | address > N) {break}
}
print(reg)
# Part 2: same program, but register a starts at 1
reg <- c(a=1, b=0)
address <- 1
while(TRUE){
x <- program[[address]]
address <- instr[[x[1]]](x[-1])
if(address < 1 | address > N) {break}
}
print(reg)
|
/day23.R
|
no_license
|
sethmcg/advent-2015
|
R
| false
| false
| 1,061
|
r
|
# Advent of Code 2015, day 23: interpret a tiny assembly language with two
# registers (a, b) and six instructions, one instruction per line of day23.txt.
program <- readLines("day23.txt")
# strip the comma separating the two operands, then tokenize:
# each element becomes c(opcode, arg1[, arg2])
program <- gsub(",", "", program)
program <- strsplit(program, " ")
N <- length(program)
reg <- c(a=0, b=0)   # machine registers
address <- 1         # instruction pointer (1-based)
# Each opcode handler mutates `reg` in the enclosing (global) environment via
# <<- and returns the address of the NEXT instruction to execute.
# hlf r: halve register r. round() equals integer halving only for even
# values -- NOTE(review): assumes hlf is only ever applied to even registers.
hlf <- function(r){
reg[r] <<- max(0, round(reg[r] / 2))
address + 1
}
# tpl r: triple register r
tpl <- function(r){
reg[r] <<- reg[r] * 3
address + 1
}
# inc r: increment register r
inc <- function(r){
reg[r] <<- reg[r] + 1
address + 1
}
# jmp offset: unconditional relative jump
jmp <- function(r) {
address + as.numeric(r)
}
# jie r, offset: jump if register r is even (reg %% 2 == 0 makes the ! test TRUE)
jie <- function(r) {
if(! reg[r[1]] %% 2 ){
address + as.numeric(r[2])
} else {
address + 1
}
}
# jio r, offset: jump if register r equals ONE (not "odd" -- the puzzle's gotcha)
jio <- function(r) {
if( reg[r[1]] == 1 ){
address + as.numeric(r[2])
} else {
address + 1
}
}
# dispatch table: opcode string -> handler function
instr <- list(hlf=hlf, tpl=tpl, inc=inc, jmp=jmp, jie=jie, jio=jio)
# Part 1: run with a = 0 until the instruction pointer leaves [1, N]
while(TRUE){
x <- program[[address]]
address <- instr[[x[1]]](x[-1])   # dispatch on opcode, pass remaining tokens
if(address < 1 | address > N) {break}
}
print(reg)
# Part 2: same program, but register a starts at 1
reg <- c(a=1, b=0)
address <- 1
while(TRUE){
x <- program[[address]]
address <- instr[[x[1]]](x[-1])
if(address < 1 | address > N) {break}
}
print(reg)
|
# Look up Wikidata items whose label (or description) matches `string`.
# `what` selects the field matched against ("itemLabel" or "itemDescription");
# `partial = TRUE` switches from exact language-tagged equality to a REGEX
# match. Descriptions are restricted to `language`; at most `limit` rows.
search_WD=function(string, language="en", what="itemLabel", partial=FALSE,limit=10){
  # Build the SPARQL filter clause up front: regex match vs exact literal.
  condition <- if (partial) {
    str_c('REGEX(?', what, ',"', string, '")')
  } else {
    str_c('?', what, '="', string, '"@', language)
  }
  tib <- rselect("item", "itemLabel", "itemDescription") %>%
    rspecify("?item rdfs:label ?itemLabel") %>%
    rspecify("?item schema:description ?itemDescription") %>%
    rfilter(condition = condition) %>%
    rfilter(str_c('LANG(?itemDescription)="', language, '"')) %>%
    rlimit(limit) %>%
    query_wikidata()
  return(tib)
}
|
/R/search_WD.R
|
no_license
|
lvaudor/wikiquery
|
R
| false
| false
| 835
|
r
|
# Look up Wikidata items whose label (or description) matches `string`.
# `what` selects the field matched against ("itemLabel" or "itemDescription");
# `partial = TRUE` switches from exact language-tagged equality to a REGEX
# match. Descriptions are restricted to `language`; at most `limit` rows.
search_WD=function(string, language="en", what="itemLabel", partial=FALSE,limit=10){
  # Build the SPARQL filter clause up front: regex match vs exact literal.
  condition <- if (partial) {
    str_c('REGEX(?', what, ',"', string, '")')
  } else {
    str_c('?', what, '="', string, '"@', language)
  }
  tib <- rselect("item", "itemLabel", "itemDescription") %>%
    rspecify("?item rdfs:label ?itemLabel") %>%
    rspecify("?item schema:description ?itemDescription") %>%
    rfilter(condition = condition) %>%
    rfilter(str_c('LANG(?itemDescription)="', language, '"')) %>%
    rlimit(limit) %>%
    query_wikidata()
  return(tib)
}
|
\name{do.small.world}
\alias{do.small.world}
\title{ Performs the small world test of the given network. }
\description{
This function provides the ratio of the average path length and the clustering coefficient to verify the small world behavior of the network
}
\usage{
do.small.world(graph, filename = NULL)
}
\arguments{
\item{graph}{ Igraph network object }
\item{filename}{ If it is specified, a file in csv format is created with the results}
}
\details{
}
\value{
Dataframe containing the names(var) and the results is returned.
}
\references{
Watts, D. (2004): Small worlds, the dynamics of networks between order and randomness. Princeton University Press.
}
\author{ Domingo Vargas }
\note{
}
\seealso{
}
\examples{
data(test.net,package="netmodels")
v <- do.small.world(test.net)
}
\keyword{ graphs }
|
/man/do.small.world.Rd
|
no_license
|
cran/netmodels
|
R
| false
| false
| 821
|
rd
|
\name{do.small.world}
\alias{do.small.world}
\title{ Performs the small world test of the given network. }
\description{
This function provides the ratio of the average path length and the clustering coefficient to verify the small world behavior of the network
}
\usage{
do.small.world(graph, filename = NULL)
}
\arguments{
\item{graph}{ Igraph network object }
\item{filename}{ If it is specified, a file in csv format is created with the results}
}
\details{
}
\value{
Dataframe containing the names(var) and the results is returned.
}
\references{
Watts, D. (2004): Small worlds, the dynamics of networks between order and randomness. Princeton University Press.
}
\author{ Domingo Vargas }
\note{
}
\seealso{
}
\examples{
data(test.net,package="netmodels")
v <- do.small.world(test.net)
}
\keyword{ graphs }
|
#' Loughran-McDonald Polarity Table
#'
#' A \pkg{data.table} dataset containing a filtered version of Loughran &
#' McDonald's (2016) positive/negative financial word list as sentiment lookup
#' values.
#'
#' @details
#' \itemize{
#'   \item x. Words
#'   \item y. Sentiment values
#' }
#'
#' @section License: The original authors note the data is available for
#' non-commercial, research use: "The data compilations provided on
#' this website are for use by individual researchers.". For more details see:
#' https://sraf.nd.edu/textual-analysis/resources/#Master%20Dictionary.
#' @section Copyright: Copyright holder University of Notre Dame
#' @docType data
#' @keywords datasets
#' @name hash_sentiment_loughran_mcdonald
#' @usage data(hash_sentiment_loughran_mcdonald)
#' @format A data frame with 2,702 rows and 2 variables
#' @references Loughran, T. and McDonald, B. (2016). Textual analysis in
#' accounting and finance: A survey. Journal of Accounting Research 54(4),
#' 1187-1230. doi: 10.2139/ssrn.2504147 \cr \cr
#' \url{https://sraf.nd.edu/textual-analysis/resources/#Master\%20Dictionary}
NULL
|
/R/hash_sentiment_loughran_mcdonald.R
|
no_license
|
cran/lexicon
|
R
| false
| false
| 1,147
|
r
|
#' Loughran-McDonald Polarity Table
#'
#' A \pkg{data.table} dataset containing a filtered version of Loughran &
#' McDonald's (2016) positive/negative financial word list as sentiment lookup
#' values.
#'
#' @details
#' \itemize{
#'   \item x. Words
#'   \item y. Sentiment values
#' }
#'
#' @section License: The original authors note the data is available for
#' non-commercial, research use: "The data compilations provided on
#' this website are for use by individual researchers.". For more details see:
#' https://sraf.nd.edu/textual-analysis/resources/#Master%20Dictionary.
#' @section Copyright: Copyright holder University of Notre Dame
#' @docType data
#' @keywords datasets
#' @name hash_sentiment_loughran_mcdonald
#' @usage data(hash_sentiment_loughran_mcdonald)
#' @format A data frame with 2,702 rows and 2 variables
#' @references Loughran, T. and McDonald, B. (2016). Textual analysis in
#' accounting and finance: A survey. Journal of Accounting Research 54(4),
#' 1187-1230. doi: 10.2139/ssrn.2504147 \cr \cr
#' \url{https://sraf.nd.edu/textual-analysis/resources/#Master\%20Dictionary}
NULL
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s.H2ODL.R
\name{s.H2ODL}
\alias{s.H2ODL}
\title{Deep Learning on H2O [C, R]}
\usage{
s.H2ODL(
x,
y = NULL,
x.test = NULL,
y.test = NULL,
x.valid = NULL,
y.valid = NULL,
x.name = NULL,
y.name = NULL,
ip = "localhost",
port = 54321,
n.hidden.nodes = c(20, 20),
epochs = 1000,
activation = "Rectifier",
mini.batch.size = 1,
learning.rate = 0.005,
adaptive.rate = TRUE,
rho = 0.99,
epsilon = 1e-08,
rate.annealing = 1e-06,
rate.decay = 1,
momentum.start = 0,
momentum.ramp = 1e+06,
momentum.stable = 0,
nesterov.accelerated.gradient = TRUE,
input.dropout.ratio = 0,
hidden.dropout.ratios = NULL,
l1 = 0,
l2 = 0,
max.w2 = 3.4028235e+38,
nfolds = 0,
initial.biases = NULL,
initial.weights = NULL,
loss = "Automatic",
distribution = "AUTO",
stopping.rounds = 5,
stopping.metric = "AUTO",
upsample = FALSE,
downsample = FALSE,
resample.seed = NULL,
na.action = na.fail,
n.cores = rtCores,
print.plot = TRUE,
plot.fitted = NULL,
plot.predicted = NULL,
plot.theme = getOption("rt.fit.theme", "lightgrid"),
question = NULL,
verbose = TRUE,
trace = 0,
outdir = NULL,
save.mod = ifelse(!is.null(outdir), TRUE, FALSE),
...
)
}
\arguments{
\item{x}{Vector / Matrix / Data Frame: Training set Predictors}
\item{y}{Vector: Training set outcome}
\item{x.test}{Vector / Matrix / Data Frame: Testing set Predictors}
\item{y.test}{Vector: Testing set outcome}
\item{x.valid}{Vector / Matrix / Data Frame: Validation set Predictors}
\item{y.valid}{Vector: Validation set outcome}
\item{x.name}{Character: Name for feature set}
\item{y.name}{Character: Name for outcome}
\item{ip}{Character: IP address of H2O server. Default = "localhost"}
\item{port}{Integer: Port number for server. Default = 54321}
\item{n.hidden.nodes}{Integer vector of length equal to the number of hidden layers you wish to create}
\item{epochs}{Integer: How many times to iterate through the dataset. Default = 1000}
\item{activation}{Character: Activation function to use: "Tanh", "TanhWithDropout", "Rectifier", "RectifierWithDropout",
"Maxout", "MaxoutWithDropout". Default = "Rectifier"}
\item{learning.rate}{Float: Learning rate to use for training. Default = .005}
\item{adaptive.rate}{Logical: If TRUE, use adaptive learning rate. Default = TRUE}
\item{rate.annealing}{Float: Learning rate annealing: rate / (1 + rate_annealing * samples). Default = 1e-6}
\item{input.dropout.ratio}{Float (0, 1): Dropout ratio for inputs}
\item{hidden.dropout.ratios}{Vector, Float (0, 2): Dropout ratios for hidden layers}
\item{l1}{Float (0, 1): L1 regularization
(introduces sparseness; i.e. sets many weights to 0; reduces variance, increases generalizability)}
\item{l2}{Float (0, 1): L2 regularization
(prevents very large absolute weights; reduces variance, increases generalizability)}
\item{upsample}{Logical: If TRUE, upsample cases to balance outcome classes (for Classification only)
Note: upsample will randomly sample with replacement if the length of the majority class is more than double
the length of the class you are upsampling, thereby introducing randomness}
\item{downsample}{Logical: If TRUE, downsample majority class to match size of minority class}
\item{resample.seed}{Integer: If provided, will be used to set the seed during upsampling.
Default = NULL (random seed)}
\item{na.action}{How to handle missing values. See \code{?na.fail}}
\item{n.cores}{Integer: Number of cores to use}
\item{print.plot}{Logical: if TRUE, produce plot using \code{mplot3}
Takes precedence over \code{plot.fitted} and \code{plot.predicted}. Default = TRUE}
\item{plot.fitted}{Logical: if TRUE, plot True (y) vs Fitted}
\item{plot.predicted}{Logical: if TRUE, plot True (y.test) vs Predicted.
Requires \code{x.test} and \code{y.test}}
\item{plot.theme}{Character: "zero", "dark", "box", "darkbox"}
\item{question}{Character: the question you are attempting to answer with this model, in plain language.}
\item{verbose}{Logical: If TRUE, print summary to screen.}
\item{trace}{Integer: If higher than 0, will print more information to the console. Default = 0}
\item{outdir}{Path to output directory.
If defined, will save Predicted vs. True plot, if available,
as well as full model output, if \code{save.mod} is TRUE}
\item{save.mod}{Logical: If TRUE, save all output to an RDS file in \code{outdir}
\code{save.mod} is TRUE by default if an \code{outdir} is defined. If set to TRUE, and no \code{outdir}
is defined, outdir defaults to \code{paste0("./s.", mod.name)}}
\item{...}{Additional parameters to pass to \code{h2o::h2o.deeplearning}}
}
\value{
\link{rtMod} object
}
\description{
Trains a Deep Neural Net using H2O (http://www.h2o.ai)
Check out the H2O Flow at \code{[ip]:[port]}, Default IP:port is "localhost:54321"
e.g. if running on localhost, point your web browser to \code{localhost:54321}
}
\details{
x & y form the training set.
x.test & y.test form the testing set used only to test model generalizability.
x.valid & y.valid form the validation set used to monitor training progress
}
\seealso{
\link{elevate} for external cross-validation
Other Supervised Learning:
\code{\link{s.ADABOOST}()},
\code{\link{s.ADDTREE}()},
\code{\link{s.BART}()},
\code{\link{s.BAYESGLM}()},
\code{\link{s.BRUTO}()},
\code{\link{s.C50}()},
\code{\link{s.CART}()},
\code{\link{s.CTREE}()},
\code{\link{s.DA}()},
\code{\link{s.ET}()},
\code{\link{s.EVTREE}()},
\code{\link{s.GAM.default}()},
\code{\link{s.GAM.formula}()},
\code{\link{s.GAMSELX2}()},
\code{\link{s.GAMSELX}()},
\code{\link{s.GAMSEL}()},
\code{\link{s.GAM}()},
\code{\link{s.GBM3}()},
\code{\link{s.GBM}()},
\code{\link{s.GLMNET}()},
\code{\link{s.GLM}()},
\code{\link{s.GLS}()},
\code{\link{s.H2OGBM}()},
\code{\link{s.H2ORF}()},
\code{\link{s.IRF}()},
\code{\link{s.KNN}()},
\code{\link{s.LDA}()},
\code{\link{s.LM}()},
\code{\link{s.MARS}()},
\code{\link{s.MLRF}()},
\code{\link{s.NBAYES}()},
\code{\link{s.NLA}()},
\code{\link{s.NLS}()},
\code{\link{s.NW}()},
\code{\link{s.POLYMARS}()},
\code{\link{s.PPR}()},
\code{\link{s.PPTREE}()},
\code{\link{s.QDA}()},
\code{\link{s.QRNN}()},
\code{\link{s.RANGER}()},
\code{\link{s.RFSRC}()},
\code{\link{s.RF}()},
\code{\link{s.SGD}()},
\code{\link{s.SPLS}()},
\code{\link{s.SVM}()},
\code{\link{s.TFN}()},
\code{\link{s.XGBLIN}()},
\code{\link{s.XGB}()}
Other Deep Learning:
\code{\link{d.H2OAE}()},
\code{\link{s.TFN}()}
}
\author{
E.D. Gennatas
}
\concept{Deep Learning}
\concept{Supervised Learning}
|
/man/s.H2ODL.Rd
|
no_license
|
tlarzg/rtemis
|
R
| false
| true
| 6,586
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s.H2ODL.R
\name{s.H2ODL}
\alias{s.H2ODL}
\title{Deep Learning on H2O [C, R]}
\usage{
s.H2ODL(
x,
y = NULL,
x.test = NULL,
y.test = NULL,
x.valid = NULL,
y.valid = NULL,
x.name = NULL,
y.name = NULL,
ip = "localhost",
port = 54321,
n.hidden.nodes = c(20, 20),
epochs = 1000,
activation = "Rectifier",
mini.batch.size = 1,
learning.rate = 0.005,
adaptive.rate = TRUE,
rho = 0.99,
epsilon = 1e-08,
rate.annealing = 1e-06,
rate.decay = 1,
momentum.start = 0,
momentum.ramp = 1e+06,
momentum.stable = 0,
nesterov.accelerated.gradient = TRUE,
input.dropout.ratio = 0,
hidden.dropout.ratios = NULL,
l1 = 0,
l2 = 0,
max.w2 = 3.4028235e+38,
nfolds = 0,
initial.biases = NULL,
initial.weights = NULL,
loss = "Automatic",
distribution = "AUTO",
stopping.rounds = 5,
stopping.metric = "AUTO",
upsample = FALSE,
downsample = FALSE,
resample.seed = NULL,
na.action = na.fail,
n.cores = rtCores,
print.plot = TRUE,
plot.fitted = NULL,
plot.predicted = NULL,
plot.theme = getOption("rt.fit.theme", "lightgrid"),
question = NULL,
verbose = TRUE,
trace = 0,
outdir = NULL,
save.mod = ifelse(!is.null(outdir), TRUE, FALSE),
...
)
}
\arguments{
\item{x}{Vector / Matrix / Data Frame: Training set Predictors}
\item{y}{Vector: Training set outcome}
\item{x.test}{Vector / Matrix / Data Frame: Testing set Predictors}
\item{y.test}{Vector: Testing set outcome}
\item{x.valid}{Vector / Matrix / Data Frame: Validation set Predictors}
\item{y.valid}{Vector: Validation set outcome}
\item{x.name}{Character: Name for feature set}
\item{y.name}{Character: Name for outcome}
\item{ip}{Character: IP address of H2O server. Default = "localhost"}
\item{port}{Integer: Port number for server. Default = 54321}
\item{n.hidden.nodes}{Integer vector of length equal to the number of hidden layers you wish to create}
\item{epochs}{Integer: How many times to iterate through the dataset. Default = 1000}
\item{activation}{Character: Activation function to use: "Tanh", "TanhWithDropout", "Rectifier", "RectifierWithDropout",
"Maxout", "MaxoutWithDropout". Default = "Rectifier"}
\item{learning.rate}{Float: Learning rate to use for training. Default = .005}
\item{adaptive.rate}{Logical: If TRUE, use adaptive learning rate. Default = TRUE}
\item{rate.annealing}{Float: Learning rate annealing: rate / (1 + rate_annealing * samples). Default = 1e-6}
\item{input.dropout.ratio}{Float (0, 1): Dropout ratio for inputs}
\item{hidden.dropout.ratios}{Vector, Float (0, 2): Dropout ratios for hidden layers}
\item{l1}{Float (0, 1): L1 regularization
(introduces sparseness; i.e. sets many weights to 0; reduces variance, increases generalizability)}
\item{l2}{Float (0, 1): L2 regularization
(prevents very large absolute weights; reduces variance, increases generalizability)}
\item{upsample}{Logical: If TRUE, upsample cases to balance outcome classes (for Classification only)
Note: upsample will randomly sample with replacement if the length of the majority class is more than double
the length of the class you are upsampling, thereby introducing randomness}
\item{downsample}{Logical: If TRUE, downsample majority class to match size of minority class}
\item{resample.seed}{Integer: If provided, will be used to set the seed during upsampling.
Default = NULL (random seed)}
\item{na.action}{How to handle missing values. See \code{?na.fail}}
\item{n.cores}{Integer: Number of cores to use}
\item{print.plot}{Logical: if TRUE, produce plot using \code{mplot3}
Takes precedence over \code{plot.fitted} and \code{plot.predicted}. Default = TRUE}
\item{plot.fitted}{Logical: if TRUE, plot True (y) vs Fitted}
\item{plot.predicted}{Logical: if TRUE, plot True (y.test) vs Predicted.
Requires \code{x.test} and \code{y.test}}
\item{plot.theme}{Character: "zero", "dark", "box", "darkbox"}
\item{question}{Character: the question you are attempting to answer with this model, in plain language.}
\item{verbose}{Logical: If TRUE, print summary to screen.}
\item{trace}{Integer: If higher than 0, will print more information to the console. Default = 0}
\item{outdir}{Path to output directory.
If defined, will save Predicted vs. True plot, if available,
as well as full model output, if \code{save.mod} is TRUE}
\item{save.mod}{Logical: If TRUE, save all output to an RDS file in \code{outdir}
\code{save.mod} is TRUE by default if an \code{outdir} is defined. If set to TRUE, and no \code{outdir}
is defined, outdir defaults to \code{paste0("./s.", mod.name)}}
\item{...}{Additional parameters to pass to \code{h2o::h2o.deeplearning}}
}
\value{
\link{rtMod} object
}
\description{
Trains a Deep Neural Net using H2O (http://www.h2o.ai)
Check out the H2O Flow at \code{[ip]:[port]}, Default IP:port is "localhost:54321"
e.g. if running on localhost, point your web browser to \code{localhost:54321}
}
\details{
x & y form the training set.
x.test & y.test form the testing set used only to test model generalizability.
x.valid & y.valid form the validation set used to monitor training progress
}
\seealso{
\link{elevate} for external cross-validation
Other Supervised Learning:
\code{\link{s.ADABOOST}()},
\code{\link{s.ADDTREE}()},
\code{\link{s.BART}()},
\code{\link{s.BAYESGLM}()},
\code{\link{s.BRUTO}()},
\code{\link{s.C50}()},
\code{\link{s.CART}()},
\code{\link{s.CTREE}()},
\code{\link{s.DA}()},
\code{\link{s.ET}()},
\code{\link{s.EVTREE}()},
\code{\link{s.GAM.default}()},
\code{\link{s.GAM.formula}()},
\code{\link{s.GAMSELX2}()},
\code{\link{s.GAMSELX}()},
\code{\link{s.GAMSEL}()},
\code{\link{s.GAM}()},
\code{\link{s.GBM3}()},
\code{\link{s.GBM}()},
\code{\link{s.GLMNET}()},
\code{\link{s.GLM}()},
\code{\link{s.GLS}()},
\code{\link{s.H2OGBM}()},
\code{\link{s.H2ORF}()},
\code{\link{s.IRF}()},
\code{\link{s.KNN}()},
\code{\link{s.LDA}()},
\code{\link{s.LM}()},
\code{\link{s.MARS}()},
\code{\link{s.MLRF}()},
\code{\link{s.NBAYES}()},
\code{\link{s.NLA}()},
\code{\link{s.NLS}()},
\code{\link{s.NW}()},
\code{\link{s.POLYMARS}()},
\code{\link{s.PPR}()},
\code{\link{s.PPTREE}()},
\code{\link{s.QDA}()},
\code{\link{s.QRNN}()},
\code{\link{s.RANGER}()},
\code{\link{s.RFSRC}()},
\code{\link{s.RF}()},
\code{\link{s.SGD}()},
\code{\link{s.SPLS}()},
\code{\link{s.SVM}()},
\code{\link{s.TFN}()},
\code{\link{s.XGBLIN}()},
\code{\link{s.XGB}()}
Other Deep Learning:
\code{\link{d.H2OAE}()},
\code{\link{s.TFN}()}
}
\author{
E.D. Gennatas
}
\concept{Deep Learning}
\concept{Supervised Learning}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.