content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
library(gamlss)
library(gamlss.dist)
library(ggplot2)
rtweibull <- function(n,qt,alpha,t){
  # Draws n samples from a Weibull distribution re-parameterized by its
  # t-th quantile, via inverse-transform sampling:
  #   F(x) = 1 - exp(-ct * (x/qt)^alpha),  ct = -log(1 - t)
  # so F^{-1}(u) = qt * (log(1 - u) / log(1 - t))^(1/alpha).
  #
  # n     = number of observations
  # qt    > 0; value of the t-th quantile
  # alpha = Weibull shape parameter (> 0)
  # t     = quantile level, between 0 and 1
  if (t > 1 | t < 0) {
    stop("parámetro t solo puede contener valores entre 0 y 1")
  }
  if (qt < 0){
    stop("parámetro qt solo puede ser superior a 0")
  }
  if (alpha < 0){
    stop("parámetro alpha solo puede ser superior a 0")
  }
  U = runif(n)
  # BUG FIX: original used log(U+1), which does not invert the CDF.
  # The inverse transform requires log(1-U); both logs are negative,
  # so the ratio is positive and x = qt when U = t.
  qm = qt * ((log(1-U))/(log(1-t)))^(1/alpha)
  return(qm)
}
rtweibull2 <- function(n,qt,sigma,t){
  # Samples n observations from a quantile-parameterized Weibull by
  # translating (qt, sigma, t) into the classical shape/scale pair and
  # delegating to stats::rweibull.
  #
  # n     = number of observations
  # qt    > 0; value of the t-th quantile
  # sigma = dispersion parameter (> 0); shape = 1 / log(sigma + 1)
  # t     = quantile level, between 0 and 1
  if (t > 1 | t < 0) {
    stop("parámetro t solo puede contener valores entre 0 y 1")
  }
  if (qt < 0){
    stop("parámetro qt solo puede ser superior a 0")
  }
  if (sigma < 0){
    stop("parámetro sigma solo puede ser superior a 0")
  }
  shape_par <- 1/(log(sigma+1))
  # scale chosen so that the t-th quantile of the distribution equals qt
  scale_par <- qt/(-log(1-t))^(1/shape_par)
  rweibull(n = n, shape = shape_par, scale = scale_par)
}
library(BB)
library(numDeriv)
# Simulate a reference sample of 10,000 draws with qt = 2, sigma = 2, t = 0.1
# and inspect its shape; `muestra` and `n` are globals consumed by lfew below.
muestra <- rtweibull2(10000,2,2,0.1)
hist(muestra)
n <- length(muestra)
few <- function(x,param){
  # Density of the quantile-parameterized Weibull at x, with
  # param = c(qt, sigma, t):
  #   f(x) = (a*ct/qt) * (x/qt)^(a-1) * exp(-ct*(x/qt)^a)
  # where a = 1/log(sigma+1) and ct = -log(1-t).
  qt <- param[1]
  sigma <- param[2]
  t <- param[3]
  ct <- -(log(1-t))
  a <- 1/(log(sigma+1))
  # BUG FIX: original computed (a) * ((ct/qt)^(a-1)) * exp(...), dropping
  # the x^(a-1) factor and the ct/qt normalization; the corrected form
  # matches the log-density used in ll (log(a)+log(ct)-a*log(qt)+(a-1)*log(x)-...).
  val <- (a*ct/qt) * ((x/qt)^(a-1)) * exp((-ct)*(x/qt)^(a))
  return(val)
}
lfew <- function(param){
  # Negative log-likelihood of the global sample `muestra` (first `n`
  # observations) under the density `few`; param = c(qt, sigma, t).
  # Relies on the globals `muestra` and `n` defined above.
  -sum(log(few(muestra[seq_len(n)], param)))
}
ll <- function(param, x, t) {
  # Negative log-likelihood of sample x for the quantile-parameterized
  # Weibull with param = c(qt, sigma) and fixed quantile level t.
  qt <- param[1]
  sigma <- param[2]
  shape <- 1/(log(sigma + 1))
  ct <- -(log(1 - t))
  # closed-form log-density, vectorized over x
  loglik <- log(shape) + log(ct) - shape*log(qt) +
    (shape - 1)*log(x) - ct*(x/qt)^shape
  -sum(loglik)
}
inicial = c(4,3)
# Maximum-likelihood fit of (qt, sigma) with the quantile level fixed at t = 0.5.
esti_mv = nlminb(inicial,ll,x=muestra,t=0.5,lower=c(0,0),upper = c(Inf,Inf))
esti_mv$par
## Control: qt = 2, sigma = 2, t = 0.000002
esti_mv$par
# BUG FIX: only two parameters are estimated, so esti_mv$par[3] is NA and
# would make rtweibull2 fail; t must be the same fixed level (0.5) used in the fit.
hist(rtweibull2(10000,esti_mv$par[1],esti_mv$par[2],0.5))
dtweibull <- function(value,qt,alpha,t){
  # Density of the quantile-parameterized Weibull: maps (qt, alpha, t)
  # to the classical shape/scale pair and delegates to stats::dweibull.
  if (t > 1 | t < 0) {
    stop("parámetro t solo puede contener valores entre 0 y 1")
  }
  if (qt < 0){
    stop("parámetro qt solo puede ser superior a 0")
  }
  if (alpha < 0){
    stop("parámetro alpha solo puede ser superior a 0")
  }
  # scale chosen so that the t-th quantile equals qt
  scale_par <- qt/((-log(1-t))^(1/alpha))
  return(dweibull(x = value, shape = alpha, scale = scale_par))
}
ptweibull <- function(value,qt,alpha,t){
  # Cumulative distribution function of the quantile-parameterized Weibull:
  # maps (qt, alpha, t) to shape/scale and delegates to stats::pweibull.
  if (t > 1 | t < 0) {
    stop("parámetro t solo puede contener valores entre 0 y 1")
  }
  if (qt < 0){
    stop("parámetro qt solo puede ser superior a 0")
  }
  if (alpha < 0){
    stop("parámetro alpha solo puede ser superior a 0")
  }
  ct=(-log(1-t))^(1/alpha)
  b= qt/ct
  a= alpha
  # BUG FIX: pweibull's first argument is named `q`, not `x`; the original
  # call pweibull(x = value, ...) failed with an "unused argument" error.
  # Also return the value explicitly instead of an invisible assignment.
  pt = pweibull(q = value, shape = a, scale = b)
  return(pt)
}
vtweibull <- function(qt,alpha,t){
  # Variance of the quantile-parameterized Weibull. With scale
  # b = qt/(-log(1-t))^(1/alpha), Var = b^2 * (Gamma(1+2/alpha) - Gamma(1+1/alpha)^2).
  if (t > 1 | t < 0) {
    stop("parámetro t solo puede contener valores entre 0 y 1")
  }
  if (qt < 0){
    stop("parámetro qt solo puede ser superior a 0")
  }
  if (alpha < 0){
    stop("parámetro alpha solo puede ser superior a 0")
  }
  ct=(-log(1-t))^(1/alpha)
  # BUG FIX: ct already carries the 1/alpha exponent, so the scale is
  # b = qt/ct and the variance is (qt/ct)^2 * (...); the original divided
  # by ct^(1/alpha), applying the exponent twice.
  var=((qt/ct)^2)*(gamma(1+(2/alpha))-(gamma(1+(1/alpha)))^2)
  return(var)
}
mtweibull <- function(qt,alpha,t){
  # Mean of the quantile-parameterized Weibull. With scale
  # b = qt/(-log(1-t))^(1/alpha), E[X] = b * Gamma(1 + 1/alpha).
  if (t > 1 | t < 0) {
    stop("parámetro t solo puede contener valores entre 0 y 1")
  }
  if (qt < 0){
    stop("parámetro qt solo puede ser superior a 0")
  }
  if (alpha < 0){
    stop("parámetro alpha solo puede ser superior a 0")
  }
  ct=(-log(1-t))^(1/alpha)
  # BUG FIX: ct already carries the 1/alpha exponent, so the scale is
  # b = qt/ct; the original divided by ct^(1/alpha), applying it twice.
  mt=(qt/ct)*gamma(1+1/alpha)
  return(mt)
}
mtweibull(1,2,0.5)
qtseq = seq(1,10,0.1)
# Expected value as a function of qt, one column per alpha in 1:4 (t = 0.5).
e_x1 <- data.frame(
  a = mtweibull(qtseq, 1, 0.5),
  b = mtweibull(qtseq, 2, 0.5),
  c = mtweibull(qtseq, 3, 0.5),
  d = mtweibull(qtseq, 4, 0.5),
  qtseq = qtseq
)
## Expected value; horizontal axis is qt
ggplot(e_x1)+
  geom_line(aes(x=qtseq,y=a),colour="red",size=1)+    # alpha = 1
  geom_line(aes(x=qtseq,y=b),colour="blue",size=1)+   # alpha = 2
  geom_line(aes(x=qtseq,y=c),colour="green",size=1)+  # alpha = 3
  geom_line(aes(x=qtseq,y=d),colour="black",size=1)   # alpha = 4
# Expected value as a function of alpha (qtseq on that axis), per qt in 1:4.
e_x2 <- data.frame(
  a = mtweibull(1, qtseq, 0.5),
  b = mtweibull(2, qtseq, 0.5),
  c = mtweibull(3, qtseq, 0.5),
  d = mtweibull(4, qtseq, 0.5),
  qtseq = qtseq
)
## Expected value; horizontal axis is alpha
ggplot(e_x2)+
  geom_line(aes(x=qtseq,y=a),colour="red",size=1)+    # qt = 1
  geom_line(aes(x=qtseq,y=b),colour="blue",size=1)+   # qt = 2
  geom_line(aes(x=qtseq,y=c),colour="green",size=1)+  # qt = 3
  geom_line(aes(x=qtseq,y=d),colour="black",size=1)   # qt = 4
# Variance as a function of qt, one column per alpha in 1:4 (t = 0.5).
v_x1 <- data.frame(
  a = vtweibull(qtseq, 1, 0.5),
  b = vtweibull(qtseq, 2, 0.5),
  c = vtweibull(qtseq, 3, 0.5),
  d = vtweibull(qtseq, 4, 0.5),
  qtseq = qtseq
)
## Variance; horizontal axis is qt
ggplot(v_x1)+
  geom_line(aes(x=qtseq,y=a),colour="red",size=1)+    # alpha = 1
  geom_line(aes(x=qtseq,y=b),colour="blue",size=1)+   # alpha = 2
  geom_line(aes(x=qtseq,y=c),colour="green",size=1)+  # alpha = 3
  geom_line(aes(x=qtseq,y=d),colour="black",size=1)   # alpha = 4
# Variance as a function of alpha (qtseq on that axis), per qt in 1:4.
v_x2 <- data.frame(
  a = vtweibull(1, qtseq, 0.5),
  b = vtweibull(2, qtseq, 0.5),
  c = vtweibull(3, qtseq, 0.5),
  d = vtweibull(4, qtseq, 0.5),
  qtseq = qtseq
)
## Variance; horizontal axis is alpha
ggplot(v_x2)+
  geom_line(aes(x=qtseq,y=a),colour="red",size=1)+    # qt = 1
  geom_line(aes(x=qtseq,y=b),colour="blue",size=1)+   # qt = 2
  geom_line(aes(x=qtseq,y=c),colour="green",size=1)+  # qt = 3
  geom_line(aes(x=qtseq,y=d),colour="black",size=1)   # qt = 4
# Density curves over qtseq for qt in 1:5, with alpha = 5 and t = 0.5 fixed.
qrv <- data.frame(
  a = dtweibull(qtseq, qt = 1, alpha = 5, t = 0.5),
  b = dtweibull(qtseq, qt = 2, alpha = 5, t = 0.5),
  c = dtweibull(qtseq, qt = 3, alpha = 5, t = 0.5),
  d = dtweibull(qtseq, qt = 4, alpha = 5, t = 0.5),
  e = dtweibull(qtseq, qt = 5, alpha = 5, t = 0.5)
)
# density plot, alpha = 5, t = 0.5
ggplot(qrv)+
  geom_line(aes(y=a,x=qtseq),colour="red")+     # qt = 1
  geom_line(aes(y=b,x=qtseq),colour="blue")+    # qt = 2
  geom_line(aes(y=c,x=qtseq),colour="green")+   # qt = 3
  geom_line(aes(y=d,x=qtseq),colour="black")+   # qt = 4
  geom_line(aes(y=e,x=qtseq),colour="yellow")   # qt = 5
# Density curves over qtseq for alpha in 1:5, with qt = 3 and t = 0.5 fixed.
arv <- data.frame(
  a = dtweibull(qtseq, qt = 3, alpha = 1, t = 0.5),
  b = dtweibull(qtseq, qt = 3, alpha = 2, t = 0.5),
  c = dtweibull(qtseq, qt = 3, alpha = 3, t = 0.5),
  d = dtweibull(qtseq, qt = 3, alpha = 4, t = 0.5),
  e = dtweibull(qtseq, qt = 3, alpha = 5, t = 0.5)
)
# density plot, qt = 3, t = 0.5
ggplot(arv)+
  geom_line(aes(y=a,x=qtseq),colour="red")+     # alpha = 1
  geom_line(aes(y=b,x=qtseq),colour="blue")+    # alpha = 2
  geom_line(aes(y=c,x=qtseq),colour="green")+   # alpha = 3
  geom_line(aes(y=d,x=qtseq),colour="black")+   # alpha = 4
  geom_line(aes(y=e,x=qtseq),colour="yellow")   # alpha = 5
| /QReg-Bayes,Sal y Rosas/Profesor Bayes.R | no_license | jamanrique/maestria-pucp | R | false | false | 6,793 | r | library(gamlss)
library(gamlss.dist)
library(ggplot2)
rtweibull <- function(n,qt,alpha,t){
  # Draws n samples from a Weibull distribution re-parameterized by its
  # t-th quantile, via inverse-transform sampling:
  #   F(x) = 1 - exp(-ct * (x/qt)^alpha),  ct = -log(1 - t)
  # so F^{-1}(u) = qt * (log(1 - u) / log(1 - t))^(1/alpha).
  #
  # n     = number of observations
  # qt    > 0; value of the t-th quantile
  # alpha = Weibull shape parameter (> 0)
  # t     = quantile level, between 0 and 1
  if (t > 1 | t < 0) {
    stop("parámetro t solo puede contener valores entre 0 y 1")
  }
  if (qt < 0){
    stop("parámetro qt solo puede ser superior a 0")
  }
  if (alpha < 0){
    stop("parámetro alpha solo puede ser superior a 0")
  }
  U = runif(n)
  # BUG FIX: original used log(U+1), which does not invert the CDF.
  # The inverse transform requires log(1-U); both logs are negative,
  # so the ratio is positive and x = qt when U = t.
  qm = qt * ((log(1-U))/(log(1-t)))^(1/alpha)
  return(qm)
}
rtweibull2 <- function(n,qt,sigma,t){
# n = número de observaciones
# qt > 0; cuantil
# alpha = parámetro weibull
# t = entre 0 y 1
if (t > 1 | t < 0) {
stop("parámetro t solo puede contener valores entre 0 y 1")
}
if (qt < 0){
stop("parámetro qt solo puede ser superior a 0")
}
if (sigma < 0){
stop("parámetro sigma solo puede ser superior a 0")
}
alpha = 1/(log(sigma+1))
beta = qt/(-log(1-t))^(1/alpha)
qm = rweibull(n = n,scale = beta,shape = alpha)
return(qm)
}
library(BB)
library(numDeriv)
muestra <- rtweibull2(10000,2,2,0.1)
hist(muestra)
n <- length(muestra)
few <- function(x,param){
  # Density of the quantile-parameterized Weibull at x, with
  # param = c(qt, sigma, t):
  #   f(x) = (a*ct/qt) * (x/qt)^(a-1) * exp(-ct*(x/qt)^a)
  # where a = 1/log(sigma+1) and ct = -log(1-t).
  qt <- param[1]
  sigma <- param[2]
  t <- param[3]
  ct <- -(log(1-t))
  a <- 1/(log(sigma+1))
  # BUG FIX: original computed (a) * ((ct/qt)^(a-1)) * exp(...), dropping
  # the x^(a-1) factor and the ct/qt normalization; the corrected form
  # matches the log-density used in ll.
  val <- (a*ct/qt) * ((x/qt)^(a-1)) * exp((-ct)*(x/qt)^(a))
  return(val)
}
lfew <- function(param){
val = 0
for(i in 1:n){
val = val + log(few(muestra[i],param))
}
val = - val
return(val)
}
ll=function(param,x,t){
qt <- param[1]
sigma <- param[2]
ct <- -(log(1-t))
a <- 1/(log(sigma+1))
-sum(log(a)+log(ct)-a*log(qt)+(a-1)*log(x)-ct*(x/qt)**a)
}
inicial = c(4,3)
# Maximum-likelihood fit of (qt, sigma) with the quantile level fixed at t = 0.5.
esti_mv = nlminb(inicial,ll,x=muestra,t=0.5,lower=c(0,0),upper = c(Inf,Inf))
esti_mv$par
## Control: qt = 2, sigma = 2, t = 0.000002
esti_mv$par
# BUG FIX: only two parameters are estimated, so esti_mv$par[3] is NA and
# would make rtweibull2 fail; t must be the same fixed level (0.5) used in the fit.
hist(rtweibull2(10000,esti_mv$par[1],esti_mv$par[2],0.5))
dtweibull <- function(value,qt,alpha,t){
if (t > 1 | t < 0) {
stop("parámetro t solo puede contener valores entre 0 y 1")
}
if (qt < 0){
stop("parámetro qt solo puede ser superior a 0")
}
if (alpha < 0){
stop("parámetro alpha solo puede ser superior a 0")
}
ct=(-log(1-t))^(1/alpha)
b= qt/ct
a=alpha
dt = dweibull(x = value,a,b)
return(dt)
}
ptweibull <- function(value,qt,alpha,t){
  # Cumulative distribution function of the quantile-parameterized Weibull:
  # maps (qt, alpha, t) to shape/scale and delegates to stats::pweibull.
  if (t > 1 | t < 0) {
    stop("parámetro t solo puede contener valores entre 0 y 1")
  }
  if (qt < 0){
    stop("parámetro qt solo puede ser superior a 0")
  }
  if (alpha < 0){
    stop("parámetro alpha solo puede ser superior a 0")
  }
  ct=(-log(1-t))^(1/alpha)
  b= qt/ct
  a= alpha
  # BUG FIX: pweibull's first argument is named `q`, not `x`; the original
  # call pweibull(x = value, ...) failed with an "unused argument" error.
  # Also return the value explicitly instead of an invisible assignment.
  pt = pweibull(q = value, shape = a, scale = b)
  return(pt)
}
vtweibull <- function(qt,alpha,t){
  # Variance of the quantile-parameterized Weibull. With scale
  # b = qt/(-log(1-t))^(1/alpha), Var = b^2 * (Gamma(1+2/alpha) - Gamma(1+1/alpha)^2).
  if (t > 1 | t < 0) {
    stop("parámetro t solo puede contener valores entre 0 y 1")
  }
  if (qt < 0){
    stop("parámetro qt solo puede ser superior a 0")
  }
  if (alpha < 0){
    stop("parámetro alpha solo puede ser superior a 0")
  }
  ct=(-log(1-t))^(1/alpha)
  # BUG FIX: ct already carries the 1/alpha exponent, so the scale is
  # b = qt/ct and the variance is (qt/ct)^2 * (...); the original divided
  # by ct^(1/alpha), applying the exponent twice.
  var=((qt/ct)^2)*(gamma(1+(2/alpha))-(gamma(1+(1/alpha)))^2)
  return(var)
}
mtweibull <- function(qt,alpha,t){
  # Mean of the quantile-parameterized Weibull. With scale
  # b = qt/(-log(1-t))^(1/alpha), E[X] = b * Gamma(1 + 1/alpha).
  if (t > 1 | t < 0) {
    stop("parámetro t solo puede contener valores entre 0 y 1")
  }
  if (qt < 0){
    stop("parámetro qt solo puede ser superior a 0")
  }
  if (alpha < 0){
    stop("parámetro alpha solo puede ser superior a 0")
  }
  ct=(-log(1-t))^(1/alpha)
  # BUG FIX: ct already carries the 1/alpha exponent, so the scale is
  # b = qt/ct; the original divided by ct^(1/alpha), applying it twice.
  mt=(qt/ct)*gamma(1+1/alpha)
  return(mt)
}
mtweibull(1,2,0.5)
qtseq = seq(1,10,0.1)
e_x1=data.frame(a=mtweibull(qtseq,1,0.5))
e_x1=cbind(e_x1,data.frame(b=mtweibull(qtseq,2,0.5)))
e_x1=cbind(e_x1,data.frame(c=mtweibull(qtseq,3,0.5)))
e_x1=cbind(e_x1,data.frame(d=mtweibull(qtseq,4,0.5)))
e_x1=cbind(e_x1,qtseq)
## Valor esperado; eje horizontal es qt
ggplot(e_x1)+
geom_line(aes(x=qtseq,y=a),colour="red",size=1)+ #alpha =1
geom_line(aes(x=qtseq,y=b),colour="blue",size=1)+ #alpha =2
geom_line(aes(x=qtseq,y=c),colour="green",size=1)+ #alpha =3
geom_line(aes(x=qtseq,y=d),colour="black",size=1) # alpha=4
e_x2=data.frame(a=mtweibull(1,qtseq,0.5))
e_x2=cbind(e_x2,data.frame(b=mtweibull(2,qtseq,0.5)))
e_x2=cbind(e_x2,data.frame(c=mtweibull(3,qtseq,0.5)))
e_x2=cbind(e_x2,data.frame(d=mtweibull(4,qtseq,0.5)))
e_x2=cbind(e_x2,qtseq)
## Valor esperado; eje horizontal es alpha
ggplot(e_x2)+
geom_line(aes(x=qtseq,y=a),colour="red",size=1)+ #qt =1
geom_line(aes(x=qtseq,y=b),colour="blue",size=1)+ #qt =2
geom_line(aes(x=qtseq,y=c),colour="green",size=1)+ #qt =3
geom_line(aes(x=qtseq,y=d),colour="black",size=1) #qt= 4
v_x1=data.frame(a=vtweibull(qtseq,1,0.5))
v_x1=cbind(v_x1,data.frame(b=vtweibull(qtseq,2,0.5)))
v_x1=cbind(v_x1,data.frame(c=vtweibull(qtseq,3,0.5)))
v_x1=cbind(v_x1,data.frame(d=vtweibull(qtseq,4,0.5)))
v_x1=cbind(v_x1,qtseq)
## Varianza; eje horizontal es qt
ggplot(v_x1)+
geom_line(aes(x=qtseq,y=a),colour="red",size=1)+ #alpha =1
geom_line(aes(x=qtseq,y=b),colour="blue",size=1)+ #alpha =2
geom_line(aes(x=qtseq,y=c),colour="green",size=1)+ #alpha =3
geom_line(aes(x=qtseq,y=d),colour="black",size=1) # alpha=4
v_x2=data.frame(a=vtweibull(1,qtseq,0.5))
v_x2=cbind(v_x2,data.frame(b=vtweibull(2,qtseq,0.5)))
v_x2=cbind(v_x2,data.frame(c=vtweibull(3,qtseq,0.5)))
v_x2=cbind(v_x2,data.frame(d=vtweibull(4,qtseq,0.5)))
v_x2=cbind(v_x2,qtseq)
## Varianza; eje horizontal es alpha
ggplot(v_x2)+
geom_line(aes(x=qtseq,y=a),colour="red",size=1)+ #qt =1
geom_line(aes(x=qtseq,y=b),colour="blue",size=1)+ #qt =2
geom_line(aes(x=qtseq,y=c),colour="green",size=1)+ #qt =3
geom_line(aes(x=qtseq,y=d),colour="black",size=1) #qt= 4
qrv=data.frame(a=dtweibull(qtseq,qt=1,alpha=5,t=0.5))
qrv=cbind(qrv,data.frame(b=dtweibull(qtseq,qt=2,alpha=5,t=0.5)))
qrv=cbind(qrv,data.frame(c=dtweibull(qtseq,qt=3,alpha=5,t=0.5)))
qrv=cbind(qrv,data.frame(d=dtweibull(qtseq,qt=4,alpha=5,t=0.5)))
qrv=cbind(qrv,data.frame(e=dtweibull(qtseq,qt=5,alpha=5,t=0.5)))
#plot de densidad, alpha = 5, t=0.5
ggplot(qrv)+
geom_line(aes(y=a,x=qtseq),colour="red")+ #qt =1
geom_line(aes(y=b,x=qtseq),colour="blue")+ #qt = 2
geom_line(aes(y=c,x=qtseq),colour="green")+ #qt = 3
geom_line(aes(y=d,x=qtseq),colour="black")+# qt=4
geom_line(aes(y=e,x=qtseq),colour="yellow") #qt=5
arv=data.frame(a=dtweibull(qtseq,qt=3,alpha=1,t=0.5))
arv=cbind(arv,data.frame(b=dtweibull(qtseq,qt=3,alpha=2,t=0.5)))
arv=cbind(arv,data.frame(c=dtweibull(qtseq,qt=3,alpha=3,t=0.5)))
arv=cbind(arv,data.frame(d=dtweibull(qtseq,qt=3,alpha=4,t=0.5)))
arv=cbind(arv,data.frame(e=dtweibull(qtseq,qt=3,alpha=5,t=0.5)))
#plot de densidad, qt = 3, t=0.5
ggplot(arv)+
geom_line(aes(y=a,x=qtseq),colour="red")+ #alpha =1
geom_line(aes(y=b,x=qtseq),colour="blue")+ #alpha = 2
geom_line(aes(y=c,x=qtseq),colour="green")+ #alpha = 3
geom_line(aes(y=d,x=qtseq),colour="black")+# alpha=4
geom_line(aes(y=e,x=qtseq),colour="yellow") # alpha=5
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/train.default.R
\name{train}
\alias{train}
\alias{train.default}
\alias{train.formula}
\alias{train.default}
\alias{train.formula}
\alias{train.recipe}
\title{Fit Predictive Models over Different Tuning Parameters}
\usage{
train(x, ...)
\method{train}{default}(x, y, method = "rf", preProcess = NULL, ...,
weights = NULL, metric = ifelse(is.factor(y), "Accuracy", "RMSE"),
maximize = ifelse(metric \%in\% c("RMSE", "logLoss", "MAE"), FALSE, TRUE),
trControl = trainControl(), tuneGrid = NULL,
tuneLength = ifelse(trControl$method == "none", 1, 3))
\method{train}{formula}(form, data, ..., weights, subset, na.action = na.fail,
contrasts = NULL)
\method{train}{recipe}(recipe, data, method = "rf", ...,
metric = ifelse(is.factor(y), "Accuracy", "RMSE"),
maximize = ifelse(metric \%in\% c("RMSE", "logLoss", "MAE"), FALSE, TRUE),
trControl = trainControl(), tuneGrid = NULL,
tuneLength = ifelse(trControl$method == "none", 1, 3))
}
\arguments{
\item{x}{An object where samples are in rows and features are in columns.
This could be a simple matrix, data frame or other type (e.g. sparse
matrix) but must have column names. See Details below. Preprocessing using the \code{preProcess}
argument only supports matrices or data frames.}
\item{\dots}{Arguments passed to the classification or regression routine
(such as \code{\link[randomForest]{randomForest}}). Errors will occur if
values for tuning parameters are passed here.}
\item{y}{A numeric or factor vector containing the outcome for each sample.}
\item{method}{A string specifying which classification or regression model
to use. Possible values are found using \code{names(getModelInfo())}. See
\url{http://topepo.github.io/caret/train-models-by-tag.html}. A list of functions can also
be passed for a custom model function. See
\url{http://topepo.github.io/caret/using-your-own-model-in-train.html} for details.}
\item{preProcess}{A string vector that defines a pre-processing of the
predictor data. Current possibilities are "BoxCox", "YeoJohnson",
"expoTrans", "center", "scale", "range", "knnImpute", "bagImpute",
"medianImpute", "pca", "ica" and "spatialSign". The default is no
pre-processing. See \code{\link{preProcess}} and \code{\link{trainControl}}
on the procedures and how to adjust them. Pre-processing code is only
designed to work when \code{x} is a simple matrix or data frame.}
\item{weights}{A numeric vector of case weights. This argument will only
affect models that allow case weights.}
\item{metric}{A string that specifies what summary metric will be used to
select the optimal model. By default, possible values are "RMSE" and
"Rsquared" for regression and "Accuracy" and "Kappa" for classification. If
custom performance metrics are used (via the \code{summaryFunction} argument
in \code{\link{trainControl}}), the value of \code{metric} should match one
of the arguments. If it does not, a warning is issued and the first metric
given by the \code{summaryFunction} is used. (NOTE: If given, this argument
must be named.)}
\item{maximize}{A logical: should the metric be maximized or minimized?}
\item{trControl}{A list of values that define how this function acts. See
\code{\link{trainControl}} and
\url{http://topepo.github.io/caret/using-your-own-model-in-train.html}. (NOTE: If given,
this argument must be named.)}
\item{tuneGrid}{A data frame with possible tuning values. The columns are
named the same as the tuning parameters. Use \code{\link{getModelInfo}} to
get a list of tuning parameters for each model or see
\url{http://topepo.github.io/caret/available-models.html}. (NOTE: If given, this
argument must be named.)}
\item{tuneLength}{An integer denoting the amount of granularity in the
tuning parameter grid. By default, this argument is the number of levels for
each tuning parameters that should be generated by \code{\link{train}}. If
\code{\link{trainControl}} has the option \code{search = "random"}, this is
the maximum number of tuning parameter combinations that will be generated
by the random search. (NOTE: If given, this argument must be named.)}
\item{form}{A formula of the form \code{y ~ x1 + x2 + ...}}
\item{data}{Data frame from which variables specified in \code{formula} or
\code{recipe} are preferentially to be taken.}
\item{subset}{An index vector specifying the cases to be used in the
training sample. (NOTE: If given, this argument must be named.)}
\item{na.action}{A function to specify the action to be taken if NAs are
found. The default action is for the procedure to fail. An alternative is
\code{na.omit}, which leads to rejection of cases with missing values on any
required variable. (NOTE: If given, this argument must be named.)}
\item{contrasts}{A list of contrasts to be used for some or all the factors
appearing as variables in the model formula.}
\item{recipe}{An unprepared \code{\link{recipe}} object that describes the
model terms (i.e. outcome, predictors, etc.) as well as any pre-processing
that should be done to the data. This is an alternative approach to specifying
the model. Note that, when using the recipe method, any arguments passed to
\code{preProcess} will be ignored. See the links and example below for
more details using recipes.}
}
\value{
A list is returned of class \code{train} containing: \item{method
}{The chosen model.} \item{modelType }{An identifier of the model type.}
\item{results }{A data frame with the training error rate and values of the
tuning parameters.} \item{bestTune }{A data frame with the final
parameters.}
\item{call}{The (matched) function call with dots expanded} \item{dots}{A
list containing any ... values passed to the original call} \item{metric}{A
string that specifies what summary metric will be used to select the optimal
model.} \item{control}{The list of control parameters.} \item{preProcess
}{Either \code{NULL} or an object of class \code{\link{preProcess}}}
\item{finalModel}{A fit object using the best parameters}
\item{trainingData}{A data frame} \item{resample}{A data frame with columns
for each performance metric. Each row corresponds to each resample. If
leave-one-out cross-validation or out-of-bag estimation methods are
requested, this will be \code{NULL}. The \code{returnResamp} argument of
\code{\link{trainControl}} controls how much of the resampled results are
saved.} \item{perfNames}{A character vector of performance metrics that are
produced by the summary function} \item{maximize}{A logical recycled from
the function arguments.} \item{yLimits}{The range of the training set
outcomes.} \item{times}{A list of execution times: \code{everything} is for
the entire call to \code{train}, \code{final} for the final model fit and,
optionally, \code{prediction} for the time to predict new samples (see
\code{\link{trainControl}})}
}
\description{
This function sets up a grid of tuning parameters for a number of
classification and regression routines, fits each model and calculates a
resampling based performance measure.
}
\details{
\code{train} can be used to tune models by picking the complexity parameters
that are associated with the optimal resampling statistics. For a particular
that are associated with the optimal resampling statistics. For particular
model, a grid of parameters (if any) is created and the model is trained on
slightly different data for each candidate combination of tuning parameters.
Across each data set, the performance of held-out samples is calculated and
the mean and standard deviation are summarized for each combination. The
combination with the optimal resampling statistic is chosen as the final
model and the entire training set is used to fit a final model.
The predictors in \code{x} can be most any object as long as the underlying
model fit function can deal with the object class. The function was designed
to work with simple matrices and data frame inputs, so some functionality
may not work (e.g. pre-processing). When using string kernels, the vector of
character strings should be converted to a matrix with a single column.
More details on this function can be found at
\url{http://topepo.github.io/caret/model-training-and-tuning.html}.
A variety of models are currently available and are enumerated by tag (i.e.
their model characteristics) at
\url{http://topepo.github.io/caret/train-models-by-tag.html}.
More details on using recipes can be found at
\url{http://topepo.github.io/caret/recipes.html}.
Note that case weights can be passed into \code{train}
using a role of \code{"case weight"} for a single variable.
Also, if there are non-predictor columns that should be used
when determining the model's performance metrics, the role
of \code{"performance var"} can be used with multiple columns
and these will be made available during resampling to the
\code{summaryFunction} function.
}
\examples{
\dontrun{
#######################################
## Classification Example
data(iris)
TrainData <- iris[,1:4]
TrainClasses <- iris[,5]
knnFit1 <- train(TrainData, TrainClasses,
method = "knn",
preProcess = c("center", "scale"),
tuneLength = 10,
trControl = trainControl(method = "cv"))
knnFit2 <- train(TrainData, TrainClasses,
method = "knn",
preProcess = c("center", "scale"),
tuneLength = 10,
trControl = trainControl(method = "boot"))
library(MASS)
nnetFit <- train(TrainData, TrainClasses,
method = "nnet",
preProcess = "range",
tuneLength = 2,
trace = FALSE,
maxit = 100)
#######################################
## Regression Example
library(mlbench)
data(BostonHousing)
lmFit <- train(medv ~ . + rm:lstat,
data = BostonHousing,
method = "lm")
library(rpart)
rpartFit <- train(medv ~ .,
data = BostonHousing,
method = "rpart",
tuneLength = 9)
#######################################
## Example with a custom metric
madSummary <- function (data,
lev = NULL,
model = NULL) {
out <- mad(data$obs - data$pred,
na.rm = TRUE)
names(out) <- "MAD"
out
}
robustControl <- trainControl(summaryFunction = madSummary)
marsGrid <- expand.grid(degree = 1, nprune = (1:10) * 2)
earthFit <- train(medv ~ .,
data = BostonHousing,
method = "earth",
tuneGrid = marsGrid,
metric = "MAD",
maximize = FALSE,
trControl = robustControl)
#######################################
## Example with a recipe
data(cox2)
cox2 <- cox2Descr
cox2$potency <- cox2IC50
library(recipes)
cox2_recipe <- recipe(potency ~ ., data = cox2) \%>\%
## Log the outcome
step_log(potency, base = 10) \%>\%
## Remove sparse and unbalanced predictors
step_nzv(all_predictors()) \%>\%
## Surface area predictors are highly correlated so
## conduct PCA just on these.
step_pca(contains("VSA"), prefix = "surf_area_",
threshold = .95) \%>\%
## Remove other highly correlated predictors
step_corr(all_predictors(), -starts_with("surf_area_"),
threshold = .90) \%>\%
## Center and scale all of the non-PCA predictors
step_center(all_predictors(), -starts_with("surf_area_")) \%>\%
step_scale(all_predictors(), -starts_with("surf_area_"))
set.seed(888)
cox2_lm <- train(cox2_recipe,
data = cox2,
method = "lm",
trControl = trainControl(method = "cv"))
#######################################
## Parallel Processing Example via multicore package
## library(doMC)
## registerDoMC(2)
## NOTE: don't run models from RWeka when using
### multicore. The session will crash.
## The code for train() does not change:
set.seed(1)
usingMC <- train(medv ~ .,
data = BostonHousing,
method = "glmboost")
## or use:
## library(doMPI) or
## library(doParallel) or
## library(doSMP) and so on
}
}
\references{
\url{http://topepo.github.io/caret/}
Kuhn (2008), ``Building Predictive Models in R Using the caret Package''
(\url{http://www.jstatsoft.org/article/view/v028i05/v28i05.pdf})
\url{https://topepo.github.io/recipes/}
}
\seealso{
\code{\link{models}}, \code{\link{trainControl}},
\code{\link{update.train}}, \code{\link{modelLookup}},
\code{\link{createFolds}}, \code{\link[recipes]{recipe}}
}
\author{
Max Kuhn (the guts of \code{train.formula} were based on Ripley's
\code{nnet.formula})
}
\keyword{models}
| /pkg/caret/man/train.Rd | no_license | Weekend-Warrior/caret | R | false | true | 12,621 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/train.default.R
\name{train}
\alias{train}
\alias{train.default}
\alias{train.formula}
\alias{train.default}
\alias{train.formula}
\alias{train.recipe}
\title{Fit Predictive Models over Different Tuning Parameters}
\usage{
train(x, ...)
\method{train}{default}(x, y, method = "rf", preProcess = NULL, ...,
weights = NULL, metric = ifelse(is.factor(y), "Accuracy", "RMSE"),
maximize = ifelse(metric \%in\% c("RMSE", "logLoss", "MAE"), FALSE, TRUE),
trControl = trainControl(), tuneGrid = NULL,
tuneLength = ifelse(trControl$method == "none", 1, 3))
\method{train}{formula}(form, data, ..., weights, subset, na.action = na.fail,
contrasts = NULL)
\method{train}{recipe}(recipe, data, method = "rf", ...,
metric = ifelse(is.factor(y), "Accuracy", "RMSE"),
maximize = ifelse(metric \%in\% c("RMSE", "logLoss", "MAE"), FALSE, TRUE),
trControl = trainControl(), tuneGrid = NULL,
tuneLength = ifelse(trControl$method == "none", 1, 3))
}
\arguments{
\item{x}{An object where samples are in rows and features are in columns.
This could be a simple matrix, data frame or other type (e.g. sparse
matrix) but must have column names. See Details below. Preprocessing using the \code{preProcess}
argument only supports matrices or data frames.}
\item{\dots}{Arguments passed to the classification or regression routine
(such as \code{\link[randomForest]{randomForest}}). Errors will occur if
values for tuning parameters are passed here.}
\item{y}{A numeric or factor vector containing the outcome for each sample.}
\item{method}{A string specifying which classification or regression model
to use. Possible values are found using \code{names(getModelInfo())}. See
\url{http://topepo.github.io/caret/train-models-by-tag.html}. A list of functions can also
be passed for a custom model function. See
\url{http://topepo.github.io/caret/using-your-own-model-in-train.html} for details.}
\item{preProcess}{A string vector that defines a pre-processing of the
predictor data. Current possibilities are "BoxCox", "YeoJohnson",
"expoTrans", "center", "scale", "range", "knnImpute", "bagImpute",
"medianImpute", "pca", "ica" and "spatialSign". The default is no
pre-processing. See \code{\link{preProcess}} and \code{\link{trainControl}}
on the procedures and how to adjust them. Pre-processing code is only
designed to work when \code{x} is a simple matrix or data frame.}
\item{weights}{A numeric vector of case weights. This argument will only
affect models that allow case weights.}
\item{metric}{A string that specifies what summary metric will be used to
select the optimal model. By default, possible values are "RMSE" and
"Rsquared" for regression and "Accuracy" and "Kappa" for classification. If
custom performance metrics are used (via the \code{summaryFunction} argument
in \code{\link{trainControl}}, the value of \code{metric} should match one
of the arguments. If it does not, a warning is issued and the first metric
given by the \code{summaryFunction} is used. (NOTE: If given, this argument
must be named.)}
\item{maximize}{A logical: should the metric be maximized or minimized?}
\item{trControl}{A list of values that define how this function acts. See
\code{\link{trainControl}} and
\url{http://topepo.github.io/caret/using-your-own-model-in-train.html}. (NOTE: If given,
this argument must be named.)}
\item{tuneGrid}{A data frame with possible tuning values. The columns are
named the same as the tuning parameters. Use \code{\link{getModelInfo}} to
get a list of tuning parameters for each model or see
\url{http://topepo.github.io/caret/available-models.html}. (NOTE: If given, this
argument must be named.)}
\item{tuneLength}{An integer denoting the amount of granularity in the
tuning parameter grid. By default, this argument is the number of levels for
each tuning parameters that should be generated by \code{\link{train}}. If
\code{\link{trainControl}} has the option \code{search = "random"}, this is
the maximum number of tuning parameter combinations that will be generated
by the random search. (NOTE: If given, this argument must be named.)}
\item{form}{A formula of the form \code{y ~ x1 + x2 + ...}}
\item{data}{Data frame from which variables specified in \code{formula} or
\code{recipe} are preferentially to be taken.}
\item{subset}{An index vector specifying the cases to be used in the
training sample. (NOTE: If given, this argument must be named.)}
\item{na.action}{A function to specify the action to be taken if NAs are
found. The default action is for the procedure to fail. An alternative is
\code{na.omit}, which leads to rejection of cases with missing values on any
required variable. (NOTE: If given, this argument must be named.)}
\item{contrasts}{A list of contrasts to be used for some or all the factors
appearing as variables in the model formula.}
\item{recipe}{An unprepared \code{\link{recipe}} object that describes the
model terms (i.e. outcome, predictors, etc.) as well as any pre-processing
that should be done to the data. This is an alternative approach to specifying
the model. Note that, when using the recipe method, any arguments passed to
\code{preProcess} will be ignored. See the links and example below for
more details using recipes.}
}
\value{
A list is returned of class \code{train} containing: \item{method
}{The chosen model.} \item{modelType }{An identifier of the model type.}
\item{results }{A data frame the training error rate and values of the
tuning parameters.} \item{bestTune }{A data frame with the final
parameters.}
\item{call}{The (matched) function call with dots expanded} \item{dots}{A
list containing any ... values passed to the original call} \item{metric}{A
string that specifies what summary metric will be used to select the optimal
model.} \item{control}{The list of control parameters.} \item{preProcess
}{Either \code{NULL} or an object of class \code{\link{preProcess}}}
\item{finalModel}{A fit object using the best parameters}
\item{trainingData}{A data frame} \item{resample}{A data frame with columns
for each performance metric. Each row corresponds to each resample. If
leave-one-out cross-validation or out-of-bag estimation methods are
requested, this will be \code{NULL}. The \code{returnResamp} argument of
\code{\link{trainControl}} controls how much of the resampled results are
saved.} \item{perfNames}{A character vector of performance metrics that are
produced by the summary function} \item{maximize}{A logical recycled from
the function arguments.} \item{yLimits}{The range of the training set
outcomes.} \item{times}{A list of execution times: \code{everything} is for
the entire call to \code{train}, \code{final} for the final model fit and,
optionally, \code{prediction} for the time to predict new samples (see
\code{\link{trainControl}})}
}
\description{
This function sets up a grid of tuning parameters for a number of
classification and regression routines, fits each model and calculates a
resampling based performance measure.
}
\details{
\code{train} can be used to tune models by picking the complexity parameters
that are associated with the optimal resampling statistics. For particular
model, a grid of parameters (if any) is created and the model is trained on
slightly different data for each candidate combination of tuning parameters.
Across each data set, the performance of held-out samples is calculated and
the mean and standard deviation is summarized for each combination. The
combination with the optimal resampling statistic is chosen as the final
model and the entire training set is used to fit a final model.
The predictors in \code{x} can be most any object as long as the underlying
model fit function can deal with the object class. The function was designed
to work with simple matrices and data frame inputs, so some functionality
may not work (e.g. pre-processing). When using string kernels, the vector of
character strings should be converted to a matrix with a single column.
More details on this function can be found at
\url{http://topepo.github.io/caret/model-training-and-tuning.html}.
A variety of models are currently available and are enumerated by tag (i.e.
their model characteristics) at
\url{http://topepo.github.io/caret/train-models-by-tag.html}.
More details on using recipes can be found at
\url{http://topepo.github.io/caret/recipes.html}.
Note that case weights can be passed into \code{train}
using a role of \code{"case weight"} for a single variable.
Also, if there are non-predictor columns that should be used
when determining the model's performance metrics, the role
of \code{"performance var"} can be used with multiple columns
and these will be made available during resampling to the
\code{summaryFunction} function.
}
\examples{
\dontrun{
#######################################
## Classification Example
data(iris)
TrainData <- iris[,1:4]
TrainClasses <- iris[,5]
knnFit1 <- train(TrainData, TrainClasses,
method = "knn",
preProcess = c("center", "scale"),
tuneLength = 10,
trControl = trainControl(method = "cv"))
knnFit2 <- train(TrainData, TrainClasses,
method = "knn",
preProcess = c("center", "scale"),
tuneLength = 10,
trControl = trainControl(method = "boot"))
library(MASS)
nnetFit <- train(TrainData, TrainClasses,
method = "nnet",
preProcess = "range",
tuneLength = 2,
trace = FALSE,
maxit = 100)
#######################################
## Regression Example
library(mlbench)
data(BostonHousing)
lmFit <- train(medv ~ . + rm:lstat,
data = BostonHousing,
method = "lm")
library(rpart)
rpartFit <- train(medv ~ .,
data = BostonHousing,
method = "rpart",
tuneLength = 9)
#######################################
## Example with a custom metric
madSummary <- function (data,
lev = NULL,
model = NULL) {
out <- mad(data$obs - data$pred,
na.rm = TRUE)
names(out) <- "MAD"
out
}
robustControl <- trainControl(summaryFunction = madSummary)
marsGrid <- expand.grid(degree = 1, nprune = (1:10) * 2)
earthFit <- train(medv ~ .,
data = BostonHousing,
method = "earth",
tuneGrid = marsGrid,
metric = "MAD",
maximize = FALSE,
trControl = robustControl)
#######################################
## Example with a recipe
data(cox2)
cox2 <- cox2Descr
cox2$potency <- cox2IC50
library(recipes)
cox2_recipe <- recipe(potency ~ ., data = cox2) \%>\%
## Log the outcome
step_log(potency, base = 10) \%>\%
## Remove sparse and unbalanced predictors
step_nzv(all_predictors()) \%>\%
## Surface area predictors are highly correlated so
## conduct PCA just on these.
step_pca(contains("VSA"), prefix = "surf_area_",
threshold = .95) \%>\%
## Remove other highly correlated predictors
step_corr(all_predictors(), -starts_with("surf_area_"),
threshold = .90) \%>\%
## Center and scale all of the non-PCA predictors
step_center(all_predictors(), -starts_with("surf_area_")) \%>\%
step_scale(all_predictors(), -starts_with("surf_area_"))
set.seed(888)
cox2_lm <- train(cox2_recipe,
data = cox2,
method = "lm",
trControl = trainControl(method = "cv"))
#######################################
## Parallel Processing Example via multicore package
## library(doMC)
## registerDoMC(2)
## NOTE: don't run models from RWeka when using
## multicore. The session will crash.
## The code for train() does not change:
set.seed(1)
usingMC <- train(medv ~ .,
data = BostonHousing,
method = "glmboost")
## or use:
## library(doMPI) or
## library(doParallel) or
## library(doSMP) and so on
}
}
\references{
\url{http://topepo.github.io/caret/}
Kuhn (2008), ``Building Predictive Models in R Using the caret Package''
(\url{http://www.jstatsoft.org/article/view/v028i05/v28i05.pdf})
\url{https://topepo.github.io/recipes/}
}
\seealso{
\code{\link{models}}, \code{\link{trainControl}},
\code{\link{update.train}}, \code{\link{modelLookup}},
\code{\link{createFolds}}, \code{\link[recipes]{recipe}}
}
\author{
Max Kuhn (the guts of \code{train.formula} were based on Ripley's
\code{nnet.formula})
}
\keyword{models}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/colortypes.R
\name{colortypes}
\alias{colortypes}
\title{Color types}
\arguments{
\item{strokeColor}{}
\item{fillColor}{}
\item{highlightStroke}{}
\item{highlightFill}{}
\item{pointColor}{}
\item{pointStrokeColor}{}
\item{pointHighlightFill}{}
\item{pointHighlightStroke}{}
\item{color}{}
\item{highlight}{}
}
\description{
Allowed color types
}
| /man/colortypes.Rd | permissive | LSanselme/chartjs | R | false | false | 442 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/colortypes.R
\name{colortypes}
\alias{colortypes}
\title{Color types}
\arguments{
\item{strokeColor}{}
\item{fillColor}{}
\item{highlightStroke}{}
\item{highlightFill}{}
\item{pointColor}{}
\item{pointStrokeColor}{}
\item{pointHighlightFill}{}
\item{pointHighlightStroke}{}
\item{color}{}
\item{highlight}{}
}
\description{
Allowed color types
}
|
#' Plot histogram of values over given threshold
#'
#' Keeps only observations whose `maximum` exceeds `threshold`, shifts them so
#' the threshold becomes zero, and draws a histogram of the excesses.
#'
#' @param srcTbl tibble returned by filteredData() interactive
#' @param threshold cut-off value
#'
#' @return ggplot2 object
#'
plotHist <- function(srcTbl, threshold) {
  exceedances <- dplyr::mutate(
    dplyr::filter(srcTbl, maximum > threshold),
    maximum = maximum - threshold
  )
  # IQR-based bin width: IQR / n^(1/3).
  nObs <- length(exceedances$maximum)
  binW <- IQR(exceedances$maximum) / nObs^(1 / 3)
  ggplot(exceedances, aes(x = maximum)) +
    geom_histogram(binwidth = binW) +
    theme_bw()
}
#' QQ-plot for large quantiles
#'
#' Compares empirical quantiles of the standardized `maximum` column against
#' theoretical quantiles of the stable Kendall distribution.
#'
#' @param srcTbl tibble returned by filteredData()
#' @param alpha Kendall stable distribution parameter
#' @param minMaxQ minimum and maximum quantile to be used
#' @param stepQ step between minimum and maximum quantile
#' @param symmetric if TRUE, symmetrical version of stable Kendall distribution will be used
#' @param meanFunction function giving moment of order alpha of the step distribution
#'
#' @return ggplot2 object
#'
plotLargeQQ <- function(srcTbl, alpha, minMaxQ, stepQ, symmetric = FALSE, meanFunction = function(x) 1) {
  # NOTE: `symmetric` is accepted but not used anywhere in this body.
  probs <- seq(minMaxQ[1], minMaxQ[2], stepQ)
  standardized <- srcTbl %>%
    dplyr::mutate(maximum = as.vector(scale(maximum))) %>%
    dplyr::filter(is.finite(maximum)) %>%
    dplyr::ungroup() %>%
    dplyr::select(maximum) %>%
    unlist(use.names = FALSE)
  empirical <- quantile(standardized, probs = probs, na.rm = TRUE)
  theoretical <- qkend(meanFunction)(probs, alpha)
  plotTbl <- dplyr::filter(tibble(x = empirical, y = theoretical),
                           is.finite(x),
                           is.finite(y))
  ggplot(plotTbl, aes(x, y)) +
    geom_point() +
    geom_smooth(method = "lm", se = FALSE) +
    theme_bw() +
    xlab("empirical") +
    ylab("theoretical")
}
#' Standard QQ-plot
#'
#' Plots empirical quantiles of threshold excesses against theoretical stable
#' Kendall quantiles; with `symmetric = TRUE` the data are standardized and the
#' symmetric quantile function is used.
#'
#' @param srcTbl tibble returned by filteredData()
#' @param alpha Kendall stable dist. parameter
#' @param meanFunction function giving moment of order alpha of the step distribution
#' @param symmetric if TRUE, symmetrical version of stable Kendall distribution will be used
#' @param threshold cut-off value for observations
#'
#' @return ggplot2 object
#'
plotQQ <- function(srcTbl, alpha, meanFunction = function(x) 1, symmetric = FALSE, threshold = 0) {
  x <- srcTbl %>%
    dplyr::filter(is.finite(maximum),
                  maximum > threshold) %>%
    dplyr::mutate(maximum = maximum - threshold)
  if (symmetric) {
    x <- x %>%
      dplyr::mutate(maximum = as.vector(scale(maximum)))
  }
  x <- x %>%
    dplyr::ungroup() %>%
    dplyr::select(maximum) %>%
    # Was a bare `filter()`: in a package context that can dispatch to
    # stats::filter and fail; qualify it like every other dplyr call here.
    dplyr::filter(is.finite(maximum)) %>%
    unlist(use.names = FALSE)
  # seq_along() is safe for empty input, unlike 1:length(x).
  prob <- (seq_along(x) - 0.5) / length(x)
  x <- quantile(x, prob)
  qLim <- if (symmetric) qkendSym(meanFunction) else qkend(meanFunction)
  y <- qLim(prob, alpha)
  tibble(x = x, y = y) %>%
    ggplot(aes(x, y)) +
    geom_point() +
    geom_smooth(method = "lm", se = FALSE) +
    theme_bw() +
    xlab("empirical") +
    ylab("theoretical")
}
#' Plot of data over time
#'
#' Line plot of `maximum` against `measTime`.
#'
#' @param srcTbl tibble returned by filteredData()
#'
#' @return ggplot2 object
#'
plotTime <- function(srcTbl, datesRange = "") {
  # `datesRange` is currently unused; the date filtering it was meant to drive
  # is disabled.
  ggplot(srcTbl, aes(x = measTime, y = maximum)) +
    geom_line() +
    theme_bw() +
    xlab("date") +
    ylab("measured value")
}
#' ECDF for data
#'
#' Empirical cumulative distribution function of the `maximum` column.
#'
#' @param srcTbl tibble returned by filteredData()
#'
#' @return ggplot2 object
#'
plotEcdf <- function(srcTbl) {
  srcTbl %>%
    ggplot(aes(x = maximum)) +
    stat_ecdf() +
    theme_bw()
}
| /R/vis.R | permissive | asmarka/kendallRandomPackage | R | false | false | 3,496 | r | #' Plot histogram of values over given threshold
#' Plot histogram of values over given threshold
#'
#' Keeps only observations whose `maximum` exceeds `threshold`, shifts them so
#' the threshold becomes zero, and draws a histogram of the excesses.
#'
#' @param srcTbl tibble returned by filteredData() interactive
#' @param threshold cut-off value
#'
#' @return ggplot2 object
#'
plotHist <- function(srcTbl, threshold) {
  exceedances <- dplyr::mutate(
    dplyr::filter(srcTbl, maximum > threshold),
    maximum = maximum - threshold
  )
  # IQR-based bin width: IQR / n^(1/3).
  nObs <- length(exceedances$maximum)
  binW <- IQR(exceedances$maximum) / nObs^(1 / 3)
  ggplot(exceedances, aes(x = maximum)) +
    geom_histogram(binwidth = binW) +
    theme_bw()
}
#' QQ-plot for large quantiles
#'
#' Compares empirical quantiles of the standardized `maximum` column against
#' theoretical quantiles of the stable Kendall distribution.
#'
#' @param srcTbl tibble returned by filteredData()
#' @param alpha Kendall stable distribution parameter
#' @param minMaxQ minimum and maximum quantile to be used
#' @param stepQ step between minimum and maximum quantile
#' @param symmetric if TRUE, symmetrical version of stable Kendall distribution will be used
#' @param meanFunction function giving moment of order alpha of the step distribution
#'
#' @return ggplot2 object
#'
plotLargeQQ <- function(srcTbl, alpha, minMaxQ, stepQ, symmetric = FALSE, meanFunction = function(x) 1) {
  # NOTE: `symmetric` is accepted but not used anywhere in this body.
  probs <- seq(minMaxQ[1], minMaxQ[2], stepQ)
  standardized <- srcTbl %>%
    dplyr::mutate(maximum = as.vector(scale(maximum))) %>%
    dplyr::filter(is.finite(maximum)) %>%
    dplyr::ungroup() %>%
    dplyr::select(maximum) %>%
    unlist(use.names = FALSE)
  empirical <- quantile(standardized, probs = probs, na.rm = TRUE)
  theoretical <- qkend(meanFunction)(probs, alpha)
  plotTbl <- dplyr::filter(tibble(x = empirical, y = theoretical),
                           is.finite(x),
                           is.finite(y))
  ggplot(plotTbl, aes(x, y)) +
    geom_point() +
    geom_smooth(method = "lm", se = FALSE) +
    theme_bw() +
    xlab("empirical") +
    ylab("theoretical")
}
#' Standard QQ-plot
#'
#' Plots empirical quantiles of threshold excesses against theoretical stable
#' Kendall quantiles; with `symmetric = TRUE` the data are standardized and the
#' symmetric quantile function is used.
#'
#' @param srcTbl tibble returned by filteredData()
#' @param alpha Kendall stable dist. parameter
#' @param meanFunction function giving moment of order alpha of the step distribution
#' @param symmetric if TRUE, symmetrical version of stable Kendall distribution will be used
#' @param threshold cut-off value for observations
#'
#' @return ggplot2 object
#'
plotQQ <- function(srcTbl, alpha, meanFunction = function(x) 1, symmetric = FALSE, threshold = 0) {
  x <- srcTbl %>%
    dplyr::filter(is.finite(maximum),
                  maximum > threshold) %>%
    dplyr::mutate(maximum = maximum - threshold)
  if (symmetric) {
    x <- x %>%
      dplyr::mutate(maximum = as.vector(scale(maximum)))
  }
  x <- x %>%
    dplyr::ungroup() %>%
    dplyr::select(maximum) %>%
    # Was a bare `filter()`: in a package context that can dispatch to
    # stats::filter and fail; qualify it like every other dplyr call here.
    dplyr::filter(is.finite(maximum)) %>%
    unlist(use.names = FALSE)
  # seq_along() is safe for empty input, unlike 1:length(x).
  prob <- (seq_along(x) - 0.5) / length(x)
  x <- quantile(x, prob)
  qLim <- if (symmetric) qkendSym(meanFunction) else qkend(meanFunction)
  y <- qLim(prob, alpha)
  tibble(x = x, y = y) %>%
    ggplot(aes(x, y)) +
    geom_point() +
    geom_smooth(method = "lm", se = FALSE) +
    theme_bw() +
    xlab("empirical") +
    ylab("theoretical")
}
#' Plot of data over time
#'
#' Line plot of `maximum` against `measTime`.
#'
#' @param srcTbl tibble returned by filteredData()
#'
#' @return ggplot2 object
#'
plotTime <- function(srcTbl, datesRange = "") {
  # `datesRange` is currently unused; the date filtering it was meant to drive
  # is disabled.
  ggplot(srcTbl, aes(x = measTime, y = maximum)) +
    geom_line() +
    theme_bw() +
    xlab("date") +
    ylab("measured value")
}
#' ECDF for data
#'
#' Empirical cumulative distribution function of the `maximum` column.
#'
#' @param srcTbl tibble returned by filteredData()
#'
#' @return ggplot2 object
#'
plotEcdf <- function(srcTbl) {
  srcTbl %>%
    ggplot(aes(x = maximum)) +
    stat_ecdf() +
    theme_bw()
}
|
#' fitbitr
#'
#' Provides access to the Fitbit API via R.
#'
#' @name fitbitr
#' @docType package
#' @import stringr lubridate dplyr tidyr httr jsonlite data.table
# Root of the Fitbit REST API, version 1.
url_base <- "https://api.fitbit.com/1/"
# Per-user endpoint prefix; "-" denotes the currently authorized user.
url_api <- sprintf("%suser/-/", url_base)
| /R/fitbitr.R | permissive | af12066/fitbitr | R | false | false | 255 | r | #' fitbitr
#'
#' Provides access to the Fitbit API via R
#'
#' @name fitbitr
#' @docType package
#' @import stringr lubridate dplyr tidyr httr jsonlite data.table
# Root of the Fitbit REST API, version 1.
url_base <- "https://api.fitbit.com/1/"
# Per-user endpoint prefix; "-" denotes the currently authorized user.
url_api <- paste0(url_base, "user/-/")
|
# ---------------------------------------------------------------------------
# BA thesis analysis script: effect of 1929-1935 bank distress on manufacturing
# firms, with IV robustness checks and figures.
# NOTE(review): clears the workspace and hard-codes machine-specific paths,
# so the script is not portable as written.
# ---------------------------------------------------------------------------
rm(list=ls())
setwd("~/Users/Adam/Research/BA_Thesis/Data")
library("openxlsx")
library(plm)
# NOTE(review): library() only loads its FIRST argument; here `gplots` is
# passed as the `help` argument and is never attached -- TODO confirm intent.
library(foreign, gplots)
library(dplyr)
library(lazyeval) #for the group by function
library(ggplot2)
#library(micEconCES)
library(logistf)
library(stargazer)
library(biglm)
# Main firm-level regression panel (Excel sheet 1).
#panel_original <- read.xlsx("/Users/Adam/Research/BA_Thesis/Data final/regression_data edits ok.xlsx", 1)
panel_original <- read.xlsx("/Users/Adam/Research/BA_Thesis/Data final/regression_data_changedIV.xlsx", 1)
####################### merge the other debt figure from Rajan - ugh! ##############
# Merge 1920 farm debt per acre (Rajan) onto the panel by county FIPS code.
debt_path = '/Users/Adam/Research/BA_Thesis/Data final/table_10 -- includes debt per acre in 1920.dta'
rajan_data <- read.dta(debt_path)
rajan_data <- data.frame(rajan_data[c('fips', 'debt_acre_1920')])
panel_original$fips <- panel_original$FIPS.code
library(sqldf)
panel_original <- sqldf('Select a.*, b.debt_acre_1920 FROM
panel_original AS a LEFT JOIN
rajan_data AS b ON (a.fips = b.fips)')
###################### merge industries with crosswalk #############################
# Attach industry names and external-finance dependence via the crosswalk,
# joined on the industry code stored in the `Unnamed:.21` column.
industry_csv = '/Users/Adam/Research/BA_Thesis/Data final/Industry Crosswalk.xlsx'
industry_data = read.xlsx(industry_csv, 1)
industry_data$industry_code <- as.character(industry_data$Code)
industry_data$ext_finance <- as.numeric(industry_data$Matural.Company.Financial.Dependence)
industry_data <- data.frame(industry_data[c('industry_code', 'Industry', 'ext_finance')])
panel_original$industry_code <- as.character(panel_original$`Unnamed:.21`)
panel_original <- sqldf('Select a.*, b.Industry, b.ext_finance FROM
panel_original AS a LEFT JOIN
industry_data AS b ON (a.industry_code = b.industry_code)')
#################################### CPI ##################
#CPI <- read.xlsx("/Users/Adam/Research/BA_Thesis/Data/CPI Unadjusted,annual,index units.xlsx")
#panel_original <- sqldf('Select a.*, b.CPI FROM
#panel_original AS a LEFT JOIN
#CPI AS b ON (a.Year = b.Year)')
################################ lets get crackin #################################
# Build the estimation panel: keep key identifiers, outcomes, instruments, and
# the twelve monthly wage-earner counts; restrict to firms open in 1929.
panel <- data.frame(panel_original[c('Year', 'County', 'firm.code', 'Value.of.product', 'varying_iv', 'fixed_char', 'open_29', 'Post_1929', 'debt',
'Wage.earners.by.month,.January', 'Wage.earners.by.month,.February', 'Wage.earners.by.month,.March',
'Wage.earners.by.month,.April', 'Wage.earners.by.month,.May', 'Wage.earners.by.month,.June',
'Wage.earners.by.month,.July', 'Wage.earners.by.month,.August', 'Wage.earners.by.month,.September',
'Wage.earners.by.month,.October', 'Wage.earners.by.month,.November', 'Wage.earners.by.month,.December', "Unnamed:.21"
, 'debt_acre_1920', 'Industry', 'ext_finance')])
panel <- subset(panel, panel$open_29 == 1)
#duplicates section
#panel_elements <- panel[c('Year', 'firm.code')]
#dup <- data.frame(duplicated(panel_elements))
#new_panel = data.frame(panel, dup)
#panel <- new_panel[new_panel$duplicated.panel_elements. == F,]
#is.na(panel) <- sapply(panel, is.infinite)
# Replace infinite entries with NA across the whole panel.
panel[mapply(is.infinite, panel)] <- NA
# remove infintie and NAn
#panel <- panel[is.finite(rowSums(panel)),]
#panel <- panel[!rowSums(!is.finite(panel)),]
#y <- as.numeric(as.character(panel$Value.of.product))
#y[!is.na(y) & y > 0] <- log(y[!is.na(y) & y > 0])
#varying_iv <- as.numeric(panel$varying_iv)
# Fixed-effect factors and numeric regressors.
panel$firm <- as.factor(panel$firm.code)
panel$county <- as.factor(panel$County)
panel$year <- as.factor(panel$Year)
panel$post <- as.numeric(panel$Post_1929)
panel$varying_iv <- as.numeric(panel$varying_iv)
panel$industry <- as.factor(panel$Industry)
#panel$CPI <- as.numeric(panel$CPI)
#panel$y <- y/panel$CPI
# NOTE(review): manual one-off data fix at a hard-coded row index; this is
# fragile if the panel's row order ever changes -- TODO confirm which
# observation this targets.
panel$varying_iv[27598] <- NA
#fixed_iv_model_y_noFixedEffects <- lm(y ~ post*fixed_char, data = panel)
#fixed_iv_model_y_YearCounty <- lm(y ~ post*fixed_char + year + county, data = panel)
#fixed_iv_model_y <- lm(y ~ post*fixed_char + year + county + industry, data = panel)
#library(sandwich)
#library(lmtest)
#library(multiwayvcov)
#fixed_iv_model_y_noFixedEffects <- coeftest(fixed_iv_model_y_noFixedEffects, vcov=vcovHC(fixed_iv_model_y_noFixedEffects,type="HC0",cluster="County"))
#fixed_iv_model_y_YearCounty <- coeftest(fixed_iv_model_y_YearCounty, vcov=vcovHC(fixed_iv_model_y_YearCounty,type="HC0",cluster="County"))
#fixed_iv_model_y <- coeftest(fixed_iv_model_y, vcov=vcovHC(fixed_iv_model_y,type="HC0",cluster="County"))
#varying_iv_model <-lm(y ~ varying_iv + year, data = panel)
#fixed_iv_model <- plm(y ~ post + fixed_char + post*fixed_char -1, data = panel, index = c('firm.code', 'Year'), model='within')
#varying_iv_model <-plm(y ~ varying_iv, data = panel, index = c('firm.code', 'Year'), model='within')
#summary(fixed_iv_model)
#summary(varying_iv_model)
# Annual employment: sum of the twelve monthly wage-earner counts per firm-year,
# then logged (only strictly positive totals are logged; others stay NA).
# Fix: size the placeholder from the data instead of the hard-coded 34207,
# which breaks silently if the panel's row count ever changes.
panel$labor <- rep(NA, nrow(panel))
panel$labor <- (as.numeric(panel$Wage.earners.by.month..January) + as.numeric(panel$Wage.earners.by.month..February) +
as.numeric(panel$Wage.earners.by.month..March) + as.numeric(panel$Wage.earners.by.month..April)+
as.numeric(panel$Wage.earners.by.month..May) + as.numeric(panel$Wage.earners.by.month..June) +
as.numeric(panel$Wage.earners.by.month..July) + as.numeric(panel$Wage.earners.by.month..August) +
as.numeric(panel$Wage.earners.by.month..September) + as.numeric(panel$Wage.earners.by.month..October)
+ as.numeric(panel$Wage.earners.by.month..November) + as.numeric(panel$Wage.earners.by.month..December))
# Log only positive totals; zeros stay at 0 until the infinite-value sweep below.
panel$labor[!is.na(panel$labor) & panel$labor > 0] <- log(panel$labor[!is.na(panel$labor) & panel$labor > 0])
panel[mapply(is.infinite, panel)] <- NA
# Difference-in-differences style regressions of log employment on
# post-1929 x bank-distress, with increasingly rich fixed effects.
fixed_iv_model_no_FixedEffects <- lm(labor ~ post*fixed_char, data = panel)
fixed_iv_model_JustYearCounty <- lm(labor ~ post*fixed_char + year + county, data = panel)
fixed_iv_model_labor <- lm(labor ~ post*fixed_char + year + industry + county, data = panel)
summary(fixed_iv_model_labor)
# Same outcome with the time-varying distress measure instead.
varying_iv_model_labor <- lm(labor ~ varying_iv + year + industry + county, data = panel)
summary(varying_iv_model_labor)
library(sandwich)
library(lmtest)
library(multiwayvcov)
# NOTE(review): sandwich::vcovHC() has no `cluster` argument, so cluster="County"
# is silently swallowed by `...` -- these are heteroskedasticity-robust (HC0)
# but NOT county-clustered SEs. Consider multiwayvcov::cluster.vcov() or
# sandwich::vcovCL() -- TODO confirm intended inference.
fixed_iv_model_labor <- coeftest(fixed_iv_model_labor, vcov=vcovHC(fixed_iv_model_labor,type="HC0",cluster="County"))
fixed_iv_model_no_FixedEffects <- coeftest(fixed_iv_model_no_FixedEffects, vcov=vcovHC(fixed_iv_model_no_FixedEffects,type="HC0",cluster="County"))
fixed_iv_model_JustYearCounty <- coeftest(fixed_iv_model_JustYearCounty, vcov=vcovHC(fixed_iv_model_JustYearCounty,type="HC0",cluster="County"))
varying_labor_robust <- coeftest(varying_iv_model_labor, vcov=vcovHC(varying_iv_model_labor,type="HC0",cluster="County"))
################################# make iv for robustness check ############
# Two-stage least squares by hand: instrument post*fixed_char with 1920 farm
# debt per acre (levels and a normalized version).
# NOTE(review): plugging fitted() values into a second lm() gives correct point
# estimates but WRONG second-stage standard errors; AER::ivreg would handle
# both stages with proper inference -- TODO confirm.
panel_robust <-subset(panel, panel$debt != "")
# no NA's for two stage
panel_robust <- panel_robust[complete.cases(panel_robust),] #5:6
panel_robust$debt <- as.numeric(panel_robust$debt)
#mortgage_debt[!is.na(mortgage_debt) & mortgage_debt > 0] <- log(mortgage_debt[!is.na(mortgage_debt) & mortgage_debt > 0])
# Implied 1910 debt and 1920/1910 growth-style normalization.
panel_robust$debt_acre_1910 <- abs(panel_robust$debt - panel_robust$debt_acre_1920)
panel_robust$debt_normalized <- panel_robust$debt_acre_1920/panel_robust$debt_acre_1910
# First stage: note the LHS `post*fixed_char` is evaluated arithmetically
# (elementwise product) because it appears left of the `~`.
SLS1 <- lm(post*fixed_char ~ debt + year + county + industry, data = panel_robust, na.action=na.omit)
summary(SLS1)
X_hat <- fitted(SLS1)
SLS2_labor <- lm(labor ~ X_hat + year + county + industry, data = panel_robust, na.action=na.omit)
summary(SLS2_labor)
# Same 2SLS with the normalized debt instrument.
SLS1_norm <- lm(post*fixed_char ~ debt_normalized + year + county + industry, data = panel_robust, na.action=na.omit)
summary(SLS1_norm)
X_hat_norm <- fitted(SLS1_norm)
SLS2_labor_norm <- lm(labor ~ X_hat_norm + year + county + industry, data = panel_robust, na.action=na.omit)
summary(SLS2_labor_norm)
#panel_robust$instrument <- X_hat
#sign doesnt make sense...dont use
#SLS1_varying <- lm(varying_iv ~ debt + year + county + industry, data = panel_robust, na.action=na.omit)
#summary(SLS1_varying)
#X_hat_varying <- fitted(SLS1_varying)
#SLS2_labor_varying <- lm(labor ~ X_hat_varying + year + county + industry, data = panel_robust, na.action=na.omit)
#summary(SLS2_labor_varying)
library(sandwich)
library(lmtest)
library(multiwayvcov)
# NOTE(review): as above, vcovHC() ignores the `cluster` argument.
SLS1_labor_robust <- coeftest(SLS1, vcov=vcovHC(SLS1,type="HC0",cluster="County"))
SLS2_labor_robust <- coeftest(SLS2_labor, vcov=vcovHC(SLS2_labor,type="HC0",cluster="County"))
#output?
#SLS1_y <- lm(post*fixed_char ~ debt + year + county + industry, data = panel_robust, na.action=na.omit)
#summary(SLS1_y)
#X_hat <- fitted(SLS1_y)
#SLS2_y <- lm(y ~ X_hat + year + county + industry, data = panel_robust, na.action=na.omit)
#summary(SLS2_y)
##second IV
#panel_robust$debt_acre_1920 <- as.numeric(panel_robust$debt_acre_1920)
#SLS1 <- lm(post*fixed_char ~ debt_acre_1920 + year + county + industry, data = panel_robust, na.action=na.omit)
#summary(SLS1)
#X_hat <- fitted(SLS1)
#SLS2_labor_IV2 <- lm(labor ~ X_hat + year + county + industry -1, data = panel_robust, na.action=na.omit)
#summary(SLS2_labor_IV2)
#library(sandwich)
#library(lmtest)
#library(multiwayvcov)
#SLS2_labor_IV2_robust <- coeftest(SLS2_labor_IV2, vcov=vcovHC(SLS2_labor_IV2,type="HC0",cluster="County"))
########################## external financing by industry ###########################
# Split the sample at the median external-finance dependence (0.04) and run the
# hand-rolled 2SLS separately in each subsample.
#median is 0.04
high_ext_dependence <- subset(panel_robust, panel_robust$ext_finance > 0.04)
low_ext_dependence <- subset(panel_robust, panel_robust$ext_finance <= 0.04)
high_dep_SLS1 <- lm(post*fixed_char ~ debt + year + county + industry -1, data = high_ext_dependence, na.action=na.omit)
summary(high_dep_SLS1)
X_hat <- fitted(high_dep_SLS1)
high_dep_SLS2 <- lm(labor ~ X_hat + year + county + industry -1, data = high_ext_dependence, na.action=na.omit)
summary(high_dep_SLS2)
library(sandwich)
library(lmtest)
library(multiwayvcov)
high_ext_dep_labor <- coeftest(high_dep_SLS2, vcov=vcovHC(high_dep_SLS2,type="HC0",cluster="County"))
low_dep_SLS1 <- lm(post*fixed_char ~ debt + year + county + industry -1, data = low_ext_dependence, na.action=na.omit)
summary(low_dep_SLS1)
X_hat <- fitted(low_dep_SLS1)
low_dep_SLS2 <- lm(labor ~ X_hat + year + county + industry -1, data = low_ext_dependence, na.action=na.omit)
summary(low_dep_SLS2)
library(sandwich)
library(lmtest)
library(multiwayvcov)
low_ext_dep_labor <- coeftest(low_dep_SLS2, vcov=vcovHC(low_dep_SLS2,type="HC0",cluster="County"))
############# WALD test #############
# Wald test for equality of the X_hat coefficients across the two subsamples.
# get variances of each model, after clustering at county level
vcov_high=vcovHC(high_dep_SLS2,type="HC0",cluster="County")
high_var <- vcov_high[c('X_hat', 'X_hat'),c('X_hat', 'X_hat')][1,1]
high_coeff<- summary(high_dep_SLS2)$coefficients[1,]
vcov_low=vcovHC(low_dep_SLS2,type="HC0",cluster="County")
low_var <- vcov_low[c('X_hat', 'X_hat'),c('X_hat', 'X_hat')][1,1]
low_coeff<- summary(low_dep_SLS2)$coefficients[1,]
# NOTE(review): this takes the sample covariance of the two 4-element summary
# rows (estimate, SE, t, p), which is NOT the sampling covariance of the two
# coefficient estimates; with disjoint subsamples that covariance is arguably 0
# -- TODO confirm and simplify.
covar <- cov(high_coeff, low_coeff) #is this correct????
se <- sqrt(high_var + low_var -2*covar)
# NOTE(review): two-sided p-value should be 2*pnorm(-abs(wald.z)); as written
# this exceeds 1 whenever wald.z > 0 -- TODO confirm.
wald.z <- (summary(high_dep_SLS2)$coefficients[1,1] - summary(low_dep_SLS2)$coefficients[1,1])/se
p_wald <- 2*pnorm(wald.z)
################better approach###############
# Pooled specification: interact a high-dependence dummy with the treatment
# instead of splitting the sample.
# Simplified from ifelse(x > 0.04, 1, ifelse(x <= 0.04, 0, 0)): the inner
# ifelse always yielded 0, and NA propagates identically in both forms.
panel$dependence_dummy <- ifelse(panel$ext_finance > 0.04, 1, 0)
panel_robust$dependence_dummy <- ifelse(panel_robust$ext_finance > 0.04, 1, 0)
# OLS with the triple interaction dummy x post x distress.
fixed_iv_model_labor_dependence <- lm(labor ~ dependence_dummy*post*fixed_char + year + county + industry, data = panel)
# Hand-rolled 2SLS version (LHS of the first stage is the arithmetic product).
SLS1_dependence <- lm(dependence_dummy*post*fixed_char ~ debt + year + county + industry, data = panel_robust, na.action=na.omit)
summary(SLS1_dependence)
X_hat <- fitted(SLS1_dependence)
SLS2_dependence <- lm(labor ~ dependence_dummy*X_hat + year + county + industry, data = panel_robust, na.action=na.omit)
summary(SLS2_dependence)
library(sandwich)
library(lmtest)
library(multiwayvcov)
# NOTE(review): vcovHC() ignores the `cluster` argument (HC0 only, not clustered).
fixed_iv_model_labor_dependence <- coeftest(fixed_iv_model_labor_dependence, vcov=vcovHC(fixed_iv_model_labor_dependence,type="HC0",cluster="County"))
ext_dependence_model <- coeftest(SLS2_dependence, vcov=vcovHC(SLS2_dependence,type="HC0",cluster="County"))
#WALD test etc
#dummy for high versus low - add dummy*X_hat as extra or interact with all other variables as well
#heterogeniety analysis -- bunch of figures x axis financial dependence, and y axis coefficient of interest
#
########## fun with tables #######################
# LaTeX output tables (stargazer) for the main specifications.
#ggplot(data = panel, aes(x=panel$year, y=panel$labor)) + geom_point(aes(colour = factor(panel$industry)), size = 4) + xlab("Year") + ylab("Labor") + ggtitle("Scatterplot of Labor During Great Depression by industry")
#ggplot(data = Panel, aes(x=Panel$alt_iv, y=Panel$Total.cost.of.materials..fuel..and.electric.cost.sum.of.f001..f002..f003.)) + geom_point() + stat_smooth(method = "lm", col = "red") + xlab("Bank Distress") + ylab("Capital") + ggtitle("Scatterplot of Capital vs. Bank Distress (with OLS fit line)")
#ggplot(data = Panel, aes(x=Panel$alt_iv, y=Panel$Total.value.of.products)) + geom_point() + stat_smooth(method = "lm", col = "red") + xlab("Bank Distress") + ylab("Output") + ggtitle("Scatterplot of Output vs. Bank Distress (with OLS fit line)")
table1 <- stargazer(fixed_iv_model_no_FixedEffects, fixed_iv_model_JustYearCounty,
fixed_iv_model_labor,title="Fixed Effects", align=TRUE)
table2 <- stargazer(SLS1_labor_robust, SLS2_labor_robust, title="Instrumental Variables", align=TRUE)
table3 <- stargazer(fixed_iv_model_labor_dependence, title="Dependence on External Finance", align=TRUE)
#table3 <- stargazer(fixed_iv_model_y_noFixedEffects, fixed_iv_model_y_YearCounty,
#fixed_iv_model_y,title="Fixed Effects -- Total Value Added", align=TRUE)
################################################### fun with graphs ##############################################
############### spatial map #################################
# Choropleth of (double-logged) bank distress by county, quartile-binned.
# NOTE(review): install.packages() inside an analysis script runs on every
# execution and needs interactive/network access -- move to setup.
install.packages("mapproj")
install.packages("ggmap")
install.packages("DeducerSpatial")
#make sure the packages are running
require(maps)
require(ggmap)
par(mfrow = c(2, 1))
#map("usa")
data(county.fips)
# Plot unemployment by country
#colors = c("#F1EEF6", "#D4B9DA", "#C994C7", "#DF65B0", "#DD1C77", "#980043")
colors = c("#C994C7", "#DF65B0", "#DD1C77", "#980043")
maps_data <- panel_original[c('fips', 'fixed_char')]
# |log(fixed_char)|, then logged again for positive values (see next line).
maps_data$log_bank_distress <- abs(as.numeric(log(maps_data$fixed_char)))
maps_data$log_bank_distress[!is.na(maps_data$log_bank_distress) & maps_data$log_bank_distress > 0] <- log(maps_data$log_bank_distress[!is.na(maps_data$log_bank_distress) & maps_data$log_bank_distress > 0])
maps_data[mapply(is.infinite, maps_data)] <- 0
# Quartile buckets -> color index; counties absent from the data get bucket 1.
maps_data$colorBuckets <- as.numeric(cut(maps_data$log_bank_distress, breaks = 4))
colorsmatched <- maps_data$colorBuckets[match(county.fips$fips, maps_data$fips)]
colorsmatched[is.na(colorsmatched)] <- 1
map("county", col = colors[colorsmatched], fill = TRUE, resolution = 0, lty = 0, projection = "polyconic")
map("state", col = "white", fill = FALSE, add = TRUE, lty = 1, lwd = 0.2, projection = "polyconic")
title("Bank Distress by County, 1929-1935")
leg.txt <- c("1st Quartile", "2nd Quartile", "3rd Quartile", "4th Quartile")
legend("top", leg.txt,horiz = TRUE,fill = colors, cex = 0.45)
######################## changes in revenue and labor by industry graphs ################
# Real (CPI-deflated) revenue and employment changes 1929->1933 by industry,
# for firms open in both 1929 and 1933.
CPI <- read.xlsx("/Users/Adam/Research/BA_Thesis/Data/CPI Unadjusted,annual,index units.xlsx")
panel_changes <- sqldf('Select a.*, b.CPI FROM panel_original AS a LEFT JOIN CPI AS b ON (a.Year = b.Year)')
panel_changes <- subset(panel_changes, panel_changes$open_29 == 1 & panel_changes$open_33 ==1 & Year!=1935)
panel_changes$CPI <- as.numeric(panel_changes$CPI)
y <- as.numeric(as.character(panel_changes$Value.of.product))
#y[!is.na(y) & y > 0] <- log(y[!is.na(y) & y > 0])
# Deflate and rescale revenue to thousands.
panel_changes$y <- y/panel_changes$CPI
panel_changes$y <- panel_changes$y * 1/1000
#panel_original_revenue[mapply(is.infinite, panel_original_revenue)] <- NA
#panel_changes[is.na(panel_changes)] <- 0
#panel_original_revenue$dependence_dummy <- ifelse((panel_original_revenue$ext_finance > 0.04), 1, ifelse(panel_original_revenue$ext_finance <= 0.04 , 0, 0))
# Annual employment = sum of monthly wage-earner counts, in thousands.
panel_changes$labor <- (as.numeric(panel_changes[,'Wage.earners.by.month,.January']) + as.numeric(panel_changes[,'Wage.earners.by.month,.February']) +
as.numeric(panel_changes[,'Wage.earners.by.month,.March']) + as.numeric(panel_changes[,'Wage.earners.by.month,.April'])+
as.numeric(panel_changes[,'Wage.earners.by.month,.May']) + as.numeric(panel_changes[,'Wage.earners.by.month,.June']) +
as.numeric(panel_changes[,'Wage.earners.by.month,.July']) + as.numeric(panel_changes[,'Wage.earners.by.month,.August']) +
as.numeric(panel_changes[,'Wage.earners.by.month,.September']) + as.numeric(panel_changes[,'Wage.earners.by.month,.October'])
+ as.numeric(panel_changes[,'Wage.earners.by.month,.November']) + as.numeric(panel_changes[,'Wage.earners.by.month,.December']))
panel_changes$labor <- panel_changes$labor * 1/1000
#rev_data <- data.frame(ddply(panel_original_revenue, .(Industry, Year,ext_finance), summarize, Average_rev=mean(y)))
#rev_data$dependence_dummy <- as.integer(rev_data$dependence_dummy)
#rev_data$Industry <- as.factor(rev_data$Industry)
#rev_data$Year <- as.integer(rev_data$Year)
#ggplot(data =subset(rev_data, ext_finance < -0.10 & ext_finance >= -0.12), aes(x=Year, y=Average_rev, group=Industry, shape=Industry, color=Industry)) +geom_line() + geom_point()+ xlab("Year") + ylab("Revenue, Adjusted for Inflation (hundred thousands)") + ggtitle("Average Firm Revenue by Industry, 1929-1935")
#ggplot(data =subset(rev_data, ext_finance == -0.10), aes(x=Year, y=Average_rev, group=Industry, shape=Industry, color=Industry)) +geom_line() + geom_point()+ xlab("Year") + ylab("Revenue, Adjusted for Inflation (hundred thousands)") + ggtitle("Average Firm Revenue by Industry, 1929-1935")
#ggplot(data =subset(rev_data, ext_finance <= 0.14 & ext_finance > -0.10), aes(x=Year, y=Average_rev, group=Industry, shape=Industry, color=Industry)) +geom_line() + geom_point()+ xlab("Year") + ylab("Revenue, Adjusted for Inflation (hundred thousands)") + ggtitle("Average Firm Revenue by Industry, 1929-1935")
#ggplot(data =subset(rev_data, ext_finance <=0.39 & ext_finance > 0.14), aes(x=Year, y=Average_rev, group=Industry, shape=Industry, color=Industry)) +geom_line() + geom_point()+ xlab("Year") + ylab("Revenue, Adjusted for Inflation (hundred thousands)") + ggtitle("Average Firm Revenue by Industry, 1929-1935")
#ggplot(data =subset(rev_data, ext_finance< 0.4), aes(x=Industry, y=Average_rev, fill=Industry)) + xlab("Industry") + ylab("Revenue, Adjusted for Inflation (hundred thousands)") + ggtitle("Average Firm Revenue by Industry, 1929-1935") +geom_bar(stat="identity", position=position_dodge())
# Two-period (two-row) differences within firm; rows are sorted firm-year first.
# NOTE(review): relies on dplyr::lag (shift by one row, applied twice); if
# stats::lag ever masks it the result changes silently -- TODO confirm.
panel_changes<-panel_changes[order(panel_changes$firm.code,panel_changes$Year),]
panel_changes$y_diff <-panel_changes$y - lag(lag(panel_changes$y))
panel_changes$l_diff <- panel_changes$labor - lag(lag(panel_changes$labor))
#View(Panel_diff)
# Blank out differences that straddle a firm boundary.
for (i in 3:nrow(panel_changes)){
if (panel_changes$firm.code[i]!=panel_changes$firm.code[i-2]) {
panel_changes$y_diff[i] <- NA
panel_changes$l_diff[i] <- NA
}
}
panel_changes[is.na(panel_changes)] <- 0
# NOTE(review): ddply()/summarize come from plyr, which is never loaded in this
# script -- TODO add library(plyr) (before dplyr to avoid masking issues).
rev_change_data <- data.frame(ddply(panel_changes, .(Industry), summarize, Average_change=mean(y_diff)))
ggplot(data =rev_change_data, aes(x=Industry, y=Average_change, fill=Industry)) + xlab("Industry") + ylab("Average Change in Revenue, 1933-1929, Adjusted for Inflation (in thousands)") + ggtitle("Average Change in Revenue by Industry, 1933-1929") +geom_bar(stat="identity", position=position_dodge(0.9)) + geom_text(aes(label=Industry), size = 3,angle = 90) + theme(axis.text.x=element_blank())
labor_change_data <- data.frame(ddply(panel_changes, .(Industry), summarize, Average_change=mean(l_diff)))
ggplot(data =labor_change_data, aes(x=Industry, y=Average_change, fill=Industry)) + xlab("Industry") + ylab("Average Change in Wage Earners Hired, 1933-1929 (in thousands)") + ggtitle("Average Change in Employment by Industry, 1933-1929") +geom_bar(stat="identity", position=position_dodge(0.9)) + geom_text(aes(label=Industry), size = 3,angle = 90) + theme(axis.text.x=element_blank())
################## firm exit graph #########
# Count surviving 1929 establishments per census year, plus new entrants by
# the first census year in which they appear open.
panel_exit <- panel_original
est_change_data <- data.frame(ddply(subset(panel_exit, open_29 ==1),~Year,summarise,est_count=length(unique(firm.code))))
entry_29 <- length(unique(subset(panel_exit, panel_exit$open_29 ==1)$firm.code))
entry_31 <- length(unique(subset(panel_exit, panel_exit$open_29 ==0 & panel_exit$open_31 ==1 )$firm.code))
entry_33 <- length(unique(subset(panel_exit, panel_exit$open_29 ==0 & panel_exit$open_31 ==0 &panel_exit$open_33 ==1)$firm.code))
entry_35 <- length(unique(subset(panel_exit, panel_exit$open_29 ==0 & panel_exit$open_31 ==0 &panel_exit$open_33 ==0 & panel_exit$open_35 ==1)$firm.code))
entry <- c(entry_29, entry_31, entry_33, entry_35)
year_entry <- c(1929, 1931, 1933, 1935)
# `entry_data` is built but only the survivor series is plotted below.
entry_data <- data.frame(entry, year_entry)
ggplot(data =est_change_data, aes(x=Year, y=est_count)) +geom_line() + geom_point()+ xlab("Year") + ylab("Number of Establishments") + ggtitle("Number of Firms Per Year, 1929-1935")
################### Coefficient by industry #####################
# Re-estimate the employment regression industry-by-industry and plot the
# post*fixed_char coefficient against external-finance dependence.
library(sandwich)
library(lmtest)
library(multiwayvcov)
# NOTE(review): nrow = 22 is hard-coded; if the number of industry levels
# differs this either leaves NA rows or overruns -- TODO use
# length(levels(panel$industry)).
d <- data.frame(matrix(NA, nrow = 22, ncol = 3))
for(i in 1:length(levels(panel$industry)))
{
new_industry <- levels(panel$industry)[i]
fixed_iv_model_labor <- lm(labor ~ post*fixed_char + year + county, data = subset(panel, panel$industry == new_industry))
fixed_iv_model_labor <- coeftest(fixed_iv_model_labor, vcov=vcovHC(fixed_iv_model_labor,type="HC0",cluster="County"))
#coeff<- tail(fixed_iv_model_labor[,1],1)
#print(c(levels(panel$industry)[i], coeff))
# Row 3 of the coeftest table is taken as the coefficient of interest
# -- TODO confirm this row is post:fixed_char in every industry's fit.
coeff<- fixed_iv_model_labor[3,1]
# NOTE(review): head() returns up to 6 values, so this c() can exceed the
# 3 columns of `d` -- TODO use head(..., 1).
d[i,] <- c(levels(panel$industry)[i], as.numeric(as.character(coeff)), head(panel$ext_finance[panel$industry == new_industry]))
}
d$coeff <- as.numeric(d$X2)
ggplot(data =d, aes(x=X3, y=coeff, group=1)) + geom_point()+ geom_smooth(method='lm')+ xlab("External Financial Dependence") + ylab("Bank Distress Coefficient") + ggtitle("Effects of Bank Distress on Employment v. External Financial Dependence by Industry")
########################## Summary Statistics #####################
############ banking stats ##################
# FDIC bank-suspension data restricted to the Depression years 1929-1933,
# then split into the census regions used in the write-up. State names in
# the source file are lower-case.
fdic_data <- read.xlsx("/Users/Adam/Research/BA_Thesis/Data/FDIC_check.xlsx")
fdic_data$Year <- as.integer(fdic_data$Year)
fdic_data <- subset(fdic_data, Year %in% 1929:1933)
central_data <- subset(fdic_data, State %in% c('ohio', 'illinois', 'indiana', 'michigan', 'wisconsin'))
mid_atlantic_data <- subset(fdic_data, State %in% c('new york', 'new jersey', 'pennsylvania'))
mountain_data <- subset(fdic_data, State %in% c('montana', 'idaho', 'wyoming', 'colorado', 'new mexico', 'arizona', 'utah', 'nevada'))
new_england_data <- subset(fdic_data, State %in% c('maine', 'new hampshire', 'vermont', 'massachusetts', 'rhode island', 'connecticut'))
northwestern_data <- subset(fdic_data, State %in% c('minnesota', 'iowa', 'missouri', 'north dakota', 'south dakota', 'nebraska', 'kansas'))
pacific_data <- subset(fdic_data, State %in% c('washington', 'oregon', 'california'))
south_atlantic_data <- subset(fdic_data, State %in% c('maryland', 'delaware', 'district of columbia', 'virginia', 'west virginia', 'north carolina', 'south carolina', 'georgia', 'florida'))
south_central_data <- subset(fdic_data, State %in% c('kentucky', 'tennessee', 'alabama', 'mississippi', 'arkansas', 'oklahoma', 'louisiana', 'texas'))
# Collapse each regional frame to the unique county-year FDIC records,
# dropping duplicate rows introduced by the firm-level merge.
fdic_cols <- c('State', 'County', 'Year', 'FDIC_BANKS_SUS_', 'FDIC.DEPOSITS',
               'FDIC_DEPOSITS_SUS_', 'FDIC.BANKS')
central_data <- unique(central_data[fdic_cols])
mid_atlantic_data <- unique(mid_atlantic_data[fdic_cols])
mountain_data <- unique(mountain_data[fdic_cols])
new_england_data <- unique(new_england_data[fdic_cols])
northwestern_data <- unique(northwestern_data[fdic_cols])
pacific_data <- unique(pacific_data[fdic_cols])
south_atlantic_data <- unique(south_atlantic_data[fdic_cols])
south_central_data <- unique(south_central_data[fdic_cols])
# Year-by-year regional totals: suspended banks/deposits, their overall
# counts, and the suspension shares. One helper instead of eight copies of
# the same ddply call.
region_summary <- function(region_df) {
  data.frame(ddply(region_df, .(Year), summarize,
                   banks_sus = sum(FDIC_BANKS_SUS_),
                   banks = sum(FDIC.BANKS),
                   deposits_sus = sum(FDIC_DEPOSITS_SUS_),
                   deposits = sum(FDIC.DEPOSITS),
                   bank_percentage = banks_sus/banks,
                   deposits_percentage = deposits_sus/deposits))
}
central_data_sum <- region_summary(central_data)
mid_atlantic_data_sum <- region_summary(mid_atlantic_data)
mountain_data_sum <- region_summary(mountain_data)
new_england_sum <- region_summary(new_england_data)
northwestern_sum <- region_summary(northwestern_data)
pacific_sum <- region_summary(pacific_data)
south_atlantic_sum <- region_summary(south_atlantic_data)
south_central_sum <- region_summary(south_central_data)
####################### census stats ########################
# Distinct firms and distinct county-state pairs in the raw panel.
count(unique(panel_original$firm.code))
count(unique(panel_original[c('County','State')]))
# ---- file boundary: /Code/Regression final.R (repo asudit/BA_Thesis, no license, R, 28,427 bytes) ----
rm(list=ls())
setwd("~/Users/Adam/Research/BA_Thesis/Data")
library("openxlsx")
library(plm)
# library()'s second argument is `help`, not a second package, so the original
# call library(foreign, gplots) never attached gplots. Load each explicitly.
library(foreign)
library(gplots)
library(dplyr)
library(lazyeval) #for the group by function
library(ggplot2)
#library(micEconCES)
library(logistf)
library(stargazer)
library(biglm)
# Firm-level census-of-manufactures panel with the bank-distress instruments.
#panel_original <- read.xlsx("/Users/Adam/Research/BA_Thesis/Data final/regression_data edits ok.xlsx", 1)
panel_original <- read.xlsx("/Users/Adam/Research/BA_Thesis/Data final/regression_data_changedIV.xlsx", 1)
####################### merge the other debt figure from Rajan - ugh! ##############
# Attach Rajan's 1920 farm-debt-per-acre measure to the firm panel, keyed on
# county FIPS code. Left join: firms in counties missing from the Rajan file
# keep an NA debt_acre_1920.
debt_path = '/Users/Adam/Research/BA_Thesis/Data final/table_10 -- includes debt per acre in 1920.dta'
rajan_data <- read.dta(debt_path)
rajan_data <- data.frame(rajan_data[c('fips', 'debt_acre_1920')])
panel_original$fips <- panel_original$FIPS.code
library(sqldf)
# NOTE(review): SQL does not guarantee output row order on a join; later
# lag()-based differencing sorts panel_changes explicitly, but confirm nothing
# else depends on the pre-join ordering of panel_original.
panel_original <- sqldf('Select a.*, b.debt_acre_1920 FROM
panel_original AS a LEFT JOIN
rajan_data AS b ON (a.fips = b.fips)')
###################### merge industries with crosswalk #############################
# Map each firm's raw industry code to a readable industry name and to the
# Rajan-Zingales-style external-finance-dependence measure via a crosswalk.
industry_csv = '/Users/Adam/Research/BA_Thesis/Data final/Industry Crosswalk.xlsx'
industry_data = read.xlsx(industry_csv, 1)
industry_data$industry_code <- as.character(industry_data$Code)
# "Matural" is the crosswalk file's own (misspelled) column header.
industry_data$ext_finance <- as.numeric(industry_data$Matural.Company.Financial.Dependence)
industry_data <- data.frame(industry_data[c('industry_code', 'Industry', 'ext_finance')])
# The raw industry code lives in the unnamed 22nd spreadsheet column.
panel_original$industry_code <- as.character(panel_original$`Unnamed:.21`)
# Left join: firms with codes missing from the crosswalk get NA Industry /
# ext_finance.
panel_original <- sqldf('Select a.*, b.Industry, b.ext_finance FROM
panel_original AS a LEFT JOIN
industry_data AS b ON (a.industry_code = b.industry_code)')
#################################### CPI ##################
#CPI <- read.xlsx("/Users/Adam/Research/BA_Thesis/Data/CPI Unadjusted,annual,index units.xlsx")
#panel_original <- sqldf('Select a.*, b.CPI FROM
#panel_original AS a LEFT JOIN
#CPI AS b ON (a.Year = b.Year)')
################################ lets get crackin #################################
# Build the estimation panel: keep the identifier, outcome, instrument, and
# monthly-employment columns, restrict to firms open in 1929, and coerce the
# fixed-effect/treatment columns to the types lm() expects.
# (data.frame() sanitizes names, so "Wage.earners.by.month,.January" becomes
# "Wage.earners.by.month..January" in `panel`.)
panel <- data.frame(panel_original[c('Year', 'County', 'firm.code', 'Value.of.product', 'varying_iv', 'fixed_char', 'open_29', 'Post_1929', 'debt',
'Wage.earners.by.month,.January', 'Wage.earners.by.month,.February', 'Wage.earners.by.month,.March',
'Wage.earners.by.month,.April', 'Wage.earners.by.month,.May', 'Wage.earners.by.month,.June',
'Wage.earners.by.month,.July', 'Wage.earners.by.month,.August', 'Wage.earners.by.month,.September',
'Wage.earners.by.month,.October', 'Wage.earners.by.month,.November', 'Wage.earners.by.month,.December', "Unnamed:.21"
, 'debt_acre_1920', 'Industry', 'ext_finance')])
# Restrict to the 1929 cohort (firms that existed before the bank crises).
panel <- subset(panel, panel$open_29 == 1)
#duplicates section
#panel_elements <- panel[c('Year', 'firm.code')]
#dup <- data.frame(duplicated(panel_elements))
#new_panel = data.frame(panel, dup)
#panel <- new_panel[new_panel$duplicated.panel_elements. == F,]
#is.na(panel) <- sapply(panel, is.infinite)
# Convert any Inf/-Inf cells to NA so lm() drops those rows.
panel[mapply(is.infinite, panel)] <- NA
# remove infintie and NAn
#panel <- panel[is.finite(rowSums(panel)),]
#panel <- panel[!rowSums(!is.finite(panel)),]
#y <- as.numeric(as.character(panel$Value.of.product))
#y[!is.na(y) & y > 0] <- log(y[!is.na(y) & y > 0])
#varying_iv <- as.numeric(panel$varying_iv)
# Factor versions for fixed effects; numeric versions for the treatment terms.
panel$firm <- as.factor(panel$firm.code)
panel$county <- as.factor(panel$County)
panel$year <- as.factor(panel$Year)
panel$post <- as.numeric(panel$Post_1929)
panel$varying_iv <- as.numeric(panel$varying_iv)
panel$industry <- as.factor(panel$Industry)
#panel$CPI <- as.numeric(panel$CPI)
#panel$y <- y/panel$CPI
# NOTE(review): hard-coded row index -- this invalidates one specific
# observation and silently hits a different row (or errors) if the input data
# changes. Document why this observation is bad, or filter it by key instead.
panel$varying_iv[27598] <- NA
#fixed_iv_model_y_noFixedEffects <- lm(y ~ post*fixed_char, data = panel)
#fixed_iv_model_y_YearCounty <- lm(y ~ post*fixed_char + year + county, data = panel)
#fixed_iv_model_y <- lm(y ~ post*fixed_char + year + county + industry, data = panel)
#library(sandwich)
#library(lmtest)
#library(multiwayvcov)
#fixed_iv_model_y_noFixedEffects <- coeftest(fixed_iv_model_y_noFixedEffects, vcov=vcovHC(fixed_iv_model_y_noFixedEffects,type="HC0",cluster="County"))
#fixed_iv_model_y_YearCounty <- coeftest(fixed_iv_model_y_YearCounty, vcov=vcovHC(fixed_iv_model_y_YearCounty,type="HC0",cluster="County"))
#fixed_iv_model_y <- coeftest(fixed_iv_model_y, vcov=vcovHC(fixed_iv_model_y,type="HC0",cluster="County"))
#varying_iv_model <-lm(y ~ varying_iv + year, data = panel)
#fixed_iv_model <- plm(y ~ post + fixed_char + post*fixed_char -1, data = panel, index = c('firm.code', 'Year'), model='within')
#varying_iv_model <-plm(y ~ varying_iv, data = panel, index = c('firm.code', 'Year'), model='within')
#summary(fixed_iv_model)
#summary(varying_iv_model)
# Annual employment per establishment: sum of the twelve monthly wage-earner
# counts. data.frame() sanitized the comma in the original column names to a
# dot, so the names can be built from the month.name constant.
month_cols <- paste0("Wage.earners.by.month..", month.name)
# Reduce(`+`, ...) reproduces the original chained "+" exactly (an NA in any
# month propagates to the total) and drops the fragile hard-coded
# pre-allocation rep(NA, 34207), which errors whenever the panel's row count
# changes.
panel$labor <- Reduce(`+`, lapply(panel[month_cols], as.numeric))
# Log-transform strictly positive totals; zeros and NAs are left untouched.
panel$labor[!is.na(panel$labor) & panel$labor > 0] <- log(panel$labor[!is.na(panel$labor) & panel$labor > 0])
# Any Inf/-Inf cells become NA so lm() drops those rows.
panel[mapply(is.infinite, panel)] <- NA
# Difference-in-differences employment regressions with increasingly rich
# fixed effects: none, year+county, then year+industry+county. post*fixed_char
# expands to post + fixed_char + post:fixed_char; the interaction is the
# bank-distress effect of interest.
fixed_iv_model_no_FixedEffects <- lm(labor ~ post*fixed_char, data = panel)
fixed_iv_model_JustYearCounty <- lm(labor ~ post*fixed_char + year + county, data = panel)
fixed_iv_model_labor <- lm(labor ~ post*fixed_char + year + industry + county, data = panel)
summary(fixed_iv_model_labor)
# Alternative: time-varying bank-distress instrument instead of post x char.
varying_iv_model_labor <- lm(labor ~ varying_iv + year + industry + county, data = panel)
summary(varying_iv_model_labor)
library(sandwich)
library(lmtest)
library(multiwayvcov)
# NOTE(review): vcovHC() has no `cluster` argument -- the extra argument is
# silently ignored, so these are heteroskedasticity-robust (HC0) but NOT
# county-clustered standard errors. multiwayvcov::cluster.vcov(model,
# panel$County) would actually cluster; confirm the intended inference.
fixed_iv_model_labor <- coeftest(fixed_iv_model_labor, vcov=vcovHC(fixed_iv_model_labor,type="HC0",cluster="County"))
fixed_iv_model_no_FixedEffects <- coeftest(fixed_iv_model_no_FixedEffects, vcov=vcovHC(fixed_iv_model_no_FixedEffects,type="HC0",cluster="County"))
fixed_iv_model_JustYearCounty <- coeftest(fixed_iv_model_JustYearCounty, vcov=vcovHC(fixed_iv_model_JustYearCounty,type="HC0",cluster="County"))
varying_labor_robust <- coeftest(varying_iv_model_labor, vcov=vcovHC(varying_iv_model_labor,type="HC0",cluster="County"))
################################# make iv for robustness check ############
# Robustness: instrument the bank-distress treatment with 1920 farm mortgage
# debt per acre, estimated as manual two-stage least squares.
panel_robust <-subset(panel, panel$debt != "")
# no NA's for two stage
panel_robust <- panel_robust[complete.cases(panel_robust),] #5:6
panel_robust$debt <- as.numeric(panel_robust$debt)
#mortgage_debt[!is.na(mortgage_debt) & mortgage_debt > 0] <- log(mortgage_debt[!is.na(mortgage_debt) & mortgage_debt > 0])
# Back out an implied 1910 debt level and a normalized (growth-like) measure.
panel_robust$debt_acre_1910 <- abs(panel_robust$debt - panel_robust$debt_acre_1920)
panel_robust$debt_normalized <- panel_robust$debt_acre_1920/panel_robust$debt_acre_1910
# First stage. On the LHS of a formula, post*fixed_char is evaluated
# arithmetically (the product), not expanded as an interaction.
SLS1 <- lm(post*fixed_char ~ debt + year + county + industry, data = panel_robust, na.action=na.omit)
summary(SLS1)
X_hat <- fitted(SLS1)
# Second stage on the fitted values.
# NOTE(review): manual 2SLS second-stage standard errors do not account for
# first-stage estimation; AER::ivreg would give correct IV inference.
SLS2_labor <- lm(labor ~ X_hat + year + county + industry, data = panel_robust, na.action=na.omit)
summary(SLS2_labor)
# Same 2SLS with the normalized debt instrument.
SLS1_norm <- lm(post*fixed_char ~ debt_normalized + year + county + industry, data = panel_robust, na.action=na.omit)
summary(SLS1_norm)
X_hat_norm <- fitted(SLS1_norm)
SLS2_labor_norm <- lm(labor ~ X_hat_norm + year + county + industry, data = panel_robust, na.action=na.omit)
summary(SLS2_labor_norm)
#panel_robust$instrument <- X_hat
#sign doesnt make sense...dont use
#SLS1_varying <- lm(varying_iv ~ debt + year + county + industry, data = panel_robust, na.action=na.omit)
#summary(SLS1_varying)
#X_hat_varying <- fitted(SLS1_varying)
#SLS2_labor_varying <- lm(labor ~ X_hat_varying + year + county + industry, data = panel_robust, na.action=na.omit)
#summary(SLS2_labor_varying)
library(sandwich)
library(lmtest)
library(multiwayvcov)
# NOTE(review): as above, the `cluster` argument is ignored by vcovHC().
SLS1_labor_robust <- coeftest(SLS1, vcov=vcovHC(SLS1,type="HC0",cluster="County"))
SLS2_labor_robust <- coeftest(SLS2_labor, vcov=vcovHC(SLS2_labor,type="HC0",cluster="County"))
#output?
#SLS1_y <- lm(post*fixed_char ~ debt + year + county + industry, data = panel_robust, na.action=na.omit)
#summary(SLS1_y)
#X_hat <- fitted(SLS1_y)
#SLS2_y <- lm(y ~ X_hat + year + county + industry, data = panel_robust, na.action=na.omit)
#summary(SLS2_y)
##second IV
#panel_robust$debt_acre_1920 <- as.numeric(panel_robust$debt_acre_1920)
#SLS1 <- lm(post*fixed_char ~ debt_acre_1920 + year + county + industry, data = panel_robust, na.action=na.omit)
#summary(SLS1)
#X_hat <- fitted(SLS1)
#SLS2_labor_IV2 <- lm(labor ~ X_hat + year + county + industry -1, data = panel_robust, na.action=na.omit)
#summary(SLS2_labor_IV2)
#library(sandwich)
#library(lmtest)
#library(multiwayvcov)
#SLS2_labor_IV2_robust <- coeftest(SLS2_labor_IV2, vcov=vcovHC(SLS2_labor_IV2,type="HC0",cluster="County"))
########################## external financing by industry ###########################
# Split the sample at the median external-finance dependence (0.04) and run
# the manual 2SLS separately in each half; the hypothesis is that bank
# distress hits externally-dependent industries harder.
#median is 0.04
high_ext_dependence <- subset(panel_robust, panel_robust$ext_finance > 0.04)
low_ext_dependence <- subset(panel_robust, panel_robust$ext_finance <= 0.04)
# High-dependence half. The -1 drops the intercept, so the first coefficient
# in the second stage is X_hat itself (used by the Wald test below).
high_dep_SLS1 <- lm(post*fixed_char ~ debt + year + county + industry -1, data = high_ext_dependence, na.action=na.omit)
summary(high_dep_SLS1)
X_hat <- fitted(high_dep_SLS1)
high_dep_SLS2 <- lm(labor ~ X_hat + year + county + industry -1, data = high_ext_dependence, na.action=na.omit)
summary(high_dep_SLS2)
library(sandwich)
library(lmtest)
library(multiwayvcov)
# NOTE(review): vcovHC() ignores the `cluster` argument (robust, not
# clustered, SEs).
high_ext_dep_labor <- coeftest(high_dep_SLS2, vcov=vcovHC(high_dep_SLS2,type="HC0",cluster="County"))
# Low-dependence half (X_hat is reused/overwritten here).
low_dep_SLS1 <- lm(post*fixed_char ~ debt + year + county + industry -1, data = low_ext_dependence, na.action=na.omit)
summary(low_dep_SLS1)
X_hat <- fitted(low_dep_SLS1)
low_dep_SLS2 <- lm(labor ~ X_hat + year + county + industry -1, data = low_ext_dependence, na.action=na.omit)
summary(low_dep_SLS2)
library(sandwich)
library(lmtest)
library(multiwayvcov)
low_ext_dep_labor <- coeftest(low_dep_SLS2, vcov=vcovHC(low_dep_SLS2,type="HC0",cluster="County"))
############# WALD test #############
# Wald test that the bank-distress (X_hat) coefficient differs between the
# high- and low-dependence subsamples.
# get variances of each model, after clustering at county level
# (NOTE(review): vcovHC() ignores `cluster`, so these are HC0-robust, not
# clustered, variances.)
vcov_high <- vcovHC(high_dep_SLS2, type = "HC0", cluster = "County")
high_var <- vcov_high['X_hat', 'X_hat']
high_coeff <- summary(high_dep_SLS2)$coefficients[1, ]
vcov_low <- vcovHC(low_dep_SLS2, type = "HC0", cluster = "County")
low_var <- vcov_low['X_hat', 'X_hat']
low_coeff <- summary(low_dep_SLS2)$coefficients[1, ]
# NOTE(review): cov() across the two models' coefficient-summary rows is not
# an estimate of the sampling covariance of the two estimates (the original
# author flagged this too). With disjoint subsamples the standard assumption
# is covar = 0.
covar <- cov(high_coeff, low_coeff)
se <- sqrt(high_var + low_var - 2*covar)
wald.z <- (summary(high_dep_SLS2)$coefficients[1, 1] - summary(low_dep_SLS2)$coefficients[1, 1])/se
# Two-sided p-value. The original 2*pnorm(wald.z) is only valid when z is
# negative and can exceed 1 otherwise; -abs(z) handles both signs.
p_wald <- 2*pnorm(-abs(wald.z))
################better approach###############
# Indicator for above-median (0.04) dependence on external finance. The inner
# branch of the original nested ifelse() returned 0 either way, so a single
# ifelse() is exactly equivalent (NA ext_finance still yields NA).
panel$dependence_dummy <- ifelse(panel$ext_finance > 0.04, 1, 0)
panel_robust$dependence_dummy <- ifelse(panel_robust$ext_finance > 0.04, 1, 0)
# Triple interaction: does bank distress hit dependent industries harder?
fixed_iv_model_labor_dependence <- lm(
  labor ~ dependence_dummy*post*fixed_char + year + county + industry,
  data = panel
)
# Manual 2SLS with the dependence interaction in both stages.
SLS1_dependence <- lm(
  dependence_dummy*post*fixed_char ~ debt + year + county + industry,
  data = panel_robust, na.action = na.omit
)
summary(SLS1_dependence)
X_hat <- fitted(SLS1_dependence)
SLS2_dependence <- lm(
  labor ~ dependence_dummy*X_hat + year + county + industry,
  data = panel_robust, na.action = na.omit
)
summary(SLS2_dependence)
library(sandwich)
library(lmtest)
library(multiwayvcov)
fixed_iv_model_labor_dependence <- coeftest(
  fixed_iv_model_labor_dependence,
  vcov = vcovHC(fixed_iv_model_labor_dependence, type = "HC0", cluster = "County")
)
ext_dependence_model <- coeftest(
  SLS2_dependence,
  vcov = vcovHC(SLS2_dependence, type = "HC0", cluster = "County")
)
#WALD test etc
#dummy for high versus low - add dummy*X_hat as extra or interact with all other variables as well
#heterogeniety analysis -- bunch of figures x axis financial dependence, and y axis coefficient of interest
#
########## fun with tables #######################
#ggplot(data = panel, aes(x=panel$year, y=panel$labor)) + geom_point(aes(colour = factor(panel$industry)), size = 4) + xlab("Year") + ylab("Labor") + ggtitle("Scatterplot of Labor During Great Depression by industry")
#ggplot(data = Panel, aes(x=Panel$alt_iv, y=Panel$Total.cost.of.materials..fuel..and.electric.cost.sum.of.f001..f002..f003.)) + geom_point() + stat_smooth(method = "lm", col = "red") + xlab("Bank Distress") + ylab("Capital") + ggtitle("Scatterplot of Capital vs. Bank Distress (with OLS fit line)")
#ggplot(data = Panel, aes(x=Panel$alt_iv, y=Panel$Total.value.of.products)) + geom_point() + stat_smooth(method = "lm", col = "red") + xlab("Bank Distress") + ylab("Output") + ggtitle("Scatterplot of Output vs. Bank Distress (with OLS fit line)")
# LaTeX regression tables (stargazer prints the table source to the console
# and also returns it; the coeftest objects carry the robust SEs).
table1 <- stargazer(fixed_iv_model_no_FixedEffects, fixed_iv_model_JustYearCounty,
fixed_iv_model_labor,title="Fixed Effects", align=TRUE)
table2 <- stargazer(SLS1_labor_robust, SLS2_labor_robust, title="Instrumental Variables", align=TRUE)
table3 <- stargazer(fixed_iv_model_labor_dependence, title="Dependence on External Finance", align=TRUE)
#table3 <- stargazer(fixed_iv_model_y_noFixedEffects, fixed_iv_model_y_YearCounty,
#fixed_iv_model_y,title="Fixed Effects -- Total Value Added", align=TRUE)
################################################### fun with graphs ##############################################
############### spatial map #################################
# Install the mapping dependencies only when missing; the original
# unconditional install.packages() calls hit CRAN on every run of the script.
for (pkg in c("mapproj", "ggmap", "DeducerSpatial")) {
  if (!requireNamespace(pkg, quietly = TRUE)) install.packages(pkg)
}
#make sure the packages are running
# library() fails loudly if a package is still absent; require() only returns
# FALSE and lets the script stumble on.
library(maps)
library(ggmap)
par(mfrow = c(2, 1))
#map("usa")
data(county.fips)
# Plot bank distress by county
#colors = c("#F1EEF6", "#D4B9DA", "#C994C7", "#DF65B0", "#DD1C77", "#980043")
colors = c("#C994C7", "#DF65B0", "#DD1C77", "#980043")
maps_data <- panel_original[c('fips', 'fixed_char')]
# NOTE(review): this takes |log(fixed_char)| and then logs the positive
# values again -- a double log transform. Confirm it is intentional rather
# than a leftover from an earlier scaling experiment.
maps_data$log_bank_distress <- abs(as.numeric(log(maps_data$fixed_char)))
maps_data$log_bank_distress[!is.na(maps_data$log_bank_distress) & maps_data$log_bank_distress > 0] <- log(maps_data$log_bank_distress[!is.na(maps_data$log_bank_distress) & maps_data$log_bank_distress > 0])
maps_data[mapply(is.infinite, maps_data)] <- 0
# Bin distress into four equal-width buckets and color counties accordingly.
maps_data$colorBuckets <- as.numeric(cut(maps_data$log_bank_distress, breaks = 4))
colorsmatched <- maps_data$colorBuckets[match(county.fips$fips, maps_data$fips)]
# Counties absent from the panel are drawn in the 1st-quartile color.
colorsmatched[is.na(colorsmatched)] <- 1
map("county", col = colors[colorsmatched], fill = TRUE, resolution = 0, lty = 0, projection = "polyconic")
map("state", col = "white", fill = FALSE, add = TRUE, lty = 1, lwd = 0.2, projection = "polyconic")
title("Bank Distress by County, 1929-1935")
leg.txt <- c("1st Quartile", "2nd Quartile", "3rd Quartile", "4th Quartile")
legend("top", leg.txt, horiz = TRUE, fill = colors, cex = 0.45)
######################## changes in revenue and labor by industry graphs ################
# Build a 1929/1931/1933 panel of firms open in both 1929 and 1933, deflate
# revenue by the CPI, and compute each firm's 1933-1929 change in revenue and
# employment.
CPI <- read.xlsx("/Users/Adam/Research/BA_Thesis/Data/CPI Unadjusted,annual,index units.xlsx")
panel_changes <- sqldf('Select a.*, b.CPI FROM panel_original AS a LEFT JOIN CPI AS b ON (a.Year = b.Year)')
panel_changes <- subset(panel_changes, panel_changes$open_29 == 1 & panel_changes$open_33 ==1 & Year!=1935)
panel_changes$CPI <- as.numeric(panel_changes$CPI)
y <- as.numeric(as.character(panel_changes$Value.of.product))
#y[!is.na(y) & y > 0] <- log(y[!is.na(y) & y > 0])
# Deflate revenue by CPI and rescale to thousands.
panel_changes$y <- y/panel_changes$CPI
panel_changes$y <- panel_changes$y * 1/1000
#panel_original_revenue[mapply(is.infinite, panel_original_revenue)] <- NA
#panel_changes[is.na(panel_changes)] <- 0
#panel_original_revenue$dependence_dummy <- ifelse((panel_original_revenue$ext_finance > 0.04), 1, ifelse(panel_original_revenue$ext_finance <= 0.04 , 0, 0))
# Annual employment: sum of the twelve monthly wage-earner counts. Unlike in
# `panel`, these columns keep their original comma-containing names, hence
# the [, 'Wage.earners.by.month,.January'] indexing.
panel_changes$labor <- (as.numeric(panel_changes[,'Wage.earners.by.month,.January']) + as.numeric(panel_changes[,'Wage.earners.by.month,.February']) +
as.numeric(panel_changes[,'Wage.earners.by.month,.March']) + as.numeric(panel_changes[,'Wage.earners.by.month,.April'])+
as.numeric(panel_changes[,'Wage.earners.by.month,.May']) + as.numeric(panel_changes[,'Wage.earners.by.month,.June']) +
as.numeric(panel_changes[,'Wage.earners.by.month,.July']) + as.numeric(panel_changes[,'Wage.earners.by.month,.August']) +
as.numeric(panel_changes[,'Wage.earners.by.month,.September']) + as.numeric(panel_changes[,'Wage.earners.by.month,.October'])
+ as.numeric(panel_changes[,'Wage.earners.by.month,.November']) + as.numeric(panel_changes[,'Wage.earners.by.month,.December']))
panel_changes$labor <- panel_changes$labor * 1/1000
#rev_data <- data.frame(ddply(panel_original_revenue, .(Industry, Year,ext_finance), summarize, Average_rev=mean(y)))
#rev_data$dependence_dummy <- as.integer(rev_data$dependence_dummy)
#rev_data$Industry <- as.factor(rev_data$Industry)
#rev_data$Year <- as.integer(rev_data$Year)
#ggplot(data =subset(rev_data, ext_finance < -0.10 & ext_finance >= -0.12), aes(x=Year, y=Average_rev, group=Industry, shape=Industry, color=Industry)) +geom_line() + geom_point()+ xlab("Year") + ylab("Revenue, Adjusted for Inflation (hundred thousands)") + ggtitle("Average Firm Revenue by Industry, 1929-1935")
#ggplot(data =subset(rev_data, ext_finance == -0.10), aes(x=Year, y=Average_rev, group=Industry, shape=Industry, color=Industry)) +geom_line() + geom_point()+ xlab("Year") + ylab("Revenue, Adjusted for Inflation (hundred thousands)") + ggtitle("Average Firm Revenue by Industry, 1929-1935")
#ggplot(data =subset(rev_data, ext_finance <= 0.14 & ext_finance > -0.10), aes(x=Year, y=Average_rev, group=Industry, shape=Industry, color=Industry)) +geom_line() + geom_point()+ xlab("Year") + ylab("Revenue, Adjusted for Inflation (hundred thousands)") + ggtitle("Average Firm Revenue by Industry, 1929-1935")
#ggplot(data =subset(rev_data, ext_finance <=0.39 & ext_finance > 0.14), aes(x=Year, y=Average_rev, group=Industry, shape=Industry, color=Industry)) +geom_line() + geom_point()+ xlab("Year") + ylab("Revenue, Adjusted for Inflation (hundred thousands)") + ggtitle("Average Firm Revenue by Industry, 1929-1935")
#ggplot(data =subset(rev_data, ext_finance< 0.4), aes(x=Industry, y=Average_rev, fill=Industry)) + xlab("Industry") + ylab("Revenue, Adjusted for Inflation (hundred thousands)") + ggtitle("Average Firm Revenue by Industry, 1929-1935") +geom_bar(stat="identity", position=position_dodge())
# Sort firm-within-year so lag(lag(x)) shifts by exactly two census rows: at a
# firm's 1933 row the diff is 1933 minus 1929. NOTE(review): lag() here must
# be dplyr::lag (vector shift); with stats::lag this arithmetic is wrong --
# dplyr is attached above, so dplyr::lag masks it.
panel_changes<-panel_changes[order(panel_changes$firm.code,panel_changes$Year),]
panel_changes$y_diff <-panel_changes$y - lag(lag(panel_changes$y))
panel_changes$l_diff <- panel_changes$labor - lag(lag(panel_changes$labor))
#View(Panel_diff)
# Blank out diffs that straddle two different firms (each firm's first two
# rows would otherwise borrow values from the previous firm).
for (i in 3:nrow(panel_changes)){
if (panel_changes$firm.code[i]!=panel_changes$firm.code[i-2]) {
panel_changes$y_diff[i] <- NA
panel_changes$l_diff[i] <- NA
}
}
# NOTE(review): this zero-fills ALL remaining NAs, including the 1929/1931
# rows whose diffs are undefined, which drags the per-industry means below
# toward zero -- confirm the averages are meant to include those zeros.
panel_changes[is.na(panel_changes)] <- 0
# Average 1933-1929 revenue change per industry, for the bar chart below.
rev_change_data <- data.frame(ddply(panel_changes, .(Industry), summarize, Average_change=mean(y_diff)))
# Bar charts of the average 1929->1933 change in revenue and employment by
# industry. Industry labels are drawn vertically on the bars; the x-axis tick
# text is suppressed because the labels already identify industries.
ggplot(rev_change_data, aes(x = Industry, y = Average_change, fill = Industry)) +
  geom_bar(stat = "identity", position = position_dodge(0.9)) +
  geom_text(aes(label = Industry), size = 3, angle = 90) +
  labs(x = "Industry",
       y = "Average Change in Revenue, 1933-1929, Adjusted for Inflation (in thousands)",
       title = "Average Change in Revenue by Industry, 1933-1929") +
  theme(axis.text.x = element_blank())
# Average 1933-1929 employment change per industry.
labor_change_data <- data.frame(ddply(panel_changes, .(Industry), summarize,
                                      Average_change = mean(l_diff)))
ggplot(labor_change_data, aes(x = Industry, y = Average_change, fill = Industry)) +
  geom_bar(stat = "identity", position = position_dodge(0.9)) +
  geom_text(aes(label = Industry), size = 3, angle = 90) +
  labs(x = "Industry",
       y = "Average Change in Wage Earners Hired, 1933-1929 (in thousands)",
       title = "Average Change in Employment by Industry, 1933-1929") +
  theme(axis.text.x = element_blank())
################## firm exit graph #########
# Work on a copy so the exit/entry tallies do not disturb panel_original.
panel_exit <- panel_original

# Establishments open in 1929, counted per census year -- tracks the survival
# of the 1929 cohort over time.
est_change_data <- data.frame(
  ddply(subset(panel_exit, open_29 == 1), ~Year, summarise,
        est_count = length(unique(firm.code)))
)

# Entry counts: attribute each firm to the first census year it appears open.
entry_29 <- length(unique(subset(panel_exit, open_29 == 1)$firm.code))
entry_31 <- length(unique(subset(panel_exit, open_29 == 0 & open_31 == 1)$firm.code))
entry_33 <- length(unique(subset(panel_exit, open_29 == 0 & open_31 == 0 & open_33 == 1)$firm.code))
entry_35 <- length(unique(subset(panel_exit, open_29 == 0 & open_31 == 0 & open_33 == 0 & open_35 == 1)$firm.code))
entry <- c(entry_29, entry_31, entry_33, entry_35)
year_entry <- c(1929, 1931, 1933, 1935)
entry_data <- data.frame(entry, year_entry)

# Survival of the 1929 cohort across census years.
ggplot(est_change_data, aes(x = Year, y = est_count)) +
  geom_line() +
  geom_point() +
  labs(x = "Year", y = "Number of Establishments",
       title = "Number of Firms Per Year, 1929-1935")
################### Coefficient by industry #####################
library(sandwich)
library(lmtest)
library(multiwayvcov)
# For each industry, re-estimate the employment regression and record the
# bank-distress coefficient next to the industry's external-finance
# dependence, to be plotted against each other below.
industries <- levels(panel$industry)
# Size the result frame from the data instead of the hard-coded 22 rows,
# which would leave holes or fail if the industry set changes.
d <- data.frame(matrix(NA, nrow = length(industries), ncol = 3))
for (i in seq_along(industries)) {
  new_industry <- industries[i]
  fixed_iv_model_labor <- lm(labor ~ post*fixed_char + year + county,
                             data = subset(panel, panel$industry == new_industry))
  fixed_iv_model_labor <- coeftest(fixed_iv_model_labor,
                                   vcov = vcovHC(fixed_iv_model_labor, type = "HC0", cluster = "County"))
  # Row 3 of the coefficient table is the fixed_char main effect.
  # NOTE(review): the commented-out version used tail(), i.e. the
  # post:fixed_char interaction -- confirm which coefficient is intended.
  coeff <- fixed_iv_model_labor[3, 1]
  # ext_finance is constant within an industry, so take one value. The
  # original head() returned six values, making c(...) length 8 -- too long
  # to assign into a 3-column row.
  ext_fin <- panel$ext_finance[panel$industry == new_industry][1]
  d[i, ] <- c(new_industry, as.numeric(as.character(coeff)), ext_fin)
}
# c() coerced everything to character; recover the numeric coefficient.
d$coeff <- as.numeric(d$X2)
ggplot(data = d, aes(x = X3, y = coeff, group = 1)) +
  geom_point() +
  geom_smooth(method = 'lm') +
  xlab("External Financial Dependence") +
  ylab("Bank Distress Coefficient") +
  ggtitle("Effects of Bank Distress on Employment v. External Financial Dependence by Industry")
########################## Summary Statistics #####################
############ banking stats ##################
# NOTE(review): absolute local path - consider a relative path or a parameter.
fdic_data <- read.xlsx("/Users/Adam/Research/BA_Thesis/Data/FDIC_check.xlsx")
fdic_data$Year <- as.integer(fdic_data$Year)
# Restrict to the 1929-1933 banking-crisis window.
fdic_data <- subset(fdic_data, Year == 1929 |Year == 1930 | Year == 1931 | Year == 1932 |Year == 1933)
# Group states into regions (presumably census-style divisions - confirm).
central_data <- subset(fdic_data, State == 'ohio' | State == 'illinois' | State =='indiana' | State == 'michigan' | State=='wisconsin')
mid_atlantic_data <- subset(fdic_data, State == 'new york' | State == 'new jersey' | State =='pennsylvania')
mountain_data <- subset(fdic_data, State == 'montana' | State == 'idaho' | State =='wyoming' | State == 'colorado' | State=='new mexico' | State=='arizona' | State=='utah'| State=='nevada')
new_england_data <- subset(fdic_data, State == 'maine' | State == 'new hampshire' | State =='vermont' | State == 'massachusetts' | State=='rhode island' | State=='connecticut')
northwestern_data <- subset(fdic_data, State == 'minnesota' | State == 'iowa' | State =='missouri' | State == 'north dakota' | State=='south dakota' | State=='nebraska' | State=='kansas')
pacific_data <- subset(fdic_data, State == 'washington' | State == 'oregon' | State =='california')
south_atlantic_data <- subset(fdic_data, State == 'maryland' | State == 'delaware' | State =='district of columbia'| State =='virginia'| State =='west virginia' | State =='north carolina'| State =='south carolina'| State =='georgia'| State =='florida')
south_central_data <- subset(fdic_data, State == 'kentucky' | State == 'tennessee' | State =='alabama' | State =='mississippi' | State =='arkansas'| State =='oklahoma'| State =='louisiana'| State =='texas')
# Deduplicate to one record per state-county-year for the FDIC variables.
central_data <- unique(central_data[c('State', 'County', 'Year', 'FDIC_BANKS_SUS_','FDIC.DEPOSITS','FDIC_DEPOSITS_SUS_','FDIC.BANKS')])
mid_atlantic_data <- unique(mid_atlantic_data[c('State', 'County', 'Year', 'FDIC_BANKS_SUS_','FDIC.DEPOSITS','FDIC_DEPOSITS_SUS_','FDIC.BANKS')])
mountain_data <- unique(mountain_data[c('State', 'County', 'Year', 'FDIC_BANKS_SUS_','FDIC.DEPOSITS','FDIC_DEPOSITS_SUS_','FDIC.BANKS')])
new_england_data <- unique(new_england_data[c('State', 'County', 'Year', 'FDIC_BANKS_SUS_','FDIC.DEPOSITS','FDIC_DEPOSITS_SUS_','FDIC.BANKS')])
northwestern_data <- unique(northwestern_data[c('State', 'County', 'Year', 'FDIC_BANKS_SUS_','FDIC.DEPOSITS','FDIC_DEPOSITS_SUS_','FDIC.BANKS')])
pacific_data <- unique(pacific_data[c('State', 'County', 'Year', 'FDIC_BANKS_SUS_','FDIC.DEPOSITS','FDIC_DEPOSITS_SUS_','FDIC.BANKS')])
south_atlantic_data <- unique(south_atlantic_data[c('State', 'County', 'Year', 'FDIC_BANKS_SUS_','FDIC.DEPOSITS','FDIC_DEPOSITS_SUS_','FDIC.BANKS')])
south_central_data <- unique(south_central_data[c('State', 'County', 'Year', 'FDIC_BANKS_SUS_','FDIC.DEPOSITS','FDIC_DEPOSITS_SUS_','FDIC.BANKS')])
# Regional totals per year, plus suspension shares of banks and deposits.
central_data_sum <- data.frame(ddply(central_data, .(Year), summarize, banks_sus=sum(FDIC_BANKS_SUS_),banks=sum(FDIC.BANKS),deposits_sus=sum(FDIC_DEPOSITS_SUS_),deposits=sum(FDIC.DEPOSITS), bank_percentage=banks_sus/banks, deposits_percentage = deposits_sus/deposits))
mid_atlantic_data_sum <- data.frame(ddply(mid_atlantic_data, .(Year), summarize, banks_sus=sum(FDIC_BANKS_SUS_),banks=sum(FDIC.BANKS),deposits_sus=sum(FDIC_DEPOSITS_SUS_),deposits=sum(FDIC.DEPOSITS), bank_percentage=banks_sus/banks, deposits_percentage = deposits_sus/deposits))
mountain_data_sum <- data.frame(ddply(mountain_data, .(Year), summarize, banks_sus=sum(FDIC_BANKS_SUS_),banks=sum(FDIC.BANKS),deposits_sus=sum(FDIC_DEPOSITS_SUS_),deposits=sum(FDIC.DEPOSITS), bank_percentage=banks_sus/banks, deposits_percentage = deposits_sus/deposits))
new_england_sum <- data.frame(ddply(new_england_data, .(Year), summarize, banks_sus=sum(FDIC_BANKS_SUS_),banks=sum(FDIC.BANKS),deposits_sus=sum(FDIC_DEPOSITS_SUS_),deposits=sum(FDIC.DEPOSITS), bank_percentage=banks_sus/banks, deposits_percentage = deposits_sus/deposits))
northwestern_sum <- data.frame(ddply(northwestern_data, .(Year), summarize, banks_sus=sum(FDIC_BANKS_SUS_),banks=sum(FDIC.BANKS),deposits_sus=sum(FDIC_DEPOSITS_SUS_),deposits=sum(FDIC.DEPOSITS), bank_percentage=banks_sus/banks, deposits_percentage = deposits_sus/deposits))
pacific_sum <- data.frame(ddply(pacific_data, .(Year), summarize, banks_sus=sum(FDIC_BANKS_SUS_),banks=sum(FDIC.BANKS),deposits_sus=sum(FDIC_DEPOSITS_SUS_),deposits=sum(FDIC.DEPOSITS), bank_percentage=banks_sus/banks, deposits_percentage = deposits_sus/deposits))
south_atlantic_sum <- data.frame(ddply(south_atlantic_data, .(Year), summarize, banks_sus=sum(FDIC_BANKS_SUS_),banks=sum(FDIC.BANKS),deposits_sus=sum(FDIC_DEPOSITS_SUS_),deposits=sum(FDIC.DEPOSITS), bank_percentage=banks_sus/banks, deposits_percentage = deposits_sus/deposits))
south_central_sum <- data.frame(ddply(south_central_data, .(Year), summarize, banks_sus=sum(FDIC_BANKS_SUS_),banks=sum(FDIC.BANKS),deposits_sus=sum(FDIC_DEPOSITS_SUS_),deposits=sum(FDIC.DEPOSITS), bank_percentage=banks_sus/banks, deposits_percentage = deposits_sus/deposits))
####################### census stats ########################
# Number of distinct firms and distinct county-state pairs in the census panel.
count(unique(panel_original$firm.code))
count(unique(panel_original[c('County','State')]))
|
#' RCQC
#'
#' filter RC object and summarize quality control sample variation
#'
#' @param ramclustObj ramclustR object to analyze
#' @param qctag "QC" by default - rowname tag to identify QC samples
#' @param npc number of principal components to calculate and plot
#' @param scale "pareto" by default: PCA scaling method used
#' @param which.data which dataset to use. "SpecAbund" by default
#' @param outfile name of output pdf file, written into the "QC" directory.
#'
#' @details plots a ramclustR summary plot. first page represents the correlation of each cluster to all other clusters, sorted by retention time. large blocks of yellow along the diagonal indicate either poor clustering or a group of coregulated metabolites with similar retention time. It is an imperfect diagnostic, particularly with lipids on reverse phase LC or sugars on HILIC LC systems. Page 2: histogram of r values from page 1 - only r values one position from the diagonal are used. Pages 3:5 - PCA results, with QC samples colored red. relative standard deviation calculated as sd(QC PC scores) / sd(all PC scores). Page 6: histogram of CV values for each compound in the dataset, QC samples only.
#' @return new RC object, with QC samples moved to new slot. prints output summary plots to pdf.
#' @references Broeckling CD, Afsar FA, Neumann S, Ben-Hur A, Prenni JE. RAMClust: a novel feature clustering method enables spectral-matching-based annotation for metabolomics data. Anal Chem. 2014 Jul 15;86(14):6812-7. doi: 10.1021/ac501530d. Epub 2014 Jun 26. PubMed PMID: 24927477.
#' @references Broeckling CD, Ganna A, Layer M, Brown K, Sutton B, Ingelsson E, Peers G, Prenni JE. Enabling Efficient and Confident Annotation of LC-MS Metabolomics Data through MS1 Spectrum and Time Prediction. Anal Chem. 2016 Sep 20;88(18):9226-34. doi: 10.1021/acs.analchem.6b02479. Epub 2016 Sep 8. PubMed PMID: 7560453.
#' @keywords 'ramclustR' 'RAMClustR', 'ramclustR', 'metabolomics', 'clustering', 'feature', 'xcms'
#' @author Corey Broeckling
#' @export
RCQC <- function(ramclustObj = NULL,
                 qctag = "QC",
                 npc = 4,
                 scale = "pareto",
                 which.data = "SpecAbund",
                 outfile = "ramclustQC.pdf"
) {
  dir.create("QC", showWarnings = FALSE)
  ## BUGFIX: honor the 'outfile' argument; the file name was previously
  ## hard-coded to "ramclustQC2.pdf".
  pdf(file = file.path("QC", outfile), useDingbats = FALSE, width = 8, height = 8)
  ## Visualize clustering quality.
  ## If clustering was perfect, we should see a normal distribution of
  ## correlational r values 1 step from the diagonal; imperfect clustering
  ## introduces right skew.
  if (!is.null(ramclustObj$clrt)) {
    o <- order(ramclustObj$clrt)
    c <- cor(ramclustObj$SpecAbund[, o])
    ## BUGFIX: parenthesize (ncol(c) - 1). The old '1:ncol(c)-1' parsed as
    ## (1:ncol(c)) - 1 and only worked because R silently drops the 0 index.
    d <- diag(as.matrix(c[2:nrow(c), 1:(ncol(c) - 1)]))
    hist(d, breaks = 50, main = "")
    title(main = "histogram of pearson's r for each cluster to its adjacent cluster (by time)", cex.main = 0.8,
          sub = paste("skew =", round(skewness(d), digits = 3), " :values near zero are better"), cex.sub = 0.6)
    ## Ideally the heatmap has a bright yellow diagonal with no yellow squares
    ## near the diagonal.  This is slow for larger numbers of clusters.
    heatmap.2(c^2, trace = "none", dendrogram = "none", Rowv = FALSE, Colv = FALSE, main = "pearsons r^2, clusters sorted by rt", cex.main = 0.5,
              cexRow = 0.02 + 1/log10(length(o)), cexCol = 0.02 + 1/log10(length(o)))
  }
  ## PCA of QC samples versus the full sample set.
  while (any(search() == "tf")) {detach(tf)}
  td <- ramclustObj[[which.data]]  ## move as.matrix to ramclustR function
  ## BUGFIX: use the 'qctag' argument (was hard-coded to "QC").
  qc <- grep(qctag, dimnames(td)[[1]])
  if (length(qc) > 1) {
    cols <- rep(8, nrow(td))
    cols[qc] <- 2
    PCA <- pca(td, scale = scale, nPcs = npc, center = TRUE)
    sc <- PCA@scores
    write.csv(sc, file = "QC/RCpcascores.csv")
    ld <- PCA@loadings
    for (i in 1:(ncol(sc) - 1)) {
      plot(sc[, i], sc[, i + 1], col = cols, pch = 19, main = "PCA analysis, QC samples vs full set",
           xlab = paste("PC", i, ":: r^2 =", round(PCA@R2[i], digits = 2), ":: QC(rel sd) = ",
                        round(sd(sc[qc, i])/sd(sc[, i]), digits = 2)),
           ylab = paste("PC", i + 1, ":: r^2 =", round(PCA@R2[i + 1], digits = 2), ":: QC(rel sd) = ",
                        round(sd(sc[qc, i + 1])/sd(sc[, i + 1]), digits = 2))
      )
      legend(qctag, text.col = 2, x = "topright", bty = "n")
    }
    ## Histogram of QC relative standard deviations for all compounds/clusters.
    sds <- apply(td[qc, ], 2, FUN = "sd", na.rm = TRUE)
    means <- apply(td[qc, ], 2, FUN = "mean", na.rm = TRUE)
    cvs <- sds/means
    qs <- quantile(cvs, probs = seq(0, 1, 0.2), na.rm = TRUE)
    ## BUGFIX: dropped the invalid 'na.rm' argument to hist().
    hist(cvs, breaks = 50, main = "")
    title("histogram of cluster CVs of QC samples", line = 2.7)
    title("20% quantiles in red on top axis", col.main = 2, cex.main = 0.7, line = 2)
    axis(side = 3, col = 2, col.ticks = 2, col.axis = 2, round(qs, digits = 3), labels = TRUE, las = 2, cex.axis = 0.4)
    ## BUGFIX: subset the selected dataset directly. The old code indexed
    ## ramclustObj[[i]] with a stale loop variable 'i' and always used
    ## "SpecAbund" regardless of 'which.data'.
    nonqc <- td[-qc, , drop = FALSE]
    ramclustObj$clcvqc <- cvs
  } else {
    nonqc <- td
  }
  ## Histogram of replicate-injection relative standard deviations: sample
  ## names occurring at least twice are treated as replicate injections.
  keep <- table(row.names(nonqc))
  keep <- names(keep[which(keep >= 2)])
  if (length(keep) > 0) {
    class <- as.factor(keep)
    levs <- levels(class)
    mean1 <- matrix(nrow = length(levs), ncol = ncol(nonqc))
    sds <- matrix(nrow = length(levs), ncol = ncol(nonqc))
    for (i in 1:length(levs)) {
      mean1[i, ] <- apply(nonqc[which(as.character(row.names(nonqc)) == levs[i]), ], 2, "mean")
      sds[i, ] <- apply(nonqc[which(as.character(row.names(nonqc)) == levs[i]), ], 2, "sd")
    }
    cvs <- apply(sds/mean1, 2, FUN = "median", na.rm = TRUE)
    means <- apply(mean1, 2, FUN = "median", na.rm = TRUE)
    write.csv(data.frame(means, cvs), file = "QC/cvs.csv")
    qs <- quantile(cvs, probs = seq(0, 1, 0.2), na.rm = TRUE)
    hist(cvs, breaks = 50, main = "")
    title("histogram of cluster median CVs for replicate injections", line = 2.7)
    title("20% quantiles in red on top axis", col.main = 2, cex.main = 0.7, line = 2)
    axis(side = 3, col = 2, col.ticks = 2, col.axis = 2, round(qs, digits = 3), labels = TRUE, las = 2, cex.axis = 0.4)
    ramclustObj$clcvrepinj <- cvs
  }
  dev.off()
  ## Copy QC rows into new QC_* slots ...
  for (i in c("SpecAbund", "SpecAbundAve")) {
    if (!is.null(ramclustObj[[i]])) {
      qc <- grep(qctag, dimnames(ramclustObj[[i]])[[1]])
      if (length(qc) > 0) {
        ramclustObj[[paste("QC_", i, sep = "")]] <- ramclustObj[[i]][qc, ]
      } else {
        ramclustObj[[paste("QC_", i, sep = "")]] <- NA
      }
    }
  }
  ## ... then remove the QC rows from the original slots.
  for (i in c("SpecAbund", "SpecAbundAve")) {
    if (!is.null(ramclustObj[[i]])) {
      qc <- grep(qctag, dimnames(ramclustObj[[i]])[[1]])
      if (length(qc) > 0) {
        ramclustObj[[i]] <- ramclustObj[[i]][-qc, ]
      }
    }
  }
  return(ramclustObj)
}
| /R/RCQC.R | no_license | inambioinfo/RAMClustR | R | false | false | 6,814 | r | #' RCQC
#'
#' filter RC object and summarize quality control sample variation
#'
#' @param ramclustObj ramclustR object to analyze
#' @param qctag "QC" by default - rowname tag to identify QC samples
#' @param npc number of principal components to calculate and plot
#' @param scale "pareto" by default: PCA scaling method used
#' @param which.data which dataset to use. "SpecAbund" by default
#' @param outfile name of output pdf file.
#'
#' @details plots a ramclustR summary plot. first page represents the correlation of each cluster to all other clusters, sorted by retention time. large blocks of yellow along the diaganol indicate either poor clustering or a group of coregulated metabolites with similar retention time. It is an imperfect diagnostic, partuclarly with lipids on reverse phase LC or sugars on HILIC LC systems. Page 2: histogram of r values from page 1 - only r values one position from the diagonal are used. Pages 3:5 - PCA results, with QC samples colored red. relative standard deviation calculated as sd(QC PC scores) / sd(all PC scores). Page 6: histogram of CV values for each compound int he dataset, QC samples only.
#' @return new RC object, with QC samples moved to new slot. prints output summary plots to pdf.
#' @references Broeckling CD, Afsar FA, Neumann S, Ben-Hur A, Prenni JE. RAMClust: a novel feature clustering method enables spectral-matching-based annotation for metabolomics data. Anal Chem. 2014 Jul 15;86(14):6812-7. doi: 10.1021/ac501530d. Epub 2014 Jun 26. PubMed PMID: 24927477.
#' @references Broeckling CD, Ganna A, Layer M, Brown K, Sutton B, Ingelsson E, Peers G, Prenni JE. Enabling Efficient and Confident Annotation of LC-MS Metabolomics Data through MS1 Spectrum and Time Prediction. Anal Chem. 2016 Sep 20;88(18):9226-34. doi: 10.1021/acs.analchem.6b02479. Epub 2016 Sep 8. PubMed PMID: 7560453.
#' @keywords 'ramclustR' 'RAMClustR', 'ramclustR', 'metabolomics', 'clustering', 'feature', 'xcms'
#' @author Corey Broeckling
#' @export
RCQC<-function(ramclustObj=NULL,
qctag="QC",
npc=4,
scale="pareto",
which.data="SpecAbund",
outfile="ramclustQC.pdf"
){
dir.create("QC")
# BUG: the file name is hard-coded to "ramclustQC2.pdf"; the 'outfile'
# argument is silently ignored.
pdf(file=paste("QC/", "ramclustQC2.pdf", sep=""), useDingbats=FALSE, width=8, height=8)
#visualize clustering
## if clustering was perfect, we should see a normal distribution of
## correlational r values 1 step from the diagonal
## imperfect clustering introduces right skew
## load("datasets/RCobject.Rdata")
if(!is.null(ramclustObj$clrt)) {
o<-order(ramclustObj$clrt)
c<-cor(ramclustObj$SpecAbund[,o])
# NOTE(review): '1:ncol(c)-1' parses as (1:ncol(c)) - 1; it only works
# because R drops the 0 index. '1:(ncol(c)-1)' was likely intended.
d<-diag(as.matrix((c[2:(nrow(c)), 1:ncol(c)-1])))
hist(d, breaks=50, main="")
title(main="histogram of pearson's r for each cluster to its adjacent cluster (by time)", cex.main=0.8,
sub=paste("skew =", round(skewness(d), digits=3), " :values near zero are better"), cex.sub=0.6)
# ideally heatmap will have a bright yellow diagonal with no yellow squares near the diagonal
# this is slow for larger numbers of clusters
heatmap.2(c^2, trace="none", dendrogram="none", Rowv=FALSE, Colv=FALSE, main="pearsons r^2, clusters sorted by rt", cex.main=0.5,
cexRow=0.02 + 1/log10(length(o)), cexCol=0.02 + 1/log10(length(o)))
}
## PCA of QC samples
while(any(search()=="tf")) {detach(tf)}
td<-ramclustObj[[which.data]] ##move as.matrix to ramclustR function
# BUG: "QC" is hard-coded here; the 'qctag' argument should be used.
qc<-grep("QC", dimnames(td)[[1]])
if(length(qc)>1) {
cols<-rep(8, nrow(td))
cols[qc]<-2
PCA<-pca(td, scale=scale, nPcs=npc, center=TRUE)
sc<-PCA@scores
write.csv(sc, file="QC/RCpcascores.csv")
ld<-PCA@loadings
for(i in 1:(ncol(sc)-1)) {
plot(sc[,i], sc[,i+1], col=cols, pch=19, main="PCA analysis, QC samples vs full set",
xlab=paste("PC", i, ":: r^2 =", round(PCA@R2[i], digits=2), ":: QC(rel sd) = ",
round(sd(sc[qc,i])/sd(sc[,i]), digits=2) ),
ylab=paste("PC", i+1, ":: r^2 =", round(PCA@R2[i+1], digits=2), ":: QC(rel sd) = ",
round(sd(sc[qc,i+1])/sd(sc[,i+1]), digits=2) )
)
legend(qctag, text.col=2, x="topright", bty="n")
}
## histogram of QC relative standard deviations for all compounds/clusters
sds<-apply(td[qc,], 2, FUN="sd", na.rm=TRUE)
#cat(sds, '\n')
means<-apply(td[qc,], 2, FUN="mean", na.rm=TRUE)
cvs<-sds/means
qs<-quantile(cvs, probs=seq(0,1,0.2), na.rm=TRUE)
# NOTE(review): hist() has no 'na.rm' argument; it is absorbed by '...'.
hist(cvs, breaks=50, main="", na.rm=TRUE)
title("histogram of cluster CVs of QC samples", line=2.7)
title("20% quantiles in red on top axis", col.main =2, cex.main=0.7, line=2)
axis(side=3, col=2, col.ticks=2, col.axis=2, round(qs, digits=3), labels=TRUE, las=2, cex.axis=0.4)
# BUG: 'i' here is a stale loop variable from the PCA plotting loop above;
# td[-qc, ] was likely intended.
nonqc<-ramclustObj[["SpecAbund"]][-grep(qctag, dimnames(ramclustObj[[i]])[[1]]),]
ramclustObj$clcvqc<-cvs
} else {nonqc<-ramclustObj[["SpecAbund"]]}
## histogram of replicate injection relative standard deviations
keep<-table(row.names(nonqc))
keep<-names(keep[which(keep>=2)])
if(length(keep)>0){
class<-as.factor(keep)
levs<-levels(class)
mean1<-matrix(nrow=length(levs), ncol=ncol(nonqc))
sds<-matrix(nrow=length(levs), ncol=ncol(nonqc))
for (i in 1:length(levs)){
mean1[i,]<-apply(nonqc[which(as.character(row.names(nonqc))==levs[i]),], 2, "mean")
sds[i,]<-apply(nonqc[which(as.character(row.names(nonqc))==levs[i]),], 2, "sd")
}
cvs<-apply(sds/mean1, 2, FUN="median", na.rm=TRUE)
means<-apply(mean1, 2, FUN="median", na.rm=TRUE)
write.csv(data.frame(means, cvs), file="QC/cvs.csv")
#ordmeans<-sort(means, decreasing=TRUE)
#fivecut<-ordmeans[round(length(means)*0.05)]
#up25<-which(means>quantile(means)[4])
#up5<-which(means>fivecut)
qs<-quantile(cvs, probs=seq(0,1,0.2), na.rm=TRUE)
hist(cvs, breaks=50, main="")
title("histogram of cluster median CVs for replicate injections", line=2.7)
title("20% quantiles in red on top axis", col.main =2, cex.main=0.7, line=2)
axis(side=3, col=2, col.ticks=2, col.axis=2, round(qs, digits=3), labels=TRUE, las=2, cex.axis=0.4)
ramclustObj$clcvrepinj<-cvs
}
dev.off()
# Copy QC rows into new QC_* slots, then remove them from the originals.
for(i in c("SpecAbund", "SpecAbundAve")) {
if(!is.null(ramclustObj[[i]])) {
qc<-grep(qctag, dimnames(ramclustObj[[i]])[[1]])
if(length(qc)>0) {
ramclustObj[[paste("QC_", i, sep="")]]<- ramclustObj[[i]][qc,]
} else {
ramclustObj[[paste("QC_", i, sep="")]]<-NA
}
}
}
for(i in c("SpecAbund", "SpecAbundAve")) {
if(!is.null(ramclustObj[[i]])) {
qc<-grep(qctag, dimnames(ramclustObj[[i]])[[1]])
if(length(qc)>0) {
ramclustObj[[i]]<- ramclustObj[[i]][-qc,]
}
}
}
return(ramclustObj)
}
|
# Extracted Rd example for aidar::getHisto3D: reads the AIDA histogram file
# bundled with the package and fetches the 3D histogram named '13'.
library(aidar)
### Name: getHisto3D
### Title: retrieves a given 3D histogram by it's name from the given file
### and returns it as a data.frame
### Aliases: getHisto3D
### Keywords: aida histogram
### ** Examples
histoFile = system.file("extdata", "histos.xml.gz", package="aidar")
h3 = getHisto3D(histoFile, '13')
| /data/genthat_extracted_code/aidar/examples/getHisto3D.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 326 | r | library(aidar)
### Name: getHisto3D
### Title: retrieves a given 3D histogram by it's name from the given file
### and returns it as a data.frame
### Aliases: getHisto3D
### Keywords: aida histogram
### ** Examples
histoFile = system.file("extdata", "histos.xml.gz", package="aidar")
h3 = getHisto3D(histoFile, '13')
|
## Matrix inversion is usually a costly computation and there may be some
## benefit to caching the inverse of a matrix rather than compute it repeatedly.
## Below a pair of functions that cache the inverse of a matrix.
## makeCacheMatrix: This function creates a special "matrix" object
## that can cache its inverse as following:
## 1. set the value of the matrix
## 2. get the value of the matrix
## 3. set the value of the inverse of the matrix
## 4. get the value of the inverse of the matrix
## Construct a matrix wrapper that can memoise its inverse.
## Returns a list of accessors: set/get for the matrix itself and
## setinverse/getinverse for the cached inverse (NULL until computed).
makeCacheMatrix <- function(x = matrix()) {
  inv_cache <- NULL
  set <- function(y) {
    ## Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    inv_cache <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    inv_cache <<- inverse
  }
  getinverse <- function() {
    inv_cache
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## cacheSolve: This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already been
## calculated (and the matrix has not changed), then cacheSolve should
## retrieve the inverse from the cache.
## Return the inverse of the special "matrix" created by makeCacheMatrix,
## reusing the cached inverse when available; otherwise compute it with
## solve(), store it in the cache, and return it.
##
## x   : list of accessors as returned by makeCacheMatrix
## ... : further arguments passed on to solve()
cacheSolve <- function(x, ...) {
  m <- x$getinverse()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  ## BUGFIX: forward '...' to solve(); it was accepted but silently ignored.
  m <- solve(data, ...)
  x$setinverse(m)
  m
}
## Test run
#> source("makeCacheMatrix.R")
#>
#> x <- makeCacheMatrix()
#>
#> x$set(matrix(c(-1,-2,1,1),2,2))
#>
#> x$get()
#[,1] [,2]
#[1,] -1 1
#[2,] -2 1
#>
#> cacheSolve(x)
#[,1] [,2]
#[1,] 1 -1
#[2,] 2 -1
#>
#> cacheSolve(x)
#getting cached data
#[,1] [,2]
#[1,] 1 -1
#[2,] 2 -1
#> | /cachematrix.R | no_license | mohkarime/ProgrammingAssignment2 | R | false | false | 1,745 | r | ## Matrix inversion is usually a costly computation and there may be some
## benefit to caching the inverse of a matrix rather than compute it repeatedly.
## Below a pair of functions that cache the inverse of a matrix.
## makeCacheMatrix: This function creates a special "matrix" object
## that can cache its inverse as following:
## 1. set the value of the matrix
## 2. get the value of the matrix
## 3. set the value of the inverse of the matrix
## 4. get the value of the inverse of the matrix
## Construct a matrix wrapper that can memoise its inverse.
## Returns a list of accessors: set/get for the matrix itself and
## setinverse/getinverse for the cached inverse (NULL until computed).
makeCacheMatrix <- function(x = matrix()) {
  inv_cache <- NULL
  set <- function(y) {
    ## Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    inv_cache <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    inv_cache <<- inverse
  }
  getinverse <- function() {
    inv_cache
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## cacheSolve: This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already been
## calculated (and the matrix has not changed), then cacheSolve should
## retrieve the inverse from the cache.
## Return the inverse of the special "matrix" created by makeCacheMatrix,
## reusing the cached inverse when available; otherwise compute it with
## solve(), store it in the cache, and return it.
##
## x   : list of accessors as returned by makeCacheMatrix
## ... : further arguments passed on to solve()
cacheSolve <- function(x, ...) {
  m <- x$getinverse()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  ## BUGFIX: forward '...' to solve(); it was accepted but silently ignored.
  m <- solve(data, ...)
  x$setinverse(m)
  m
}
## Test run
#> source("makeCacheMatrix.R")
#>
#> x <- makeCacheMatrix()
#>
#> x$set(matrix(c(-1,-2,1,1),2,2))
#>
#> x$get()
#[,1] [,2]
#[1,] -1 1
#[2,] -2 1
#>
#> cacheSolve(x)
#[,1] [,2]
#[1,] 1 -1
#[2,] 2 -1
#>
#> cacheSolve(x)
#getting cached data
#[,1] [,2]
#[1,] 1 -1
#[2,] 2 -1
#> |
## Find intersections of 'lines' with meridians at the given longitudes.
## For each longitude a north-south segment spanning 'latrange' is built and
## intersected with 'lines'; the intersection latitudes are summarised with
## 'fun' (median by default). Returns a 2-column matrix (longitude, latitude);
## latitude is NA where no intersection was found.
#' @importFrom rgeos gIntersection
#' @importFrom sp SpatialLines Lines Line coordinates
longint <- function(lines, longitudes, latrange = c(-90, 90), fun = median) {
  latitudes <- rep(NA_real_, length(longitudes))
  for (i in seq_along(longitudes)) {
    line <- SpatialLines(list(Lines(list(Line(cbind(rep(longitudes[i], 2), latrange))), "1")), proj4string = CRS(projection(lines)))
    pts <- try(gIntersection(lines, line))
    ## BUGFIX: use '&&' for the scalar condition, and apply the caller's
    ## 'fun' - it was previously ignored in favour of a hard-coded median().
    if (!inherits(pts, "try-error") && !is.null(pts)) latitudes[i] <- fun(coordinates(pts)[, 2])
  }
  cbind(longitudes, latitudes)
}
## For each feature of a SpatialLinesDataFrame, keep only the component Line
## with the most coordinates (the "most complex" part); other parts are
## dropped. Non-SpatialLinesDataFrame input is returned unchanged.
keepOnlyMostComplexLine <- function(x) {
for (iObj in seq_len(nrow(x))) {
if (inherits(x, "SpatialLinesDataFrame")) {
# index of the component Line with the most vertices
# (note: the inner anonymous function's 'x' shadows the outer argument)
wmax <- which.max(sapply(x[iObj, ]@lines[[1]]@Lines, function(x)
nrow(x@coords)))
x@lines[[iObj]]@Lines <- x@lines[[iObj]]@Lines[wmax]
}
}
x
}
#' @importFrom raster projectExtent raster calc rasterToContour isLonLat
#' @importFrom sp spChFIDs
#' @importFrom maptools spRbind
## Build one ice-edge contour line per year for a given month.
## 'icefiles' and 'readice' are helpers defined elsewhere in this package.
monthlyIceContours <- function(month, years = NULL, fun = max,
llim = NULL, product = "nsidc", lev = 15,
longlat = TRUE) {
icf <- icefiles(product = product)
# default to every year present in the file catalogue
if (is.null(years)) years <- unique(format(icf$date, "%Y"))
# NOTE(review): 'cl' and the 'fun' argument are never used below
cl <- vector("list", length(years))
dummy <- readice(product = product)
# optional lon/lat crop, projected into the ice grid's CRS
if (!is.null(llim)) {
ex <- projectExtent(raster(llim, crs = "+proj=longlat +ellps=WGS84"), projection(dummy))
} else {
ex <- NULL
}
for (iyear in seq_along(years)) {
thisf <- subset(icf, format(date, "%m") == month & format(date, "%Y") == years[iyear])
ice <- readice(thisf$date, xylim = ex)
# per-cell maximum over the month's files
ice <- calc(ice, max, na.rm = TRUE)
# contour at level 'lev' (presumably % concentration - confirm),
# keeping only the most complex line per year
thiscl <- keepOnlyMostComplexLine(rasterToContour(ice, lev = lev))
thiscl$year <- years[iyear]
if (iyear == 1) {
icelines <- thiscl
} else {
icelines <- spRbind(icelines, spChFIDs(thiscl, as.character(iyear)))
}
}
# optionally transform the result back to lon/lat
if (longlat & !isLonLat(dummy)) icelines <- spTransform(icelines, "+proj=longlat +ellps=WGS84")
icelines
}
| /R/highlevel.R | no_license | bootneck2000/raadtools | R | false | false | 2,150 | r | ## function to find intersections with lines at a given longitude
#' @importFrom rgeos gIntersection
#' @importFrom sp SpatialLines Lines Line coordinates
longint <- function(lines, longitudes, latrange = c(-90, 90), fun = median) {
latitudes <- rep(NA_real_, length(longitudes))
for (i in seq_along(longitudes)) {
line <- SpatialLines(list(Lines(list(Line(cbind(rep(longitudes[i], 2), latrange))), "1")), proj4string = CRS(projection(lines)))
pts <- try(gIntersection(lines, line))
if (!inherits(pts, "try-error") & !is.null(pts)) latitudes[i] <- median(coordinates(pts)[,2])
}
cbind(longitudes, latitudes)
}
## For each feature of a SpatialLinesDataFrame, keep only the component Line
## with the most coordinates (the "most complex" part); other parts are
## dropped. Non-SpatialLinesDataFrame input is returned unchanged.
keepOnlyMostComplexLine <- function(x) {
for (iObj in seq_len(nrow(x))) {
if (inherits(x, "SpatialLinesDataFrame")) {
# index of the component Line with the most vertices
# (note: the inner anonymous function's 'x' shadows the outer argument)
wmax <- which.max(sapply(x[iObj, ]@lines[[1]]@Lines, function(x)
nrow(x@coords)))
x@lines[[iObj]]@Lines <- x@lines[[iObj]]@Lines[wmax]
}
}
x
}
#' @importFrom raster projectExtent raster calc rasterToContour isLonLat
#' @importFrom sp spChFIDs
#' @importFrom maptools spRbind
## Build one ice-edge contour line per year for a given month.
## 'icefiles' and 'readice' are helpers defined elsewhere in this package.
monthlyIceContours <- function(month, years = NULL, fun = max,
llim = NULL, product = "nsidc", lev = 15,
longlat = TRUE) {
icf <- icefiles(product = product)
# default to every year present in the file catalogue
if (is.null(years)) years <- unique(format(icf$date, "%Y"))
# NOTE(review): 'cl' and the 'fun' argument are never used below
cl <- vector("list", length(years))
dummy <- readice(product = product)
# optional lon/lat crop, projected into the ice grid's CRS
if (!is.null(llim)) {
ex <- projectExtent(raster(llim, crs = "+proj=longlat +ellps=WGS84"), projection(dummy))
} else {
ex <- NULL
}
for (iyear in seq_along(years)) {
thisf <- subset(icf, format(date, "%m") == month & format(date, "%Y") == years[iyear])
ice <- readice(thisf$date, xylim = ex)
# per-cell maximum over the month's files
ice <- calc(ice, max, na.rm = TRUE)
# contour at level 'lev' (presumably % concentration - confirm),
# keeping only the most complex line per year
thiscl <- keepOnlyMostComplexLine(rasterToContour(ice, lev = lev))
thiscl$year <- years[iyear]
if (iyear == 1) {
icelines <- thiscl
} else {
icelines <- spRbind(icelines, spChFIDs(thiscl, as.character(iyear)))
}
}
# optionally transform the result back to lon/lat
if (longlat & !isLonLat(dummy)) icelines <- spTransform(icelines, "+proj=longlat +ellps=WGS84")
icelines
}
|
# Extracted Rd example for R.oo's Object$load; the example body is marked
# 'Not run' so this file only attaches the package.
library(R.oo)
### Name: Object$load
### Title: Static method to load an Object from a file or a connection
### Aliases: Object$load load.Object Object.load load,Object-method
### Keywords: programming methods IO internal methods
### ** Examples
## Not run: For a complete example see help(Object).
| /data/genthat_extracted_code/R.oo/examples/load.Object.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 303 | r | library(R.oo)
### Name: Object$load
### Title: Static method to load an Object from a file or a connection
### Aliases: Object$load load.Object Object.load load,Object-method
### Keywords: programming methods IO internal methods
### ** Examples
## Not run: For a complete example see help(Object).
|
#######################################
####### DECONVOLUTION FUNCTIONS #######
#######################################
############################################
#' Basis Matrix
#' @description Basis matrix construction
#' @name SCDC_basis
#' @param x ExpressionSet object for single cells
#' @param ct.sub a subset of cell types that are selected to construct basis matrix
#' @param ct.varname variable name for 'cell types'
#' @param sample variable name for subject/samples
#' @param ct.cell.size default is NULL, which means the "library size" is calculated based on the data. Users can specify a named vector of cell size factors corresponding to ct.sub according to prior knowledge; names(ct.cell.size) must not be NULL.
#' @return a list of basis matrix, sum of cell-type-specific library size, sample variance matrix, basis matrix by mvw, mvw matrix.
#' @export
SCDC_basis <- function(x, ct.sub = NULL, ct.varname, sample, ct.cell.size = NULL){
  ## default to all (non-NA) cell types present in the phenotype data
  if (is.null(ct.sub)){
    ct.sub <- unique(x@phenoData@data[, ct.varname])
  }
  ct.sub <- ct.sub[!is.na(ct.sub)]
  x.sub <- x[, x@phenoData@data[, ct.varname] %in% ct.sub]
  ## qc: remove genes with zero counts across all selected cells
  x.sub <- x.sub[rowSums(exprs(x.sub)) > 0, ]
  ## sample mean & sample variance matrices: genes by cell types
  countmat <- exprs(x.sub)
  ct.id <- droplevels(as.factor(x.sub@phenoData@data[, ct.varname]))
  sample.id <- as.character(x.sub@phenoData@data[, sample])
  ct_sample.id <- paste(ct.id, sample.id, sep = '%')
  ## per (cell type, subject) relative gene-expression profile
  mean.mat <- sapply(unique(ct_sample.id), function(id){
    y = as.matrix(countmat[, ct_sample.id %in% id])
    apply(y, 1, sum, na.rm = TRUE)/sum(y)
  })
  mean.id <- do.call('rbind', strsplit(unique(ct_sample.id), split = '%'))
  ## across-subject variance of the profiles, per cell type
  sigma <- sapply(unique(mean.id[,1]), function(id){
    y = mean.mat[, mean.id[,1] %in% id]
    apply(y, 1, var, na.rm = TRUE)
  })
  ## average library size per cell, by cell type and subject
  sum.mat2 <- sapply(unique(sample.id), function(sid){
    sapply(unique(ct.id), function(id){
      y = as.matrix(countmat[, ct.id %in% id & sample.id %in% sid])
      sum(y)/ncol(y)
    })
  })
  rownames(sum.mat2) <- unique(ct.id)
  colnames(sum.mat2) <- unique(sample.id)
  ## cell size factor: estimated from the data unless supplied by the user
  if (is.null(ct.cell.size)){
    sum.mat <- rowMeans(sum.mat2, na.rm = T)
  } else {
    if (is.null(names(ct.cell.size))){
      ## BUGFIX: 'break' is invalid outside a loop and raises a confusing
      ## runtime error in R; signal the intended error explicitly.
      stop("Cell size factor vector requires cell type names...")
    } else {
      sum.mat <- ct.cell.size
    }
  }
  ## unweighted basis matrix: average profile per cell type, scaled by size factors
  basis <- sapply(unique(mean.id[,1]), function(id){
    z <- sum.mat[mean.id[,1]]
    mean.mat.z <- t(t(mean.mat)*z)
    y = as.matrix(mean.mat.z[, mean.id[,1] %in% id])
    apply(y, 1, mean, na.rm = TRUE)
  })
  ## helper: per-gene maximum across cell types, scaled by its median
  my.max <- function(x, ...){
    y <- apply(x, 1, max, na.rm = TRUE)
    y / median(y, na.rm = T)
  }
  ## per-subject maximal variance weights (donor, cell type and genes matched)
  var.adj <- sapply(unique(sample.id), function(sid) {
    my.max(sapply(unique(ct.id), function(id) {
      y = countmat[, ct.id %in% id & sample.id %in% sid,
                   drop = FALSE]
      apply(y, 1, var, na.rm = T)
    }), na.rm = T)
  })
  colnames(var.adj) <- unique(sample.id)
  ## winsorize the weights at the 15th/85th percentiles to limit extremes
  q15 <- apply(var.adj, 2, quantile, probs = 0.15, na.rm = T)
  q85 <- apply(var.adj, 2, quantile, probs = 0.85, na.rm = T)
  var.adj.q <- t(apply(var.adj, 1,
                       function(y){y[y < q15] <- q15[y < q15]
                       y[y > q85] <- q85[y > q85]
                       return(y)})) + 1e-4
  message("Creating Basis Matrix adjusted for maximal variance weight")
  ## variance-weighted profiles: each gene down-weighted by its sqrt weight
  mean.mat.mvw <- sapply(unique(ct_sample.id), function(id){
    sid = unlist(strsplit(id, '%'))[2]
    y = as.matrix(countmat[, ct_sample.id %in% id])
    yy = sweep(y, 1, sqrt(var.adj.q[, sid]), '/')
    apply(yy, 1, sum, na.rm = TRUE)/sum(yy)
  })
  basis.mvw <- sapply(unique(mean.id[,1]), function(id){
    z <- sum.mat[mean.id[,1]]
    mean.mat.z <- t(t(mean.mat.mvw)*z)
    y = as.matrix(mean.mat.z[, mean.id[,1] %in% id])
    apply(y, 1, mean, na.rm = TRUE)
  })
  ## reorder columns to the requested cell-type order
  basis.mvw <- basis.mvw[, ct.sub]
  sigma <- sigma[, ct.sub]
  basis <- basis[, ct.sub]
  sum.mat <- sum.mat[ct.sub]
  return(list(basis = basis, sum.mat = sum.mat,
              sigma = sigma, basis.mvw = basis.mvw, var.adj.q = var.adj.q))
}
#############################################
#' Basis matrix for single cells from one subject
#' @description Basis matrix construction for single cells from one subject
#' @name SCDC_basis_ONE
#' @param x ExpressionSet object for single cells
#' @param ct.sub a subset of cell types that are selected to construct basis matrix
#' @param ct.varname variable name for 'cell types'
#' @param sample variable name for subject/samples
#' @param ct.cell.size default is NULL, which means the "library size" is calculated based on the data. Users can specify a vector of cell size factors corresponding to the ct.sub according to prior knowledge. The vector should be named: names(ct.cell.size input) should not be NULL.
#' @return a list of basis matrix, sum of cell-type-specific library size, sample variance matrix, basis matrix by mvw, mvw matrix.
#' @export
SCDC_basis_ONE <- function(x , ct.sub = NULL, ct.varname, sample, ct.cell.size = NULL){
  # Basis matrix construction when all single cells come from ONE subject.
  # Returns list(basis, sum.mat, sigma = NULL, basis.mvw, var.adj.q); sigma is
  # NULL because across-subject variance cannot be computed from one subject.
  # Default to all non-NA cell types present in the phenoData.
  if (is.null(ct.sub)){
    ct.sub <- unique(x@phenoData@data[,ct.varname])[!is.na(unique(x@phenoData@data[,ct.varname]))]
  }
  ct.sub <- ct.sub[!is.na(ct.sub)]
  x.sub <- x[,x@phenoData@data[,ct.varname] %in% ct.sub]
  # qc: drop genes with zero total count across the retained cells
  x.sub <- x.sub[rowSums(exprs(x.sub)) > 0,]
  countmat <- exprs(x.sub)
  ct.id <- x.sub@phenoData@data[,ct.varname]
  sample.id <- x.sub@phenoData@data[,sample]
  # composite "celltype%sample" id; assumes '%' does not occur in the labels
  ct_sample.id <- paste(ct.id, sample.id, sep = '%')
  # relative expression per (cell type, subject): per-gene sum / grand total
  mean.mat <- sapply(unique(ct_sample.id), function(id){
    y = as.matrix(countmat[, ct_sample.id %in% id])
    apply(y, 1, sum, na.rm = TRUE)/sum(y)
  })
  mean.id <- do.call('rbind', strsplit(unique(ct_sample.id), split = '%'))
  # average library size per cell type within each subject
  sum.mat2 <- sapply(unique(sample.id), function(sid){
    sapply(unique(ct.id), function(id){
      y = as.matrix(countmat[, ct.id %in% id & sample.id %in% sid])
      sum(y)/ncol(y)
    })
  })
  rownames(sum.mat2) <- unique(ct.id)
  colnames(sum.mat2) <- unique(sample.id)
  if (is.null(ct.cell.size)){
    sum.mat <- rowMeans(sum.mat2, na.rm = TRUE)
  } else {
    if (is.null(names(ct.cell.size))){
      # BUGFIX: original emitted a message() and then `break`, but `break` is
      # invalid outside a loop in R and itself raises an obscure error.
      # Fail explicitly with the intended message instead.
      stop("Cell size factor vector requires cell type names...")
    } else {
      sum.mat <- ct.cell.size
    }
  }
  # unweighted basis: mean over subjects of (relative expression x library size)
  basis <- sapply(unique(mean.id[,1]), function(id){
    z <- sum.mat[mean.id[,1]]
    mean.mat.z <- t(t(mean.mat)*z)
    y = as.matrix(mean.mat.z[, mean.id[,1] %in% id])
    apply(y, 1, mean, na.rm = TRUE)
  })
  # per-gene maximal variance across cell types, scaled by its median
  my.max <- function(x,...){
    y <- apply(x, 1, max, na.rm = TRUE)
    y / median(y, na.rm = TRUE)
  }
  var.adj <- sapply(unique(sample.id), function(sid) {
    my.max(sapply(unique(ct.id), function(id) {
      y = countmat[, ct.id %in% id & sample.id %in% sid,
                   drop = FALSE]
      apply(y, 1, var, na.rm = TRUE)
    }), na.rm = TRUE)
  })
  colnames(var.adj) <- unique(sample.id)
  # winsorize the variance weights to the [15%, 85%] quantile band, then add a
  # small constant so the subsequent division never hits zero
  q15 <- apply(var.adj, 2, quantile, probs = 0.15, na.rm = TRUE)
  q85 <- apply(var.adj, 2, quantile, probs = 0.85, na.rm = TRUE)
  var.adj.q <- as.matrix(apply(var.adj, 1,
                               function(y){y[y < q15] <- q15[y < q15]
                               y[y > q85] <- q85[y > q85]
                               return(y)}) + 1e-4)
  message("Creating Basis Matrix adjusted for maximal variance weight")
  # variance-weighted analogue of mean.mat
  mean.mat.mvw <- sapply(unique(ct_sample.id), function(id){
    y = as.matrix(countmat[, ct_sample.id %in% id])
    yy = sweep(y, 1, sqrt(var.adj.q), '/')
    apply(yy, 1, sum, na.rm = TRUE)/sum(yy)
  })
  basis.mvw <- sapply(unique(mean.id[,1]), function(id){
    z <- sum.mat[mean.id[,1]]
    mean.mat.z <- t(t(mean.mat.mvw)*z)
    y = as.matrix(mean.mat.z[, mean.id[,1] %in% id])
    apply(y, 1, mean, na.rm = TRUE)
  })
  # reorder columns to the requested cell-type order
  basis.mvw <- basis.mvw[, ct.sub]
  sigma <- NULL # in the one subject case, no variance is calculated.
  basis <- basis[, ct.sub]
  sum.mat <- sum.mat[ct.sub]
  return(list(basis = basis, sum.mat = sum.mat,
              sigma = sigma, basis.mvw = basis.mvw, var.adj.q = var.adj.q))
}
#################################
#' Clustering QC
#' @description Single cells Clustering QC
#' @name SCDC_qc
#' @import pheatmap
#' @param sc.eset ExpressionSet object for single cells
#' @param ct.varname variable name for 'cell type'
#' @param sample variable name for subject/sample
#' @param scsetname the name for the single cell dataset
#' @param ct.sub a subset of cell types that are selected to construct basis matrix
#' @param iter.max the maximum number of iteration in WNNLS
#' @param nu a small constant to facilitate the calculation of variance
#' @param epsilon a small constant number used for convergence criteria
#' @param arow annotation of rows for pheatmap
#' @param qcthreshold the probability threshold used to filter out questionable cells
#' @param generate.figure logical. If generate the heatmap by pheatmap or not. default is TRUE.
#' @param ct.cell.size default is NULL, which means the "library size" is calculated based on the data. Users can specify a vector of cell size factors corresponding to the ct.sub according to prior knowledge. The vector should be named: names(ct.cell.size input) should not be NULL.
#' @return a list including: 1) a probability matrix for each single cell input; 2) a clustering QCed ExpressionSet object; 3) a heatmap of QC result.
#' @export
SCDC_qc <- function (sc.eset, ct.varname, sample, scsetname = "Single Cell",
                     ct.sub, iter.max = 1000, nu = 1e-04, epsilon = 0.01, arow =NULL,
                     qcthreshold = 0.7, generate.figure = T, ct.cell.size = NULL,
                     cbPalette = c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7"),
                     ...) {
  # Clustering QC: deconvolve each single cell against the cell-type basis and
  # keep the cells that are confidently (> qcthreshold) assigned to exactly one
  # cell type. Returns the proportion matrix, the QCed ExpressionSet and a heatmap.
  sc.basis = SCDC_basis(x = sc.eset, ct.sub = ct.sub, ct.varname = ct.varname, sample = sample, ct.cell.size = ct.cell.size)
  M.S <- sc.basis$sum.mat[ct.sub]
  # restrict to the genes present in the basis matrix, on the CPM scale
  xsc <- getCPM0(exprs(sc.eset)[rownames(sc.basis$basis.mvw),])
  N.sc <- ncol(xsc)
  m.basis <- sc.basis$basis.mvw[, ct.sub]
  sigma <- sc.basis$sigma[, ct.sub]
  # a cell type is usable only if basis, variance and library size are complete
  valid.ct <- (colSums(is.na(sigma)) == 0) & (colSums(is.na(m.basis)) ==
                                                0) & (!is.na(M.S))
  if (sum(valid.ct) <= 1) {
    stop("Not enough valid cell type!")
  }
  message(paste("Used", sum(valid.ct), "cell types in deconvolution..."))
  m.basis <- m.basis[, valid.ct]
  M.S <- M.S[valid.ct]
  sigma <- sigma[, valid.ct]
  prop.qc <- NULL
  for (i in 1:N.sc) {
    message("Begin iterative weighted estimation...")
    basis.temp <- m.basis
    xsc.temp <- xsc[, i]
    sigma.temp <- sigma
    # initial NNLS fit; gene weights combine residual and basis-variance terms
    lm.qc <- nnls::nnls(A=basis.temp,b=xsc.temp)
    delta <- lm.qc$residuals
    wt.gene <- 1/(nu + delta^2 + colSums((lm.qc$x)^2*t(sigma.temp)))
    x.wt <- xsc.temp*sqrt(wt.gene)
    b.wt <- sweep(basis.temp,1,sqrt(wt.gene),"*")
    lm.wt <- nnls::nnls(A=b.wt, b=x.wt)
    prop.wt <- lm.wt$x/sum(lm.wt$x)
    delta <- lm.wt$residuals
    for (iter in 1:iter.max){
      wt.gene <- 1/(nu + delta^2 + colSums((lm.wt$x)^2*t(sigma.temp)))
      x.wt <- xsc.temp*sqrt(wt.gene)
      b.wt <- sweep(basis.temp,1,sqrt(wt.gene),"*")
      lm.wt <- nnls::nnls(A=b.wt, b=x.wt)
      delta.new <- lm.wt$residuals
      prop.wt.new <- lm.wt$x/sum(lm.wt$x)
      # BUGFIX: original tested sum(abs(prop.wt - prop.wt.new) < epsilon),
      # which sums a LOGICAL vector and therefore declares convergence as soon
      # as ANY single coordinate moved by less than epsilon. Use the L1 norm of
      # the change, matching the criterion of every sibling WNNLS loop.
      if (sum(abs(prop.wt - prop.wt.new)) < epsilon){
        prop.wt <- prop.wt.new
        delta <- delta.new
        message("Converged at iteration ", iter)
        break
      }
      prop.wt <- prop.wt.new
      delta <- delta.new
    }
    prop.qc <- rbind(prop.qc, prop.wt)
  }
  # name col and row
  colnames(prop.qc) <- colnames(m.basis)
  rownames(prop.qc) <- colnames(xsc)
  if (generate.figure){
    heat.anno <- pheatmap(prop.qc, annotation_row = arow,
                          annotation_names_row=FALSE, show_rownames = F,
                          annotation_names_col=FALSE, cutree_rows = length(ct.sub),
                          color = cbPalette[2:4],
                          cluster_rows = T, cluster_cols = F)
  } else {
    heat.anno <- NULL
  }
  # keep cells assigned to exactly one cell type with proportion > qcthreshold
  prop.qc.keep <- rowSums(prop.qc > qcthreshold) ==1
  sc.eset.qc <- sc.eset[,prop.qc.keep]
  return(list(prop.qc = prop.qc, sc.eset.qc = sc.eset.qc, heatfig = heat.anno))
}
#################################
#' Clustering QC for single cells from one subject
#' @description Clustering QC for single cells from one subject
#' @name SCDC_qc_ONE
#' @param sc.eset ExpressionSet object for single cells
#' @param ct.varname variable name for 'cell type'
#' @param sample variable name for subject/sample
#' @param scsetname the name for the single cell dataset
#' @param ct.sub a subset of cell types that are selected to construct basis matrix
#' @param iter.max the maximum number of iteration in WNNLS
#' @param nu a small constant to facilitate the calculation of variance
#' @param epsilon a small constant number used for convergence criteria
#' @param arow annotation of rows for pheatmap
#' @param qcthreshold the probability threshold used to filter out questionable cells
#' @param generate.figure logical. If generate the heatmap by pheatmap or not. default is TRUE.
#' @param ct.cell.size default is NULL, which means the "library size" is calculated based on the data. Users can specify a vector of cell size factors corresponding to the ct.sub according to prior knowledge. The vector should be named: names(ct.cell.size input) should not be NULL.
#' @return a list including: 1) a probability matrix for each single cell input; 2) a clustering QCed ExpressionSet object; 3) a heatmap of QC result.
#' @export
SCDC_qc_ONE <- function(sc.eset, ct.varname, sample, scsetname = "Single Cell",
                        ct.sub, iter.max = 1000, nu = 1e-04, epsilon = 0.01,
                        arow = NULL, weight.basis = F, qcthreshold = 0.7,
                        generate.figure = T, ct.cell.size = NULL,
                        cbPalette = c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7"),
                        ...){
  # Clustering QC for single cells from one subject: deconvolve every cell
  # against the single-subject basis, then retain cells assigned to exactly
  # one cell type with estimated proportion above qcthreshold.
  one.basis <- SCDC_basis_ONE(x = sc.eset, ct.sub = ct.sub, ct.varname = ct.varname, sample = sample, ct.cell.size = ct.cell.size)
  if (weight.basis){
    ref.mat <- one.basis$basis.mvw[, ct.sub]
  } else {
    ref.mat <- one.basis$basis[, ct.sub]
  }
  cpm.sc <- getCPM0(exprs(sc.eset))
  n.cells <- ncol(cpm.sc)
  lib.size <- one.basis$sum.mat[ct.sub]
  # usable cell types need a complete basis column and a library size
  keep.ct <- (colSums(is.na(ref.mat)) == 0) & (!is.na(lib.size))
  if (sum(keep.ct) <= 1) {
    stop("Not enough valid cell type!")
  }
  message(paste("Used", sum(keep.ct), "cell types in deconvolution..."))
  ref.mat <- ref.mat[, keep.ct]
  lib.size <- lib.size[keep.ct]
  prop.all <- NULL
  # proportion estimation for each single cell via iteratively reweighted NNLS
  for (cell.i in 1:n.cells) {
    x.i <- cpm.sc[, cell.i]*100 # why times 100 if not normalize???
    gene.use <- intersect(rownames(ref.mat), names(x.i))
    A.use <- ref.mat[gene.use,]
    b.use <- x.i[gene.use]
    message(paste(colnames(cpm.sc)[cell.i], "has common genes", sum(cpm.sc[, cell.i] != 0), "..."))
    # unweighted NNLS seeds the residual-based gene weights
    fit0 <- nnls::nnls(A=A.use,b=b.use)
    resid.cur <- fit0$residuals
    w.gene <- 1/(nu + resid.cur^2)
    fit.w <- nnls::nnls(A=sweep(A.use,1,sqrt(w.gene),"*"), b=b.use*sqrt(w.gene))
    p.cur <- fit.w$x/sum(fit.w$x)
    resid.cur <- fit.w$residuals
    for (it in 1:iter.max){
      w.gene <- 1/(nu + resid.cur^2)
      fit.w <- nnls::nnls(A=sweep(A.use,1,sqrt(w.gene),"*"), b=b.use*sqrt(w.gene))
      p.new <- fit.w$x/sum(fit.w$x)
      # stop once the L1 change in proportions drops below epsilon
      if (sum(abs(p.new - p.cur)) < epsilon){
        p.cur <- p.new
        resid.cur <- fit.w$residuals
        message("WNNLS Converged at iteration ", it)
        break
      }
      p.cur <- p.new
      resid.cur <- fit.w$residuals
    }
    prop.all <- rbind(prop.all, p.cur)
  }
  colnames(prop.all) <- colnames(ref.mat)
  rownames(prop.all) <- colnames(cpm.sc)
  # optional heatmap of the per-cell proportion estimates
  if (generate.figure){
    heat.anno <- pheatmap(prop.all, annotation_row = arow,
                          annotation_names_row=FALSE, show_rownames = F,
                          annotation_names_col=FALSE, cutree_rows = length(ct.sub),
                          color = cbPalette[2:4],
                          cluster_rows = T, cluster_cols = F) #, main = scsetname
  } else {
    heat.anno <- NULL
  }
  # a cell passes QC when exactly one proportion exceeds the threshold
  keep.cell <- rowSums(prop.all > qcthreshold) ==1
  sc.eset.qc <- sc.eset[,keep.cell]
  return(list(prop.qc = prop.all, sc.eset.qc = sc.eset.qc, heatfig = heat.anno))
}
######################################
#' Proportion estimation
#' @description Proportion estimation function for multi-subject case
#' @name SCDC_prop
#' @param bulk.eset ExpressionSet object for bulk samples
#' @param sc.eset ExpressionSet object for single cell samples
#' @param ct.varname variable name for 'cell types'
#' @param sample variable name for subject/samples
#' @param ct.sub a subset of cell types that are selected to construct basis matrix
#' @param iter.max the maximum number of iteration in WNNLS
#' @param nu a small constant to facilitate the calculation of variance
#' @param epsilon a small constant number used for convergence criteria
#' @param truep true cell-type proportions for bulk samples if known
#' @param ct.cell.size default is NULL, which means the "library size" is calculated based on the data. Users can specify a vector of cell size factors corresponding to the ct.sub according to prior knowledge. The vector should be named: names(ct.cell.size input) should not be NULL.
#' @param Transform_bisque The bulk sample transformation from bisqueRNA. Aiming to reduce the systematic difference between single cells and bulk samples.
#' @return Estimated proportion, basis matrix, predicted gene expression levels for bulk samples
#' @export
SCDC_prop <- function (bulk.eset, sc.eset, ct.varname, sample, ct.sub, iter.max = 1000,
                       nu = 1e-04, epsilon = 0.01, truep = NULL, weight.basis = T,
                       ct.cell.size = NULL, Transform_bisque = F, ...)
{
  # Multi-subject proportion estimation. Default path: iteratively reweighted
  # NNLS (WNNLS) of each bulk sample on the single-cell basis matrix.
  # Transform_bisque = TRUE instead follows the bisqueRNA bulk-transformation
  # route and solves a sum-to-one constrained NNLS per sample.
  bulk.eset <- bulk.eset[rowSums(exprs(bulk.eset)) > 0, , drop = FALSE]
  ct.sub <- intersect(ct.sub, unique(sc.eset@phenoData@data[,
                                                            ct.varname]))
  sc.basis <- SCDC_basis(x = sc.eset, ct.sub = ct.sub, ct.varname = ct.varname,
                         sample = sample, ct.cell.size = ct.cell.size)
  commongenes <- intersect(rownames(sc.basis$basis.mvw), rownames(bulk.eset))
  if (length(commongenes) < 0.2 * min(dim(sc.eset)[1], dim(bulk.eset)[1])) {
    stop("Too few common genes!")
  }
  message(paste("Used", length(commongenes), "common genes..."))
  if (weight.basis) {
    basis.mvw <- sc.basis$basis.mvw[commongenes, ct.sub]
  }
  else {
    basis.mvw <- sc.basis$basis[commongenes, ct.sub]
  }
  # link to bisqueRNA, bulk transformation method. https://github.com/cozygene/bisque
  if (Transform_bisque) {
    # mean reference profile per cell type (rows = genes, cols = cell types)
    GenerateSCReference <- function(eset, ct.col) {
      cell.labels <- base::factor(eset[[ct.col]])
      all.cell.types <- base::levels(cell.labels)
      aggr.fn <- function(ct) {
        base::rowMeans(Biobase::exprs(eset)[, cell.labels == ct, drop=F])
      }
      template <- base::numeric(base::nrow(eset))
      sc.ref <- base::vapply(all.cell.types, aggr.fn, template)
      return(sc.ref)
    }
    # BUGFIX: original called GenerateSCReference(sc.eset, cell.types)[genes, ]
    # where neither `cell.types` nor `genes` is defined anywhere in this
    # function (they would be silently resolved from the global environment).
    # Use the cell-type variable name and the common-gene set computed above,
    # which is also what the downstream X.pred indexing assumes.
    sc.ref <- GenerateSCReference(sc.eset, ct.varname)[commongenes, , drop = F]
    # observed per-subject cell-type proportions from the single-cell data
    ncount <- table(sc.eset@phenoData@data[, sample], sc.eset@phenoData@data[, ct.varname])
    true.prop <- ncount/rowSums(ncount, na.rm = T)
    sc.props <- round(true.prop[complete.cases(true.prop), ], 2)
    # pseudo-bulk training responses: reference profiles x subject proportions
    Y.train <- sc.ref %*% t(sc.props[, colnames(sc.ref)])
    X.pred <- exprs(bulk.eset)[commongenes, ]
    sample.names <- base::colnames(Biobase::exprs(bulk.eset))
    template <- base::numeric(base::length(sample.names))
    base::names(template) <- sample.names
    # rescale each bulk gene into the distribution observed in the pseudo-bulk
    SemisupervisedTransformBulk <- function(gene, Y.train, X.pred) {
      Y.train.scaled <- base::scale(Y.train[gene, , drop = T])
      Y.center <- base::attr(Y.train.scaled, "scaled:center")
      Y.scale <- base::attr(Y.train.scaled, "scaled:scale")
      n <- base::length(Y.train.scaled)
      # shrink the scale toward 1 to stabilize genes with tiny training variance
      shrink.scale <- base::sqrt(base::sum((Y.train[gene, , drop = T] - Y.center)^2)/n + 1)
      X.pred.scaled <- base::scale(X.pred[gene, , drop = T])
      Y.pred <- base::matrix((X.pred.scaled * shrink.scale) +
                               Y.center, dimnames = base::list(base::colnames(X.pred),
                                                               gene))
      return(Y.pred)
    }
    Y.pred <- base::matrix(base::vapply(X = commongenes,
                                        FUN = SemisupervisedTransformBulk, FUN.VALUE = template,
                                        Y.train, X.pred, USE.NAMES = TRUE), nrow = base::length(sample.names))
    # drop genes whose transformation produced any NA
    indices <- base::apply(Y.pred, MARGIN = 2, FUN = function(column) {
      base::anyNA(column)
    })
    if (base::any(indices)) {
      if (sum(!indices) == 0) {
        base::stop("Zero genes left for decomposition.")
      }
      Y.pred <- Y.pred[, !indices, drop = F]
      sc.ref <- sc.ref[!indices, , drop = F]
    }
    # per-sample NNLS with proportions constrained to sum to one
    results <- base::as.matrix(base::apply(Y.pred, 1, function(b) {
      sol <- lsei::pnnls(sc.ref, b, sum = 1)
      return(sol$x)
    }))
    prop.est.mvw <- t(results)
    colnames(prop.est.mvw) <- colnames(sc.ref)
    rownames(prop.est.mvw) <- colnames(bulk.eset)
    yhat <- sc.ref %*% results
    colnames(yhat) <- colnames(bulk.eset)
    yobs <- exprs(bulk.eset)
    yeval <- SCDC_yeval(y = yobs, yest = yhat, yest.names = c("SCDC"))
    peval <- NULL
    if (!is.null(truep)) {
      peval <- SCDC_peval(ptrue = truep, pest = prop.est.mvw,
                          pest.names = c("SCDC"), select.ct = ct.sub)
    }
  } else {
    xbulk <- getCPM0(exprs(bulk.eset)[commongenes, ])
    sigma <- sc.basis$sigma[commongenes, ct.sub]
    ALS.S <- sc.basis$sum.mat[ct.sub]
    N.bulk <- ncol(bulk.eset)
    # a cell type is usable only if basis, variance and library size are complete
    valid.ct <- (colSums(is.na(sigma)) == 0) & (colSums(is.na(basis.mvw)) ==
                                                  0) & (!is.na(ALS.S))
    if (sum(valid.ct) <= 1) {
      stop("Not enough valid cell type!")
    }
    message(paste("Used", sum(valid.ct), "cell types in deconvolution..."))
    basis.mvw <- basis.mvw[, valid.ct]
    ALS.S <- ALS.S[valid.ct]
    sigma <- sigma[, valid.ct]
    prop.est.mvw <- NULL
    yhat <- NULL
    yhatgene.temp <- rownames(basis.mvw)
    for (i in 1:N.bulk) {
      basis.mvw.temp <- basis.mvw
      xbulk.temp <- xbulk[, i]*100
      sigma.temp <- sigma
      message(paste(colnames(xbulk)[i], "has common genes",
                    sum(xbulk[, i] != 0), "..."))
      # initial NNLS; gene weights combine residual and basis-variance terms
      lm <- nnls::nnls(A = basis.mvw.temp, b = xbulk.temp)
      delta <- lm$residuals
      wt.gene <- 1/(nu + delta^2 + colSums((lm$x * ALS.S)^2 *
                                             t(sigma.temp)))
      x.wt <- xbulk.temp * sqrt(wt.gene)
      b.wt <- sweep(basis.mvw.temp, 1, sqrt(wt.gene), "*")
      lm.wt <- nnls::nnls(A = b.wt, b = x.wt)
      prop.wt <- lm.wt$x/sum(lm.wt$x)
      delta <- lm.wt$residuals
      for (iter in 1:iter.max) {
        wt.gene <- 1/(nu + delta^2 + colSums((lm.wt$x * ALS.S)^2 *
                                               t(sigma.temp)))
        x.wt <- xbulk.temp * sqrt(wt.gene)
        b.wt <- sweep(basis.mvw.temp, 1, sqrt(wt.gene), "*")
        lm.wt <- nnls::nnls(A = b.wt, b = x.wt)
        delta.new <- lm.wt$residuals
        prop.wt.new <- lm.wt$x/sum(lm.wt$x)
        # converged when the L1 change in proportions drops below epsilon
        if (sum(abs(prop.wt.new - prop.wt)) < epsilon) {
          prop.wt <- prop.wt.new
          delta <- delta.new
          # R2 computed as a diagnostic; not returned
          R2 <- 1 - var(xbulk.temp - basis.mvw.temp %*%
                          as.matrix(lm.wt$x))/var(xbulk.temp)
          message("WNNLS Converged at iteration ",
                  iter)
          break
        }
        prop.wt <- prop.wt.new
        delta <- delta.new
      }
      R2 <- 1 - var(xbulk.temp - basis.mvw.temp %*% as.matrix(lm.wt$x))/var(xbulk.temp)
      prop.est.mvw <- rbind(prop.est.mvw, prop.wt)
      # accumulate fitted expression, restricted to genes seen in all samples
      yhat.temp <- basis.mvw.temp %*% as.matrix(lm.wt$x)
      yhatgene.temp <- intersect(rownames(yhat.temp), yhatgene.temp)
      yhat <- cbind(yhat[yhatgene.temp, ], yhat.temp[yhatgene.temp,
                                                     ])
    }
    colnames(prop.est.mvw) <- colnames(basis.mvw)
    rownames(prop.est.mvw) <- colnames(xbulk)
    colnames(yhat) <- colnames(xbulk)
    yobs <- exprs(bulk.eset)
    yeval <- SCDC_yeval(y = yobs, yest = yhat, yest.names = c("SCDC"))
    peval <- NULL
    if (!is.null(truep)) {
      peval <- SCDC_peval(ptrue = truep, pest = prop.est.mvw,
                          pest.names = c("SCDC"), select.ct = ct.sub)
    }
  }
  return(list(prop.est.mvw = prop.est.mvw, basis.mvw = basis.mvw,
              yhat = yhat, yeval = yeval, peval = peval))
}
############################################
#' Proportion estimation function for one-subject case
#' @description Proportion estimation function for one-subject case
#' @name SCDC_prop_ONE
#' @param bulk.eset ExpressionSet object for bulk samples
#' @param sc.eset ExpressionSet object for single cell samples
#' @param ct.varname variable name for 'cell types'
#' @param sample variable name for subject/samples
#' @param ct.sub a subset of cell types that are selected to construct basis matrix
#' @param iter.max the maximum number of iteration in WNNLS
#' @param nu a small constant to facilitate the calculation of variance
#' @param epsilon a small constant number used for convergence criteria
#' @param truep true cell-type proportions for bulk samples if known
#' @param ct.cell.size default is NULL, which means the "library size" is calculated based on the data. Users can specify a vector of cell size factors corresponding to the ct.sub according to prior knowledge. The vector should be named: names(ct.cell.size input) should not be NULL.
#' @return Estimated proportion, basis matrix, predicted gene expression levels for bulk samples
#' @export
SCDC_prop_ONE <- function (bulk.eset, sc.eset, ct.varname, sample, truep = NULL,
                           ct.sub, iter.max = 2000, nu = 1e-10, epsilon = 0.01, weight.basis = T,
                           ct.cell.size = NULL,
                           ...) {
  # Proportion estimation when the single-cell reference comes from ONE
  # subject: iteratively reweighted NNLS (WNNLS) of each bulk sample on the
  # single-subject basis. No variance term is available (SCDC_basis_ONE
  # returns sigma = NULL), so gene weights use residuals only.
  # drop genes never observed in the bulk data
  bulk.eset <- bulk.eset[rowSums(exprs(bulk.eset)) > 0, , drop = FALSE]
  sc.basis <- SCDC_basis_ONE(x = sc.eset, ct.sub = ct.sub,
                             ct.varname = ct.varname, sample = sample, ct.cell.size = ct.cell.size)
  # choose the max-variance-weighted or the plain basis matrix
  if (weight.basis) {
    basis <- sc.basis$basis.mvw
  }
  else {
    basis <- sc.basis$basis
  }
  commongenes <- intersect(rownames(basis), rownames(bulk.eset))
  if (length(commongenes) < 0.2 * min(dim(sc.eset)[1], dim(bulk.eset)[1])) {
    stop("Too few common genes!")
  }
  message(paste("Used", length(commongenes), "common genes..."))
  basis.mvw <- basis[commongenes, ct.sub]
  # bulk expression on the CPM scale, restricted to the shared genes
  xbulk <- getCPM0(exprs(bulk.eset)[commongenes, ])
  ALS.S <- sc.basis$sum.mat[ct.sub]
  N.bulk <- ncol(bulk.eset)
  # a cell type is usable only if its basis column and library size are complete
  valid.ct <- (colSums(is.na(basis.mvw)) == 0) & (!is.na(ALS.S))
  if (sum(valid.ct) <= 1) {
    stop("Not enough valid cell type!")
  }
  message(paste("Used", sum(valid.ct), "cell types in deconvolution..."))
  basis.mvw <- basis.mvw[, valid.ct]
  ALS.S <- ALS.S[valid.ct]
  prop.est.mvw <- NULL
  yhat <- NULL
  yhatgene.temp <- rownames(basis.mvw)
  # WNNLS per bulk sample
  for (i in 1:N.bulk) {
    xbulk.temp <- xbulk[, i]
    message(paste(colnames(xbulk)[i], "has common genes",
                  sum(xbulk[, i] != 0), "..."))
    # unweighted NNLS seeds the residual-based gene weights
    lm <- nnls::nnls(A = basis.mvw, b = xbulk.temp)
    delta <- lm$residuals
    wt.gene <- 1/(nu + delta^2)
    x.wt <- xbulk.temp * sqrt(wt.gene)
    b.wt <- sweep(basis.mvw, 1, sqrt(wt.gene), "*")
    lm.wt <- nnls::nnls(A = b.wt, b = x.wt)
    prop.wt <- lm.wt$x/sum(lm.wt$x)
    delta <- lm.wt$residuals
    for (iter in 1:iter.max) {
      # refresh weights from the latest residuals and refit
      wt.gene <- 1/(nu + delta^2)
      x.wt <- xbulk.temp * sqrt(wt.gene)
      b.wt <- sweep(basis.mvw, 1, sqrt(wt.gene), "*")
      lm.wt <- nnls::nnls(A = b.wt, b = x.wt)
      delta.new <- lm.wt$residuals
      prop.wt.new <- lm.wt$x/sum(lm.wt$x)
      # converged when the L1 change in proportions drops below epsilon
      if (sum(abs(prop.wt.new - prop.wt)) < epsilon) {
        prop.wt <- prop.wt.new
        delta <- delta.new
        message("WNNLS Converged at iteration ",
                iter)
        break
      }
      prop.wt <- prop.wt.new
      delta <- delta.new
    }
    prop.est.mvw <- rbind(prop.est.mvw, prop.wt)
    # accumulate fitted bulk expression, restricted to the genes present for
    # every sample processed so far
    yhat.temp <- basis.mvw %*% as.matrix(lm.wt$x)
    yhatgene.temp <- intersect(rownames(yhat.temp), yhatgene.temp)
    yhat <- cbind(yhat[yhatgene.temp, ], yhat.temp[yhatgene.temp,
                                                   ])
  }
  colnames(prop.est.mvw) <- colnames(basis.mvw)
  rownames(prop.est.mvw) <- colnames(bulk.eset)
  colnames(yhat) <- colnames(bulk.eset)
  yobs <- exprs(bulk.eset)
  # evaluation of fitted expression; proportion evaluation only if truep given
  yeval <- SCDC_yeval(y = yobs, yest = yhat, yest.names = c("SCDC"))
  peval <- NULL
  if (!is.null(truep)) {
    # sample names must align before the proportion estimates are scored
    if (all(rownames(truep) == rownames(prop.est.mvw))){
      peval <- SCDC_peval(ptrue = truep, pest = prop.est.mvw,
                          pest.names = c("SCDC"), select.ct = ct.sub)
    } else {
      message("Your input sample names for proportion matrix and bulk.eset do not match! Please make sure sample names match.")
    }
  }
  return(list(prop.est.mvw = prop.est.mvw, basis.mvw = basis.mvw,
              yhat = yhat, yeval = yeval, peval = peval))
}
############################################
#' Tree-guided proportion estimation
#' @description Proportion estimation function for multi-subject case, and apply tree-guided deconvolution
#' @name SCDC_prop_subcl_marker
#' @param bulk.eset ExpressionSet object for bulk samples
#' @param sc.eset ExpressionSet object for single cell samples
#' @param ct.varname variable name for 'cell types'
#' @param fl.varname variable name for first-level 'meta-clusters'
#' @param sample variable name for subject/samples
#' @param ct.sub a subset of cell types that are selected to construct basis matrix
#' @param ct.fl.sub 'cell types' for first-level 'meta-clusters'
#' @param iter.max the maximum number of iteration in WNNLS
#' @param nu a small constant to facilitate the calculation of variance
#' @param epsilon a small constant number used for convergence criteria
#' @param weight.basis logical, use basis matrix adjusted by MVW, default is T.
#' @param select.marker logical, select marker genes to perform deconvolution in tree-guided steps. Default is T.
#' @param markers A set of marker genes manually supplied to be used in deconvolution. If NULL, marker genes are selected automatically by the built-in Wilcoxon-test procedure when select.marker is TRUE.
#' @param marker.varname variable name of cluster groups when selecting marker genes. If NULL, then use ct.varname.
#' @param allgenes.fl logical, use all genes in the first-level deconvolution
#' @param pseudocount.use a constant number used when selecting marker genes, default is 1.
#' @param LFC.lim a threshold of log fold change when selecting genes as input to perform Wilcoxon's test.
#' @param truep true cell-type proportions for bulk samples if known
#' @param ct.cell.size default is NULL, which means the "library size" is calculated based on the data. Users can specify a vector of cell size factors corresponding to the ct.sub according to prior knowledge. The vector should be named: names(ct.cell.size input) should not be NULL.
#' @param fl.cell.size default is NULL, similar to ct.cell.size. This is for first-level 'meta-clusters'.
#' @return Estimated proportion, basis matrix, predicted gene expression levels for bulk samples
#' @export
SCDC_prop_subcl_marker <- function (bulk.eset, sc.eset, ct.varname, fl.varname, sample,
                                    ct.sub = NULL, ct.fl.sub, iter.max = 3000, nu = 1e-04, epsilon = 0.001,
                                    weight.basis = T, truep = NULL, select.marker = T, markers = NULL,
                                    marker.varname = NULL, allgenes.fl = F, pseudocount.use = 1,
                                    LFC.lim = 0.5, ct.cell.size = NULL, fl.cell.size = NULL, ...)
{
  # Tree-guided deconvolution: first estimate proportions of first-level
  # meta-clusters by WNNLS, then distribute each meta-cluster's proportion
  # among its member sub-cell-types with a second-level WNNLS on marker genes.
  if (is.null(ct.sub)) {
    ct.sub <- unique(sc.eset@phenoData@data[, ct.varname])[!is.na(unique(sc.eset@phenoData@data[,
                                                                                                ct.varname]))]
  }
  ct.sub <- ct.sub[!is.na(ct.sub)]
  ct.fl.sub <- ct.fl.sub[!is.na(ct.fl.sub)]
  bulk.eset <- bulk.eset[rowSums(exprs(bulk.eset)) > 0, , drop = FALSE]
  sc.eset <- sc.eset[, sc.eset@phenoData@data[, ct.varname] %in%
                       ct.sub]
  # basis matrices at both levels of the tree
  sc.basis <- SCDC_basis(x = sc.eset, ct.sub = ct.sub, ct.varname = ct.varname,
                         sample = sample, ct.cell.size = ct.cell.size)
  sc.fl.basis <- SCDC_basis(x = sc.eset, ct.sub = ct.fl.sub[!is.na(ct.fl.sub)],
                            ct.varname = fl.varname, sample = sample, ct.cell.size = fl.cell.size)
  if (select.marker) {
    # marker selection by one-vs-rest Wilcoxon test (cf. Seurat::FindMarkers):
    # pre-filter genes by log fold change, then Bonferroni-adjusted p < 0.05
    if (is.null(marker.varname)) {
      marker.varname <- ct.varname
    }
    countmat <- exprs(sc.eset)
    ct.group <- sc.eset@phenoData@data[, marker.varname]
    markers.wilcox <- NULL
    for (u in 1:length(unique(ct.group))) {
      ct.group.temp <- ct.group == unique(ct.group)[u]
      # ROBUSTNESS: drop = FALSE keeps single-cell groups as one-column
      # matrices; without it apply() fails on a bare vector
      group.1 <- apply(X = countmat[, ct.group.temp, drop = FALSE], MARGIN = 1,
                       FUN = function(x) log(x = mean(x = expm1(x = x)) +
                                               pseudocount.use))
      group.2 <- apply(X = countmat[, !ct.group.temp, drop = FALSE],
                       MARGIN = 1, FUN = function(x) log(x = mean(x = expm1(x = x)) +
                                                           pseudocount.use))
      genes.diff <- rownames(sc.eset)[(group.1 - group.2) >
                                        LFC.lim]
      count.use <- countmat[rownames(sc.eset) %in% genes.diff,
                            , drop = FALSE]
      # seq_len() is safe when no gene passes the LFC filter (1:0 is not)
      p_val <- sapply(seq_len(nrow(count.use)), function(x) {
        wilcox.test(count.use[x, ] ~ ct.group.temp)$p.value
      })
      p_val_adj <- p.adjust(p = p_val, method = "bonferroni",
                            n = nrow(count.use))
      markers.temp <- rownames(count.use)[p_val_adj < 0.05]
      markers.wilcox <- c(markers.wilcox, markers.temp)
    }
    markers <- unique(markers.wilcox)
    message("Selected ", length(markers), " marker genes by Wilcoxon test...")
  }
  if (weight.basis) {
    basis <- sc.basis$basis.mvw
    basis.fl <- sc.fl.basis$basis.mvw
  }
  else {
    basis <- sc.basis$basis
    basis.fl <- sc.fl.basis$basis
  }
  # shared genes between the bases, the bulk data and (if any) the markers
  if (!is.null(markers)) {
    commongenes <- Reduce(intersect, list(rownames(basis),
                                          rownames(bulk.eset), markers))
    commongenes.fl <- Reduce(intersect, list(rownames(basis.fl),
                                             rownames(bulk.eset), markers))
  }
  else {
    commongenes <- intersect(rownames(basis), rownames(bulk.eset))
    commongenes.fl <- intersect(rownames(basis.fl), rownames(bulk.eset))
    if (length(commongenes) < 0.2 * min(dim(sc.eset)[1],
                                        dim(bulk.eset)[1])) {
      stop("Too few common genes!")
    }
  }
  message(paste("Used", length(commongenes), "common genes for all cell types, \n",
                "Used", length(commongenes.fl), "common genes for first level cell types..."))
  basis.mvw <- basis[commongenes, ct.sub]
  basis.mvw.fl <- basis.fl[commongenes.fl, ct.fl.sub]
  xbulk0 <- getCPM0(exprs(bulk.eset)[commongenes, ])
  xbulk <- as.matrix(xbulk0)
  colnames(xbulk) <- colnames(bulk.eset)
  xbulk1 <- getCPM0(exprs(bulk.eset)[commongenes.fl, ])
  xbulk.fl <- as.matrix(xbulk1)
  ALS.S <- sc.basis$sum.mat[ct.sub]
  N.bulk <- ncol(bulk.eset)
  valid.ct <- (colSums(is.na(basis.mvw)) == 0) & (!is.na(ALS.S))
  ALS.S.fl <- sc.fl.basis$sum.mat[ct.fl.sub]
  valid.ct.fl <- (colSums(is.na(basis.mvw.fl)) == 0) & (!is.na(ALS.S.fl))
  if (sum(valid.ct) <= 1) {
    stop("Not enough valid cell type!")
  }
  message(paste("Used", sum(valid.ct), "cell types in deconvolution...\n",
                "Used", sum(valid.ct.fl), "first level cell types ..."))
  basis.mvw <- basis.mvw[, valid.ct]
  ALS.S <- ALS.S[valid.ct]
  basis.mvw.fl <- basis.mvw.fl[, valid.ct.fl]
  # BUGFIX: original read ALS.S[valid.ct.fl], subsetting the full cell-type
  # library-size vector with the FIRST-LEVEL validity mask (wrong length and
  # order). Subset the first-level vector itself.
  ALS.S.fl <- ALS.S.fl[valid.ct.fl]
  prop.est <- NULL
  rsquared <- NULL
  for (i in 1:N.bulk) {
    xbulk.temp <- xbulk[, i]
    message(paste(colnames(xbulk)[i], "has common genes",
                  sum(xbulk[, i] != 0), "..."))
    # first-level deconvolution, on marker genes unless allgenes.fl = TRUE
    if (allgenes.fl) {
      markers.fl <- names(xbulk.temp)
    }
    else {
      markers.fl <- Reduce(intersect, list(markers, names(xbulk.temp)))
    }
    lm <- nnls::nnls(A = basis.mvw.fl[markers.fl, ], b = xbulk.temp[markers.fl])
    delta <- lm$residuals
    wt.gene <- 1/(nu + delta^2)
    x.wt <- xbulk.temp[markers.fl] * sqrt(wt.gene)
    b.wt <- sweep(basis.mvw.fl[markers.fl, ], 1, sqrt(wt.gene),
                  "*")
    lm.wt <- nnls::nnls(A = b.wt, b = x.wt)
    prop.wt.fl <- lm.wt$x/sum(lm.wt$x)
    delta <- lm.wt$residuals
    for (iter in 1:iter.max) {
      wt.gene <- 1/(nu + delta^2)
      x.wt <- xbulk.temp[markers.fl] * sqrt(wt.gene)
      b.wt <- sweep(basis.mvw.fl[markers.fl, ], 1, sqrt(wt.gene),
                    "*")
      lm.wt <- nnls::nnls(A = b.wt, b = x.wt)
      delta.new <- lm.wt$residuals
      prop.wt.fl.new <- lm.wt$x/sum(lm.wt$x)
      if (sum(abs(prop.wt.fl.new - prop.wt.fl)) < epsilon) {
        prop.wt.fl <- prop.wt.fl.new
        delta <- delta.new
        message("WNNLS for First level clusters Converged at iteration ",
                iter)
        break
      }
      prop.wt.fl <- prop.wt.fl.new
      delta <- delta.new
    }
    names(prop.wt.fl) <- colnames(basis.mvw.fl)
    # mapping of sub-cell-types (rows) to first-level clusters (columns)
    rt <- table(sc.eset@phenoData@data[, ct.varname], sc.eset@phenoData@data[,
                                                                             fl.varname])
    rt <- rt[, ct.fl.sub]
    rt.list <- list()
    prop.wt <- NULL
    for (j in 1:ncol(rt)) {
      rt.list[[j]] <- rownames(rt)[rt[, j] > 0]
      names(rt.list)[j] <- colnames(rt)[j]
      sub.cl <- rownames(rt)[rt[, j] > 0]
      if (length(sub.cl) > 1 & prop.wt.fl[colnames(rt)[j]] >
          0) {
        # pseudo-bulk attributed to cluster j: its fitted profile plus
        # its proportional share of the first-level residual
        if (is.null(dim(prop.wt.fl))) {
          xbulk.j <- basis.mvw.fl[, j] * prop.wt.fl[j] +
            (xbulk.temp - basis.mvw.fl %*% lm.wt$x) *
            prop.wt.fl[j]
        }
        else {
          xbulk.j <- basis.mvw.fl[, j] * prop.wt.fl[,
                                                    j] + (xbulk.temp - basis.mvw.fl %*% lm.wt$x) *
            prop.wt.fl[, j]
        }
        # second-level WNNLS restricted to cluster j's sub-cell-types
        markers.sl <- Reduce(intersect, list(markers,
                                             rownames(xbulk.j)))
        basis.sl <- basis.mvw[markers.sl, rownames(rt)[rt[,
                                                          j] > 0]]
        lm.sl <- nnls::nnls(A = basis.sl, b = xbulk.j[markers.sl,
                                                      ])
        delta.sl <- lm.sl$residuals
        wt.gene.sl <- 1/(nu + delta.sl^2)
        x.wt.sl <- xbulk.j[markers.sl, ] * sqrt(wt.gene.sl)
        b.wt.sl <- sweep(basis.sl, 1, sqrt(wt.gene.sl),
                         "*")
        lm.wt.sl <- nnls::nnls(A = b.wt.sl, b = x.wt.sl)
        prop.wt.sl <- lm.wt.sl$x/sum(lm.wt.sl$x)
        delta.sl <- lm.wt.sl$residuals
        for (iter in 1:iter.max) {
          wt.gene.sl <- 1/(nu + delta.sl^2)
          x.wt.sl <- xbulk.j[markers.sl, ] * sqrt(wt.gene.sl)
          b.wt.sl <- sweep(basis.sl, 1, sqrt(wt.gene.sl),
                           "*")
          lm.wt.sl <- nnls::nnls(A = b.wt.sl, b = x.wt.sl)
          delta.sl.new <- lm.wt.sl$residuals
          prop.wt.sl.new <- lm.wt.sl$x/sum(lm.wt.sl$x)
          if (sum(abs(prop.wt.sl.new - prop.wt.sl)) <
              epsilon) {
            prop.wt.sl <- prop.wt.sl.new
            delta.sl <- delta.sl.new
            cat("WNNLS for Second level clusters",
                rownames(rt)[rt[, j] > 0], "Converged at iteration ",
                iter)
            break
          }
          prop.wt.sl <- prop.wt.sl.new
          delta.sl <- delta.sl.new
        }
        names(prop.wt.sl) <- sub.cl
        # scale sub-proportions by the cluster's first-level proportion
        prop.wt <- c(prop.wt, prop.wt.sl * prop.wt.fl[colnames(rt)[j]])
      }
      else if (length(sub.cl) == 1) {
        # singleton cluster: pass the first-level proportion through
        prop.wt <- c(prop.wt, prop.wt.fl[colnames(rt)[j]])
      }
      else if (length(sub.cl) > 1 & prop.wt.fl[colnames(rt)[j]] ==
               0) {
        # cluster estimated absent: all of its sub-cell-types get zero
        prop.wt.sl <- rep(0, length(sub.cl))
        names(prop.wt.sl) <- sub.cl
        prop.wt <- c(prop.wt, prop.wt.sl)
      }
    }
    prop.est <- rbind(prop.est, prop.wt)
  }
  rownames(prop.est) <- colnames(bulk.eset)
  peval <- NULL
  if (!is.null(truep)) {
    peval <- SCDC_peval(ptrue = truep, pest = prop.est, pest.names = c("SCDC"),
                        select.ct = ct.sub)
  }
  # fitted bulk expression implied by the final proportion estimates
  yhat <- sc.basis$basis.mvw %*% t(prop.est)[colnames(sc.basis$basis.mvw),]
  return(list(prop.est = prop.est, prop.wt.fl = prop.wt.fl,
              basis.mvw = basis.mvw, peval = peval, sc.basis = sc.basis,
              sc.fl.basis = sc.fl.basis, yhat = yhat))
}
############################################
#' Tree-guided proportion estimation for ONE subject
#' @description Proportion estimation function for ONE-subject case, and apply tree-guided deconvolution
#' @name SCDC_prop_ONE_subcl_marker
#' @param bulk.eset ExpressionSet object for bulk samples
#' @param sc.eset ExpressionSet object for single cell samples
#' @param ct.varname variable name for 'cell types'
#' @param fl.varname variable name for first-level 'meta-clusters'
#' @param sample variable name for subject/samples
#' @param ct.sub a subset of cell types that are selected to construct basis matrix
#' @param ct.fl.sub 'cell types' for first-level 'meta-clusters'
#' @param iter.max the maximum number of iteration in WNNLS
#' @param nu a small constant to facilitate the calculation of variance
#' @param epsilon a small constant number used for convergence criteria
#' @param weight.basis logical, use basis matrix adjusted by MVW, default is T.
#' @param select.marker logical, select marker genes to perform deconvolution in tree-guided steps. Default is T.
#' @param markers A set of marker genes manually supplied to be used in deconvolution. If NULL, marker genes are selected automatically by the built-in Wilcoxon-test procedure when select.marker is TRUE.
#' @param marker.varname variable name of cluster groups when selecting marker genes. If NULL, then use ct.varname.
#' @param allgenes.fl logical, use all genes in the first-level deconvolution
#' @param pseudocount.use a constant number used when selecting marker genes, default is 1.
#' @param LFC.lim a threshold of log fold change when selecting genes as input to perform Wilcoxon's test.
#' @param truep true cell-type proportions for bulk samples if known
#' @param ct.cell.size default is NULL, which means the "library size" is calculated based on the data. Users can specify a vector of cell size factors corresponding to the ct.sub according to prior knowledge. The vector should be named: names(ct.cell.size input) should not be NULL.
#' @param fl.cell.size default is NULL, similar to ct.cell.size. This is for first-level 'meta-clusters'.
#' @return Estimated proportion, basis matrix, predicted gene expression levels for bulk samples
#' @export
SCDC_prop_ONE_subcl_marker <- function(bulk.eset, sc.eset, ct.varname, fl.varname, sample, truep = NULL,
ct.sub = NULL, ct.fl.sub, iter.max = 3000, nu = 1e-04, epsilon = 0.001,
weight.basis = F, bulk_disease = NULL, select.marker = T, markers = NULL, marker.varname = NULL,
pseudocount.use = 1, LFC.lim = 0.5, allgenes.fl = F, ct.cell.size = NULL, fl.cell.size = NULL,
...)
{
if (is.null(ct.sub)){
ct.sub <- unique(sc.eset@phenoData@data[,ct.varname])[!is.na(unique(sc.eset@phenoData@data[,ct.varname]))]
}
ct.sub <- ct.sub[!is.na(ct.sub)]
ct.fl.sub <- ct.fl.sub[!is.na(ct.fl.sub)]
bulk.eset <- bulk.eset[rowSums(exprs(bulk.eset))>0, , drop = FALSE]
sc.basis <- SCDC_basis_ONE(x = sc.eset, ct.sub = ct.sub, ct.varname = ct.varname, sample = sample, ct.cell.size = ct.cell.size)
sc.fl.basis <- SCDC_basis_ONE(x = sc.eset, ct.sub = ct.fl.sub[!is.na(ct.fl.sub)],
ct.varname = fl.varname, sample = sample, ct.cell.size = fl.cell.size)
if (select.marker){
if (is.null(marker.varname)){
marker.varname <- ct.varname
}
# wilcox test on two groups of cells for marker gene selection... (refer to seurat::FindMarkers)
countmat <- exprs(sc.eset)
ct.group <- sc.eset@phenoData@data[,marker.varname]
markers.wilcox <- NULL
# u=1
for(u in 1:length(unique(ct.group))){
ct.group.temp <- ct.group == unique(ct.group)[u]
group.1 <- apply(X = countmat[,ct.group.temp],
MARGIN = 1, FUN = function(x) log(x = mean(x = expm1(x = x)) +
pseudocount.use))
group.2 <- apply(X = countmat[,! ct.group.temp],
MARGIN = 1, FUN = function(x) log(x = mean(x = expm1(x = x)) +
pseudocount.use))
genes.diff <- rownames(sc.eset)[(group.1 - group.2) > LFC.lim]
count.use <- countmat[rownames(sc.eset) %in% genes.diff,]
##
p_val <- sapply(1:nrow(count.use), function(x){
wilcox.test(count.use[x,] ~ ct.group.temp)$p.value
})
p_val_adj <- p.adjust(p = p_val, method = "bonferroni",
n = nrow(count.use))
markers.temp <- rownames(count.use)[p_val_adj < 0.05]
markers.wilcox <- c(markers.wilcox, markers.temp)
}
markers <- unique(markers.wilcox)
message("Selected ",length(markers), " marker genes by Wilcoxon test...")
} # else need input of marker genes for clustering
# match genes / cells first
if (weight.basis){
basis <- sc.basis$basis.mvw
basis.fl <- sc.fl.basis$basis.mvw
} else {
basis <- sc.basis$basis
basis.fl <- sc.fl.basis$basis
}
if (!is.null(markers)){
commongenes <- Reduce(intersect, list(rownames(basis), rownames(bulk.eset), markers))
commongenes.fl <- Reduce(intersect, list(rownames(basis.fl), rownames(bulk.eset), markers))
} else {
commongenes <- intersect(rownames(basis), rownames(bulk.eset))
commongenes.fl <- intersect(rownames(basis.fl), rownames(bulk.eset))
# stop when few common genes exist...
if (length(commongenes) < 0.2 * min(dim(sc.eset)[1], dim(bulk.eset)[1])){
stop('Too few common genes!')
}
}
message(paste("Used", length(commongenes), "common genes for all cell types, \n",
"Used", length(commongenes.fl), "common genes for first level cell types..."))
basis.mvw <- basis[commongenes, ct.sub]
basis.mvw.fl <- basis.fl[commongenes.fl, ct.fl.sub]
xbulk0 <- getCPM0(exprs(bulk.eset)[commongenes,])
xbulk <- as.matrix(xbulk0) ## whether to normalize all /common genes
colnames(xbulk) <- colnames(bulk.eset)
xbulk1 <- getCPM0(exprs(bulk.eset)[commongenes.fl,])
xbulk.fl <- as.matrix(xbulk1)
ALS.S <- sc.basis$sum.mat[ct.sub]
N.bulk <- ncol(bulk.eset)
valid.ct <- (colSums(is.na(basis.mvw)) == 0) & (!is.na(ALS.S))
ALS.S.fl <- sc.fl.basis$sum.mat[ct.fl.sub]
valid.ct.fl <- (colSums(is.na(basis.mvw.fl)) == 0) & (!is.na(ALS.S.fl))
if (sum(valid.ct) <= 1) {
stop("Not enough valid cell type!")
}
message(paste("Used", sum(valid.ct), "cell types in deconvolution...\n",
"Used", sum(valid.ct.fl),"first level cell types ..."))
basis.mvw <- basis.mvw[, valid.ct]
ALS.S <- ALS.S[valid.ct]
basis.mvw.fl <- basis.mvw.fl[, valid.ct.fl]
ALS.S.fl <- ALS.S[valid.ct.fl]
prop.est <- NULL
rsquared <- NULL
# prop estimation for each bulk sample:
for (i in 1:N.bulk) {
# i=1
xbulk.temp <- xbulk[, i] *1e3 ## will affect a little bit
message(paste(colnames(xbulk)[i], "has common genes", sum(xbulk[, i] != 0), "..."))
if (allgenes.fl){
markers.fl <- names(xbulk.temp)
} else {
markers.fl <- Reduce(intersect, list(markers, names(xbulk.temp)))
}
# first level NNLS:
lm <- nnls::nnls(A=basis.mvw.fl[markers.fl,],b=xbulk.temp[markers.fl])
delta <- lm$residuals
wt.gene <- 1/(nu + delta^2)
x.wt <- xbulk.temp[markers.fl] *sqrt(wt.gene)
b.wt <- sweep(basis.mvw.fl[markers.fl,],1,sqrt(wt.gene),"*")
lm.wt <- nnls::nnls(A=b.wt, b=x.wt)
prop.wt.fl <- lm.wt$x/sum(lm.wt$x)
delta <- lm.wt$residuals
for (iter in 1:iter.max){
wt.gene <- 1/(nu + delta^2)
x.wt <- xbulk.temp[markers.fl] * sqrt(wt.gene)
b.wt <- sweep(basis.mvw.fl[markers.fl,],1,sqrt(wt.gene),"*")
lm.wt <- nnls::nnls(A=b.wt, b=x.wt)
delta.new <- lm.wt$residuals
prop.wt.fl.new <- lm.wt$x/sum(lm.wt$x)
if (sum(abs(prop.wt.fl.new - prop.wt.fl)) < epsilon){
prop.wt.fl <- prop.wt.fl.new
delta <- delta.new
message("WNNLS for First level clusters Converged at iteration ", iter)
break
}
prop.wt.fl <- prop.wt.fl.new
delta <- delta.new
}
names(prop.wt.fl) <- colnames(basis.mvw.fl)
# relationship between first level and overall
rt <- table(sc.eset@phenoData@data[,ct.varname], sc.eset@phenoData@data[,fl.varname])
rt <- rt[,ct.fl.sub]
rt.list <- list()
prop.wt <- NULL
# prop.wt
for (j in 1:ncol(rt)){ # for each first level cluster
# j=1
rt.list[[j]] <- rownames(rt)[rt[,j] >0]
names(rt.list)[j] <- colnames(rt)[j]
sub.cl <- rownames(rt)[rt[,j] >0]
if (length(sub.cl) > 1 & prop.wt.fl[colnames(rt)[j]] > 0) {
if (is.null(dim(prop.wt.fl))){
# specify genes in xbulk.j??? first level genes?
xbulk.j <- basis.mvw.fl[,j]*prop.wt.fl[j] + (xbulk.temp - basis.mvw.fl %*% lm.wt$x)*prop.wt.fl[j]
} else {
xbulk.j <- basis.mvw.fl[,j]*prop.wt.fl[,j] + (xbulk.temp - basis.mvw.fl %*% lm.wt$x)*prop.wt.fl[,j]
}
markers.sl <- Reduce(intersect, list(markers, rownames(xbulk.j)))
##############################################################################
# make markers.sub as a list, for each of the first-level intra clusters.
##############################################################################
basis.sl <- basis.mvw[markers.sl,rownames(rt)[rt[,j] >0]]
lm.sl <- nnls::nnls(A=basis.sl,b=xbulk.j[markers.sl,])
delta.sl <- lm.sl$residuals
wt.gene.sl <- 1/(nu + delta.sl^2)
x.wt.sl <- xbulk.j[markers.sl,]*sqrt(wt.gene.sl)
b.wt.sl <- sweep(basis.sl,1,sqrt(wt.gene.sl),"*")
lm.wt.sl <- nnls::nnls(A=b.wt.sl, b=x.wt.sl)
prop.wt.sl <- lm.wt.sl$x/sum(lm.wt.sl$x)
delta.sl <- lm.wt.sl$residuals
for (iter in 1:iter.max){
wt.gene.sl <- 1/(nu + delta.sl^2)
x.wt.sl <- xbulk.j[markers.sl,] * sqrt(wt.gene.sl)
b.wt.sl <- sweep(basis.sl,1,sqrt(wt.gene.sl),"*")
lm.wt.sl <- nnls::nnls(A=b.wt.sl, b=x.wt.sl)
delta.sl.new <- lm.wt.sl$residuals
prop.wt.sl.new <- lm.wt.sl$x/sum(lm.wt.sl$x)
if (sum(abs(prop.wt.sl.new - prop.wt.sl)) < epsilon){
prop.wt.sl <- prop.wt.sl.new
delta.sl <- delta.sl.new
cat("WNNLS for Second level clusters",rownames(rt)[rt[,j] >0],"Converged at iteration ", iter)
break
}
prop.wt.sl <- prop.wt.sl.new
delta.sl <- delta.sl.new
}
names(prop.wt.sl) <- sub.cl
prop.wt <- c(prop.wt, prop.wt.sl*prop.wt.fl[colnames(rt)[j]])
} else if (length(sub.cl) == 1){
# j=2
prop.wt <- c(prop.wt, prop.wt.fl[colnames(rt)[j]])
} else if (length(sub.cl) > 1 & prop.wt.fl[colnames(rt)[j]] == 0){
prop.wt.sl <- rep(0, length(sub.cl))
names(prop.wt.sl) <- sub.cl
prop.wt <- c(prop.wt, prop.wt.sl)
}
}
prop.est <- rbind(prop.est, prop.wt)
}
rownames(prop.est) <- colnames(bulk.eset)
peval <- NULL
if (!is.null(truep)){
peval <- SCDC_eval(ptrue= truep, pest = prop.est, pest.names = c('SCDC'),
dtname = 'Perou', select.ct = ct.sub, bulk_obj = bulk.eset,
bulk_disease = bulk_disease)
}
return(list(prop.est = prop.est, prop.wt.fl = prop.wt.fl, basis.mvw = basis.mvw, peval = peval,
sc.basis = sc.basis, sc.fl.basis = sc.fl.basis))
}
#######################################
####### DECONVOLUTION FUNCTIONS #######
#######################################
############################################
#' Basis Matrix
#' @description Basis matrix construction from multi-subject single-cell data.
#' For each cell type, expression profiles are averaged across subjects and
#' scaled by a cell-type "library size" factor; a second basis adjusted by
#' maximal variance weighting (MVW) is also produced.
#' @name SCDC_basis
#' @param x ExpressionSet object for single cells
#' @param ct.sub a subset of cell types that are selected to construct basis matrix
#' @param ct.varname variable name for 'cell types'
#' @param sample variable name for subject/samples
#' @param ct.cell.size default is NULL, which means the "library size" is calculated based on the data. Users can specify a vector of cell size factors corresponding to the ct.sub according to prior knowledge. The vector should be named: names(ct.cell.size input) should not be NULL.
#' @return a list of basis matrix, sum of cell-type-specific library size, sample variance matrix, basis matrix by mvw, mvw matrix.
#' @export
SCDC_basis <- function(x, ct.sub = NULL, ct.varname, sample, ct.cell.size = NULL){
  # select only the subset of cell types of interest (default: all annotated types)
  if (is.null(ct.sub)){
    ct.sub <- unique(x@phenoData@data[,ct.varname])
  }
  ct.sub <- ct.sub[!is.na(ct.sub)]
  x.sub <- x[,x@phenoData@data[,ct.varname] %in% ct.sub]
  # qc: remove genes with zero counts across all selected cells
  x.sub <- x.sub[rowSums(exprs(x.sub)) > 0,]
  # relative expression profile per (cell type, subject): genes x "ct%sample"
  countmat <- exprs(x.sub)
  ct.id <- droplevels(as.factor(x.sub@phenoData@data[,ct.varname]))
  sample.id <- as.character(x.sub@phenoData@data[,sample])
  ct_sample.id <- paste(ct.id,sample.id, sep = '%')
  mean.mat <- sapply(unique(ct_sample.id), function(id){
    y = as.matrix(countmat[, ct_sample.id %in% id])
    apply(y,1,sum, na.rm = TRUE)/sum(y)
  })
  mean.id <- do.call('rbind',strsplit(unique(ct_sample.id), split = '%'))
  # across-subject variance of the profiles, per cell type
  sigma <- sapply(unique(mean.id[,1]), function(id){
    y = mean.mat[,mean.id[,1] %in% id]
    apply(y,1,var, na.rm = TRUE)
  })
  # average total counts per cell, by cell type (rows) and subject (cols)
  sum.mat2 <- sapply(unique(sample.id), function(sid){
    sapply(unique(ct.id), function(id){
      y = as.matrix(countmat[, ct.id %in% id & sample.id %in% sid])
      sum(y)/ncol(y)
    })
  })
  rownames(sum.mat2) <- unique(ct.id)
  colnames(sum.mat2) <- unique(sample.id)
  # library size factor: estimated from the data unless supplied by the user
  if (is.null(ct.cell.size)){
    sum.mat <- rowMeans(sum.mat2, na.rm = T)
  } else {
    if (is.null(names(ct.cell.size))){
      # BUG FIX: the original used `break` here, which is invalid outside a
      # loop and raises "no loop for break/next"; abort with the message instead.
      stop("Cell size factor vector requires cell type names...")
    } else {
      sum.mat <- ct.cell.size
    }
  }
  # unweighted basis: size-scaled profiles averaged across subjects
  basis <- sapply(unique(mean.id[,1]), function(id){
    z <- sum.mat[mean.id[,1]]
    mean.mat.z <- t(t(mean.mat)*z)
    y = as.matrix(mean.mat.z[,mean.id[,1] %in% id])
    apply(y,1,mean, na.rm = TRUE)
  })
  # per-gene maximal variance across cell types, scaled by its median
  my.max <- function(x,...){
    y <- apply(x,1,max, na.rm = TRUE)
    y / median(y, na.rm = T)
  }
  # match donor, cell type and genes when computing the variance weights
  var.adj <- sapply(unique(sample.id), function(sid) {
    my.max(sapply(unique(ct.id), function(id) {
      y = countmat[, ct.id %in% id & sample.id %in% sid,
                   drop = FALSE]
      apply(y,1,var, na.rm=T)
    }), na.rm = T)
  })
  colnames(var.adj) <- unique(sample.id)
  # winsorize weights at the 15th/85th percentiles to tame extremes
  q15 <- apply(var.adj,2,quantile, probs = 0.15, na.rm =T)
  q85 <- apply(var.adj,2,quantile, probs = 0.85, na.rm =T)
  var.adj.q <- t(apply(var.adj, 1,
                       function(y){y[y<q15] <- q15[y<q15]
                       y[y>q85] <- q85[y>q85]
                       return(y)})) + 1e-4
  message("Creating Basis Matrix adjusted for maximal variance weight")
  # variance-weighted profiles, then the MVW basis averaged across subjects
  mean.mat.mvw <- sapply(unique(ct_sample.id), function(id){
    sid = unlist(strsplit(id,'%'))[2]
    y = as.matrix(countmat[, ct_sample.id %in% id])
    yy = sweep(y, 1, sqrt(var.adj.q[,sid]), '/')
    apply(yy,1,sum, na.rm = TRUE)/sum(yy)
  })
  basis.mvw <- sapply(unique(mean.id[,1]), function(id){
    z <- sum.mat[mean.id[,1]]
    mean.mat.z <- t(t(mean.mat.mvw)*z)
    y = as.matrix(mean.mat.z[,mean.id[,1] %in% id])
    apply(y,1,mean, na.rm = TRUE)
  })
  # reorder columns to follow ct.sub
  basis.mvw <- basis.mvw[,ct.sub]
  sigma <- sigma[, ct.sub]
  basis <- basis[, ct.sub]
  sum.mat <- sum.mat[ct.sub]
  return(list(basis = basis, sum.mat = sum.mat,
              sigma = sigma, basis.mvw = basis.mvw, var.adj.q = var.adj.q))
}
#############################################
#' Basis matrix for single cells from one subject
#' @description Basis matrix construction for single cells from one subject.
#' Same scheme as SCDC_basis, but no across-subject variance (sigma) can be
#' computed, and the MVW adjustment uses a single per-gene weight vector.
#' @name SCDC_basis_ONE
#' @param x ExpressionSet object for single cells
#' @param ct.sub a subset of cell types that are selected to construct basis matrix
#' @param ct.varname variable name for 'cell types'
#' @param sample variable name for subject/samples
#' @param ct.cell.size default is NULL, which means the "library size" is calculated based on the data. Users can specify a vector of cell size factors corresponding to the ct.sub according to prior knowledge. The vector should be named: names(ct.cell.size input) should not be NULL.
#' @return a list of basis matrix, sum of cell-type-specific library size, sample variance matrix (NULL here), basis matrix by mvw, mvw matrix.
#' @export
SCDC_basis_ONE <- function(x , ct.sub = NULL, ct.varname, sample, ct.cell.size = NULL){
  # select only the subset of cell types of interest (default: all non-NA types)
  if (is.null(ct.sub)){
    ct.sub <- unique(x@phenoData@data[,ct.varname])[!is.na(unique(x@phenoData@data[,ct.varname]))]
  }
  ct.sub <- ct.sub[!is.na(ct.sub)]
  x.sub <- x[,x@phenoData@data[,ct.varname] %in% ct.sub]
  # qc: remove genes with zero counts across all selected cells
  x.sub <- x.sub[rowSums(exprs(x.sub)) > 0,]
  # relative expression profile per (cell type, subject): genes x "ct%sample"
  countmat <- exprs(x.sub)
  ct.id <- x.sub@phenoData@data[,ct.varname]
  sample.id <- x.sub@phenoData@data[,sample]
  ct_sample.id <- paste(ct.id,sample.id, sep = '%')
  mean.mat <- sapply(unique(ct_sample.id), function(id){
    y = as.matrix(countmat[, ct_sample.id %in% id])
    apply(y,1,sum, na.rm = TRUE)/sum(y)
  })
  mean.id <- do.call('rbind',strsplit(unique(ct_sample.id), split = '%'))
  # average total counts per cell, by cell type and subject
  sum.mat2 <- sapply(unique(sample.id), function(sid){
    sapply(unique(ct.id), function(id){
      y = as.matrix(countmat[, ct.id %in% id & sample.id %in% sid])
      sum(y)/ncol(y)
    })
  })
  rownames(sum.mat2) <- unique(ct.id)
  colnames(sum.mat2) <- unique(sample.id)
  # library size factor: estimated from the data unless supplied by the user
  if (is.null(ct.cell.size)){
    sum.mat <- rowMeans(sum.mat2, na.rm = T)
  } else {
    if (is.null(names(ct.cell.size))){
      # BUG FIX: the original used `break` here, which is invalid outside a
      # loop and raises "no loop for break/next"; abort with the message instead.
      stop("Cell size factor vector requires cell type names...")
    } else {
      sum.mat <- ct.cell.size
    }
  }
  # unweighted basis: size-scaled profiles averaged per cell type
  basis <- sapply(unique(mean.id[,1]), function(id){
    z <- sum.mat[mean.id[,1]]
    mean.mat.z <- t(t(mean.mat)*z)
    y = as.matrix(mean.mat.z[,mean.id[,1] %in% id])
    apply(y,1,mean, na.rm = TRUE)
  })
  # per-gene maximal variance across cell types, scaled by its median
  my.max <- function(x,...){
    y <- apply(x,1,max, na.rm = TRUE)
    y / median(y, na.rm = T)
  }
  var.adj <- sapply(unique(sample.id), function(sid) {
    my.max(sapply(unique(ct.id), function(id) {
      y = countmat[, ct.id %in% id & sample.id %in% sid,
                   drop = FALSE]
      apply(y,1,var, na.rm=T)
    }), na.rm = T)
  })
  colnames(var.adj) <- unique(sample.id)
  # winsorize weights at the 15th/85th percentiles to tame extremes
  q15 <- apply(var.adj,2,quantile, probs = 0.15, na.rm =T)
  q85 <- apply(var.adj,2,quantile, probs = 0.85, na.rm =T)
  var.adj.q <- as.matrix(apply(var.adj, 1,
                               function(y){y[y<q15] <- q15[y<q15]
                               y[y>q85] <- q85[y>q85]
                               return(y)}) + 1e-4)
  message("Creating Basis Matrix adjusted for maximal variance weight")
  # variance-weighted profiles; one subject, so a single weight vector is used
  mean.mat.mvw <- sapply(unique(ct_sample.id), function(id){
    y = as.matrix(countmat[, ct_sample.id %in% id])
    yy = sweep(y, 1, sqrt(var.adj.q), '/')
    apply(yy,1,sum, na.rm = TRUE)/sum(yy)
  })
  basis.mvw <- sapply(unique(mean.id[,1]), function(id){
    z <- sum.mat[mean.id[,1]]
    mean.mat.z <- t(t(mean.mat.mvw)*z)
    y = as.matrix(mean.mat.z[,mean.id[,1] %in% id])
    apply(y,1,mean, na.rm = TRUE)
  })
  # reorder columns to follow ct.sub
  basis.mvw <- basis.mvw[,ct.sub]
  sigma <- NULL # in the one subject case, no across-subject variance is calculated
  basis <- basis[, ct.sub]
  sum.mat <- sum.mat[ct.sub]
  return(list(basis = basis, sum.mat = sum.mat,
              sigma = sigma, basis.mvw = basis.mvw, var.adj.q = var.adj.q))
}
#################################
#' Clustering QC
#' @description Single cells Clustering QC. Each cell is deconvolved against
#' the cell-type basis matrix by iteratively reweighted NNLS; cells whose
#' maximum cell-type probability falls below qcthreshold are filtered out.
#' @name SCDC_qc
#' @import pheatmap
#' @param sc.eset ExpressionSet object for single cells
#' @param ct.varname variable name for 'cell type'
#' @param sample variable name for subject/sample
#' @param scsetname the name for the single cell dataset
#' @param ct.sub a subset of cell types that are selected to construct basis matrix
#' @param iter.max the maximum number of iteration in WNNLS
#' @param nu a small constant to facilitate the calculation of variance
#' @param epsilon a small constant number used for convergence criteria
#' @param arow annotation of rows for pheatmap
#' @param qcthreshold the probability threshold used to filter out questionable cells
#' @param generate.figure logical. If generate the heatmap by pheatmap or not. default is TRUE.
#' @param ct.cell.size default is NULL, which means the "library size" is calculated based on the data. Users can specify a vector of cell size factors corresponding to the ct.sub according to prior knowledge. The vector should be named: names(ct.cell.size input) should not be NULL.
#' @param cbPalette color palette used for the pheatmap QC figure
#' @return a list including: 1) a probability matrix for each single cell input; 2) a clustering QCed ExpressionSet object; 3) a heatmap of QC result.
#' @export
SCDC_qc <- function (sc.eset, ct.varname, sample, scsetname = "Single Cell",
                     ct.sub, iter.max = 1000, nu = 1e-04, epsilon = 0.01, arow =NULL,
                     qcthreshold = 0.7, generate.figure = T, ct.cell.size = NULL,
                     cbPalette = c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7"),
                     ...) {
  sc.basis = SCDC_basis(x = sc.eset, ct.sub = ct.sub, ct.varname = ct.varname, sample = sample, ct.cell.size = ct.cell.size)
  M.S <- sc.basis$sum.mat[ct.sub]
  xsc <- getCPM0(exprs(sc.eset)[rownames(sc.basis$basis.mvw),])
  N.sc <- ncol(xsc)
  m.basis <- sc.basis$basis.mvw[, ct.sub]
  sigma <- sc.basis$sigma[, ct.sub]
  # keep cell types with complete basis, variance and library-size information
  valid.ct <- (colSums(is.na(sigma)) == 0) & (colSums(is.na(m.basis)) ==
                                                0) & (!is.na(M.S))
  if (sum(valid.ct) <= 1) {
    stop("Not enough valid cell type!")
  }
  message(paste("Used", sum(valid.ct), "cell types in deconvolution..."))
  m.basis <- m.basis[, valid.ct]
  M.S <- M.S[valid.ct]
  sigma <- sigma[, valid.ct]
  prop.qc <- NULL
  # deconvolve each single cell against the basis by WNNLS
  for (i in 1:N.sc) {
    message("Begin iterative weighted estimation...")
    basis.temp <- m.basis
    xsc.temp <- xsc[, i]
    sigma.temp <- sigma
    # weighting scheme: per-gene weight 1/(nu + residual^2 + prop-weighted variance)
    lm.qc <- nnls::nnls(A=basis.temp,b=xsc.temp)
    delta <- lm.qc$residuals
    wt.gene <- 1/(nu + delta^2 + colSums((lm.qc$x)^2*t(sigma.temp)))
    x.wt <- xsc.temp*sqrt(wt.gene)
    b.wt <- sweep(basis.temp,1,sqrt(wt.gene),"*")
    lm.wt <- nnls::nnls(A=b.wt, b=x.wt)
    prop.wt <- lm.wt$x/sum(lm.wt$x)
    delta <- lm.wt$residuals
    for (iter in 1:iter.max){
      wt.gene <- 1/(nu + delta^2 + colSums((lm.wt$x)^2*t(sigma.temp)))
      x.wt <- xsc.temp*sqrt(wt.gene)
      b.wt <- sweep(basis.temp,1,sqrt(wt.gene),"*")
      lm.wt <- nnls::nnls(A=b.wt, b=x.wt)
      delta.new <- lm.wt$residuals
      prop.wt.new <- lm.wt$x/sum(lm.wt$x)
      # BUG FIX: the original tested sum(abs(diff) < epsilon) -- the COUNT of
      # small entries, which is almost always nonzero/truthy and declared
      # "convergence" at iteration 1. Compare the summed absolute change
      # against epsilon instead (as done in SCDC_qc_ONE).
      if (sum(abs(prop.wt - prop.wt.new)) < epsilon){
        prop.wt <- prop.wt.new
        delta <- delta.new
        message("Converged at iteration ", iter)
        break
      }
      prop.wt <- prop.wt.new
      delta <- delta.new
    }
    prop.qc <- rbind(prop.qc, prop.wt)
  }
  # name col and row
  colnames(prop.qc) <- colnames(m.basis)
  rownames(prop.qc) <- colnames(xsc)
  if (generate.figure){
    heat.anno <- pheatmap(prop.qc, annotation_row = arow,
                          annotation_names_row=FALSE, show_rownames = F,
                          annotation_names_col=FALSE, cutree_rows = length(ct.sub),
                          color = cbPalette[2:4],
                          cluster_rows = T, cluster_cols = F)
  } else {
    heat.anno <- NULL
  }
  # keep cells assigned to exactly one cell type with probability > qcthreshold
  prop.qc.keep <- rowSums(prop.qc > qcthreshold) ==1
  sc.eset.qc <- sc.eset[,prop.qc.keep]
  return(list(prop.qc = prop.qc, sc.eset.qc = sc.eset.qc, heatfig = heat.anno))
}
#################################
#' Clustering QC for single cells from one subject
#' @description Clustering QC for single cells from one subject. Each cell is
#' deconvolved against the one-subject basis matrix by iteratively reweighted
#' NNLS; cells whose maximum cell-type probability falls below qcthreshold
#' are filtered out.
#' @name SCDC_qc_ONE
#' @param sc.eset ExpressionSet object for single cells
#' @param ct.varname variable name for 'cell type'
#' @param sample variable name for subject/sample
#' @param scsetname the name for the single cell dataset
#' @param ct.sub a subset of cell types that are selected to construct basis matrix
#' @param iter.max the maximum number of iteration in WNNLS
#' @param nu a small constant to facilitate the calculation of variance
#' @param epsilon a small constant number used for convergence criteria
#' @param arow annotation of rows for pheatmap
#' @param weight.basis logical, use the MVW-adjusted basis matrix; default is F here
#' @param qcthreshold the probability threshold used to filter out questionable cells
#' @param generate.figure logical. If generate the heatmap by pheatmap or not. default is TRUE.
#' @param ct.cell.size default is NULL, which means the "library size" is calculated based on the data. Users can specify a vector of cell size factors corresponding to the ct.sub according to prior knowledge. The vector should be named: names(ct.cell.size input) should not be NULL.
#' @param cbPalette color palette used for the pheatmap QC figure
#' @return a list including: 1) a probability matrix for each single cell input; 2) a clustering QCed ExpressionSet object; 3) a heatmap of QC result.
#' @export
SCDC_qc_ONE <- function(sc.eset, ct.varname, sample, scsetname = "Single Cell",
                        ct.sub, iter.max = 1000, nu = 1e-04, epsilon = 0.01,
                        arow = NULL, weight.basis = F, qcthreshold = 0.7,
                        generate.figure = T, ct.cell.size = NULL,
                        cbPalette = c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7"),
                        ...){
  # one-subject basis: no across-subject variance is available here
  sc.basis <- SCDC_basis_ONE(x = sc.eset, ct.sub = ct.sub, ct.varname = ct.varname, sample = sample, ct.cell.size = ct.cell.size)
  # choose the raw or the MVW-adjusted basis matrix
  if (weight.basis){
    basis.mvw <- sc.basis$basis.mvw[, ct.sub]
  } else {
    basis.mvw <- sc.basis$basis[, ct.sub]
  }
  xsc <- getCPM0(exprs(sc.eset))
  N.sc <- ncol(xsc)
  ALS.S <- sc.basis$sum.mat[ct.sub]
  # keep cell types with complete basis columns and a valid library size
  valid.ct <- (colSums(is.na(basis.mvw)) == 0) & (!is.na(ALS.S))
  if (sum(valid.ct) <= 1) {
    stop("Not enough valid cell type!")
  }
  message(paste("Used", sum(valid.ct), "cell types in deconvolution..."))
  basis.mvw <- basis.mvw[, valid.ct]
  ALS.S <- ALS.S[valid.ct]
  prop.est.mvw <- NULL
  # prop estimation for each sc sample:
  for (i in 1:N.sc) {
    # NOTE(review): ad-hoc scaling by 100 of the CPM-normalized cell profile;
    # original comment questioned it ("why times 100 if not normalize???")
    xsc.i <- xsc[, i]*100
    gene.use <- intersect(rownames(basis.mvw), names(xsc.i))
    basis.mvw.temp <- basis.mvw[gene.use,]
    xsc.temp <- xsc.i[gene.use]
    message(paste(colnames(xsc)[i], "has common genes", sum(xsc[, i] != 0), "..."))
    # first NNLS fit, then reweight genes by 1/(nu + residual^2)
    lm <- nnls::nnls(A=basis.mvw.temp,b=xsc.temp)
    delta <- lm$residuals
    wt.gene <- 1/(nu + delta^2)
    x.wt <- xsc.temp*sqrt(wt.gene)
    b.wt <- sweep(basis.mvw.temp,1,sqrt(wt.gene),"*")
    lm.wt <- nnls::nnls(A=b.wt, b=x.wt)
    prop.wt <- lm.wt$x/sum(lm.wt$x)
    delta <- lm.wt$residuals
    # iterate the weighted fit until proportions stabilize
    for (iter in 1:iter.max){
      wt.gene <- 1/(nu + delta^2)
      x.wt <- xsc.temp * sqrt(wt.gene)
      b.wt <- sweep(basis.mvw.temp,1,sqrt(wt.gene),"*")
      lm.wt <- nnls::nnls(A=b.wt, b=x.wt)
      delta.new <- lm.wt$residuals
      prop.wt.new <- lm.wt$x/sum(lm.wt$x)
      # converged when the total absolute change of proportions is below epsilon
      if (sum(abs(prop.wt.new - prop.wt)) < epsilon){
        prop.wt <- prop.wt.new
        delta <- delta.new
        message("WNNLS Converged at iteration ", iter)
        break
      }
      prop.wt <- prop.wt.new
      delta <- delta.new
    }
    prop.est.mvw <- rbind(prop.est.mvw, prop.wt)
  }
  colnames(prop.est.mvw) <- colnames(basis.mvw)
  rownames(prop.est.mvw) <- colnames(xsc)
  ### plot steps:
  if (generate.figure){
    heat.anno <- pheatmap(prop.est.mvw, annotation_row = arow,
                          annotation_names_row=FALSE, show_rownames = F,
                          annotation_names_col=FALSE, cutree_rows = length(ct.sub),
                          color = cbPalette[2:4],
                          cluster_rows = T, cluster_cols = F) #, main = scsetname
  } else {
    heat.anno <- NULL
  }
  # keep cells assigned to exactly one cell type with probability > qcthreshold
  prop.qc.keep <- rowSums(prop.est.mvw > qcthreshold) ==1 # truncated values -> F or T
  sc.eset.qc <- sc.eset[,prop.qc.keep]
  return(list(prop.qc = prop.est.mvw, sc.eset.qc = sc.eset.qc, heatfig = heat.anno))
}
######################################
#' Proportion estimation
#' @description Proportion estimation function for multi-subject case
#' @name SCDC_prop
#' @param bulk.eset ExpressionSet object for bulk samples
#' @param sc.eset ExpressionSet object for single cell samples
#' @param ct.varname variable name for 'cell types'
#' @param sample variable name for subject/samples
#' @param ct.sub a subset of cell types that are selected to construct basis matrix
#' @param iter.max the maximum number of iteration in WNNLS
#' @param nu a small constant to facilitate the calculation of variance
#' @param epsilon a small constant number used for convergence criteria
#' @param truep true cell-type proportions for bulk samples if known
#' @param ct.cell.size default is NULL, which means the "library size" is calculated based on the data. Users can specify a vector of cell size factors corresponding to the ct.sub according to prior knowledge. The vector should be named: names(ct.cell.size input) should not be NULL.
#' @param Transform_bisque The bulk sample transformation from bisqueRNA. Aiming to reduce the systematic difference between single cells and bulk samples.
#' @return Estimated proportion, basis matrix, predicted gene expression levels for bulk samples
#' @export
SCDC_prop <- function (bulk.eset, sc.eset, ct.varname, sample, ct.sub, iter.max = 1000,
                       nu = 1e-04, epsilon = 0.01, truep = NULL, weight.basis = TRUE,
                       ct.cell.size = NULL, Transform_bisque = FALSE, ...)
{
  # Estimate cell-type proportions for bulk samples from a multi-subject
  # single-cell reference, either by SCDC's iteratively reweighted NNLS
  # (default) or by bisqueRNA's bulk-transformation approach.
  #
  # Remove genes with zero total expression across all bulk samples.
  bulk.eset <- bulk.eset[rowSums(exprs(bulk.eset)) > 0, , drop = FALSE]
  # Restrict to cell types actually present in the single-cell data.
  ct.sub <- intersect(ct.sub, unique(sc.eset@phenoData@data[, ct.varname]))
  sc.basis <- SCDC_basis(x = sc.eset, ct.sub = ct.sub, ct.varname = ct.varname,
                         sample = sample, ct.cell.size = ct.cell.size)
  commongenes <- intersect(rownames(sc.basis$basis.mvw), rownames(bulk.eset))
  # Guard: require a reasonable gene overlap between reference and bulk.
  if (length(commongenes) < 0.2 * min(dim(sc.eset)[1], dim(bulk.eset)[1])) {
    stop("Too few common genes!")
  }
  message(paste("Used", length(commongenes), "common genes..."))
  # Optionally use the MVW-adjusted basis matrix.
  if (weight.basis) {
    basis.mvw <- sc.basis$basis.mvw[commongenes, ct.sub]
  } else {
    basis.mvw <- sc.basis$basis[commongenes, ct.sub]
  }
  if (Transform_bisque) {
    # Bulk transformation adapted from bisqueRNA
    # (https://github.com/cozygene/bisque).
    # Per-cell-type mean expression; `varname` is the phenoData column
    # holding the cell-type labels.
    GenerateSCReference <- function(sc.eset, varname) {
      cell.labels <- base::factor(sc.eset[[varname]])
      all.cell.types <- base::levels(cell.labels)
      aggr.fn <- function(ct) {
        base::rowMeans(Biobase::exprs(sc.eset)[, cell.labels == ct, drop = FALSE])
      }
      template <- base::numeric(base::nrow(sc.eset))
      base::vapply(all.cell.types, aggr.fn, template)
    }
    # BUG FIX: the original referenced undefined objects `cell.types` and
    # `genes`; the names in scope are ct.varname (label column) and
    # commongenes (shared genes), matching bisque's usage.
    sc.ref <- GenerateSCReference(sc.eset, ct.varname)[commongenes, , drop = FALSE]
    # Observed cell counts per subject -> per-subject proportions, used to
    # build pseudo-bulk training samples.
    ncount <- table(sc.eset@phenoData@data[, sample], sc.eset@phenoData@data[, ct.varname])
    true.prop <- ncount/rowSums(ncount, na.rm = TRUE)
    sc.props <- round(true.prop[complete.cases(true.prop), ], 2)
    Y.train <- sc.ref %*% t(sc.props[, colnames(sc.ref)])
    X.pred <- exprs(bulk.eset)[commongenes, ]
    sample.names <- base::colnames(Biobase::exprs(bulk.eset))
    template <- base::numeric(base::length(sample.names))
    base::names(template) <- sample.names
    # Shift/scale each bulk gene onto the scale of the pseudo-bulk training
    # data (bisque's "semi-supervised" transformation).
    SemisupervisedTransformBulk <- function(gene, Y.train, X.pred) {
      Y.train.scaled <- base::scale(Y.train[gene, , drop = TRUE])
      Y.center <- base::attr(Y.train.scaled, "scaled:center")
      Y.scale <- base::attr(Y.train.scaled, "scaled:scale")
      n <- base::length(Y.train.scaled)
      # Shrunken scale guards against near-zero training variance.
      shrink.scale <- base::sqrt(base::sum((Y.train[gene, , drop = TRUE] - Y.center)^2)/n + 1)
      X.pred.scaled <- base::scale(X.pred[gene, , drop = TRUE])
      base::matrix((X.pred.scaled * shrink.scale) + Y.center,
                   dimnames = base::list(base::colnames(X.pred), gene))
    }
    Y.pred <- base::matrix(base::vapply(X = commongenes,
                                        FUN = SemisupervisedTransformBulk, FUN.VALUE = template,
                                        Y.train, X.pred, USE.NAMES = TRUE),
                           nrow = base::length(sample.names))
    # Drop genes whose transformation produced NAs (e.g. zero variance).
    indices <- base::apply(Y.pred, MARGIN = 2, FUN = function(column) {
      base::anyNA(column)
    })
    if (base::any(indices)) {
      if (sum(!indices) == 0) {
        base::stop("Zero genes left for decomposition.")
      }
      Y.pred <- Y.pred[, !indices, drop = FALSE]
      sc.ref <- sc.ref[!indices, , drop = FALSE]
    }
    # Per-sample constrained regression: nonnegative proportions summing to 1.
    results <- base::as.matrix(base::apply(Y.pred, 1, function(b) {
      sol <- lsei::pnnls(sc.ref, b, sum = 1)
      return(sol$x)
    }))
    prop.est.mvw <- t(results)
    colnames(prop.est.mvw) <- colnames(sc.ref)
    rownames(prop.est.mvw) <- colnames(bulk.eset)
    # Predicted bulk expression implied by the estimated proportions.
    yhat <- sc.ref %*% results
    colnames(yhat) <- colnames(bulk.eset)
    yobs <- exprs(bulk.eset)
    yeval <- SCDC_yeval(y = yobs, yest = yhat, yest.names = c("SCDC"))
    peval <- NULL
    if (!is.null(truep)) {
      peval <- SCDC_peval(ptrue = truep, pest = prop.est.mvw,
                          pest.names = c("SCDC"), select.ct = ct.sub)
    }
  } else {
    # Standard SCDC weighted NNLS deconvolution.
    xbulk <- getCPM0(exprs(bulk.eset)[commongenes, ])
    sigma <- sc.basis$sigma[commongenes, ct.sub]
    ALS.S <- sc.basis$sum.mat[ct.sub]
    N.bulk <- ncol(bulk.eset)
    # Keep only cell types with complete basis / variance / size information.
    valid.ct <- (colSums(is.na(sigma)) == 0) & (colSums(is.na(basis.mvw)) == 0) &
      (!is.na(ALS.S))
    if (sum(valid.ct) <= 1) {
      stop("Not enough valid cell type!")
    }
    message(paste("Used", sum(valid.ct), "cell types in deconvolution..."))
    basis.mvw <- basis.mvw[, valid.ct]
    ALS.S <- ALS.S[valid.ct]
    sigma <- sigma[, valid.ct]
    prop.est.mvw <- NULL
    yhat <- NULL
    yhatgene.temp <- rownames(basis.mvw)
    # Deconvolve each bulk sample independently.
    for (i in 1:N.bulk) {
      basis.mvw.temp <- basis.mvw
      xbulk.temp <- xbulk[, i] * 100
      sigma.temp <- sigma
      message(paste(colnames(xbulk)[i], "has common genes",
                    sum(xbulk[, i] != 0), "..."))
      # Initial unweighted NNLS fit.
      lm <- nnls::nnls(A = basis.mvw.temp, b = xbulk.temp)
      delta <- lm$residuals
      # Gene weights: inverse of nu + squared residual + cross-subject
      # variance contribution from the basis (sigma).
      wt.gene <- 1/(nu + delta^2 + colSums((lm$x * ALS.S)^2 * t(sigma.temp)))
      x.wt <- xbulk.temp * sqrt(wt.gene)
      b.wt <- sweep(basis.mvw.temp, 1, sqrt(wt.gene), "*")
      lm.wt <- nnls::nnls(A = b.wt, b = x.wt)
      prop.wt <- lm.wt$x/sum(lm.wt$x)
      delta <- lm.wt$residuals
      # Iteratively reweighted NNLS until proportions change < epsilon.
      for (iter in 1:iter.max) {
        wt.gene <- 1/(nu + delta^2 + colSums((lm.wt$x * ALS.S)^2 * t(sigma.temp)))
        x.wt <- xbulk.temp * sqrt(wt.gene)
        b.wt <- sweep(basis.mvw.temp, 1, sqrt(wt.gene), "*")
        lm.wt <- nnls::nnls(A = b.wt, b = x.wt)
        delta.new <- lm.wt$residuals
        prop.wt.new <- lm.wt$x/sum(lm.wt$x)
        if (sum(abs(prop.wt.new - prop.wt)) < epsilon) {
          prop.wt <- prop.wt.new
          delta <- delta.new
          message("WNNLS Converged at iteration ", iter)
          break
        }
        prop.wt <- prop.wt.new
        delta <- delta.new
      }
      prop.est.mvw <- rbind(prop.est.mvw, prop.wt)
      # Fitted expression for this sample; accumulate on the running
      # intersection of gene names across samples.
      yhat.temp <- basis.mvw.temp %*% as.matrix(lm.wt$x)
      yhatgene.temp <- intersect(rownames(yhat.temp), yhatgene.temp)
      yhat <- cbind(yhat[yhatgene.temp, ], yhat.temp[yhatgene.temp, ])
    }
    colnames(prop.est.mvw) <- colnames(basis.mvw)
    rownames(prop.est.mvw) <- colnames(xbulk)
    colnames(yhat) <- colnames(xbulk)
    yobs <- exprs(bulk.eset)
    yeval <- SCDC_yeval(y = yobs, yest = yhat, yest.names = c("SCDC"))
    peval <- NULL
    if (!is.null(truep)) {
      peval <- SCDC_peval(ptrue = truep, pest = prop.est.mvw,
                          pest.names = c("SCDC"), select.ct = ct.sub)
    }
  }
  return(list(prop.est.mvw = prop.est.mvw, basis.mvw = basis.mvw,
              yhat = yhat, yeval = yeval, peval = peval))
}
############################################
#' Proportion estimation function for one-subject case
#' @description Proportion estimation function for one-subject case
#' @name SCDC_prop_ONE
#' @param bulk.eset ExpressionSet object for bulk samples
#' @param sc.eset ExpressionSet object for single cell samples
#' @param ct.varname variable name for 'cell types'
#' @param sample variable name for subject/samples
#' @param ct.sub a subset of cell types that are selected to construct basis matrix
#' @param iter.max the maximum number of iteration in WNNLS
#' @param nu a small constant to facilitate the calculation of variance
#' @param epsilon a small constant number used for convergence criteria
#' @param truep true cell-type proportions for bulk samples if known
#' @param ct.cell.size default is NULL, which means the "library size" is calculated based on the data. Users can specify a named vector of cell size factors corresponding to ct.sub according to prior knowledge; names(ct.cell.size) must not be NULL.
#' @return Estimated proportion, basis matrix, predicted gene expression levels for bulk samples
#' @export
SCDC_prop_ONE <- function (bulk.eset, sc.eset, ct.varname, sample, truep = NULL,
ct.sub, iter.max = 2000, nu = 1e-10, epsilon = 0.01, weight.basis = T,
ct.cell.size = NULL,
...) {
# Estimate cell-type proportions for bulk samples when the single-cell
# reference comes from a SINGLE subject.  Unlike the multi-subject
# SCDC_prop, the gene weights here use only squared residuals (no
# cross-subject variance term is available from one subject).
#
# Remove genes with zero total expression across all bulk samples.
bulk.eset <- bulk.eset[rowSums(exprs(bulk.eset)) > 0, , drop = FALSE]
# Basis matrix built from the one-subject reference.
sc.basis <- SCDC_basis_ONE(x = sc.eset, ct.sub = ct.sub,
ct.varname = ct.varname, sample = sample, ct.cell.size = ct.cell.size)
# Optionally use the MVW-adjusted basis matrix.
if (weight.basis) {
basis <- sc.basis$basis.mvw
}
else {
basis <- sc.basis$basis
}
commongenes <- intersect(rownames(basis), rownames(bulk.eset))
# Guard: require a reasonable gene overlap between reference and bulk.
if (length(commongenes) < 0.2 * min(dim(sc.eset)[1], dim(bulk.eset)[1])) {
stop("Too few common genes!")
}
message(paste("Used", length(commongenes), "common genes..."))
basis.mvw <- basis[commongenes, ct.sub]
# Normalize bulk expression via getCPM0 (presumably column-wise scaling to
# proportions - confirm against getCPM0's definition elsewhere in the file).
xbulk <- getCPM0(exprs(bulk.eset)[commongenes, ])
ALS.S <- sc.basis$sum.mat[ct.sub]
N.bulk <- ncol(bulk.eset)
# Keep cell types with complete basis columns and non-NA sum.mat entries.
valid.ct <- (colSums(is.na(basis.mvw)) == 0) & (!is.na(ALS.S))
if (sum(valid.ct) <= 1) {
stop("Not enough valid cell type!")
}
message(paste("Used", sum(valid.ct), "cell types in deconvolution..."))
basis.mvw <- basis.mvw[, valid.ct]
ALS.S <- ALS.S[valid.ct]
prop.est.mvw <- NULL
yhat <- NULL
yhatgene.temp <- rownames(basis.mvw)
# Deconvolve each bulk sample independently.
for (i in 1:N.bulk) {
xbulk.temp <- xbulk[, i]
message(paste(colnames(xbulk)[i], "has common genes",
sum(xbulk[, i] != 0), "..."))
# Initial unweighted NNLS fit.
lm <- nnls::nnls(A = basis.mvw, b = xbulk.temp)
delta <- lm$residuals
# Gene weight = 1 / (nu + squared residual); nu avoids division by zero.
wt.gene <- 1/(nu + delta^2)
x.wt <- xbulk.temp * sqrt(wt.gene)
b.wt <- sweep(basis.mvw, 1, sqrt(wt.gene), "*")
lm.wt <- nnls::nnls(A = b.wt, b = x.wt)
prop.wt <- lm.wt$x/sum(lm.wt$x)
delta <- lm.wt$residuals
# Iteratively reweighted NNLS until the L1 change in proportions < epsilon.
for (iter in 1:iter.max) {
wt.gene <- 1/(nu + delta^2)
x.wt <- xbulk.temp * sqrt(wt.gene)
b.wt <- sweep(basis.mvw, 1, sqrt(wt.gene), "*")
lm.wt <- nnls::nnls(A = b.wt, b = x.wt)
delta.new <- lm.wt$residuals
prop.wt.new <- lm.wt$x/sum(lm.wt$x)
if (sum(abs(prop.wt.new - prop.wt)) < epsilon) {
prop.wt <- prop.wt.new
delta <- delta.new
message("WNNLS Converged at iteration ",
iter)
break
}
prop.wt <- prop.wt.new
delta <- delta.new
}
prop.est.mvw <- rbind(prop.est.mvw, prop.wt)
# Fitted expression for this sample; accumulate on the running
# intersection of gene names across samples.
yhat.temp <- basis.mvw %*% as.matrix(lm.wt$x)
yhatgene.temp <- intersect(rownames(yhat.temp), yhatgene.temp)
yhat <- cbind(yhat[yhatgene.temp, ], yhat.temp[yhatgene.temp,
])
}
colnames(prop.est.mvw) <- colnames(basis.mvw)
rownames(prop.est.mvw) <- colnames(bulk.eset)
colnames(yhat) <- colnames(bulk.eset)
yobs <- exprs(bulk.eset)
# Goodness-of-fit of predicted vs observed bulk expression.
yeval <- SCDC_yeval(y = yobs, yest = yhat, yest.names = c("SCDC"))
peval <- NULL
# Evaluate against known proportions only when sample names line up.
if (!is.null(truep)) {
if (all(rownames(truep) == rownames(prop.est.mvw))){
peval <- SCDC_peval(ptrue = truep, pest = prop.est.mvw,
pest.names = c("SCDC"), select.ct = ct.sub)
} else {
message("Your input sample names for proportion matrix and bulk.eset do not match! Please make sure sample names match.")
}
}
return(list(prop.est.mvw = prop.est.mvw, basis.mvw = basis.mvw,
yhat = yhat, yeval = yeval, peval = peval))
}
############################################
#' Tree-guided proportion estimation
#' @description Proportion estimation function for multi-subject case, and apply tree-guided deconvolution
#' @name SCDC_prop_subcl_marker
#' @param bulk.eset ExpressionSet object for bulk samples
#' @param sc.eset ExpressionSet object for single cell samples
#' @param ct.varname variable name for 'cell types'
#' @param fl.varname variable name for first-level 'meta-clusters'
#' @param sample variable name for subject/samples
#' @param ct.sub a subset of cell types that are selected to construct basis matrix
#' @param ct.fl.sub 'cell types' for first-level 'meta-clusters'
#' @param iter.max the maximum number of iteration in WNNLS
#' @param nu a small constant to facilitate the calculation of variance
#' @param epsilon a small constant number used for convergence criteria
#' @param weight.basis logical, use basis matrix adjusted by MVW, default is T.
#' @param select.marker logical, select marker genes to perform deconvolution in tree-guided steps. Default is T.
#' @param markers A set of marker genes, input manually, to be used in deconvolution. If NULL, marker genes are selected automatically by Wilcoxon tests when select.marker is TRUE.
#' @param marker.varname variable name of cluster groups when selecting marker genes. If NULL, then use ct.varname.
#' @param allgenes.fl logical, use all genes in the first-level deconvolution
#' @param pseudocount.use a constant number used when selecting marker genes, default is 1.
#' @param LFC.lim a threshold of log fold change when selecting genes as input to perform Wilcoxon's test.
#' @param truep true cell-type proportions for bulk samples if known
#' @param ct.cell.size default is NULL, which means the "library size" is calculated based on the data. Users can specify a vector of cell size factors corresponding to the ct.sub according to prior knowledge. The vector should be named: names(ct.cell.size input) should not be NULL.
#' @param fl.cell.size default is NULL, similar to ct.cell.size. This is for first-level 'meta-clusters'.
#' @return Estimated proportion, basis matrix, predicted gene expression levels for bulk samples
#' @export
SCDC_prop_subcl_marker <- function (bulk.eset, sc.eset, ct.varname, fl.varname, sample,
                                    ct.sub = NULL, ct.fl.sub, iter.max = 3000, nu = 1e-04, epsilon = 0.001,
                                    weight.basis = TRUE, truep = NULL, select.marker = TRUE, markers = NULL,
                                    marker.varname = NULL, allgenes.fl = FALSE, pseudocount.use = 1,
                                    LFC.lim = 0.5, ct.cell.size = NULL, fl.cell.size = NULL, ...)
{
  # Tree-guided deconvolution: first estimate proportions of first-level
  # meta-clusters, then deconvolve each multi-member meta-cluster into its
  # constituent cell types using marker genes.
  #
  # Default: use every non-NA cell type observed in the single-cell data.
  if (is.null(ct.sub)) {
    ct.sub <- unique(sc.eset@phenoData@data[, ct.varname])[!is.na(unique(sc.eset@phenoData@data[,
      ct.varname]))]
  }
  ct.sub <- ct.sub[!is.na(ct.sub)]
  ct.fl.sub <- ct.fl.sub[!is.na(ct.fl.sub)]
  bulk.eset <- bulk.eset[rowSums(exprs(bulk.eset)) > 0, , drop = FALSE]
  sc.eset <- sc.eset[, sc.eset@phenoData@data[, ct.varname] %in% ct.sub]
  # Basis matrices at full cell-type resolution and at first-level
  # meta-cluster resolution.
  sc.basis <- SCDC_basis(x = sc.eset, ct.sub = ct.sub, ct.varname = ct.varname,
                         sample = sample, ct.cell.size = ct.cell.size)
  sc.fl.basis <- SCDC_basis(x = sc.eset, ct.sub = ct.fl.sub[!is.na(ct.fl.sub)],
                            ct.varname = fl.varname, sample = sample, ct.cell.size = fl.cell.size)
  if (select.marker) {
    if (is.null(marker.varname)) {
      marker.varname <- ct.varname
    }
    # Marker selection by one-vs-rest Wilcoxon tests (cf. Seurat::FindMarkers):
    # keep genes with log fold change > LFC.lim and Bonferroni-adjusted
    # p-value < 0.05 for any cluster.
    countmat <- exprs(sc.eset)
    ct.group <- sc.eset@phenoData@data[, marker.varname]
    markers.wilcox <- NULL
    for (u in 1:length(unique(ct.group))) {
      ct.group.temp <- ct.group == unique(ct.group)[u]
      group.1 <- apply(X = countmat[, ct.group.temp], MARGIN = 1,
                       FUN = function(x) log(x = mean(x = expm1(x = x)) + pseudocount.use))
      group.2 <- apply(X = countmat[, !ct.group.temp], MARGIN = 1,
                       FUN = function(x) log(x = mean(x = expm1(x = x)) + pseudocount.use))
      genes.diff <- rownames(sc.eset)[(group.1 - group.2) > LFC.lim]
      count.use <- countmat[rownames(sc.eset) %in% genes.diff, ]
      p_val <- sapply(1:nrow(count.use), function(x) {
        wilcox.test(count.use[x, ] ~ ct.group.temp)$p.value
      })
      p_val_adj <- p.adjust(p = p_val, method = "bonferroni",
                            n = nrow(count.use))
      markers.temp <- rownames(count.use)[p_val_adj < 0.05]
      markers.wilcox <- c(markers.wilcox, markers.temp)
    }
    markers <- unique(markers.wilcox)
    message("Selected ", length(markers), " marker genes by Wilcoxon test...")
  }
  # Optionally use the MVW-adjusted basis matrices.
  if (weight.basis) {
    basis <- sc.basis$basis.mvw
    basis.fl <- sc.fl.basis$basis.mvw
  } else {
    basis <- sc.basis$basis
    basis.fl <- sc.fl.basis$basis
  }
  # Genes used: intersection of basis, bulk, and (if provided) markers.
  if (!is.null(markers)) {
    commongenes <- Reduce(intersect, list(rownames(basis),
                                          rownames(bulk.eset), markers))
    commongenes.fl <- Reduce(intersect, list(rownames(basis.fl),
                                             rownames(bulk.eset), markers))
  } else {
    commongenes <- intersect(rownames(basis), rownames(bulk.eset))
    commongenes.fl <- intersect(rownames(basis.fl), rownames(bulk.eset))
    if (length(commongenes) < 0.2 * min(dim(sc.eset)[1], dim(bulk.eset)[1])) {
      stop("Too few common genes!")
    }
  }
  message(paste("Used", length(commongenes), "common genes for all cell types, \n",
                "Used", length(commongenes.fl), "common genes for first level cell types..."))
  basis.mvw <- basis[commongenes, ct.sub]
  basis.mvw.fl <- basis.fl[commongenes.fl, ct.fl.sub]
  xbulk0 <- getCPM0(exprs(bulk.eset)[commongenes, ])
  xbulk <- as.matrix(xbulk0)
  colnames(xbulk) <- colnames(bulk.eset)
  xbulk1 <- getCPM0(exprs(bulk.eset)[commongenes.fl, ])
  xbulk.fl <- as.matrix(xbulk1)
  ALS.S <- sc.basis$sum.mat[ct.sub]
  N.bulk <- ncol(bulk.eset)
  # Validity masks: drop cell types / meta-clusters with incomplete info.
  valid.ct <- (colSums(is.na(basis.mvw)) == 0) & (!is.na(ALS.S))
  ALS.S.fl <- sc.fl.basis$sum.mat[ct.fl.sub]
  valid.ct.fl <- (colSums(is.na(basis.mvw.fl)) == 0) & (!is.na(ALS.S.fl))
  if (sum(valid.ct) <= 1) {
    stop("Not enough valid cell type!")
  }
  message(paste("Used", sum(valid.ct), "cell types in deconvolution...\n",
                "Used", sum(valid.ct.fl), "first level cell types ..."))
  basis.mvw <- basis.mvw[, valid.ct]
  ALS.S <- ALS.S[valid.ct]
  basis.mvw.fl <- basis.mvw.fl[, valid.ct.fl]
  # BUG FIX: subset the first-level sum.mat with its own mask; the
  # original indexed ALS.S (full-resolution, already subset by valid.ct)
  # with valid.ct.fl, a mask for a different vector.
  ALS.S.fl <- ALS.S.fl[valid.ct.fl]
  prop.est <- NULL
  # Deconvolve each bulk sample independently.
  for (i in 1:N.bulk) {
    xbulk.temp <- xbulk[, i]
    message(paste(colnames(xbulk)[i], "has common genes",
                  sum(xbulk[, i] != 0), "..."))
    # Genes used in the first-level fit.
    if (allgenes.fl) {
      markers.fl <- names(xbulk.temp)
    } else {
      markers.fl <- Reduce(intersect, list(markers, names(xbulk.temp)))
    }
    # First level: iteratively reweighted NNLS on meta-clusters.
    lm <- nnls::nnls(A = basis.mvw.fl[markers.fl, ], b = xbulk.temp[markers.fl])
    delta <- lm$residuals
    wt.gene <- 1/(nu + delta^2)
    x.wt <- xbulk.temp[markers.fl] * sqrt(wt.gene)
    b.wt <- sweep(basis.mvw.fl[markers.fl, ], 1, sqrt(wt.gene), "*")
    lm.wt <- nnls::nnls(A = b.wt, b = x.wt)
    prop.wt.fl <- lm.wt$x/sum(lm.wt$x)
    delta <- lm.wt$residuals
    for (iter in 1:iter.max) {
      wt.gene <- 1/(nu + delta^2)
      x.wt <- xbulk.temp[markers.fl] * sqrt(wt.gene)
      b.wt <- sweep(basis.mvw.fl[markers.fl, ], 1, sqrt(wt.gene), "*")
      lm.wt <- nnls::nnls(A = b.wt, b = x.wt)
      delta.new <- lm.wt$residuals
      prop.wt.fl.new <- lm.wt$x/sum(lm.wt$x)
      if (sum(abs(prop.wt.fl.new - prop.wt.fl)) < epsilon) {
        prop.wt.fl <- prop.wt.fl.new
        delta <- delta.new
        message("WNNLS for First level clusters Converged at iteration ", iter)
        break
      }
      prop.wt.fl <- prop.wt.fl.new
      delta <- delta.new
    }
    names(prop.wt.fl) <- colnames(basis.mvw.fl)
    # Membership map: full-resolution cell types (rows) vs first-level
    # meta-clusters (columns).
    rt <- table(sc.eset@phenoData@data[, ct.varname], sc.eset@phenoData@data[, fl.varname])
    rt <- rt[, ct.fl.sub]
    rt.list <- list()
    prop.wt <- NULL
    for (j in 1:ncol(rt)) {
      rt.list[[j]] <- rownames(rt)[rt[, j] > 0]
      names(rt.list)[j] <- colnames(rt)[j]
      sub.cl <- rownames(rt)[rt[, j] > 0]
      if (length(sub.cl) > 1 & prop.wt.fl[colnames(rt)[j]] > 0) {
        # Multi-member cluster with nonzero weight: form a pseudo-bulk
        # for this cluster (its fitted expression plus its share of the
        # residual), then deconvolve within the cluster.
        if (is.null(dim(prop.wt.fl))) {
          xbulk.j <- basis.mvw.fl[, j] * prop.wt.fl[j] +
            (xbulk.temp - basis.mvw.fl %*% lm.wt$x) * prop.wt.fl[j]
        } else {
          xbulk.j <- basis.mvw.fl[, j] * prop.wt.fl[, j] +
            (xbulk.temp - basis.mvw.fl %*% lm.wt$x) * prop.wt.fl[, j]
        }
        markers.sl <- Reduce(intersect, list(markers, rownames(xbulk.j)))
        basis.sl <- basis.mvw[markers.sl, rownames(rt)[rt[, j] > 0]]
        # Second level: same reweighted NNLS scheme on sub-cluster basis.
        lm.sl <- nnls::nnls(A = basis.sl, b = xbulk.j[markers.sl, ])
        delta.sl <- lm.sl$residuals
        wt.gene.sl <- 1/(nu + delta.sl^2)
        x.wt.sl <- xbulk.j[markers.sl, ] * sqrt(wt.gene.sl)
        b.wt.sl <- sweep(basis.sl, 1, sqrt(wt.gene.sl), "*")
        lm.wt.sl <- nnls::nnls(A = b.wt.sl, b = x.wt.sl)
        prop.wt.sl <- lm.wt.sl$x/sum(lm.wt.sl$x)
        delta.sl <- lm.wt.sl$residuals
        for (iter in 1:iter.max) {
          wt.gene.sl <- 1/(nu + delta.sl^2)
          x.wt.sl <- xbulk.j[markers.sl, ] * sqrt(wt.gene.sl)
          b.wt.sl <- sweep(basis.sl, 1, sqrt(wt.gene.sl), "*")
          lm.wt.sl <- nnls::nnls(A = b.wt.sl, b = x.wt.sl)
          delta.sl.new <- lm.wt.sl$residuals
          prop.wt.sl.new <- lm.wt.sl$x/sum(lm.wt.sl$x)
          if (sum(abs(prop.wt.sl.new - prop.wt.sl)) < epsilon) {
            prop.wt.sl <- prop.wt.sl.new
            delta.sl <- delta.sl.new
            cat("WNNLS for Second level clusters",
                rownames(rt)[rt[, j] > 0], "Converged at iteration ",
                iter)
            break
          }
          prop.wt.sl <- prop.wt.sl.new
          delta.sl <- delta.sl.new
        }
        names(prop.wt.sl) <- sub.cl
        # Within-cluster proportions scaled by the cluster's weight.
        prop.wt <- c(prop.wt, prop.wt.sl * prop.wt.fl[colnames(rt)[j]])
      } else if (length(sub.cl) == 1) {
        # Singleton cluster: inherit the first-level estimate directly.
        prop.wt <- c(prop.wt, prop.wt.fl[colnames(rt)[j]])
      } else if (length(sub.cl) > 1 & prop.wt.fl[colnames(rt)[j]] == 0) {
        # Zero-weight cluster: every member gets proportion 0.
        prop.wt.sl <- rep(0, length(sub.cl))
        names(prop.wt.sl) <- sub.cl
        prop.wt <- c(prop.wt, prop.wt.sl)
      }
    }
    prop.est <- rbind(prop.est, prop.wt)
  }
  rownames(prop.est) <- colnames(bulk.eset)
  peval <- NULL
  if (!is.null(truep)) {
    peval <- SCDC_peval(ptrue = truep, pest = prop.est, pest.names = c("SCDC"),
                        select.ct = ct.sub)
  }
  # Fitted bulk expression implied by the estimated proportions.
  yhat <- sc.basis$basis.mvw %*% t(prop.est)[colnames(sc.basis$basis.mvw), ]
  return(list(prop.est = prop.est, prop.wt.fl = prop.wt.fl,
              basis.mvw = basis.mvw, peval = peval, sc.basis = sc.basis,
              sc.fl.basis = sc.fl.basis, yhat = yhat))
}
############################################
#' Tree-guided proportion estimation for ONE subject
#' @description Proportion estimation function for ONE-subject case, and apply tree-guided deconvolution
#' @name SCDC_prop_ONE_subcl_marker
#' @param bulk.eset ExpressionSet object for bulk samples
#' @param sc.eset ExpressionSet object for single cell samples
#' @param ct.varname variable name for 'cell types'
#' @param fl.varname variable name for first-level 'meta-clusters'
#' @param sample variable name for subject/samples
#' @param ct.sub a subset of cell types that are selected to construct basis matrix
#' @param ct.fl.sub 'cell types' for first-level 'meta-clusters'
#' @param iter.max the maximum number of iteration in WNNLS
#' @param nu a small constant to facilitate the calculation of variance
#' @param epsilon a small constant number used for convergence criteria
#' @param weight.basis logical, use basis matrix adjusted by MVW, default is T.
#' @param select.marker logical, select marker genes to perform deconvolution in tree-guided steps. Default is T.
#' @param markers A set of marker genes, input manually, to be used in deconvolution. If NULL, marker genes are selected automatically by Wilcoxon tests when select.marker is TRUE.
#' @param marker.varname variable name of cluster groups when selecting marker genes. If NULL, then use ct.varname.
#' @param allgenes.fl logical, use all genes in the first-level deconvolution
#' @param pseudocount.use a constant number used when selecting marker genes, default is 1.
#' @param LFC.lim a threshold of log fold change when selecting genes as input to perform Wilcoxon's test.
#' @param truep true cell-type proportions for bulk samples if known
#' @param ct.cell.size default is NULL, which means the "library size" is calculated based on the data. Users can specify a vector of cell size factors corresponding to the ct.sub according to prior knowledge. The vector should be named: names(ct.cell.size input) should not be NULL.
#' @param fl.cell.size default is NULL, similar to ct.cell.size. This is for first-level 'meta-clusters'.
#' @return Estimated proportion, basis matrix, predicted gene expression levels for bulk samples
#' @export
SCDC_prop_ONE_subcl_marker <- function(bulk.eset, sc.eset, ct.varname, fl.varname, sample, truep = NULL,
                                       ct.sub = NULL, ct.fl.sub, iter.max = 3000, nu = 1e-04, epsilon = 0.001,
                                       weight.basis = FALSE, bulk_disease = NULL, select.marker = TRUE, markers = NULL, marker.varname = NULL,
                                       pseudocount.use = 1, LFC.lim = 0.5, allgenes.fl = FALSE, ct.cell.size = NULL, fl.cell.size = NULL,
                                       ...)
{
  # Tree-guided deconvolution for a ONE-subject single-cell reference:
  # first-level meta-cluster proportions via reweighted NNLS, then
  # within-cluster deconvolution using marker genes.
  #
  # Default: use every non-NA cell type observed in the single-cell data.
  if (is.null(ct.sub)) {
    ct.sub <- unique(sc.eset@phenoData@data[, ct.varname])[!is.na(unique(sc.eset@phenoData@data[, ct.varname]))]
  }
  ct.sub <- ct.sub[!is.na(ct.sub)]
  ct.fl.sub <- ct.fl.sub[!is.na(ct.fl.sub)]
  bulk.eset <- bulk.eset[rowSums(exprs(bulk.eset)) > 0, , drop = FALSE]
  # One-subject basis matrices at full and first-level resolution.
  sc.basis <- SCDC_basis_ONE(x = sc.eset, ct.sub = ct.sub, ct.varname = ct.varname,
                             sample = sample, ct.cell.size = ct.cell.size)
  sc.fl.basis <- SCDC_basis_ONE(x = sc.eset, ct.sub = ct.fl.sub[!is.na(ct.fl.sub)],
                                ct.varname = fl.varname, sample = sample, ct.cell.size = fl.cell.size)
  if (select.marker) {
    if (is.null(marker.varname)) {
      marker.varname <- ct.varname
    }
    # Marker selection by one-vs-rest Wilcoxon tests (cf. Seurat::FindMarkers):
    # genes with log fold change > LFC.lim and Bonferroni-adjusted p < 0.05.
    countmat <- exprs(sc.eset)
    ct.group <- sc.eset@phenoData@data[, marker.varname]
    markers.wilcox <- NULL
    for (u in 1:length(unique(ct.group))) {
      ct.group.temp <- ct.group == unique(ct.group)[u]
      group.1 <- apply(X = countmat[, ct.group.temp],
                       MARGIN = 1, FUN = function(x) log(x = mean(x = expm1(x = x)) +
                                                           pseudocount.use))
      group.2 <- apply(X = countmat[, !ct.group.temp],
                       MARGIN = 1, FUN = function(x) log(x = mean(x = expm1(x = x)) +
                                                           pseudocount.use))
      genes.diff <- rownames(sc.eset)[(group.1 - group.2) > LFC.lim]
      count.use <- countmat[rownames(sc.eset) %in% genes.diff, ]
      p_val <- sapply(1:nrow(count.use), function(x) {
        wilcox.test(count.use[x, ] ~ ct.group.temp)$p.value
      })
      p_val_adj <- p.adjust(p = p_val, method = "bonferroni",
                            n = nrow(count.use))
      markers.temp <- rownames(count.use)[p_val_adj < 0.05]
      markers.wilcox <- c(markers.wilcox, markers.temp)
    }
    markers <- unique(markers.wilcox)
    message("Selected ", length(markers), " marker genes by Wilcoxon test...")
  } # else: caller must supply `markers` (or NULL to use all common genes)
  # Optionally use the MVW-adjusted basis matrices.
  if (weight.basis) {
    basis <- sc.basis$basis.mvw
    basis.fl <- sc.fl.basis$basis.mvw
  } else {
    basis <- sc.basis$basis
    basis.fl <- sc.fl.basis$basis
  }
  # Genes used: intersection of basis, bulk, and (if provided) markers.
  if (!is.null(markers)) {
    commongenes <- Reduce(intersect, list(rownames(basis), rownames(bulk.eset), markers))
    commongenes.fl <- Reduce(intersect, list(rownames(basis.fl), rownames(bulk.eset), markers))
  } else {
    commongenes <- intersect(rownames(basis), rownames(bulk.eset))
    commongenes.fl <- intersect(rownames(basis.fl), rownames(bulk.eset))
    if (length(commongenes) < 0.2 * min(dim(sc.eset)[1], dim(bulk.eset)[1])) {
      stop('Too few common genes!')
    }
  }
  message(paste("Used", length(commongenes), "common genes for all cell types, \n",
                "Used", length(commongenes.fl), "common genes for first level cell types..."))
  basis.mvw <- basis[commongenes, ct.sub]
  basis.mvw.fl <- basis.fl[commongenes.fl, ct.fl.sub]
  xbulk0 <- getCPM0(exprs(bulk.eset)[commongenes, ])
  xbulk <- as.matrix(xbulk0)
  colnames(xbulk) <- colnames(bulk.eset)
  xbulk1 <- getCPM0(exprs(bulk.eset)[commongenes.fl, ])
  xbulk.fl <- as.matrix(xbulk1)
  ALS.S <- sc.basis$sum.mat[ct.sub]
  N.bulk <- ncol(bulk.eset)
  # Validity masks: drop cell types / meta-clusters with incomplete info.
  valid.ct <- (colSums(is.na(basis.mvw)) == 0) & (!is.na(ALS.S))
  ALS.S.fl <- sc.fl.basis$sum.mat[ct.fl.sub]
  valid.ct.fl <- (colSums(is.na(basis.mvw.fl)) == 0) & (!is.na(ALS.S.fl))
  if (sum(valid.ct) <= 1) {
    stop("Not enough valid cell type!")
  }
  message(paste("Used", sum(valid.ct), "cell types in deconvolution...\n",
                "Used", sum(valid.ct.fl), "first level cell types ..."))
  basis.mvw <- basis.mvw[, valid.ct]
  ALS.S <- ALS.S[valid.ct]
  basis.mvw.fl <- basis.mvw.fl[, valid.ct.fl]
  # BUG FIX: subset the first-level sum.mat with its own mask; the
  # original indexed ALS.S (full-resolution, already subset by valid.ct)
  # with valid.ct.fl, a mask for a different vector.
  ALS.S.fl <- ALS.S.fl[valid.ct.fl]
  prop.est <- NULL
  # Deconvolve each bulk sample independently.
  for (i in 1:N.bulk) {
    # Scale factor 1e3 kept from the original implementation; it mildly
    # affects the weighting, per the original author's note.
    xbulk.temp <- xbulk[, i] * 1e3
    message(paste(colnames(xbulk)[i], "has common genes", sum(xbulk[, i] != 0), "..."))
    if (allgenes.fl) {
      markers.fl <- names(xbulk.temp)
    } else {
      markers.fl <- Reduce(intersect, list(markers, names(xbulk.temp)))
    }
    # First level: iteratively reweighted NNLS on meta-clusters.
    lm <- nnls::nnls(A = basis.mvw.fl[markers.fl, ], b = xbulk.temp[markers.fl])
    delta <- lm$residuals
    wt.gene <- 1/(nu + delta^2)
    x.wt <- xbulk.temp[markers.fl] * sqrt(wt.gene)
    b.wt <- sweep(basis.mvw.fl[markers.fl, ], 1, sqrt(wt.gene), "*")
    lm.wt <- nnls::nnls(A = b.wt, b = x.wt)
    prop.wt.fl <- lm.wt$x/sum(lm.wt$x)
    delta <- lm.wt$residuals
    for (iter in 1:iter.max) {
      wt.gene <- 1/(nu + delta^2)
      x.wt <- xbulk.temp[markers.fl] * sqrt(wt.gene)
      b.wt <- sweep(basis.mvw.fl[markers.fl, ], 1, sqrt(wt.gene), "*")
      lm.wt <- nnls::nnls(A = b.wt, b = x.wt)
      delta.new <- lm.wt$residuals
      prop.wt.fl.new <- lm.wt$x/sum(lm.wt$x)
      if (sum(abs(prop.wt.fl.new - prop.wt.fl)) < epsilon) {
        prop.wt.fl <- prop.wt.fl.new
        delta <- delta.new
        message("WNNLS for First level clusters Converged at iteration ", iter)
        break
      }
      prop.wt.fl <- prop.wt.fl.new
      delta <- delta.new
    }
    names(prop.wt.fl) <- colnames(basis.mvw.fl)
    # Membership map: full-resolution cell types (rows) vs first-level
    # meta-clusters (columns).
    rt <- table(sc.eset@phenoData@data[, ct.varname], sc.eset@phenoData@data[, fl.varname])
    rt <- rt[, ct.fl.sub]
    rt.list <- list()
    prop.wt <- NULL
    for (j in 1:ncol(rt)) { # for each first-level cluster
      rt.list[[j]] <- rownames(rt)[rt[, j] > 0]
      names(rt.list)[j] <- colnames(rt)[j]
      sub.cl <- rownames(rt)[rt[, j] > 0]
      if (length(sub.cl) > 1 & prop.wt.fl[colnames(rt)[j]] > 0) {
        # Multi-member cluster with nonzero weight: form a pseudo-bulk
        # for this cluster (fitted expression plus its share of the
        # residual), then deconvolve within the cluster.
        if (is.null(dim(prop.wt.fl))) {
          xbulk.j <- basis.mvw.fl[, j] * prop.wt.fl[j] +
            (xbulk.temp - basis.mvw.fl %*% lm.wt$x) * prop.wt.fl[j]
        } else {
          xbulk.j <- basis.mvw.fl[, j] * prop.wt.fl[, j] +
            (xbulk.temp - basis.mvw.fl %*% lm.wt$x) * prop.wt.fl[, j]
        }
        markers.sl <- Reduce(intersect, list(markers, rownames(xbulk.j)))
        basis.sl <- basis.mvw[markers.sl, rownames(rt)[rt[, j] > 0]]
        # Second level: same reweighted NNLS scheme on sub-cluster basis.
        lm.sl <- nnls::nnls(A = basis.sl, b = xbulk.j[markers.sl, ])
        delta.sl <- lm.sl$residuals
        wt.gene.sl <- 1/(nu + delta.sl^2)
        x.wt.sl <- xbulk.j[markers.sl, ] * sqrt(wt.gene.sl)
        b.wt.sl <- sweep(basis.sl, 1, sqrt(wt.gene.sl), "*")
        lm.wt.sl <- nnls::nnls(A = b.wt.sl, b = x.wt.sl)
        prop.wt.sl <- lm.wt.sl$x/sum(lm.wt.sl$x)
        delta.sl <- lm.wt.sl$residuals
        for (iter in 1:iter.max) {
          wt.gene.sl <- 1/(nu + delta.sl^2)
          x.wt.sl <- xbulk.j[markers.sl, ] * sqrt(wt.gene.sl)
          b.wt.sl <- sweep(basis.sl, 1, sqrt(wt.gene.sl), "*")
          lm.wt.sl <- nnls::nnls(A = b.wt.sl, b = x.wt.sl)
          delta.sl.new <- lm.wt.sl$residuals
          prop.wt.sl.new <- lm.wt.sl$x/sum(lm.wt.sl$x)
          if (sum(abs(prop.wt.sl.new - prop.wt.sl)) < epsilon) {
            prop.wt.sl <- prop.wt.sl.new
            delta.sl <- delta.sl.new
            cat("WNNLS for Second level clusters", rownames(rt)[rt[, j] > 0], "Converged at iteration ", iter)
            break
          }
          prop.wt.sl <- prop.wt.sl.new
          delta.sl <- delta.sl.new
        }
        names(prop.wt.sl) <- sub.cl
        # Within-cluster proportions scaled by the cluster's weight.
        prop.wt <- c(prop.wt, prop.wt.sl * prop.wt.fl[colnames(rt)[j]])
      } else if (length(sub.cl) == 1) {
        # Singleton cluster: inherit the first-level estimate directly.
        prop.wt <- c(prop.wt, prop.wt.fl[colnames(rt)[j]])
      } else if (length(sub.cl) > 1 & prop.wt.fl[colnames(rt)[j]] == 0) {
        # Zero-weight cluster: every member gets proportion 0.
        prop.wt.sl <- rep(0, length(sub.cl))
        names(prop.wt.sl) <- sub.cl
        prop.wt <- c(prop.wt, prop.wt.sl)
      }
    }
    prop.est <- rbind(prop.est, prop.wt)
  }
  rownames(prop.est) <- colnames(bulk.eset)
  peval <- NULL
  if (!is.null(truep)) {
    # NOTE(review): dtname = 'Perou' is hard-coded and this calls
    # SCDC_eval while the sibling functions call SCDC_peval — confirm
    # SCDC_eval exists with this signature elsewhere in the package.
    peval <- SCDC_eval(ptrue = truep, pest = prop.est, pest.names = c('SCDC'),
                       dtname = 'Perou', select.ct = ct.sub, bulk_obj = bulk.eset,
                       bulk_disease = bulk_disease)
  }
  return(list(prop.est = prop.est, prop.wt.fl = prop.wt.fl, basis.mvw = basis.mvw, peval = peval,
              sc.basis = sc.basis, sc.fl.basis = sc.fl.basis))
}
|
# Function to calculate odds ratios and confidence intervals
# on odds ratios.
# Written by Kevin Middleton
# Successes in Column 1
# Treatment of interest in Row 2
#' Odds Ratio for 2X2 Contingency Tables
#'
#' This function calculates the odds ratio for a 2 X 2 contingency table and a
#' confidence interval (default \code{conf.level} is 95 percent) for the
#' estimated odds ratio. \code{x} should be a matrix, data frame or table. "Successes"
#' should be located in column 1 of \code{x}, and the treatment of interest
#' should be located in row 2. The odds ratio is calculated as (Odds row 2) /
#' (Odds row 1). The confidence interval is calculated from the log(OR) and
#' backtransformed.
#'
#'
#' @rdname oddsRatio
#' @param x a 2 X 2 matrix, data frame or table of counts
#' @param conf.level the confidence interval level
#' @return \item{p1, p2}{Proportions for rows 1 and 2} \item{o1, o2}{Odds for
#' rows 1 and 2} \item{OR}{Odds ratio} \item{lower}{the lower bound of the
#' confidence interval} \item{upper}{the upper bound of the confidence
#' interval} \item{conf.level}{the confidence interval level}
#' @author Kevin Middleton (\email{kmm@@csusb.edu})
#' @seealso \code{\link{chisq.test}}
#' @keywords stats
#' @export
#' @examples
#' M1 <- matrix(c(14, 38, 51, 11), nrow = 2)
#' M1
#' oddsRatio(M1)
#'
#' M2 <- matrix(c(18515, 18496, 1427, 1438), nrow = 2)
#' rownames(M2) <- c("Placebo", "Aspirin")
#' colnames(M2) <- c("No", "Yes")
#' M2
#' oddsRatio(M2)
#'
oddsRatio <- function(x, conf.level = 0.95){
  # Compute the odds ratio (row 2 odds / row 1 odds) for a 2 x 2 table of
  # counts with "successes" in column 1, plus a Wald confidence interval
  # computed on log(OR) and back-transformed to the OR scale.
  row.totals <- rowSums(x)
  p1 <- x[1, 1] / row.totals[1]   # success proportion, row 1
  p2 <- x[2, 1] / row.totals[2]   # success proportion, row 2
  o1 <- p1 / (1 - p1)
  o2 <- p2 / (1 - p2)
  OR <- o2 / o1
  # Woolf standard error of log(OR): sqrt of the summed reciprocal counts.
  se.log.or <- sqrt(sum(1 / x))
  z.crit <- qnorm((1 - conf.level) / 2, lower.tail = FALSE)
  lower <- exp(log(OR) - z.crit * se.log.or)
  upper <- exp(log(OR) + z.crit * se.log.or)
  structure(
    list(p1 = p1, p2 = p2, o1 = o1, o2 = o2, OR = OR,
         lower = lower, upper = upper, conf.level = conf.level),
    class = "oddsRatio"
  )
}
#' @rdname oddsRatio
#' @method print oddsRatio
#' @param digits number of digits to display
#' @param \dots additional arguments
#' @export
print.oddsRatio <- function(x, digits = 4, ...){
# S3 print method for objects returned by oddsRatio(): prints the row
# proportions, row odds, the odds ratio, and its confidence interval,
# each formatted to `digits` significant digits.  Called for its side
# effect (console output).
cat("\n")
cat("Odds Ratio\n")
cat("\n")
cat("Proportions\n")
cat("\tProp. 1:\t", format(x$p1, digits = digits), "\n")
cat("\tProp. 2:\t", format(x$p2, digits = digits), "\n\n")
cat("Odds\n")
cat("\tOdds 1:\t\t", format(x$o1, digits = digits), "\n")
cat("\tOdds 2:\t\t", format(x$o2, digits = digits), "\n\n")
cat("Odds Ratio\n")
cat("\tOdds Ratio:\t", format(x$OR, digits = digits), "\n\n")
# Confidence level is stored as a fraction; display as a percentage.
cat(format(100 * x$conf.level), "percent confidence interval:\n\t")
cat(format(x$lower, digits = digits), "< OR <", format(x$upper, digits = digits), "\n")
}
# Function to calculate odds ratios and confidence intervals
# on odds ratios.
# Written by Kevin Middleton
# Successes in Column 1
# Treatment of interest in Row 2
#' Odds Ratio for 2X2 Contingency Tables
#'
#' This function calculates the odds ratio for a 2 X 2 contingency table and a
#' confidence interval (default \code{conf.level} is 95 percent) for the
#' estimated odds ratio. \code{x} should be a matrix, data frame or table. "Successes"
#' should be located in column 1 of \code{x}, and the treatment of interest
#' should be located in row 2. The odds ratio is calculated as (Odds row 2) /
#' (Odds row 1). The confidence interval is calculated from the log(OR) and
#' backtransformed.
#'
#'
#' @rdname oddsRatio
#' @param x a 2 X 2 matrix, data frame or table of counts
#' @param conf.level the confidence interval level
#' @return \item{p1, p2}{Proportions for rows 1 and 2} \item{o1, o2}{Odds for
#' rows 1 and 2} \item{OR}{Odds ratio} \item{lower}{the lower bound of the
#' confidence interval} \item{upper}{the upper bound of the confidence
#' interval} \item{conf.level}{the confidence interval level}
#' @author Kevin Middleton (\email{kmm@@csusb.edu})
#' @seealso \code{\link{chisq.test}}
#' @keywords stats
#' @export
#' @examples
#' M1 <- matrix(c(14, 38, 51, 11), nrow = 2)
#' M1
#' oddsRatio(M1)
#'
#' M2 <- matrix(c(18515, 18496, 1427, 1438), nrow = 2)
#' rownames(M2) <- c("Placebo", "Aspirin")
#' colnames(M2) <- c("No", "Yes")
#' M2
#' oddsRatio(M2)
#'
oddsRatio <- function(x, conf.level = 0.95){
  ## Odds ratio for a 2 x 2 table of counts ("successes" in column 1,
  ## treatment of interest in row 2): OR = odds(row 2) / odds(row 1),
  ## with a Wald confidence interval on the log scale.
  ## NOTE(review): oddsRatio is also defined earlier in this file —
  ## consider removing one copy.
  successes <- x[, 1]
  trials <- rowSums(x)
  props <- successes / trials   # row-wise success proportions
  odds <- props / (1 - props)   # row-wise odds
  est <- odds[2] / odds[1]
  se <- sqrt(sum(1 / x))        # Woolf SE of log(OR)
  z <- qnorm((1 - conf.level) / 2, lower.tail = FALSE)
  lo <- exp(log(est) - z * se)
  hi <- exp(log(est) + z * se)
  out <- list(p1 = props[1], p2 = props[2], o1 = odds[1], o2 = odds[2],
              OR = est, lower = lo, upper = hi, conf.level = conf.level)
  class(out) <- "oddsRatio"
  out
}
#' @rdname oddsRatio
#' @method print oddsRatio
#' @param digits number of digits to display
#' @param \dots additional arguments
#' @export
print.oddsRatio <- function(x, digits = 4, ...){
  # Pretty-printer for "oddsRatio" objects: proportions, odds, the odds
  # ratio, and its confidence interval, each shown to `digits` significant
  # digits. Per S3 convention, returns `x` invisibly so the method can be
  # used in pipes/assignments (the original returned NULL).
  fmt <- function(v) format(v, digits = digits)
  cat("\n")
  cat("Odds Ratio\n")
  cat("\n")
  cat("Proportions\n")
  cat("\tProp. 1:\t", fmt(x$p1), "\n")
  cat("\tProp. 2:\t", fmt(x$p2), "\n\n")
  cat("Odds\n")
  cat("\tOdds 1:\t\t", fmt(x$o1), "\n")
  cat("\tOdds 2:\t\t", fmt(x$o2), "\n\n")
  cat("Odds Ratio\n")
  cat("\tOdds Ratio:\t", fmt(x$OR), "\n\n")
  cat(format(100 * x$conf.level), "percent confidence interval:\n\t")
  cat(fmt(x$lower), "< OR <", fmt(x$upper), "\n")
  invisible(x)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Module_Barretts.R
\name{BarrettsAll}
\alias{BarrettsAll}
\title{Run all the basic Barrett's functions}
\usage{
BarrettsAll(
Endodataframe,
EndoReportColumn,
EndoReportColumn2,
Pathdataframe,
PathColumn
)
}
\arguments{
\item{Endodataframe}{endoscopy dataframe of interest}
\item{EndoReportColumn}{Endoscopy report field of interest as a string vector}
\item{EndoReportColumn2}{Second endoscopy report field of interest as a string vector}
\item{Pathdataframe}{pathology dataframe of interest}
\item{PathColumn}{Pathology report field of interest as a string vector}
}
\value{
Newdf
}
\description{
Function to encapsulate all the Barrett's functions together. This includes the Prague
score and the worst pathological grade and then feeds both of these things into
the follow up function. The output is a dataframe with all the original data as
well as the new columns that have been created.
}
\examples{
Barretts_df <- BarrettsAll(Myendo, "Findings", "OGDReportWhole", Mypath, "Histology")
}
\seealso{
Other Disease Specific Analysis - Barretts Data:
\code{\link{BarrettsBxQual}()},
\code{\link{BarrettsParisEMR}()},
\code{\link{Barretts_FUType}()},
\code{\link{Barretts_PathStage}()},
\code{\link{Barretts_PragueScore}()}
}
\concept{Disease Specific Analysis - Barretts Data}
\keyword{Does}
\keyword{data}
\keyword{something}
\keyword{with}
| /man/BarrettsAll.Rd | permissive | ropensci/EndoMineR | R | false | true | 1,436 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Module_Barretts.R
\name{BarrettsAll}
\alias{BarrettsAll}
\title{Run all the basic Barrett's functions}
\usage{
BarrettsAll(
Endodataframe,
EndoReportColumn,
EndoReportColumn2,
Pathdataframe,
PathColumn
)
}
\arguments{
\item{Endodataframe}{endoscopy dataframe of interest}
\item{EndoReportColumn}{Endoscopy report field of interest as a string vector}
\item{EndoReportColumn2}{Second endoscopy report field of interest as a string vector}
\item{Pathdataframe}{pathology dataframe of interest}
\item{PathColumn}{Pathology report field of interest as a string vector}
}
\value{
Newdf
}
\description{
Function to encapsulate all the Barrett's functions together. This includes the Prague
score and the worst pathological grade and then feeds both of these things into
the follow up function. The output is a dataframe with all the original data as
well as the new columns that have been created.
}
\examples{
Barretts_df <- BarrettsAll(Myendo, "Findings", "OGDReportWhole", Mypath, "Histology")
}
\seealso{
Other Disease Specific Analysis - Barretts Data:
\code{\link{BarrettsBxQual}()},
\code{\link{BarrettsParisEMR}()},
\code{\link{Barretts_FUType}()},
\code{\link{Barretts_PathStage}()},
\code{\link{Barretts_PragueScore}()}
}
\concept{Disease Specific Analysis - Barretts Data}
\keyword{Does}
\keyword{data}
\keyword{something}
\keyword{with}
|
#' Reads in, cleans, and subdivides daily QMJ10QMonthly data set in data folder.
#'
#' Downloads AQR's "Quality Minus Junk: 10 quality-sorted portfolios" monthly
#' spreadsheet, splits it into the US and Broad-Global panels, and saves each
#' panel as an .Rdata file under the installed FFAQR package's data folder.
#' Called purely for its side effects (network download + files written).
#'
#' NOTE(review): relies on read.xlsx (xlsx package), ymd (lubridate) and the
#' package-local findBreak() helper being attached -- confirm before calling
#' this standalone.
cleanAQRQMJ10QMonthly <- function() {
temp <- tempfile()
QMJ10QMonthly <- "https://www.aqr.com/~/media/files/data-sets/quality-minus-junk-10-qualitysorted-portfolios-monthly.xlsx"
download.file(QMJ10QMonthly, temp, method = "curl")
# Imports 10 quality sorted US portfolios
AQRQMJ10QualityLongUS <- read.xlsx(temp, "10 Portfolios Formed on Quality",
,startRow=19, colIndex=c(1:12))
row.names(AQRQMJ10QualityLongUS) <- NULL
names(AQRQMJ10QualityLongUS)[1] <- "Date"
# findBreak (package helper) appears to locate the end of the data panel
# (first "NA" row) -- TODO confirm against the helper's definition
x1 <- findBreak(AQRQMJ10QualityLongUS, 1000, "NA") - 1
AQRQMJ10QualityLongUS[,1] <- ymd(AQRQMJ10QualityLongUS[,1])
AQRQMJ10QualityLongUS <- AQRQMJ10QualityLongUS[(1:x1),]
# Imports 10 quality sorted global portfolios
AQRQMJ10QualityBroadGlobal <- read.xlsx(temp, "10 Portfolios Formed on Quality",
,startRow=19, colIndex=c(1,13:23))
row.names(AQRQMJ10QualityBroadGlobal) <- NULL
names(AQRQMJ10QualityBroadGlobal)[1] <- "Date"
x1 <- findBreak(AQRQMJ10QualityBroadGlobal, 1000, "NA") - 1
AQRQMJ10QualityBroadGlobal[,1] <- ymd(AQRQMJ10QualityBroadGlobal[,1])
AQRQMJ10QualityBroadGlobal <- AQRQMJ10QualityBroadGlobal[(1:x1),]
# remove the downloaded spreadsheet before caching the cleaned panels
unlink(temp)
start <- system.file(package="FFAQR")
save(AQRQMJ10QualityLongUS, file=paste0(start, "/data/AQRQMJ10QualityLongUS.Rdata"))
save(AQRQMJ10QualityBroadGlobal, file=paste0(start, "/data/AQRQMJ10QualityBroadGlobal.Rdata"))
}
| /R/cleanAQRQMJ10QMonthly.R | no_license | yn1/FFAQR | R | false | false | 1,526 | r | #' Reads in, cleans, and subdivides daily QMJ10QMonthly data set in data folder.
# Downloads AQR's "Quality Minus Junk: 10 quality-sorted portfolios" monthly
# spreadsheet, splits it into the US and Broad-Global panels, and saves each
# panel as an .Rdata file under the installed FFAQR package's data folder.
# Called purely for its side effects (network download + files written).
#
# NOTE(review): relies on read.xlsx (xlsx package), ymd (lubridate) and the
# package-local findBreak() helper being attached -- confirm before calling
# this standalone.
cleanAQRQMJ10QMonthly <- function() {
temp <- tempfile()
QMJ10QMonthly <- "https://www.aqr.com/~/media/files/data-sets/quality-minus-junk-10-qualitysorted-portfolios-monthly.xlsx"
download.file(QMJ10QMonthly, temp, method = "curl")
# Imports 10 quality sorted US portfolios
AQRQMJ10QualityLongUS <- read.xlsx(temp, "10 Portfolios Formed on Quality",
,startRow=19, colIndex=c(1:12))
row.names(AQRQMJ10QualityLongUS) <- NULL
names(AQRQMJ10QualityLongUS)[1] <- "Date"
# findBreak (package helper) appears to locate the end of the data panel
# (first "NA" row) -- TODO confirm against the helper's definition
x1 <- findBreak(AQRQMJ10QualityLongUS, 1000, "NA") - 1
AQRQMJ10QualityLongUS[,1] <- ymd(AQRQMJ10QualityLongUS[,1])
AQRQMJ10QualityLongUS <- AQRQMJ10QualityLongUS[(1:x1),]
# Imports 10 quality sorted global portfolios
AQRQMJ10QualityBroadGlobal <- read.xlsx(temp, "10 Portfolios Formed on Quality",
,startRow=19, colIndex=c(1,13:23))
row.names(AQRQMJ10QualityBroadGlobal) <- NULL
names(AQRQMJ10QualityBroadGlobal)[1] <- "Date"
x1 <- findBreak(AQRQMJ10QualityBroadGlobal, 1000, "NA") - 1
AQRQMJ10QualityBroadGlobal[,1] <- ymd(AQRQMJ10QualityBroadGlobal[,1])
AQRQMJ10QualityBroadGlobal <- AQRQMJ10QualityBroadGlobal[(1:x1),]
# remove the downloaded spreadsheet before caching the cleaned panels
unlink(temp)
start <- system.file(package="FFAQR")
save(AQRQMJ10QualityLongUS, file=paste0(start, "/data/AQRQMJ10QualityLongUS.Rdata"))
save(AQRQMJ10QualityBroadGlobal, file=paste0(start, "/data/AQRQMJ10QualityBroadGlobal.Rdata"))
}
|
library(plyr)

# Append all six pairwise interaction terms between the four base features.
add_interactions <- function(df) {
  mutate(df,
         bone_flesh = bone_length * rotting_flesh,
         bone_hair  = bone_length * hair_length,
         bone_soul  = bone_length * has_soul,
         flesh_hair = rotting_flesh * hair_length,
         flesh_soul = rotting_flesh * has_soul,
         hair_soul  = hair_length * has_soul)
}

newtrain <- add_interactions(train)
newtest  <- add_interactions(test)

# drop the original first five variables, keeping the interaction terms
newtrain <- newtrain[, -(1:5)]
newtest  <- newtest[, -(1:5)]
| /data.r | no_license | celia18m/stat5330.p2 | R | false | false | 874 | r | library(plyr)
# add interaction terms
newtrain <- mutate(train,
bone_flesh = bone_length * rotting_flesh,
bone_hair = bone_length * hair_length,
bone_soul = bone_length * has_soul,
flesh_hair = rotting_flesh * hair_length,
flesh_soul = rotting_flesh * has_soul,
hair_soul = hair_length * has_soul)
newtest <- mutate(test,
bone_flesh = bone_length * rotting_flesh,
bone_hair = bone_length * hair_length,
bone_soul = bone_length * has_soul,
flesh_hair = rotting_flesh * hair_length,
flesh_soul = rotting_flesh * has_soul,
hair_soul = hair_length * has_soul)
# delete the original variables
newtrain <- newtrain[, -c(1:5)]
newtest <- newtest[, -c(1:5)]
|
# Variable-importance measure for a multinomial logic-regression (MLR) bagging
# iteration: for each prime implicant, counts how many out-of-bag (OOB)
# predictions become wrong once that prime is removed from every model in
# which it occurs.
#
# lmodel: list of n.lev-1 fitted binary models (class r vs. reference class 1)
# lpi:    list (per non-reference class) of prime-implicant name collections
# mat.eval: evaluation matrix with one column per prime implicant
# mat.data: design matrix handed to predict()
# cl:     true class labels; inbagg: indices of the in-bag observations
# useN:   if FALSE, importances are rescaled by the number of OOB observations
# Returns list(n.corr, vec.improve, vec.out).
vim.MLR<-function(lmodel,lpi,mat.eval,mat.data,cl,inbagg,useN=TRUE){
levs<-levels(cl)
n.lev<-length(levs)
# OOB observations: everything not drawn into the bagging sample.
oob<-which(!(1:length(cl))%in%inbagg)
mat.data<-mat.data[oob,]
getME<-function(x,mat) mat[,x,drop=FALSE]
vec.out<-numeric(ncol(mat.eval))
names(vec.out)<-colnames(mat.eval)
# Column r of mat.prob holds the score of class r vs. the reference class.
mat.prob<-matrix(0,length(oob),n.lev)
listMat<-vector("list",n.lev-1)
for(i in 2:n.lev){
mat.prob[,i]<-predict(lmodel[[i-1]],mat.data,2)
listMat[[i-1]]<-lapply(lpi[[i-1]],getME,mat=mat.eval)
}
rm(mat.eval,mat.data)
mat.prob<-exp(mat.prob)
preds<-max.col(mat.prob)
preds<-levs[preds]
# Baseline: correctly classified OOB observations with all primes present.
n.corr<-sum(cl[oob]==preds)
mat.primes<-getMatPrime(lpi)
primes<-rownames(mat.primes)
n.primes<-length(primes)
vec.improve<-numeric(n.primes)
for(i in 1:n.primes){
# Re-score every class model that contains prime i, without that prime.
# NOTE(review): mat.prob columns were exponentiated above, while
# getNewProbsMLR returns a raw linear predictor -- the scales fed to
# max.col may be inconsistent; confirm against the package sources.
ids<-which(mat.primes[i,])
tmp.prob<-mat.prob
for(j in ids)
tmp.prob[,j+1]<-getNewProbsMLR(listMat[[j]],inbagg,oob,cl[inbagg],
primes[i],levs[c(1,j+1)])
preds<-max.col(tmp.prob)
preds<-levs[preds]
vec.improve[i]<-sum(cl[oob]==preds)
}
# Importance of a prime = drop in correct OOB classifications when removed.
# (A dead bare `vec.out` expression that evaluated-and-discarded the vector
# before the return list has been removed.)
vec.out[primes]<-n.corr-vec.improve
if(!useN)
vec.out<-vec.out/length(oob)
names(vec.improve)<-primes
list(n.corr=n.corr,vec.improve=vec.improve,vec.out=vec.out)
}
# Re-computes the linear predictor of one binary (class vs. reference) model
# after removing prime implicant `prime` from every logic tree.
# mats:     list of indicator matrices (one per tree), columns named by prime
# inbagg / oob: row indices of in-bag / out-of-bag observations
# cl.train: class labels of the in-bag observations
# vec.lev:  c(reference level, level modelled by this binary model)
# Returns the refitted linear predictor (matrix product) for the OOB rows.
getNewProbsMLR<-function(mats,inbagg,oob,cl.train,prime,vec.lev){
# A tree "fires" if any of its remaining primes (all but `prime`) is fulfilled.
rS2<-function(x,prime) rowSums(x[,colnames(x)!=prime,drop=FALSE])>0
tmp<-lapply(mats,rS2,prime=prime)
mat.model<-matrix(unlist(tmp),ncol=length(mats))
# Drop constant columns (all 0 or all 1): collinear with the intercept.
cs<-colSums(mat.model)
if(any(cs%in%c(0,nrow(mat.model))))
mat.model<-mat.model[,!cs%in%c(0,nrow(mat.model)),drop=FALSE]
mat.model<-cbind(1,mat.model) # prepend the intercept column
# Refit the binary logistic model on the in-bag rows of the two classes.
ids<-cl.train%in%vec.lev
y<-(cl.train[ids]==vec.lev[2])*1
x<-mat.model[inbagg,]
coef<-glm.fit(x=x[ids,],y=y,family=binomial())$coef
mat.model[oob,]%*%coef
}
# Incidence matrix of prime implicants: entry [p, r] is TRUE when prime
# implicant p occurs anywhere in the r-th element of lpi.
# lpi: list (one element per class model) of prime-implicant name collections.
# Returns a logical matrix with one row per distinct prime implicant.
# Fix: the original used sapply(), which simplifies to a plain vector when
# only one prime exists, making the subsequent rownames<- call error out;
# vapply() + an explicit matrix() keep the shape stable in every case.
getMatPrime<-function(lpi){
primes<-unique(unlist(lpi))
listPI<-lapply(lpi,function(x) unique(unlist(x)))
mat<-vapply(listPI,function(x) primes%in%x,logical(length(primes)))
mat<-matrix(mat,nrow=length(primes),dimnames=list(primes,names(listPI)))
mat
}
| /R/vim.MLR.R | no_license | holgerschw/logicFS | R | false | false | 1,991 | r | vim.MLR<-function(lmodel,lpi,mat.eval,mat.data,cl,inbagg,useN=TRUE){
levs<-levels(cl)
n.lev<-length(levs)
oob<-which(!(1:length(cl))%in%inbagg)
mat.data<-mat.data[oob,]
getME<-function(x,mat) mat[,x,drop=FALSE]
vec.out<-numeric(ncol(mat.eval))
names(vec.out)<-colnames(mat.eval)
mat.prob<-matrix(0,length(oob),n.lev)
listMat<-vector("list",n.lev-1)
for(i in 2:n.lev){
mat.prob[,i]<-predict(lmodel[[i-1]],mat.data,2)
listMat[[i-1]]<-lapply(lpi[[i-1]],getME,mat=mat.eval)
}
rm(mat.eval,mat.data)
mat.prob<-exp(mat.prob)
preds<-max.col(mat.prob)
preds<-levs[preds]
n.corr<-sum(cl[oob]==preds)
mat.primes<-getMatPrime(lpi)
primes<-rownames(mat.primes)
n.primes<-length(primes)
vec.improve<-numeric(n.primes)
for(i in 1:n.primes){
ids<-which(mat.primes[i,])
tmp.prob<-mat.prob
for(j in ids)
tmp.prob[,j+1]<-getNewProbsMLR(listMat[[j]],inbagg,oob,cl[inbagg],
primes[i],levs[c(1,j+1)])
preds<-max.col(tmp.prob)
preds<-levs[preds]
vec.improve[i]<-sum(cl[oob]==preds)
}
vec.out[primes]<-n.corr-vec.improve
if(!useN)
vec.out<-vec.out/length(oob)
vec.out
names(vec.improve)<-primes
list(n.corr=n.corr,vec.improve=vec.improve,vec.out=vec.out)
}
getNewProbsMLR<-function(mats,inbagg,oob,cl.train,prime,vec.lev){
rS2<-function(x,prime) rowSums(x[,colnames(x)!=prime,drop=FALSE])>0
tmp<-lapply(mats,rS2,prime=prime)
mat.model<-matrix(unlist(tmp),ncol=length(mats))
cs<-colSums(mat.model)
if(any(cs%in%c(0,nrow(mat.model))))
mat.model<-mat.model[,!cs%in%c(0,nrow(mat.model)),drop=FALSE]
mat.model<-cbind(1,mat.model)
ids<-cl.train%in%vec.lev
y<-(cl.train[ids]==vec.lev[2])*1
x<-mat.model[inbagg,]
coef<-glm.fit(x=x[ids,],y=y,family=binomial())$coef
mat.model[oob,]%*%coef
}
getMatPrime<-function(lpi){
primes<-unique(unlist(lpi))
listPI<-lapply(lpi,function(x) unique(unlist(x)))
mat<-sapply(listPI,function(x,y) y%in%x, y=primes)
rownames(mat)<-primes
mat
}
|
################################################################
## Bayesian Statistics: Advanced IRT
## Quant III Lab 13
## December 5th 2013
################################################################
# install.packages("wnominate")
# install.packages("msm")
# install.packages("pscl")
library(pscl)
library(rstan)
# loading roll call data
# Source: http://jackman.stanford.edu/blog/
load("lab13_senate_rc.rda")
rc <- dropRollCall(rc, dropList=list(codes = "notInLegis", lop = 0))
################################################################
## BASELINE IRT MODEL
################################################################
irt <- ideal(rc, store.item=TRUE)
# analysis of results
summary(irt)
################################################################
## ASSESSING CONVERGENCE
################################################################
# Helpers that convert pieces of an `ideal` fit into coda/mcmc objects or
# into iterations x chains x parameters arrays (the layout rstan::monitor
# expects).
legislators_to_coda <- function(irt) {
  mcmc(irt$x[, , 1])
}
legislators_to_array <- function(irt) {
  aperm(irt$x, c(1, 3, 2))
}
items_to_coda <- function(irt, par) {
  mcmc(irt$beta[, , par])
}
items_to_array <- function(irt, par) {
  # Insert a singleton "chain" dimension between iterations and parameters.
  draws <- irt$beta[, , par]
  array(draws,
        dim = c(dim(draws)[1], 1, dim(draws)[2]),
        dimnames = list(dimnames(draws)[[1]], 1, dimnames(draws)[[2]]))
}
# Visual test
plot(legislators_to_coda(irt), ask=TRUE)
plot(items_to_coda(irt, 'Discrimination D1'), ask=TRUE)
# Summary with rstan
monitor(legislators_to_array(irt))
monitor(items_to_array(irt, 'Discrimination D1'))
### SOLUTIONS (?)
# If chain has not converged, longer chain can work
irt <- ideal(rc, store.item=TRUE, maxiter=50000, thin=200, burnin=10000,
verbose=TRUE)
load("lab14_irt_long_chain.Rdata")
# Convergence
plot(legislators_to_coda(irt), ask=TRUE)
plot(items_to_coda(irt, 'Discrimination D1'), ask=TRUE)
monitor(legislators_to_array(irt))
monitor(items_to_array(irt, 'Discrimination D1'))
# Another alternative is to use a hierarchical approach
irt <- ideal(rc, store.item=TRUE, normalize=TRUE)
# 'normalize' identifies the model imposing the constraint that ideal points
# have unit variance in each dimension (mean 0 and sd 1)
# This is equivalent to a hierarchical model where x_i ~ N(0, 1)
# where x_i has an informative prior distribution (we fix the
# hyperparameters)
monitor(legislators_to_array(irt))
monitor(items_to_array(irt, 'Discrimination D1'))
# Other solution: parameter expansion (see BDA for more details)
irt <- ideal(rc, store.item=TRUE, mda=TRUE)
################################################################
## IRT WITH >1 DIMENSIONS
################################################################
# 'ideal' function works with multiple dimensions
irt <- ideal(rc, d=2, store.item=TRUE, maxiter=50000, thin=200,
burnin=10000, verbose=TRUE)
load("lab14_irt_2D.Rdata")
# low values in first dimension
head(irt$xbar[order(irt$xbar[,1]),])
# high values in first dimension
tail(irt$xbar[order(irt$xbar[,1]),])
# low values in second dimension
head(irt$xbar[order(irt$xbar[,2]),])
# high values in second dimension
tail(irt$xbar[order(irt$xbar[,2]),])
# items that discriminate in second dimension
discrimination <- irt$betabar[,"Discrimination D2"]
# top 2 most "discriminatory" bills for POSITIVE values of scale
rc$vote.data[order(discrimination, decreasing=TRUE)[1],]
rc$vote.data[order(discrimination, decreasing=TRUE)[2],]
# top 2 most "discriminatory" bills for NEGATIVE values of scale
rc$vote.data[order(discrimination)[1],]
rc$vote.data[order(discrimination)[2],]
################################################################
## COMPARING IRT WITH WNOMINATE
################################################################
library(wnominate)
nom <- wnominate(rc, dims=2, polarity=c("Cruz (R-TX)", "Cochran (R-MS)"))
par(mfrow=c(1,2))
plot(irt$xbar[,1], nom$legislators$coord1D,
xlab="IRT ideal point (1D)", ylab="W-NOMINATE (1D)")
plot(irt$xbar[,2], nom$legislators$coord2D,
xlab="IRT ideal point (2D)", ylab="W-NOMINATE (2D)")
################################################################
## IRT MODEL FIT
################################################################
## 1) PROPORTION OF CORRECTLY PREDICTED VOTES
# baseline
repub <- ifelse(rc$legis.data$party=="R", 0, 1) # dummy 'legislator==Republican'
K <- dim(rc$votes)[2] # number of votes
tab <- table(c(rc$votes), rep(repub, K)) # baseline: all Rs vote together
sum(diag(tab))/sum(tab) # proportion of correctly predicted
# W-NOMINATE
nom$fits[1:2]
# 1-dimensional model
pred <- matrix(NA, nrow=dim(irt$xbar)[1], ncol=dim(irt$betabar)[1]) # empty matrix
for (i in 1:nrow(pred)){
for (j in 1:ncol(pred)){
# compute predicted probability that legislator i votes YES to bill j
pred[i,j] <- plogis(irt$xbar[i,1] * irt$betabar[j,1] - irt$betabar[j,3])
}
}
tab <- table(c(rc$votes), c(pred)>0.50)
sum(diag(tab))/sum(tab)
# 2-dimensional model
pred <- matrix(NA, nrow=dim(irt$xbar)[1], ncol=dim(irt$betabar)[1])
for (i in 1:nrow(pred)){
for (j in 1:ncol(pred)){
pred[i,j] <- plogis(irt$xbar[i,1] * irt$betabar[j,1] +
irt$xbar[i,2] * irt$betabar[j,2] - irt$betabar[j,3])
}
}
tab <- table(c(rc$votes), c(pred)>0.50)
sum(diag(tab))/sum(tab)
## 2) PROPORTION OF 'YES' VOTES BY PROBABILITY BINS
# (useful for sparse vote matrices)
bins <- mapply(function(x, y) which(c(pred)>x & c(pred)<=y),
seq(0, .9, .1), seq(.1, 1, .1))
pred.bins <- lapply(bins, function(x) mean(c(rc$votes)[x]==1, na.rm=TRUE))
plot(seq(.05, .95, .10), pred.bins, xlab="Probability bins", ylab="% Yeas")
lines(seq(.05, .95, .10), pred.bins)
abline(a=0, b=1)
## 3) PROPORTION OF CORRECTLY PREDICTED VOTES USING ESTIMATED CUTPOINTS
# function to compute correctly predicted votes for a single bill
# for a given cutpoint
# Proportion of votes correctly classified by splitting ideal points at
# `cutpoint`; the split's polarity is chosen post hoc to maximize accuracy,
# so the result is always >= 0.5.
# NOTE(review): assumes the cross-tab is 2 x 2; if every ideal point falls on
# one side of the cutpoint, diag() on the degenerate table is questionable --
# confirm inputs before relying on boundary values.
max.pred <- function(cutpoint, vote, xbar){
  tab <- table(xbar > cutpoint, vote)
  pred <- sum(diag(tab)) / sum(tab)
  # scalar condition: plain if/else (ifelse() is meant for vectors)
  if (pred > 0.50) pred else 1 - pred
}
# example
max.pred(-1, rc$votes[,1], irt$xbar[,1])
# function to compute cutpoints and % correctly predicted votes
compute.cutpoints <- function(ideal.points, votes){
  # For each roll-call vote (column of `votes`), find the cutpoint on the
  # ideal-point scale that maximizes classification accuracy (via max.pred).
  # Returns a matrix with one row per vote: [, 1] cutpoint, [, 2] accuracy.
  rng <- range(ideal.points, na.rm = TRUE)
  fit_one <- function(v) {
    optimize(max.pred, interval = rng, vote = v, xbar = ideal.points,
             maximum = TRUE)
  }
  results <- apply(votes, 2, fit_one)
  matrix(unlist(results), ncol = 2, byrow = TRUE)
}
cutpoints <- compute.cutpoints(irt$xbar[,1], rc$votes)
mean(cutpoints[,2])
cutpoints <- compute.cutpoints(irt$xbar[,2], rc$votes)
mean(cutpoints[,2])
################################################################
## IRT WITH STAN
################################################################
library(rstan)
stan.code <- '
data {
int<lower=1> J; // number of legislators
int<lower=1> K; // number of bills
int<lower=1> N; // number of observations
int<lower=1,upper=J> j[N]; // legislator for observation n
int<lower=1,upper=K> k[N]; // bill for observation n
int<lower=0,upper=1> y[N]; // vote of observation n
}
parameters {
real alpha[K];
real beta[K];
real theta[J];
}
model {
alpha ~ normal(0, 25);
beta ~ normal(0, 25);
theta ~ normal(0, 1);
for (n in 1:N)
y[n] ~ bernoulli_logit( theta[j[n]] * beta[k[n]] - alpha[k[n]] );
}
'
J <- dim(rc$votes)[1]
K <- dim(rc$votes)[2]
N <- length(rc$votes)
j <- rep(1:J, times=K)
k <- rep(1:K, each=J)
y <- c(rc$votes)
# deleting missing values
miss <- which(is.na(y))
N <- N - length(miss)
j <- j[-miss]
k <- k[-miss]
y <- y[-miss]
## data and initial values
stan.data <- list(J=J, K=K, N=N, j=j, k=k, y=y)
inits <- list(list(alpha=rnorm(K, 0, 2), beta=rnorm(K, 0, 2),
theta=ifelse(rc$legis.data$party=="R", 1, -1)))
stan.fit <- stan(model_code=stan.code, data=stan.data, iter=500, warmup=200,
chains=1, thin=2, inits=inits)
load("lab14_stan_irt.Rdata")
## convergence
traceplot(stan.fit, pars='theta', ask=TRUE)
## comparing with WNOMINATE and Jackman's ideal
estimates <- summary(stan.fit)
theta <- estimates$summary[paste0("theta[", 1:J, "]"),1]
par(mfrow=c(1,2))
plot(irt$xbar[,1], theta,
xlab="IRT ideal point (1D)", ylab="IRT ideal point (STAN)")
plot(nom$legislators$coord1D, theta,
xlab="W-NOMINATE (1D)", ylab="IRT ideal point (STAN)")
################################################################
## IRT WITH COVARIATES
################################################################
# Different ways of doing this...
# With Stan, it would be something like this:
stan.code <- '
data {
int<lower=1> J; // number of legislators
int<lower=1> K; // number of bills
int<lower=1> N; // number of observations
int<lower=1,upper=J> j[N]; // legislator for observation n
int<lower=1,upper=K> k[N]; // bill for observation n
int<lower=0,upper=1> y[N]; // vote of observation n
real party[J]; // party of legislator j (0 for D/I, 1 for R)
}
parameters {
real alpha[K];
real beta[K];
real theta[J]; # realized ideology
real gamma; # intercept for legislator ideology distr.
real beta_party; # effect of party ID
real<lower=0.1> omega; # sd of legislator ideology
}
model {
alpha ~ normal(0, 5);
beta ~ normal(0, 5);
beta_party ~ normal(0, 2);
omega ~ uniform(0, 1);
gamma ~ normal(0, 2);
for (i in 1:J){
theta[i] ~ normal(gamma + beta_party * party[i], omega);
};
for (n in 1:N)
y[n] ~ bernoulli_logit( theta[j[n]] * beta[k[n]] - alpha[k[n]] );
}
'
repub <- ifelse(rc$legis.data$party=="R", 0, 1)
stan.data <- list(J=J, K=K, N=N, j=j, k=k, y=y, party=repub)
inits <- list(list(alpha=rnorm(K, 0, 2), beta=rnorm(K, 0, 2),
theta=ifelse(rc$legis.data$party=="R", 1, -1),
beta_party=1, omega=0.5))
stan.fit <- stan(model_code=stan.code, data=stan.data, iter=500, warmup=200,
chains=1, thin=2, inits=inits)
# MCMCpack has a function to do this too
female <- ifelse(rc$legis.data$gender=="F", 1, 0)
mcmc <- MCMCirtHier1d(rc$votes, data.frame(republican=repub, female=female))
results <- summary(mcmc)
round(results$statistics['beta.republican',],2)
round(results$statistics['beta.female',], 2)
theta <- results$statistics[1:104,1]
plot(irt$xbar[,1], theta,
xlab="IRT ideal point (1D)", ylab="IRT ideal point with covariates (MCMCpack)")
| /B_analysts_sources_github/pablobarbera/quant3materials/lab14_IRT_issues.R | no_license | Irbis3/crantasticScrapper | R | false | false | 10,218 | r | ################################################################
## Bayesian Statistics: Advanced IRT
## Quant III Lab 13
## December 5th 2013
################################################################
# install.packages("wnominate")
# install.packages("msm")
# install.packages("pscl")
library(pscl)
library(rstan)
# loading roll call data
# Source: http://jackman.stanford.edu/blog/
load("lab13_senate_rc.rda")
rc <- dropRollCall(rc, dropList=list(codes = "notInLegis", lop = 0))
################################################################
## BASELINE IRT MODEL
################################################################
irt <- ideal(rc, store.item=TRUE)
# analysis of results
summary(irt)
################################################################
## ASSESSING CONVERGENCE
################################################################
# Helpers that convert pieces of an `ideal` fit into coda/mcmc objects or
# into iterations x chains x parameters arrays (the layout rstan::monitor
# expects).
legislators_to_coda <- function(irt) {
  mcmc(irt$x[, , 1])
}
legislators_to_array <- function(irt) {
  aperm(irt$x, c(1, 3, 2))
}
items_to_coda <- function(irt, par) {
  mcmc(irt$beta[, , par])
}
items_to_array <- function(irt, par) {
  # Insert a singleton "chain" dimension between iterations and parameters.
  draws <- irt$beta[, , par]
  array(draws,
        dim = c(dim(draws)[1], 1, dim(draws)[2]),
        dimnames = list(dimnames(draws)[[1]], 1, dimnames(draws)[[2]]))
}
# Visual test
plot(legislators_to_coda(irt), ask=TRUE)
plot(items_to_coda(irt, 'Discrimination D1'), ask=TRUE)
# Summary with rstan
monitor(legislators_to_array(irt))
monitor(items_to_array(irt, 'Discrimination D1'))
### SOLUTIONS (?)
# If chain has not converged, longer chain can work
irt <- ideal(rc, store.item=TRUE, maxiter=50000, thin=200, burnin=10000,
verbose=TRUE)
load("lab14_irt_long_chain.Rdata")
# Convergence
plot(legislators_to_coda(irt), ask=TRUE)
plot(items_to_coda(irt, 'Discrimination D1'), ask=TRUE)
monitor(legislators_to_array(irt))
monitor(items_to_array(irt, 'Discrimination D1'))
# Another alternative is to use a hierarchical approach
irt <- ideal(rc, store.item=TRUE, normalize=TRUE)
# 'normalize' identifies the model imposing the constraint that ideal points
# have unit variance in each dimension (mean 0 and sd 1)
# This is equivalent to a hierarchical model where x_i ~ N(0, 1)
# where x_i has an informative prior distribution (we fix the
# hyperparameters)
monitor(legislators_to_array(irt))
monitor(items_to_array(irt, 'Discrimination D1'))
# Other solution: parameter expansion (see BDA for more details)
irt <- ideal(rc, store.item=TRUE, mda=TRUE)
################################################################
## IRT WITH >1 DIMENSIONS
################################################################
# 'ideal' function works with multiple dimensions
irt <- ideal(rc, d=2, store.item=TRUE, maxiter=50000, thin=200,
burnin=10000, verbose=TRUE)
load("lab14_irt_2D.Rdata")
# low values in first dimension
head(irt$xbar[order(irt$xbar[,1]),])
# high values in first dimension
tail(irt$xbar[order(irt$xbar[,1]),])
# low values in second dimension
head(irt$xbar[order(irt$xbar[,2]),])
# high values in second dimension
tail(irt$xbar[order(irt$xbar[,2]),])
# items that discriminate in second dimension
discrimination <- irt$betabar[,"Discrimination D2"]
# top 2 most "discriminatory" bills for POSITIVE values of scale
rc$vote.data[order(discrimination, decreasing=TRUE)[1],]
rc$vote.data[order(discrimination, decreasing=TRUE)[2],]
# top 2 most "discriminatory" bills for NEGATIVE values of scale
rc$vote.data[order(discrimination)[1],]
rc$vote.data[order(discrimination)[2],]
################################################################
## COMPARING IRT WITH WNOMINATE
################################################################
library(wnominate)
nom <- wnominate(rc, dims=2, polarity=c("Cruz (R-TX)", "Cochran (R-MS)"))
par(mfrow=c(1,2))
plot(irt$xbar[,1], nom$legislators$coord1D,
xlab="IRT ideal point (1D)", ylab="W-NOMINATE (1D)")
plot(irt$xbar[,2], nom$legislators$coord2D,
xlab="IRT ideal point (2D)", ylab="W-NOMINATE (2D)")
################################################################
## IRT MODEL FIT
################################################################
## 1) PROPORTION OF CORRECTLY PREDICTED VOTES
# baseline
repub <- ifelse(rc$legis.data$party=="R", 0, 1) # dummy 'legislator==Republican'
K <- dim(rc$votes)[2] # number of votes
tab <- table(c(rc$votes), rep(repub, K)) # baseline: all Rs vote together
sum(diag(tab))/sum(tab) # proportion of correctly predicted
# W-NOMINATE
nom$fits[1:2]
# 1-dimensional model
pred <- matrix(NA, nrow=dim(irt$xbar)[1], ncol=dim(irt$betabar)[1]) # empty matrix
for (i in 1:nrow(pred)){
for (j in 1:ncol(pred)){
# compute predicted probability that legislator i votes YES to bill j
pred[i,j] <- plogis(irt$xbar[i,1] * irt$betabar[j,1] - irt$betabar[j,3])
}
}
tab <- table(c(rc$votes), c(pred)>0.50)
sum(diag(tab))/sum(tab)
# 2-dimensional model
pred <- matrix(NA, nrow=dim(irt$xbar)[1], ncol=dim(irt$betabar)[1])
for (i in 1:nrow(pred)){
for (j in 1:ncol(pred)){
pred[i,j] <- plogis(irt$xbar[i,1] * irt$betabar[j,1] +
irt$xbar[i,2] * irt$betabar[j,2] - irt$betabar[j,3])
}
}
tab <- table(c(rc$votes), c(pred)>0.50)
sum(diag(tab))/sum(tab)
## 2) PROPORTION OF 'YES' VOTES BY PROBABILITY BINS
# (useful for sparse vote matrices)
bins <- mapply(function(x, y) which(c(pred)>x & c(pred)<=y),
seq(0, .9, .1), seq(.1, 1, .1))
pred.bins <- lapply(bins, function(x) mean(c(rc$votes)[x]==1, na.rm=TRUE))
plot(seq(.05, .95, .10), pred.bins, xlab="Probability bins", ylab="% Yeas")
lines(seq(.05, .95, .10), pred.bins)
abline(a=0, b=1)
## 3) PROPORTION OF CORRECTLY PREDICTED VOTES USING ESTIMATED CUTPOINTS
# function to compute correctly predicted votes for a single bill
# for a given cutpoint
# Proportion of votes correctly classified by splitting ideal points at
# `cutpoint`; the split's polarity is chosen post hoc to maximize accuracy,
# so the result is always >= 0.5.
# NOTE(review): assumes the cross-tab is 2 x 2; if every ideal point falls on
# one side of the cutpoint, diag() on the degenerate table is questionable --
# confirm inputs before relying on boundary values.
max.pred <- function(cutpoint, vote, xbar){
  tab <- table(xbar > cutpoint, vote)
  pred <- sum(diag(tab)) / sum(tab)
  # scalar condition: plain if/else (ifelse() is meant for vectors)
  if (pred > 0.50) pred else 1 - pred
}
# example
max.pred(-1, rc$votes[,1], irt$xbar[,1])
# function to compute cutpoints and % correctly predicted votes
compute.cutpoints <- function(ideal.points, votes){
  # For each roll-call vote (column of `votes`), find the cutpoint on the
  # ideal-point scale that maximizes classification accuracy (via max.pred).
  # Returns a matrix with one row per vote: [, 1] cutpoint, [, 2] accuracy.
  rng <- range(ideal.points, na.rm = TRUE)
  fit_one <- function(v) {
    optimize(max.pred, interval = rng, vote = v, xbar = ideal.points,
             maximum = TRUE)
  }
  results <- apply(votes, 2, fit_one)
  matrix(unlist(results), ncol = 2, byrow = TRUE)
}
cutpoints <- compute.cutpoints(irt$xbar[,1], rc$votes)
mean(cutpoints[,2])
cutpoints <- compute.cutpoints(irt$xbar[,2], rc$votes)
mean(cutpoints[,2])
################################################################
## IRT WITH STAN
################################################################
library(rstan)
stan.code <- '
data {
int<lower=1> J; // number of legislators
int<lower=1> K; // number of bills
int<lower=1> N; // number of observations
int<lower=1,upper=J> j[N]; // legislator for observation n
int<lower=1,upper=K> k[N]; // bill for observation n
int<lower=0,upper=1> y[N]; // vote of observation n
}
parameters {
real alpha[K];
real beta[K];
real theta[J];
}
model {
alpha ~ normal(0, 25);
beta ~ normal(0, 25);
theta ~ normal(0, 1);
for (n in 1:N)
y[n] ~ bernoulli_logit( theta[j[n]] * beta[k[n]] - alpha[k[n]] );
}
'
J <- dim(rc$votes)[1]
K <- dim(rc$votes)[2]
N <- length(rc$votes)
j <- rep(1:J, times=K)
k <- rep(1:K, each=J)
y <- c(rc$votes)
# deleting missing values
miss <- which(is.na(y))
N <- N - length(miss)
j <- j[-miss]
k <- k[-miss]
y <- y[-miss]
## data and initial values
stan.data <- list(J=J, K=K, N=N, j=j, k=k, y=y)
inits <- list(list(alpha=rnorm(K, 0, 2), beta=rnorm(K, 0, 2),
theta=ifelse(rc$legis.data$party=="R", 1, -1)))
stan.fit <- stan(model_code=stan.code, data=stan.data, iter=500, warmup=200,
chains=1, thin=2, inits=inits)
load("lab14_stan_irt.Rdata")
## convergence
traceplot(stan.fit, pars='theta', ask=TRUE)
## comparing with WNOMINATE and Jackman's ideal
estimates <- summary(stan.fit)
theta <- estimates$summary[paste0("theta[", 1:J, "]"),1]
par(mfrow=c(1,2))
plot(irt$xbar[,1], theta,
xlab="IRT ideal point (1D)", ylab="IRT ideal point (STAN)")
plot(nom$legislators$coord1D, theta,
xlab="W-NOMINATE (1D)", ylab="IRT ideal point (STAN)")
################################################################
## IRT WITH COVARIATES
################################################################
# Different ways of doing this...
# With Stan, it would be something like this:
stan.code <- '
data {
int<lower=1> J; // number of legislators
int<lower=1> K; // number of bills
int<lower=1> N; // number of observations
int<lower=1,upper=J> j[N]; // legislator for observation n
int<lower=1,upper=K> k[N]; // bill for observation n
int<lower=0,upper=1> y[N]; // vote of observation n
real party[J]; // party of legislator j (0 for D/I, 1 for R)
}
parameters {
real alpha[K];
real beta[K];
real theta[J]; # realized ideology
real gamma; # intercept for legislator ideology distr.
real beta_party; # effect of party ID
real<lower=0.1> omega; # sd of legislator ideology
}
model {
alpha ~ normal(0, 5);
beta ~ normal(0, 5);
beta_party ~ normal(0, 2);
omega ~ uniform(0, 1);
gamma ~ normal(0, 2);
for (i in 1:J){
theta[i] ~ normal(gamma + beta_party * party[i], omega);
};
for (n in 1:N)
y[n] ~ bernoulli_logit( theta[j[n]] * beta[k[n]] - alpha[k[n]] );
}
'
repub <- ifelse(rc$legis.data$party=="R", 0, 1)
stan.data <- list(J=J, K=K, N=N, j=j, k=k, y=y, party=repub)
inits <- list(list(alpha=rnorm(K, 0, 2), beta=rnorm(K, 0, 2),
theta=ifelse(rc$legis.data$party=="R", 1, -1),
beta_party=1, omega=0.5))
stan.fit <- stan(model_code=stan.code, data=stan.data, iter=500, warmup=200,
chains=1, thin=2, inits=inits)
# MCMCpack has a function to do this too
female <- ifelse(rc$legis.data$gender=="F", 1, 0)
mcmc <- MCMCirtHier1d(rc$votes, data.frame(republican=repub, female=female))
results <- summary(mcmc)
round(results$statistics['beta.republican',],2)
round(results$statistics['beta.female',], 2)
theta <- results$statistics[1:104,1]
plot(irt$xbar[,1], theta,
xlab="IRT ideal point (1D)", ylab="IRT ideal point with covariates (MCMCpack)")
|
## The following functions create a special object that stores a matrix and caches the inverse of the matrix.
## This function creates a special 'matrix' object that can cache its inverse.It first sets the value of the matrix and gets the value of the matrix, and then sets the value of the inverse of the matrix and gets the value of the inverse.
## Create a special "matrix" wrapper that can cache its inverse.
## Returns a list of four closures sharing one environment:
##   set(y)          -- replace the stored matrix and drop any cached inverse
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse in the cache
##   getinverse()    -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL
  set <- function(y) {
    x <<- y
    m <<- NULL  # invalidate the cache whenever the matrix changes
  }
  get <- function() x
  # FIX: the parameter was named 'solve', shadowing base::solve(); renamed to
  # 'inverse'. Callers (cacheSolve) pass it positionally, so this is safe.
  setinverse <- function(inverse) m <<- inverse
  getinverse <- function() m
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## This function computes the inverse of the special 'matrix' returned by 'makeCacheMatrix' above. If the inverse has already been calculated (and the matrix has not changed), then 'cacheSolve' should retrieve the inverse from the cache.
## Compute (or retrieve) the inverse of the special "matrix" object produced
## by makeCacheMatrix(). A previously cached inverse is returned directly
## (with a message); otherwise it is computed with solve(), cached, returned.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    data <- x$get()
    cached <- solve(data, ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | HarrietZhou/ProgrammingAssignment2 | R | false | false | 1,079 | r | ## The following functions create a special object that stores a matrix and caches the inverse of the matrix.
## This function creates a special 'matrix' object that can cache its inverse.It first sets the value of the matrix and gets the value of the matrix, and then sets the value of the inverse of the matrix and gets the value of the inverse.
## Create a special "matrix" wrapper that can cache its inverse.
## The four returned closures share one environment holding the matrix `x`
## and its cached inverse `m` (NULL until setinverse() is called).
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL
  set <- function(y) {
    x <<- y
    m <<- NULL  # a new matrix invalidates any cached inverse
  }
  get <- function() x
  # FIX: the parameter was named 'solve', shadowing base::solve(); renamed to
  # 'inverse'. Callers pass it positionally, so the interface is unchanged.
  setinverse <- function(inverse) m <<- inverse
  getinverse <- function() m
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## This function computes the inverse of the special 'matrix' returned by 'makeCacheMatrix' above. If the inverse has already been calculated (and the matrix has not changed), then 'cacheSolve' should retrieve the inverse from the cache.
## Return the inverse of a makeCacheMatrix() object, computing it at most
## once. On a cache hit a message is emitted and the stored inverse is
## returned; on a miss the inverse is computed, stored, then returned.
cacheSolve <- function(x, ...) {
  inverse <- x$getinverse()
  if (!is.null(inverse)) {
    message("getting cached data")
    return(inverse)
  }
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
|
# Matrix eQTL by Andrey A. Shabalin
# http://www.bios.unc.edu/research/genomic_software/Matrix_eQTL/
#
# Be sure to use an up to date version of R and Matrix eQTL.
# source("Matrix_eQTL_R/Matrix_eQTL_engine.r");
library(MatrixEQTL)
## Cis-eQTL mapping with Matrix eQTL on GTEx v8 whole-blood chr10 data,
## run once per covariate set; the covariate-set name comes from the
## command line.
## NOTE(review): args[[6]] assumes a fixed invocation shape (the covariate
## name landing in slot 6 of commandArgs()) -- confirm against the
## submission script before changing.
args <- commandArgs()
cov <- args[[6]]
## Settings
data.loc = "/work-zfs/abattle4/lab_data/GTEx_v8_eqtl_practice/matrix_eqtl/"
# Linear model to use, modelANOVA, modelLINEAR, or modelLINEAR_CROSS
useModel = modelLINEAR; # modelANOVA, modelLINEAR, or modelLINEAR_CROSS
# Genotype file name
SNP_file_name = paste0(data.loc, "Whole_Blood.v8.genotype.chr10.txt");
snps_location_file_name = paste0(data.loc, "Whole_Blood.v8.snp_location.chr10.txt");
# Gene expression file name
expression_file_name = paste0(data.loc, "Whole_Blood.v8.normalized_expression.txt");
gene_location_file_name = paste0(data.loc, "Whole_Blood.v8.gene_location.txt");
# Covariates file name
covariates_file_name = paste0("data/", cov, ".txt");
# Output file names: only cis results are kept; trans output goes to a
# throwaway tempfile (trans analysis is disabled by its 0 threshold below)
output_file_name_cis = paste0("matrixeqtl/cis.eqtl.", cov, ".txt");
output_file_name_tra = tempfile();
# Only associations significant at this level will be saved
# (cis threshold of 1 keeps every tested cis pair)
pvOutputThreshold_cis = 1;
pvOutputThreshold_tra = 0;
# Error covariance matrix
# Set to numeric() for identity.
errorCovariance = numeric();
# errorCovariance = read.table("Sample_Data/errorCovariance.txt");
# Distance for local gene-SNP pairs
cisDist = 1e6;
## Load genotype data
snps = SlicedData$new();
snps$fileDelimiter = "\t"; # the TAB character
snps$fileOmitCharacters = "-"; # denote missing values;
snps$fileSkipRows = 1; # one row of column labels
snps$fileSkipColumns = 1; # one column of row labels
snps$fileSliceSize = 2000; # read file in slices of 2,000 rows
snps$LoadFile(SNP_file_name);
## Load gene expression data
gene = SlicedData$new();
gene$fileDelimiter = "\t"; # the TAB character
gene$fileOmitCharacters = "NA"; # denote missing values;
gene$fileSkipRows = 1; # one row of column labels
gene$fileSkipColumns = 1; # one column of row labels
gene$fileSliceSize = 2000; # read file in slices of 2,000 rows
gene$LoadFile(expression_file_name);
## Load covariates
cvrt = SlicedData$new();
cvrt$fileDelimiter = "\t"; # the TAB character
cvrt$fileOmitCharacters = "NA"; # denote missing values;
cvrt$fileSkipRows = 1; # one row of column labels
cvrt$fileSkipColumns = 1; # one column of row labels
# NOTE(review): covariates_file_name is a length-1 string built above, so
# this guard is always TRUE; kept for parity with the Matrix eQTL template.
if(length(covariates_file_name)>0) {
  cvrt$LoadFile(covariates_file_name);
}
## Run the analysis
snpspos = read.table(snps_location_file_name, header = TRUE, stringsAsFactors = FALSE);
genepos = read.table(gene_location_file_name, header = TRUE, stringsAsFactors = FALSE);
# Filter out snps with MAF<0.01 (dosages are 0/1/2, so MAF = mean dosage / 2)
maf.list = vector('list', length(snps))
for(sl in seq_len(length(snps))) { # FIX: seq_len() avoids 1:0 on zero slices
  slice = snps[[sl]];
  maf.list[[sl]] = rowMeans(slice,na.rm=TRUE)/2;
  maf.list[[sl]] = pmin(maf.list[[sl]],1-maf.list[[sl]]);
}
maf = unlist(maf.list)
## Look at the distribution of MAF
cat('SNPs before filtering:',nrow(snps))
snps$RowReorder(maf>=0.01); # keep only rows passing the MAF filter
cat('SNPs after filtering:',nrow(snps))
me = Matrix_eQTL_main(
  snps = snps,
  gene = gene,
  cvrt = cvrt,
  output_file_name = output_file_name_tra,
  pvOutputThreshold = pvOutputThreshold_tra,
  useModel = useModel,
  errorCovariance = errorCovariance,
  verbose = TRUE,
  output_file_name.cis = output_file_name_cis,
  pvOutputThreshold.cis = pvOutputThreshold_cis,
  snpspos = snpspos,
  genepos = genepos,
  cisDist = cisDist,
  pvalue.hist = "qqplot",
  min.pv.by.genesnp = FALSE,
  noFDRsaveMemory = TRUE);
unlink(output_file_name_tra);
#unlink(output_file_name_cis);
## Results:
cat('Analysis done in: ', me$time.in.sec, ' seconds', '\n');
cat('Detected local eQTLs:', '\n');
show(me$cis$eqtls)
## Plot the Q-Q plot of local and distant p-values
png(paste0('plots/', cov, 'qq.png'))
plot(me)
dev.off()
| /hw1/code/eqtl_cov_input.R | no_license | jmp448/genomic-data-science | R | false | false | 3,891 | r | # Matrix eQTL by Andrey A. Shabalin
# http://www.bios.unc.edu/research/genomic_software/Matrix_eQTL/
#
# Be sure to use an up to date version of R and Matrix eQTL.
# source("Matrix_eQTL_R/Matrix_eQTL_engine.r");
library(MatrixEQTL)
args <- commandArgs()
cov <- args[[6]]
## Settings
data.loc = "/work-zfs/abattle4/lab_data/GTEx_v8_eqtl_practice/matrix_eqtl/"
# Linear model to use, modelANOVA, modelLINEAR, or modelLINEAR_CROSS
useModel = modelLINEAR; # modelANOVA, modelLINEAR, or modelLINEAR_CROSS
# Genotype file name
SNP_file_name = paste0(data.loc, "Whole_Blood.v8.genotype.chr10.txt");
snps_location_file_name = paste0(data.loc, "Whole_Blood.v8.snp_location.chr10.txt");
# Gene expression file name
expression_file_name = paste0(data.loc, "Whole_Blood.v8.normalized_expression.txt");
gene_location_file_name = paste0(data.loc, "Whole_Blood.v8.gene_location.txt");
# Covariates file name
covariates_file_name = paste0("data/", cov, ".txt");
# Output file name
output_file_name_cis = paste0("matrixeqtl/cis.eqtl.", cov, ".txt");
output_file_name_tra = tempfile();
# Only associations significant at this level will be saved
pvOutputThreshold_cis = 1;
pvOutputThreshold_tra = 0;
# Error covariance matrix
# Set to numeric() for identity.
errorCovariance = numeric();
# errorCovariance = read.table("Sample_Data/errorCovariance.txt");
# Distance for local gene-SNP pairs
cisDist = 1e6;
## Load genotype data
snps = SlicedData$new();
snps$fileDelimiter = "\t"; # the TAB character
snps$fileOmitCharacters = "-"; # denote missing values;
snps$fileSkipRows = 1; # one row of column labels
snps$fileSkipColumns = 1; # one column of row labels
snps$fileSliceSize = 2000; # read file in slices of 2,000 rows
snps$LoadFile(SNP_file_name);
## Load gene expression data
gene = SlicedData$new();
gene$fileDelimiter = "\t"; # the TAB character
gene$fileOmitCharacters = "NA"; # denote missing values;
gene$fileSkipRows = 1; # one row of column labels
gene$fileSkipColumns = 1; # one column of row labels
gene$fileSliceSize = 2000; # read file in slices of 2,000 rows
gene$LoadFile(expression_file_name);
## Load covariates
cvrt = SlicedData$new();
cvrt$fileDelimiter = "\t"; # the TAB character
cvrt$fileOmitCharacters = "NA"; # denote missing values;
cvrt$fileSkipRows = 1; # one row of column labels
cvrt$fileSkipColumns = 1; # one column of row labels
if(length(covariates_file_name)>0) {
cvrt$LoadFile(covariates_file_name);
}
## Run the analysis
snpspos = read.table(snps_location_file_name, header = TRUE, stringsAsFactors = FALSE);
genepos = read.table(gene_location_file_name, header = TRUE, stringsAsFactors = FALSE);
# Filter out snps with MAF<0.01
maf.list = vector('list', length(snps))
for(sl in 1:length(snps)) {
slice = snps[[sl]];
maf.list[[sl]] = rowMeans(slice,na.rm=TRUE)/2;
maf.list[[sl]] = pmin(maf.list[[sl]],1-maf.list[[sl]]);
}
maf = unlist(maf.list)
## Look at the distribution of MAF
cat('SNPs before filtering:',nrow(snps))
snps$RowReorder(maf>=0.01);
cat('SNPs after filtering:',nrow(snps))
me = Matrix_eQTL_main(
snps = snps,
gene = gene,
cvrt = cvrt,
output_file_name = output_file_name_tra,
pvOutputThreshold = pvOutputThreshold_tra,
useModel = useModel,
errorCovariance = errorCovariance,
verbose = TRUE,
output_file_name.cis = output_file_name_cis,
pvOutputThreshold.cis = pvOutputThreshold_cis,
snpspos = snpspos,
genepos = genepos,
cisDist = cisDist,
pvalue.hist = "qqplot",
min.pv.by.genesnp = FALSE,
noFDRsaveMemory = TRUE);
unlink(output_file_name_tra);
#unlink(output_file_name_cis);
## Results:
cat('Analysis done in: ', me$time.in.sec, ' seconds', '\n');
cat('Detected local eQTLs:', '\n');
show(me$cis$eqtls)
## Plot the Q-Q plot of local and distant p-values
png(paste0('plots/', cov, 'qq.png'))
plot(me)
dev.off()
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 2113
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 2113
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query25_query08_1344.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 790
c no.of clauses 2113
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 2113
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query25_query08_1344.qdimacs 790 2113 E1 [] 0 16 774 2113 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query25_query08_1344/query25_query08_1344.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 708 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 2113
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 2113
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query25_query08_1344.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 790
c no.of clauses 2113
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 2113
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query25_query08_1344.qdimacs 790 2113 E1 [] 0 16 774 2113 NONE
|
library(tidyverse)
library(skimr)
library(corrplot)
library(Cairo)
library(gganimate)
library(glue)
theme_set(theme_minimal())
# TidyTuesday 2019-10-29: NYC Squirrel Census exploration + animated map.
nyc_squirrels <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-10-29/nyc_squirrels.csv")
skim(nyc_squirrels)
# Tidy data (ish, its pretty good already)
tidy_squirrels <-
  nyc_squirrels %>%
  mutate(date = lubridate::mdy(date))
# Correlations between squirrel actions (logical columns coded as 0/1).
# FIX: select_if() is superseded and mutate_each() is defunct in current
# dplyr; replaced with select(where(...)) and mutate(across(...)).
behaviors <- nyc_squirrels %>%
  select(where(is.logical)) %>%
  mutate(across(everything(), as.integer))
# NOTE(review): cor() yields NA entries if any behavior column contains NA
# -- confirm the logical columns are complete.
behaviors_corrs <- cor(behaviors)
corrplot(
  behaviors_corrs,
  method = 'square',
  order = 'FPC',
  cl.pos = 'b',
  tl.col = 'grey20',
  tl.cex = 0.5,
  tl.pos = 'd',
  col = c('black', 'white'),
  bg = 'grey',
  diag = TRUE
)
# animate -----------------------------------------------------------------
p <- tidy_squirrels %>%
  filter(age %in% c('Adult', 'Juvenile')) %>%
  ggplot(aes(long, lat)) +
  geom_point(aes(color = age), alpha = 0.6, size = 6, shape = 17) +
  ggtitle('test title') +  # placeholder; the animation sets its own title
  labs(x = 'Longitude', y = 'Latitude') +  # FIX: typo "Latititude"
  ggthemes::scale_color_canva(palette = 'Summer sunflower') +
  theme_void() +
  theme(plot.title = element_text(size = 26, hjust = 0.5))
p
anim <- p +
  transition_states(date, transition_length = 20, state_length = 10) +
  ease_aes('cubic-in-out') +
  ggtitle('Squirrel locations on {closest_state}')
animate(anim, fps = 5.5)
anim_save('SquirrelPositions.gif', anim, fps = 5.5)
| /R/2019/2019_Week44_NYCSquirrels.R | no_license | MaiaPelletier/tidytuesday | R | false | false | 1,475 | r | library(tidyverse)
library(skimr)
library(corrplot)
library(Cairo)
library(gganimate)
library(glue)
theme_set(theme_minimal())
nyc_squirrels <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-10-29/nyc_squirrels.csv")
skim(nyc_squirrels)
# Tidy data (ish, its pretty good already)
tidy_squirrels <-
nyc_squirrels %>%
mutate(date = lubridate::mdy(date))
# Correlations between squirrel actions
behaviors <- nyc_squirrels %>%
select_if(is.logical) %>%
mutate_each(as.integer)
behaviors_corrs <- cor(behaviors)
corrplot(
behaviors_corrs,
method = 'square',
order = 'FPC',
cl.pos = 'b',
tl.col = 'grey20',
tl.cex = 0.5,
tl.pos = 'd',
col = c('black', 'white'),
bg = 'grey',
diag = TRUE
)
# animate -----------------------------------------------------------------
p <- tidy_squirrels %>%
filter(age %in% c('Adult', 'Juvenile')) %>%
ggplot(aes(long, lat)) +
geom_point(aes(color = age), alpha = 0.6, size = 6, shape = 17) +
ggtitle('test title')+
labs(x = 'Longitude', y = 'Latititude') +
ggthemes::scale_color_canva(palette = 'Summer sunflower') +
theme_void() +
theme(plot.title = element_text(size = 26, hjust = 0.5))
p
anim <- p +
transition_states(date, transition_length = 20, state_length = 10) +
ease_aes('cubic-in-out') +
ggtitle('Squirrel locations on {closest_state}')
animate(anim, fps = 5.5)
anim_save('SquirrelPositions.gif', anim, fps = 5.5)
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Caching the Inverse of a Matrix:
## Matrix inversion is usually a costly computation and there may be some
## benefit to caching the inverse of a matrix rather than compute it repeatedly.
## Below are a pair of functions that are used to create a special object that
## stores a matrix and caches its inverse.
## This function creates a special "matrix" object that can cache its inverse.
## Wrap a matrix in a caching container: a list of closures sharing one
## environment that holds the matrix `x` and its lazily computed inverse.
## Setting a new matrix clears the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL  # any cached inverse is stale once the matrix changes
    },
    get = function() x,
    setInverse = function(inverse) inv <<- inverse,
    getInverse = function() inv
  )
}
## Write a short comment describing this function
## This function computes the inverse of the special "matrix" created by
## makeCacheMatrix above. If the inverse has already been calculated (and the
## matrix has not changed), then it should retrieve the inverse from the cache.
## Return the inverse of a makeCacheMatrix() object, computing it at most
## once: a cache hit emits "getting cached data" and returns the stored
## inverse; a miss computes it with solve(), stores it, and returns it.
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  result <- solve(x$get(), ...)
  x$setInverse(result)
  result
  ## Return a matrix that is the inverse of 'x'
}
| /cachematrix.R | no_license | VEDHA2001/ProgrammingAssignment2 | R | false | false | 1,531 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Caching the Inverse of a Matrix:
## Matrix inversion is usually a costly computation and there may be some
## benefit to caching the inverse of a matrix rather than compute it repeatedly.
## Below are a pair of functions that are used to create a special object that
## stores a matrix and caches its inverse.
## This function creates a special "matrix" object that can cache its inverse.
## Build a caching "matrix": four accessor closures over a shared
## environment containing the matrix `x` and a cached inverse `inv`
## (NULL until computed). Replacing the matrix resets the cache.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL  # drop stale inverse
  }
  get <- function() x
  setInverse <- function(inverse) inv <<- inverse
  getInverse <- function() inv
  list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## Write a short comment describing this function
## This function computes the inverse of the special "matrix" created by
## makeCacheMatrix above. If the inverse has already been calculated (and the
## matrix has not changed), then it should retrieve the inverse from the cache.
## Inverse of the caching "matrix" created by makeCacheMatrix(): retrieve
## the cached inverse when available (announcing the cache hit), otherwise
## compute, memoize and return it. Extra arguments go to solve().
cacheSolve <- function(x, ...) {
  inv <- x$getInverse()
  if (is.null(inv)) {
    inv <- solve(x$get(), ...)
    x$setInverse(inv)
  } else {
    message("getting cached data")
  }
  inv
}
|
# Invert the LDR root-distribution model: given the cumulated root
# proportion V reached at depth Z and the 95% rooting depth Z95, return the
# matching Z50 (the depth containing 50% of the roots). Vectorized over V,
# Z and Z95 via standard recycling.
.root_ldrZ50 <- function(V,Z,Z95){
  # The exponent a reaches 1 at V = 0.9497887 (log(V/(1-V)) = 2.94), making
  # 1/(1-a) blow up; Z == Z95 makes the inversion degenerate as well.
  # FIX: any(cond) instead of sum(cond) > 0 (same behavior, clearer idiom).
  if (any(V >= 0.9497887)) {stop("The function is not defined for V >= 0.9497887")}
  if (any(Z == Z95)) {stop("The function is not defined for Z = Z95")}
  a <- log(V/(1-V))/2.94
  Z50 <- (Z/Z95^a)^(1/(1-a))
  return(Z50)
}
# Build the numerical inverse of a monotone function `f` over [lower, upper].
# The returned closure solves f(x) = y with uniroot() (interval extended as
# needed) and returns, like the original, a one-element list holding $root.
.inverse <- function(f, lower, upper) {
  function(y) {
    residual <- function(x) f(x) - y
    uniroot(residual, lower = lower, upper = upper, extendInt = "yes")[1]
  }
}
#' Grid exploration of root-distribution parameters with the soil water
#' balance model. For every cohort and every combination of V1 (proportion
#' of fine roots in the topmost soil layer) and RZ (rooting depth, mm), the
#' soil is truncated to depth RZ, an LDR root distribution is built, spwb()
#' is run over `meteo`, and three summaries are stored per combination:
#' mean transpiration (E), mean (net) photosynthesis (An) and the mean
#' annual minimum plant water potential (PsiMin).
#' Returns a list of class "spwb_ldrExploration" holding the parameter
#' grids (cohorts, RZ, V1, Z50) and the cohort x V1 x RZ arrays E, An,
#' PsiMin. Extra arguments in `...` are forwarded to spwb().
spwb_ldrExploration<-function(x, soil, meteo, cohorts = NULL,
                              RZmin = 301, RZmax = 4000, V1min = 0.01, V1max = 0.94, resolution = 10,
                              heat_stop = 0, transformation = "identity", verbose = FALSE,
                              ...) {
  # define the days to keep in the analysis (first `heat_stop` days dropped)
  op_days <- (heat_stop+1):nrow(meteo)
  # Define the values of Z50 and Z90 to be explored.
  # The RZ grid is evenly spaced on the `transformation` scale and mapped
  # back numerically through .inverse() (uniroot-based inversion).
  trans <- function(x) do.call(transformation, list(x))
  inverse_trans <- .inverse(trans, lower = 0.01, upper = 100) # inverse of the function used for the transformation
  if(RZmax > soil$SoilDepth){
    if(verbose) cat("\n RZmax is larger than soil depth\n")
  }
  RZ_trans <- seq(trans(RZmin), trans(RZmax), length.out = resolution)
  RZ <- as.numeric(unlist(sapply(RZ_trans, FUN = inverse_trans)))
  # the case where RZ = Z1 will create problems when using the LDR model -> remove if it exists
  Z1 <- soil$dVec[1]
  if(sum(RZ == Z1) > 0){
    if(verbose) cat("\nThe function to derive the root proportion in each soil layer is not defined for RZ = Z1 (depth of the first soil layer)\n",
                    "This value is removed\n",
                    paste("Resolution is now\n", resolution-1))
    RZ <- RZ[-which(RZ == Z1)]
  }
  # Clamp the V1 bounds to the domain where the LDR inversion is defined
  if(V1max >= 0.9497887){
    if(verbose) cat("\nThe function to derive the root proportion in each soil layer is only defined for V1 c ]0,0.949[\nV1max is set to 0.94\n")
    V1max <- 0.94
  }
  if(V1min <= 0){
    if(verbose) cat("\nThe function to derive the root proportion in each soil layer is only defined for V1 c ]0,0.949[\nV1min is set to 0.001\n")
    V1min <- 0.001
  }
  V1 <- seq(V1min,V1max,length.out = length(RZ)) # the proportion of root in the first soil layer
  # Create a matrix with V1 as rows and RZ as column, filled with logical values to indicate the parameter combinations to explore
  mExplore <- matrix(T, nrow = length(V1), ncol = length(RZ), dimnames = list(V1 = V1, RZ = RZ))
  # mExplore[lower.tri(mExplore, diag = T)] <- F
  # Calculate Z50 for every (V1, RZ) cell by inverting the LDR model
  Z50 <- .root_ldrZ50(V = array(V1,dim = dim(mExplore)), Z = array(Z1, dim = dim(mExplore)), Z95 = t(array(RZ, dim = dim(mExplore))))
  dimnames(Z50) <- dimnames(mExplore)
  # Prepare array for V (per-layer root proportions for each combination)
  V <- array(dim = c(length(soil$dVec),length(V1), length(RZ)),
             dimnames = list(layer = 1:length(soil$dVec), V1 = V1, RZ = RZ))
  # Sum LAI of all species
  x$above$LAI_live <- sum(x$above$LAI_live)
  x$above$LAI_expanded <- sum(x$above$LAI_expanded)
  x$above$LAI_dead <- sum(x$above$LAI_dead)
  # Data outputs
  if(is.null(cohorts)) cohorts = row.names(x$cohorts)
  # Three identically-shaped output arrays: cohort x V1 x RZ
  An<-E <- PsiMin <- array(dim = c(length(cohorts), length(V1), length(RZ)), dimnames = list(cohort = cohorts, V1 = V1, RZ = RZ))
  # Start loop over every (V1, RZ) cell flagged for exploration
  cc <- which(mExplore == T, arr.ind = T)
  # Reset input state before the simulations
  resetInputs(x, soil)
  for(ci in 1:length(cohorts)){
    coh = cohorts[ci]
    sp = which(row.names(x$cohorts)==coh)
    cat(paste("Exploring root distribution of cohort", coh,"(", x$cohorts$Name[sp],"):\n"))
    # Build a single-cohort copy of the input object so each cohort is
    # simulated in isolation
    x_1sp <- x
    x_1sp$cohorts <- x$cohorts[sp,,drop = FALSE]
    x_1sp$above <- x$above[sp,,drop = FALSE]
    x_1sp$below <- x$below
    x_1sp$below$V <- x$below$V[sp,,drop = FALSE]
    x_1sp$paramsInterception <- x$paramsInterception[sp,,drop = FALSE]
    x_1sp$paramsTransp <- x$paramsTransp[sp,,drop = FALSE]
    x_1sp$Transpiration <- x$Transpiration[sp,drop = FALSE]
    x_1sp$Photosynthesis <- x$Photosynthesis[sp,drop = FALSE]
    if(x_1sp$control$transpirationMode=="Granier") {
      x_1sp$PLC <- x$PLC[sp,drop = FALSE]
    } else {
      # Non-Granier (hydraulics) mode: subset the hydraulic state as well.
      # NOTE(review): VGrhizo_kmax and VCroot_kmax are copied from
      # x$below$V here rather than from the homonymous matrices -- confirm
      # this is intended and not a copy-paste slip.
      x_1sp$below$VGrhizo_kmax <- x$below$V[sp,,drop = FALSE]
      x_1sp$below$VCroot_kmax <- x$below$V[sp,,drop = FALSE]
      x_1sp$paramsAnatomy <- x$paramsAnatomy[sp,,drop = FALSE]
      x_1sp$paramsWaterStorage <- x$paramsWaterStorage[sp,,drop = FALSE]
      x_1sp$StemPLC <- x$StemPLC[sp,drop = FALSE]
      x_1sp$Einst <- x$Einst[sp,drop = FALSE]
      x_1sp$RhizoPsi <- x$RhizoPsi[sp,,drop = FALSE]
      x_1sp$RootCrownPsi <- x$RootCrownPsi[sp,drop = FALSE]
      x_1sp$StemSympPsi <- x$StemSympPsi[sp,drop = FALSE]
      x_1sp$StemPsi1 <- x$StemPsi1[sp,drop = FALSE]
      x_1sp$StemPsi2 <- x$StemPsi2[sp,drop = FALSE]
      x_1sp$LeafSympPsi <- x$LeafSympPsi[sp,drop = FALSE]
      x_1sp$LeafPsi <- x$LeafPsi[sp,drop = FALSE]
    }
    x_1sp$control$verbose <- F
    pb <- txtProgressBar(max = nrow(cc), style = 3)
    for(row in 1:nrow(cc)){
      i <- cc[row,1]
      j <- cc[row,2]
      # Update the depth of the different soil layer to match RZ
      s. <- soil
      s.$SoilDepth <- RZ[j]
      dCum <- cumsum(s.$dVec)
      layersWithinRZ <- dCum < RZ[j]
      layersWithinRZ <- c(T,layersWithinRZ[-length(layersWithinRZ)])
      s.$dVec <- s.$dVec[layersWithinRZ] # remove the layers not included
      nl <- length(s.$dVec) #new number of layers
      s.$dVec[nl] <- s.$dVec[nl]-dCum[nl]+RZ[j] # adjust the width of the last layer
      # s.$Water_FC[nl] = soil$Water_FC[nl]*(s.$dVec[nl]/soil$dVec[nl]) #Adjust volume of the last layer
      # Adjust the other soil parameters to the new number of layers
      s.[["sand"]] <- s.[["sand"]][1:nl]
      s.[["clay"]] <- s.[["clay"]][1:nl]
      s.[["om"]] <- s.[["om"]][1:nl]
      s.[["rfc"]] <- s.[["rfc"]][1:nl]
      s.[["macro"]] <- s.[["macro"]][1:nl]
      s.[["W"]] <- s.[["W"]][1:nl]
      s.[["Temp"]] <- s.[["Temp"]][1:nl]
      s.[["VG_alpha"]] <- s.[["VG_alpha"]][1:nl]
      s.[["VG_theta_res"]] <- s.[["VG_theta_res"]][1:nl]
      s.[["VG_theta_sat"]] <- s.[["VG_theta_sat"]][1:nl]
      s.[["Ksat"]] <- s.[["Ksat"]][1:nl]
      V[,i,j] <- 0
      x_1sp$below$V = x$below$V[sp,1:nl,drop = FALSE]
      # Replace the root distribution by the LDR profile for (Z50, Z95 = RZ)
      x_1sp$below$V[1,] <- root_ldrDistribution(Z50 = Z50[i,j], Z95 = RZ[j], d=s.$dVec)
      V[1:length(x_1sp$below$V),i,j] <- x_1sp$below$V
      s_res <- spwb(x = x_1sp, meteo = meteo, soil = s., ...)
      # Outputs
      years <- substr(as.Date(rownames(meteo)), start = 1, stop = 4)
      # n-day moving average used to smooth the daily potential series.
      # NOTE(review): `filter` is intended to be stats::filter here; beware
      # of dplyr masking it when this code is sourced interactively.
      ma <- function(x,n=10){
        f = filter(x,rep(1/n,n), method = "convolution", sides = 2)
        f = f[!is.na(f)]
        # print(sum(is.na(f)))
        f
      }
      # PsiMin: mean over years of the annual minimum of the smoothed series
      if(x_1sp$control$transpirationMode=="Granier") {
        PsiMin[ci,i,j] <- mean(aggregate(s_res$Plants$PlantPsi[op_days],
                                         by = list(years[op_days]),
                                         FUN = function(x) min(ma(x)))$x)
      } else {
        PsiMin[ci,i,j] <- mean(aggregate(s_res$Plants$StemPsi[op_days],
                                         by = list(years[op_days]),
                                         FUN = function(x) min(ma(x)))$x)
      }
      # if(verbose) print(s_res$spwbInput)
      E[ci,i,j] <- mean(s_res$Plants$Transpiration[op_days], na.rm=T)
      if(x_1sp$control$transpirationMode=="Granier") {
        An[ci,i,j] <- mean(s_res$Plants$Photosynthesis[op_days], na.rm=T)
      } else {
        An[ci,i,j] <- mean(s_res$Plants$NetPhotosynthesis[op_days], na.rm=T)
      }
      setTxtProgressBar(pb, row)
    }
    cat("\n")
  }
  res <-list(cohorts = cohorts, RZ = RZ, V1 = V1, Z50 = Z50, E = E, An = An, PsiMin = PsiMin)
  class(res)<-list("spwb_ldrExploration","list")
  return(res)
}
#' Select, for each cohort, the root-distribution parameters (Z50, Z95, V1)
#' that best satisfy a water-stress constraint, given the grids produced by
#' spwb_ldrExploration().
#'
#' y         object of class "spwb_ldrExploration" (grids + E/An/PsiMin)
#' psi_crit  critical minimum water potential (one value per cohort in `y`)
#' opt_mode  selection rule:
#'   1 - maximum E among the cells lying on the psi_crit iso-line
#'   2 - maximum E  among combinations with PsiMin > psi_crit
#'   3 - maximum An among combinations with PsiMin > psi_crit
#'   4 - shallowest RZ (then largest V1) with E  >= 95% of the mode-2 optimum
#'   5 - shallowest RZ (then largest V1) with An >= 95% of the mode-3 optimum
#'
#' Returns a data frame (one row per cohort, named by cohort) with columns
#' psi_crit, Z50, Z95 and V1 (NA when no combination reaches psi_crit in
#' mode 1, with a warning).
spwb_ldrOptimization<-function(y, psi_crit, opt_mode = 1) {
  E = y$E
  An = y$An
  PsiMin = y$PsiMin
  V1 = y$V1
  RZ = y$RZ
  Z50 = y$Z50
  cohorts = y$cohorts
  if(length(psi_crit)!= length(cohorts)) stop("The length of 'psi_crit' must be equal to the number of cohorts in 'y'.")
  optim <- data.frame(psi_crit = psi_crit, Z50 = NA, Z95 = NA, V1 = NA)
  row.names(optim) = cohorts
  for (i in seq_along(cohorts)){  # FIX: seq_along() instead of 1:length()
    # 2-D slices (V1 x RZ) for cohort i
    psimin <- PsiMin[i,,]
    e <- E[i,,]
    an <- An[i,,]
    if(opt_mode==1) {
      # emax <- max(e)
      # e[e >= emax-0.05*emax] <- emax - 0.05*emax
      # cost <- (matrix(z, ncol = 1)%*%(1-v) + matrix(300, ncol = 1, nrow = length(z))%*%v)^(3/2)
      # Mark cells satisfying psi_crit, then keep only the cells sitting on
      # the boundary between satisfied and unsatisfied regions.
      supinf <- matrix(0, ncol = ncol(psimin), nrow = nrow(psimin))
      supinf[psimin >= psi_crit[i]] <- 1
      # Differences with the four neighbours flag the boundary cells
      subselb <- rbind(supinf[-nrow(supinf),]-supinf[-1,], rep(0, ncol(supinf)))
      subselt <- rbind(rep(0, ncol(supinf)), supinf[-1,]-supinf[-nrow(supinf),])
      subsell <- cbind(rep(0, nrow(supinf)), supinf[,-1]-supinf[,-ncol(supinf)])
      subselr <- cbind(supinf[,-ncol(supinf)]-supinf[,-1], rep(0, nrow(supinf)))
      sel <- matrix(FALSE, ncol = ncol(psimin), nrow = nrow(psimin))  # FIX: TRUE/FALSE, not T/F
      sel[subselb == 1 | subselt == 1 | subsell == 1 | subselr == 1] <- TRUE
      if(length(e[sel])==0) {
        # FIX: the original interpolated row.names(cohorts)[i], which is NULL
        # for a character vector of cohort names (the name was dropped from
        # the message); use the cohort name itself.
        warning(paste("Psi value", psi_crit[i], "for cohort", cohorts[i], "not reached for any combination."))
        optim[i,] <- NA
      } else {
        point <- which(sel & e == max(e[sel]), arr.ind = TRUE)
        optim$Z50[i] <- Z50[point[1], point[2]]
        optim$V1[i] <- V1[point[1]]
        optim$Z95[i] <- RZ[point[2]]
      }
    }
    else if(opt_mode==2) {
      selPsi = (psimin > psi_crit[i]) # Select combinations with less stress than psi_crit
      if(sum(selPsi)==0) selPsi = (psimin == max(psimin)) # If none, select combination of minimum stress
      maxE = max(e[selPsi]) # Find maximum transpiration (among combinations selected)
      sel2 = selPsi & (e == maxE) # Select combination with maximum transpiration
      point = which(sel2, arr.ind = TRUE)
      optim$Z50[i] <- Z50[point[1], point[2]]
      optim$V1[i] <- V1[point[1]]
      optim$Z95[i] <- RZ[point[2]]
    }
    else if(opt_mode==3) {
      selPsi = (psimin > psi_crit[i]) # Select combinations with less stress than psi_crit
      if(sum(selPsi)==0) selPsi = (psimin == max(psimin)) # If none, select combination of minimum stress
      maxAn = max(an[selPsi]) # Find maximum photosynthesis (among combinations selected)
      sel2 = selPsi & (an == maxAn) # Select combinations with maximum photosynthesis
      point = which(sel2, arr.ind = TRUE)
      optim$Z50[i] <- Z50[point[1], point[2]]
      optim$V1[i] <- V1[point[1]]
      optim$Z95[i] <- RZ[point[2]]
    }
    else if(opt_mode==4) {
      # NOTE: relies on the dimnames "V1"/"RZ" set by spwb_ldrExploration(),
      # which which(..., arr.ind = TRUE) uses as column names here.
      selPsi = (psimin > psi_crit[i]) # Select combinations with less stress than psi_crit
      if(sum(selPsi)==0) selPsi = (psimin == max(psimin)) # If none, select combination of minimum stress
      maxE = max(e[selPsi]) # Find maximum transpiration (among combinations selected)
      sel2 = selPsi & (e >= maxE*0.95) # Select combinations with > 95% of maximum transpiration
      points = as.data.frame(which(sel2, arr.ind = TRUE))
      minZ = min(points$RZ, na.rm=TRUE) # Minimum rooting depth
      maxV1 = max(points$V1[points$RZ==minZ], na.rm=TRUE) # Maximum V1
      point = c(maxV1, minZ)
      optim$Z50[i] <- Z50[point[1], point[2]]
      optim$V1[i] <- V1[point[1]]
      optim$Z95[i] <- RZ[point[2]]
    }
    else if(opt_mode==5) {
      # Same as mode 4 but driven by photosynthesis instead of transpiration
      selPsi = (psimin > psi_crit[i]) # Select combinations with less stress than psi_crit
      if(sum(selPsi)==0) selPsi = (psimin == max(psimin)) # If none, select combination of minimum stress
      maxAn = max(an[selPsi]) # Find maximum photosynthesis (among combinations selected)
      sel2 = selPsi & (an >= maxAn*0.95) # Select combinations with > 95% of maximum photosynthesis
      points = as.data.frame(which(sel2, arr.ind = TRUE))
      minZ = min(points$RZ, na.rm=TRUE) # Minimum rooting depth
      maxV1 = max(points$V1[points$RZ==minZ], na.rm=TRUE) # Maximum V1
      point = c(maxV1, minZ)
      optim$Z50[i] <- Z50[point[1], point[2]]
      optim$V1[i] <- V1[point[1]]
      optim$Z95[i] <- RZ[point[2]]
    }
  }
  return(optim)
}
# Function for plotting the outputs of spwb_ldrOptimization
# works with the libraries ggplot2, reshape and viridis
# x is the output of the function spwb_ldrOptimization with explore_out = T
# .plot.ldrOptimization <- function(x, SP = 1, raster_var = "E", contour_var = "E", special_breaks_var = "Psi",
# legend_pos = c(1,1), xaxis_pos = "bottom", yaxis_pos = "left", special_breaks = 0, axis_trans = "identity"){
#
# Psi.xyz <- melt(x$explore_out$PsiMin[SP,,])
# E.xyz <- melt(x$explore_out$E[SP,,]*365)
# xy <- Psi.xyz[,c("V1", "RZ")]
#
# # Raster layer
# if(raster_var == "Psi"){
# leg_title <- expression(paste(Psi[min],"(MPa)"))
# data_raster <- Psi.xyz
# }
# if(raster_var == "E"){
# leg_title <- expression(paste("E (mm ", yr^{-1}, ")"))
# data_raster <- E.xyz
# }
#
# # Contour layer
# if(contour_var == "Psi"){
# data_contour <- Psi.xyz
# bw1 <- 1
# bw2 <- 0.2
# }
# if(contour_var == "E"){
# data_contour <- E.xyz
# bw1 <- 50
# bw2 <- 10
# }
#
# # Add special break
# if(special_breaks_var == "Psi"){
# data_special_breaks <- Psi.xyz
# }
# if(special_breaks_var == "E"){
# data_special_breaks <- E.xyz
# }
#
# # Optimized parameters
# x$optim$RZ <- x$optim$Z95
#
# # Plot
# p <- ggplot(xy, aes(x = RZ, y = V1))+
# geom_raster(data = data_raster, aes(fill = value))+
# geom_contour(data = data_contour, aes(z = value), colour = "white", binwidth = bw1, size = 1)+
# geom_contour(data = data_contour, aes(z = value), colour = "white", binwidth = bw2, size = 0.5)+
# geom_contour(data = data_special_breaks, aes(z = value), colour = "red", breaks = special_breaks, size = 1)+
# geom_point(data = x$optim[SP,], aes(x = RZ, y = V1), color = "black", fill = "red", shape = 21, size = 4, inherit.aes = F)+
# scale_fill_viridis(name = leg_title)+
# coord_cartesian(expand = F)+
# ylab(expression(paste(V[1])))+
# xlab(expression(paste(RZ, "(mm)")))+
# theme_bw()+
# theme(legend.position = legend_pos, legend.justification = legend_pos, legend.background = element_rect(fill = rgb(1,1,1,0.7)))+
# scale_x_continuous(position = xaxis_pos, trans = axis_trans)+
# scale_y_continuous(position = yaxis_pos, trans = "identity")
#
# return(p)
# } | /medfate/R/spwb_ldrOptimization.R | no_license | akhikolla/TestedPackages-NoIssues | R | false | false | 14,809 | r | # Function to calculate Z50 knowing Z95 and the cumulated root proportion V at a given depth Z
# Invert the LDR root-distribution model: given the cumulated root
# proportion V reached at depth Z and the 95% rooting depth Z95, return the
# matching Z50. Vectorized over V, Z and Z95 via standard recycling.
.root_ldrZ50 <- function(V,Z,Z95){
  # The exponent a reaches 1 at V = 0.9497887 (log(V/(1-V)) = 2.94), making
  # 1/(1-a) blow up; Z == Z95 makes the inversion degenerate as well.
  # FIX: any(cond) instead of sum(cond) > 0 (same behavior, clearer idiom).
  if (any(V >= 0.9497887)) {stop("The function is not defined for V >= 0.9497887")}
  if (any(Z == Z95)) {stop("The function is not defined for Z = Z95")}
  a <- log(V/(1-V))/2.94
  Z50 <- (Z/Z95^a)^(1/(1-a))
  return(Z50)
}
# Numerically invert a monotone function `f` over [lower, upper]: the
# returned closure solves f(x) = y via uniroot() (extending the interval
# when needed) and yields a one-element list containing $root, exactly as
# the original implementation did.
.inverse <- function(f, lower, upper) {
  function(y) {
    shifted <- function(x) f(x) - y
    uniroot(shifted, lower = lower, upper = upper, extendInt = "yes")[1]
  }
}
# Grid exploration of the LDR root-distribution parameter space.
# For every combination of rooting depth (RZ, used as Z95) and proportion of
# roots in the first soil layer (V1), each cohort is simulated ALONE with
# spwb() (the other cohorts are removed but total LAI is preserved), and
# summary outputs are stored in 2-D arrays per cohort.
#
# Arguments:
#   x              - spwb input object (cohorts/above/below/params tables)
#   soil           - soil object (dVec, SoilDepth, texture vectors, ...)
#   meteo          - daily weather data frame passed to spwb()
#   cohorts        - cohort names to explore (default: all cohorts in x)
#   RZmin, RZmax   - bounds (mm) of the explored rooting depth (Z95)
#   V1min, V1max   - bounds of the explored proportion of roots in layer 1
#   resolution     - number of grid steps per axis
#   heat_stop      - number of initial days excluded from the summaries
#   transformation - name of the function used to space the RZ sequence
#                    (e.g. "identity" or "log")
#   verbose        - print diagnostic messages about clamped parameters?
#   ...            - further arguments forwarded to spwb()
#
# Returns a list of class 'spwb_ldrExploration' with the explored axes
# (RZ, V1), the derived Z50 matrix, and arrays E (mean transpiration),
# An (mean photosynthesis) and PsiMin (mean annual minimum water potential),
# each indexed by (cohort, V1, RZ).
spwb_ldrExploration<-function(x, soil, meteo, cohorts = NULL,
                              RZmin = 301, RZmax = 4000, V1min = 0.01, V1max = 0.94, resolution = 10,
                              heat_stop = 0, transformation = "identity", verbose = FALSE,
                              ...) {
  # define the days to keep in the analysis
  op_days <- (heat_stop+1):nrow(meteo)
  # Define the values of Z50 and Z90 to be explored
  trans <- function(x) do.call(transformation, list(x))
  inverse_trans <- .inverse(trans, lower = 0.01, upper = 100) # inverse of the function used for the transformation
  if(RZmax > soil$SoilDepth){
    if(verbose) cat("\n RZmax is larger than soil depth\n")
  }
  # RZ values are equally spaced on the transformed scale, then mapped back
  RZ_trans <- seq(trans(RZmin), trans(RZmax), length.out = resolution)
  RZ <- as.numeric(unlist(sapply(RZ_trans, FUN = inverse_trans)))
  # the case where RZ = Z1 will create problems when using the LDR model -> remove if it exists
  Z1 <- soil$dVec[1]
  if(sum(RZ == Z1) > 0){
    if(verbose) cat("\nThe function to derive the root proportion in each soil layer is not defined for RZ = Z1 (depth of the first soil layer)\n",
                    "This value is removed\n",
                    paste("Resolution is now\n", resolution-1))
    RZ <- RZ[-which(RZ == Z1)]
  }
  # Clamp V1 into the domain where .root_ldrZ50() is defined
  if(V1max >= 0.9497887){
    if(verbose) cat("\nThe function to derive the root proportion in each soil layer is only defined for V1 c ]0,0.949[\nV1max is set to 0.94\n")
    V1max <- 0.94
  }
  if(V1min <= 0){
    # NOTE(review): the message says 0.01 but the value assigned below is
    # 0.001 -- confirm which one is intended.
    if(verbose) cat("\nThe function to derive the root proportion in each soil layer is only defined for V1 c ]0,0.949[\nV1min is set to 0.01\n")
    V1min <- 0.001
  }
  V1 <- seq(V1min,V1max,length.out = length(RZ)) # the proportion of root in the first soil layer
  # Create a matrix with V1 as rows and RZ as column, filled with logical values to indicate the parameter combinations to explore
  mExplore <- matrix(T, nrow = length(V1), ncol = length(RZ), dimnames = list(V1 = V1, RZ = RZ))
  # mExplore[lower.tri(mExplore, diag = T)] <- F
  # Calculate Z50 for each (V1, RZ) combination (RZ plays the role of Z95)
  Z50 <- .root_ldrZ50(V = array(V1,dim = dim(mExplore)), Z = array(Z1, dim = dim(mExplore)), Z95 = t(array(RZ, dim = dim(mExplore))))
  dimnames(Z50) <- dimnames(mExplore)
  # Prepare array for V (per-layer root proportions for every combination)
  V <- array(dim = c(length(soil$dVec),length(V1), length(RZ)),
             dimnames = list(layer = 1:length(soil$dVec), V1 = V1, RZ = RZ))
  # Sum LAI of all species
  # (presumably so that each single-cohort run keeps the stand-level canopy
  # load -- TODO confirm against spwb() semantics)
  x$above$LAI_live <- sum(x$above$LAI_live)
  x$above$LAI_expanded <- sum(x$above$LAI_expanded)
  x$above$LAI_dead <- sum(x$above$LAI_dead)
  # Data outputs
  if(is.null(cohorts)) cohorts = row.names(x$cohorts)
  An<-E <- PsiMin <- array(dim = c(length(cohorts), length(V1), length(RZ)), dimnames = list(cohort = cohorts, V1 = V1, RZ = RZ))
  # Start loop: row/col indices of every combination to explore
  cc <- which(mExplore == T, arr.ind = T)
  # Reset input state (soil moisture, plant water status) before simulating
  resetInputs(x, soil)
  for(ci in 1:length(cohorts)){
    coh = cohorts[ci]
    sp = which(row.names(x$cohorts)==coh)
    cat(paste("Exploring root distribution of cohort", coh,"(", x$cohorts$Name[sp],"):\n"))
    # Build a single-cohort copy of the input object, keeping only row `sp`
    # of every per-cohort table
    x_1sp <- x
    x_1sp$cohorts <- x$cohorts[sp,,drop = FALSE]
    x_1sp$above <- x$above[sp,,drop = FALSE]
    x_1sp$below <- x$below
    x_1sp$below$V <- x$below$V[sp,,drop = FALSE]
    x_1sp$paramsInterception <- x$paramsInterception[sp,,drop = FALSE]
    x_1sp$paramsTransp <- x$paramsTransp[sp,,drop = FALSE]
    x_1sp$Transpiration <- x$Transpiration[sp,drop = FALSE]
    x_1sp$Photosynthesis <- x$Photosynthesis[sp,drop = FALSE]
    if(x_1sp$control$transpirationMode=="Granier") {
      x_1sp$PLC <- x$PLC[sp,drop = FALSE]
    } else {
      # Sperry-type mode: subset the additional hydraulic state variables
      x_1sp$below$VGrhizo_kmax <- x$below$V[sp,,drop = FALSE]
      x_1sp$below$VCroot_kmax <- x$below$V[sp,,drop = FALSE]
      x_1sp$paramsAnatomy <- x$paramsAnatomy[sp,,drop = FALSE]
      x_1sp$paramsWaterStorage <- x$paramsWaterStorage[sp,,drop = FALSE]
      x_1sp$StemPLC <- x$StemPLC[sp,drop = FALSE]
      x_1sp$Einst <- x$Einst[sp,drop = FALSE]
      x_1sp$RhizoPsi <- x$RhizoPsi[sp,,drop = FALSE]
      x_1sp$RootCrownPsi <- x$RootCrownPsi[sp,drop = FALSE]
      x_1sp$StemSympPsi <- x$StemSympPsi[sp,drop = FALSE]
      x_1sp$StemPsi1 <- x$StemPsi1[sp,drop = FALSE]
      x_1sp$StemPsi2 <- x$StemPsi2[sp,drop = FALSE]
      x_1sp$LeafSympPsi <- x$LeafSympPsi[sp,drop = FALSE]
      x_1sp$LeafPsi <- x$LeafPsi[sp,drop = FALSE]
    }
    x_1sp$control$verbose <- F
    pb <- txtProgressBar(max = nrow(cc), style = 3)
    for(row in 1:nrow(cc)){
      i <- cc[row,1]
      j <- cc[row,2]
      # Update the depth of the different soil layer to match RZ
      s. <- soil
      s.$SoilDepth <- RZ[j]
      dCum <- cumsum(s.$dVec)
      layersWithinRZ <- dCum < RZ[j]
      layersWithinRZ <- c(T,layersWithinRZ[-length(layersWithinRZ)])
      s.$dVec <- s.$dVec[layersWithinRZ] # remove the layers not included
      nl <- length(s.$dVec) #new number of layers
      s.$dVec[nl] <- s.$dVec[nl]-dCum[nl]+RZ[j] # adjust the width of the last layer
      # s.$Water_FC[nl] = soil$Water_FC[nl]*(s.$dVec[nl]/soil$dVec[nl]) #Adjust volume of the last layer
      # Adjust the other soil parameters to the new number of layers
      s.[["sand"]] <- s.[["sand"]][1:nl]
      s.[["clay"]] <- s.[["clay"]][1:nl]
      s.[["om"]] <- s.[["om"]][1:nl]
      s.[["rfc"]] <- s.[["rfc"]][1:nl]
      s.[["macro"]] <- s.[["macro"]][1:nl]
      s.[["W"]] <- s.[["W"]][1:nl]
      s.[["Temp"]] <- s.[["Temp"]][1:nl]
      s.[["VG_alpha"]] <- s.[["VG_alpha"]][1:nl]
      s.[["VG_theta_res"]] <- s.[["VG_theta_res"]][1:nl]
      s.[["VG_theta_sat"]] <- s.[["VG_theta_sat"]][1:nl]
      s.[["Ksat"]] <- s.[["Ksat"]][1:nl]
      # Root proportions for this (V1, RZ) combination via the LDR model
      V[,i,j] <- 0
      x_1sp$below$V = x$below$V[sp,1:nl,drop = FALSE]
      x_1sp$below$V[1,] <- root_ldrDistribution(Z50 = Z50[i,j], Z95 = RZ[j], d=s.$dVec)
      V[1:length(x_1sp$below$V),i,j] <- x_1sp$below$V
      s_res <- spwb(x = x_1sp, meteo = meteo, soil = s., ...)
      # Outputs
      years <- substr(as.Date(rownames(meteo)), start = 1, stop = 4)
      # 10-day centered moving average; NA edges are dropped.
      # NOTE(review): this calls filter() unqualified and relies on it
      # resolving to stats::filter -- if dplyr is attached this would break;
      # confirm intended.
      ma <- function(x,n=10){
        f = filter(x,rep(1/n,n), method = "convolution", sides = 2)
        f = f[!is.na(f)]
        # print(sum(is.na(f)))
        f
      }
      # PsiMin: mean across years of the annual minimum of the smoothed
      # plant (Granier) or stem (Sperry) water potential
      if(x_1sp$control$transpirationMode=="Granier") {
        PsiMin[ci,i,j] <- mean(aggregate(s_res$Plants$PlantPsi[op_days],
                                         by = list(years[op_days]),
                                         FUN = function(x) min(ma(x)))$x)
      } else {
        PsiMin[ci,i,j] <- mean(aggregate(s_res$Plants$StemPsi[op_days],
                                         by = list(years[op_days]),
                                         FUN = function(x) min(ma(x)))$x)
      }
      # if(verbose) print(s_res$spwbInput)
      E[ci,i,j] <- mean(s_res$Plants$Transpiration[op_days], na.rm=T)
      if(x_1sp$control$transpirationMode=="Granier") {
        An[ci,i,j] <- mean(s_res$Plants$Photosynthesis[op_days], na.rm=T)
      } else {
        An[ci,i,j] <- mean(s_res$Plants$NetPhotosynthesis[op_days], na.rm=T)
      }
      setTxtProgressBar(pb, row)
    }
    cat("\n")
  }
  res <-list(cohorts = cohorts, RZ = RZ, V1 = V1, Z50 = Z50, E = E, An = An, PsiMin = PsiMin)
  class(res)<-list("spwb_ldrExploration","list")
  return(res)
}
# Select optimal root-distribution parameters (Z50, Z95, V1) per cohort from
# the grid produced by spwb_ldrExploration().
#
# Arguments:
#   y        - object of class 'spwb_ldrExploration' (list with elements
#              cohorts, RZ, V1, Z50, E, An, PsiMin)
#   psi_crit - numeric vector with one critical water potential per cohort
#   opt_mode - selection criterion:
#              1 = maximum transpiration along the psimin = psi_crit boundary
#              2 = maximum transpiration among combinations with psimin > psi_crit
#              3 = maximum photosynthesis among combinations with psimin > psi_crit
#              4 = among combinations with >= 95% of max transpiration, the
#                  shallowest rooting depth and largest V1
#              5 = as 4, but based on photosynthesis
#
# Returns a data frame (one row per cohort) with columns psi_crit, Z50, Z95, V1.
spwb_ldrOptimization <- function(y, psi_crit, opt_mode = 1) {
  # Fail fast on an unknown criterion instead of silently returning NA rows
  if (!(opt_mode %in% 1:5)) stop("'opt_mode' must be an integer between 1 and 5.")
  E <- y$E
  An <- y$An
  PsiMin <- y$PsiMin
  V1 <- y$V1
  RZ <- y$RZ
  Z50 <- y$Z50
  cohorts <- y$cohorts
  if (length(psi_crit) != length(cohorts)) stop("The length of 'psi_crit' must be equal to the number of cohorts in 'y'.")
  optim <- data.frame(psi_crit = psi_crit, Z50 = NA, Z95 = NA, V1 = NA)
  row.names(optim) <- cohorts
  for (i in 1:length(cohorts)) {
    # 2-D slices (V1 x RZ) for this cohort
    psimin <- PsiMin[i, , ]
    e <- E[i, , ]
    an <- An[i, , ]
    if (opt_mode == 1) {
      # Locate cells lying on the boundary where psimin crosses psi_crit:
      # mark cells at/above the threshold, then flag any cell whose
      # up/down/left/right neighbour sits on the other side.
      supinf <- matrix(0, ncol = ncol(psimin), nrow = nrow(psimin))
      supinf[psimin >= psi_crit[i]] <- 1
      subselb <- rbind(supinf[-nrow(supinf), ] - supinf[-1, ], rep(0, ncol(supinf)))
      subselt <- rbind(rep(0, ncol(supinf)), supinf[-1, ] - supinf[-nrow(supinf), ])
      subsell <- cbind(rep(0, nrow(supinf)), supinf[, -1] - supinf[, -ncol(supinf)])
      subselr <- cbind(supinf[, -ncol(supinf)] - supinf[, -1], rep(0, nrow(supinf)))
      sel <- matrix(FALSE, ncol = ncol(psimin), nrow = nrow(psimin))
      sel[subselb == 1 | subselt == 1 | subsell == 1 | subselr == 1] <- TRUE
      if (length(e[sel]) == 0) {
        # BUGFIX: 'cohorts' is a character vector, so row.names(cohorts)[i]
        # was always NULL and the cohort name was dropped from the warning.
        warning(paste("Psi value", psi_crit[i], "for cohort ", cohorts[i], "not reached for any combination."))
        # Keep the supplied psi_crit; only the optimized parameters are unknown
        # (the original code overwrote the whole row, losing psi_crit).
        optim[i, c("Z50", "Z95", "V1")] <- NA
      } else {
        # BUGFIX: take the first matching cell explicitly; which() returns a
        # multi-row matrix on ties, in which case point[2] was a row index.
        point <- which(sel & e == max(e[sel]), arr.ind = TRUE)[1, ]
        optim$Z50[i] <- Z50[point[1], point[2]]
        optim$V1[i] <- V1[point[1]]
        optim$Z95[i] <- RZ[point[2]]
      }
    }
    else if (opt_mode == 2) {
      selPsi <- (psimin > psi_crit[i]) # Select combinations with less stress than psi_crit
      if (sum(selPsi) == 0) selPsi <- (psimin == max(psimin)) # If none, select combination of minimum stress
      maxE <- max(e[selPsi]) # Find maximum transpiration (among combinations selected)
      sel2 <- selPsi & (e == maxE) # Select combination with maximum transpiration
      point <- which(sel2, arr.ind = TRUE)[1, ] # First matching cell (robust to ties)
      optim$Z50[i] <- Z50[point[1], point[2]]
      optim$V1[i] <- V1[point[1]]
      optim$Z95[i] <- RZ[point[2]]
    }
    else if (opt_mode == 3) {
      selPsi <- (psimin > psi_crit[i]) # Select combinations with less stress than psi_crit
      if (sum(selPsi) == 0) selPsi <- (psimin == max(psimin)) # If none, select combination of minimum stress
      maxAn <- max(an[selPsi]) # Find maximum photosynthesis (among combinations selected)
      sel2 <- selPsi & (an == maxAn) # Select combinations with maximum photosynthesis
      point <- which(sel2, arr.ind = TRUE)[1, ] # First matching cell (robust to ties)
      optim$Z50[i] <- Z50[point[1], point[2]]
      optim$V1[i] <- V1[point[1]]
      optim$Z95[i] <- RZ[point[2]]
    }
    else if (opt_mode == 4) {
      selPsi <- (psimin > psi_crit[i]) # Select combinations with less stress than psi_crit
      if (sum(selPsi) == 0) selPsi <- (psimin == max(psimin)) # If none, select combination of minimum stress
      maxE <- max(e[selPsi]) # Find maximum transpiration (among combinations selected)
      sel2 <- selPsi & (e >= maxE * 0.95) # Select combinations with > 95% of maximum transpiration
      # NOTE: the V1/RZ column names below come from the dimnames set by
      # spwb_ldrExploration() on the exploration arrays.
      points <- as.data.frame(which(sel2, arr.ind = TRUE))
      minZ <- min(points$RZ, na.rm = TRUE) # Minimum rooting depth (index)
      maxV1 <- max(points$V1[points$RZ == minZ], na.rm = TRUE) # Maximum V1 (index)
      point <- c(maxV1, minZ)
      optim$Z50[i] <- Z50[point[1], point[2]]
      optim$V1[i] <- V1[point[1]]
      optim$Z95[i] <- RZ[point[2]]
    }
    else if (opt_mode == 5) {
      selPsi <- (psimin > psi_crit[i]) # Select combinations with less stress than psi_crit
      if (sum(selPsi) == 0) selPsi <- (psimin == max(psimin)) # If none, select combination of minimum stress
      maxAn <- max(an[selPsi]) # Find maximum photosynthesis (among combinations selected)
      sel2 <- selPsi & (an >= maxAn * 0.95) # Select combinations with > 95% of maximum photosynthesis
      points <- as.data.frame(which(sel2, arr.ind = TRUE))
      minZ <- min(points$RZ, na.rm = TRUE) # Minimum rooting depth (index)
      maxV1 <- max(points$V1[points$RZ == minZ], na.rm = TRUE) # Maximum V1 (index)
      point <- c(maxV1, minZ)
      optim$Z50[i] <- Z50[point[1], point[2]]
      optim$V1[i] <- V1[point[1]]
      optim$Z95[i] <- RZ[point[2]]
    }
  }
  return(optim)
}
# Function for plotting the outputs of spwb_ldrOptimization
# works with the libraries ggplot2, reshape and viridis
# x is the output of the function spwb_ldrOptimization with explore_out = T
# .plot.ldrOptimization <- function(x, SP = 1, raster_var = "E", contour_var = "E", special_breaks_var = "Psi",
# legend_pos = c(1,1), xaxis_pos = "bottom", yaxis_pos = "left", special_breaks = 0, axis_trans = "identity"){
#
# Psi.xyz <- melt(x$explore_out$PsiMin[SP,,])
# E.xyz <- melt(x$explore_out$E[SP,,]*365)
# xy <- Psi.xyz[,c("V1", "RZ")]
#
# # Raster layer
# if(raster_var == "Psi"){
# leg_title <- expression(paste(Psi[min],"(MPa)"))
# data_raster <- Psi.xyz
# }
# if(raster_var == "E"){
# leg_title <- expression(paste("E (mm ", yr^{-1}, ")"))
# data_raster <- E.xyz
# }
#
# # Contour layer
# if(contour_var == "Psi"){
# data_contour <- Psi.xyz
# bw1 <- 1
# bw2 <- 0.2
# }
# if(contour_var == "E"){
# data_contour <- E.xyz
# bw1 <- 50
# bw2 <- 10
# }
#
# # Add special break
# if(special_breaks_var == "Psi"){
# data_special_breaks <- Psi.xyz
# }
# if(special_breaks_var == "E"){
# data_special_breaks <- E.xyz
# }
#
# # Optimized parameters
# x$optim$RZ <- x$optim$Z95
#
# # Plot
# p <- ggplot(xy, aes(x = RZ, y = V1))+
# geom_raster(data = data_raster, aes(fill = value))+
# geom_contour(data = data_contour, aes(z = value), colour = "white", binwidth = bw1, size = 1)+
# geom_contour(data = data_contour, aes(z = value), colour = "white", binwidth = bw2, size = 0.5)+
# geom_contour(data = data_special_breaks, aes(z = value), colour = "red", breaks = special_breaks, size = 1)+
# geom_point(data = x$optim[SP,], aes(x = RZ, y = V1), color = "black", fill = "red", shape = 21, size = 4, inherit.aes = F)+
# scale_fill_viridis(name = leg_title)+
# coord_cartesian(expand = F)+
# ylab(expression(paste(V[1])))+
# xlab(expression(paste(RZ, "(mm)")))+
# theme_bw()+
# theme(legend.position = legend_pos, legend.justification = legend_pos, legend.background = element_rect(fill = rgb(1,1,1,0.7)))+
# scale_x_continuous(position = xaxis_pos, trans = axis_trans)+
# scale_y_continuous(position = yaxis_pos, trans = "identity")
#
# return(p)
# } |
# Function that implements multi-class logistic regression.
#############################################################
# Description of supplied parameters:
# X - n x p training data, 1st column should be 1s to account for intercept
# Y - a vector of size n of class labels, from 0 to K-1
# Xt - ntest x p testing data, 1st column should be 1s to account for intercept
# Yt - a vector of size ntest of test class labels, from 0 to K-1
# numIter - number of FIXED iterations of the algorithm, default value is 50
# eta - learning rate, default value is 0.1
# lambda - ridge parameter, default value is 0.1
# beta_init - (optional) initial starting values of beta for the algorithm, should be p x K matrix
## Return output
##########################################################################
# beta - p x K matrix of estimated beta values after numIter iterations
# error_train - (numIter + 1) length vector of training error % at each iteration (+ starting value)
# error_test - (numIter + 1) length vector of testing error % at each iteration (+ starting value)
# objective - (numIter + 1) length vector of objective values of the function that we are minimizing at each iteration (+ starting value)
# Multi-class ridge-penalized logistic regression fitted with a damped
# Newton method for a fixed number of iterations.
#
# Arguments:
#   X         - n x p training matrix; first column must be all 1s (intercept)
#   Y         - length-n vector of training labels, assumed coded 0..K-1
#   Xt        - ntest x p test matrix; first column must be all 1s
#   Yt        - length-ntest vector of test labels, coded 0..K-1
#   numIter   - number of Newton iterations (exactly; no early stopping)
#   eta       - learning rate (damping factor) for the Newton step, > 0
#   lambda    - ridge penalty, >= 0
#   beta_init - optional p x K starting coefficient matrix (zeros if NULL)
#
# Returns a list with:
#   beta        - p x K matrix of fitted coefficients
#   error_train - (numIter + 1) vector of training misclassification % (incl. start)
#   error_test  - (numIter + 1) vector of test misclassification % (incl. start)
#   objective   - (numIter + 1) vector of penalized negative log-likelihood values
LRMultiClass <- function(X, Y, Xt, Yt, numIter = 50, eta = 0.1, lambda = 0.1, beta_init = NULL){
  ## Check the supplied parameters
  if(any(X[, 1] != 1)){
    stop('The elements of first column in X are not all 1')
  }
  if(any(Xt[, 1] != 1)){
    stop('The elements of first column in Xt are not all 1')
  }
  if(nrow(X) != length(Y)){
    stop('The length of X and Y cannot be different')
  }
  if(nrow(Xt) != length(Yt)){
    stop('The length of Xt and Yt cannot be different ')
  }
  if(ncol(X) != ncol(Xt)){
    stop('The column size in X and Xt cannot be different')
  }
  if(eta <= 0){
    stop('Learning rate (eta) cannot be negative or zero')
  }
  if(lambda < 0){
    stop('Regularization parameter (lambda) cannot be negative')
  }
  if(length(beta_init) == 0){
    beta_init = matrix(0, ncol(X), length(unique(Y)))
  }else{
    if(nrow(beta_init) != ncol(X)){
      stop('The number of features in beta_init and X cannot be different')
    }
    if(ncol(beta_init) != length(unique(Y))){
      stop('The number of classes in beta_init and Y cannot be different')
    }
  }
  # Softmax class probabilities. The row-wise maximum score is subtracted
  # before exponentiating: this leaves the probabilities unchanged
  # mathematically but prevents overflow in exp() for large scores
  # (the original exp(x %*% beta) could return Inf and yield NaN probs).
  class_probs = function(x, beta){
    scores = x %*% beta
    scores = scores - apply(scores, 1, max)
    expscores = exp(scores)
    expscores / rowSums(expscores)
  }
  # Misclassification rate in percent; column k of the probability matrix
  # corresponds to class label k - 1 (labels assumed coded 0..K-1).
  error_pct = function(prob, labels){
    pred = apply(prob, 1, which.max) - 1
    sum(pred != labels) / length(labels) * 100
  }
  # Trajectories (starting value + one entry per iteration)
  error_train = rep(0, numIter+1)
  error_test = rep(0, numIter+1)
  objective = rep(0, numIter+1)
  # One-hot indicator matrix of the training labels
  Y_train = matrix(0, nrow(X), length(unique(Y)))
  for(i in 1:ncol(beta_init)){
    Y_train[sort(unique(Y))[i] == Y, i] = 1
  }
  ## Starting-point probabilities, errors and objective
  prob_train = class_probs(X, beta_init)
  prob_test = class_probs(Xt, beta_init)
  error_train[1] = error_pct(prob_train, Y)
  error_test[1] = error_pct(prob_test, Yt)
  # Penalized negative log-likelihood
  objective[1] = - sum(Y_train * log(prob_train)) + (lambda/2) * sum(beta_init * beta_init)
  ## Damped Newton updates, run for exactly numIter iterations
  for(k in 1:numIter){
    # W = p * (1 - p): diagonal Hessian weights, per class
    combined_prob = prob_train * (1 - prob_train)
    for(l in 1:ncol(beta_init)){
      # diag(W) %*% X without materializing diag(W)
      W_X = X * combined_prob[, l]
      # Ridge-regularized Hessian: t(X) %*% diag(W) %*% X + lambda * I
      hessian = crossprod(X, W_X) + lambda * diag(ncol(X))
      # Gradient of the penalized negative log-likelihood for class l
      gradient = t(X) %*% (prob_train[, l] - Y_train[, l]) + lambda * beta_init[, l]
      # solve(H, g) avoids forming the explicit inverse (faster, more stable)
      beta_init[, l] = beta_init[, l] - eta * solve(hessian, gradient)
    }
    prob_train = class_probs(X, beta_init)
    prob_test = class_probs(Xt, beta_init)
    error_train[k+1] = error_pct(prob_train, Y)
    error_test[k+1] = error_pct(prob_test, Yt)
    objective[k+1] = - sum(Y_train * log(prob_train)) + (lambda/2) * sum(beta_init * beta_init)
  }
  return(list(beta = beta_init, error_train = error_train, error_test = error_test, objective = objective))
}
| /FunctionsLR.R | no_license | nitesh-1507/Logistic-Regression-Multiclass | R | false | false | 7,057 | r | # Function that implements multi-class logistic regression.
#############################################################
# Description of supplied parameters:
# X - n x p training data, 1st column should be 1s to account for intercept
# Y - a vector of size n of class labels, from 0 to K-1
# Xt - ntest x p testing data, 1st column should be 1s to account for intercept
# Yt - a vector of size ntest of test class labels, from 0 to K-1
# numIter - number of FIXED iterations of the algorithm, default value is 50
# eta - learning rate, default value is 0.1
# lambda - ridge parameter, default value is 0.1
# beta_init - (optional) initial starting values of beta for the algorithm, should be p x K matrix
## Return output
##########################################################################
# beta - p x K matrix of estimated beta values after numIter iterations
# error_train - (numIter + 1) length vector of training error % at each iteration (+ starting value)
# error_test - (numIter + 1) length vector of testing error % at each iteration (+ starting value)
# objective - (numIter + 1) length vector of objective values of the function that we are minimizing at each iteration (+ starting value)
# Multi-class logistic regression with a ridge penalty, trained by a damped
# Newton method for a fixed number of iterations.
#
# X / Y   : training matrix (first column all 1s) and labels coded 0..K-1
# Xt / Yt : test matrix (first column all 1s) and labels coded 0..K-1
# numIter : number of Newton iterations to perform
# eta     : learning rate (step damping), must be > 0
# lambda  : ridge penalty, must be >= 0
# beta_init : optional p x K starting coefficients (zeros when NULL)
#
# Returns list(beta, error_train, error_test, objective) where the three
# trajectories have length numIter + 1 (starting value + one per iteration).
LRMultiClass <- function(X, Y, Xt, Yt, numIter = 50, eta = 0.1, lambda = 0.1, beta_init = NULL){
  ## ---- Validate inputs --------------------------------------------------
  if (any(X[, 1] != 1)) {
    stop('The elements of first column in X are not all 1')
  }
  if (any(Xt[, 1] != 1)) {
    stop('The elements of first column in Xt are not all 1')
  }
  if (nrow(X) != length(Y)) {
    stop('The length of X and Y cannot be different')
  }
  if (nrow(Xt) != length(Yt)) {
    stop('The length of Xt and Yt cannot be different ')
  }
  if (ncol(X) != ncol(Xt)) {
    stop('The column size in X and Xt cannot be different')
  }
  if (eta <= 0) {
    stop('Learning rate (eta) cannot be negative or zero')
  }
  if (lambda < 0) {
    stop('Regularization parameter (lambda) cannot be negative')
  }
  n_classes <- length(unique(Y))
  n_features <- ncol(X)
  if (length(beta_init) == 0) {
    B <- matrix(0, n_features, n_classes)
  } else {
    if (nrow(beta_init) != n_features) {
      stop('The number of features in beta_init and X cannot be different')
    }
    if (ncol(beta_init) != n_classes) {
      stop('The number of classes in beta_init and Y cannot be different')
    }
    B <- beta_init
  }
  ## ---- Local helpers ----------------------------------------------------
  # Softmax probabilities: row-normalized exponentiated scores.
  softmax_probs <- function(mat, coef) {
    num <- exp(mat %*% coef)
    num / rowSums(num)
  }
  # Hard class assignment; column k corresponds to 0-based label k - 1.
  assign_class <- function(prob) {
    max.col(prob, ties.method = "first") - 1
  }
  # Percentage of misclassified observations.
  pct_error <- function(prob, labels) {
    sum(assign_class(prob) != labels) / length(labels) * 100
  }
  ## ---- Initial state ----------------------------------------------------
  error_train <- numeric(numIter + 1)
  error_test <- numeric(numIter + 1)
  objective <- numeric(numIter + 1)
  # One-hot encoding of the training labels
  classes <- sort(unique(Y))
  Y_ind <- matrix(0, nrow(X), n_classes)
  for (cl in seq_len(n_classes)) {
    Y_ind[classes[cl] == Y, cl] <- 1
  }
  # Penalized negative log-likelihood
  penalized_nll <- function(prob, coef) {
    - sum(Y_ind * log(prob)) + (lambda / 2) * sum(coef * coef)
  }
  P_train <- softmax_probs(X, B)
  P_test <- softmax_probs(Xt, B)
  error_train[1] <- pct_error(P_train, Y)
  error_test[1] <- pct_error(P_test, Yt)
  objective[1] <- penalized_nll(P_train, B)
  ## ---- Damped Newton iterations (exactly numIter of them) ---------------
  ridge_I <- lambda * diag(rep(1, n_features))
  for (iter in seq_len(numIter)) {
    # Per-class diagonal Hessian weights p * (1 - p)
    weights <- P_train * (1 - P_train)
    for (cl in seq_len(n_classes)) {
      weighted_X <- X * weights[, cl]                 # diag(W) %*% X
      hess <- crossprod(X, weighted_X) + ridge_I      # t(X) W X + lambda I
      grad <- (t(X) %*% (P_train[, cl] - Y_ind[, cl])) + lambda * B[, cl]
      B[, cl] <- B[, cl] - eta * solve(hess) %*% grad
    }
    P_train <- softmax_probs(X, B)
    P_test <- softmax_probs(Xt, B)
    error_train[iter + 1] <- pct_error(P_train, Y)
    error_test[iter + 1] <- pct_error(P_test, Yt)
    objective[iter + 1] <- penalized_nll(P_train, B)
  }
  list(beta = B, error_train = error_train, error_test = error_test, objective = objective)
}
|
# Reproducible audit of a k-NN classifier on an OpenML dataset.
# The "#:#" markers are section tags, presumably parsed by benchmark
# tooling -- keep them verbatim.
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# Fixed seed so the CV folds (and hence the audited measures) are reproducible.
set.seed(1)
#:# data
# Downloads the dataset from the OpenML server (network access required).
dataset <- getOMLDataSet(data.name = "climate-model-simulation-crashes")
head(dataset$data)
#:# preprocessing
# No preprocessing is applied; the raw data frame is used as-is.
head(dataset$data)
#:# model
# Classification task predicting 'outcome' with a k-NN learner that
# returns class probabilities.
task = makeClassifTask(id = "task", data = dataset$data, target = "outcome")
lrn = makeLearner("classif.kknn", par.vals = list(), predict.type = "prob")
#:# hash
#:# 0bf1651fbab35c0e1febdf3bfb1546ae
# Fingerprint of the exact task/learner pair; the tag above records the
# expected value, so the task/learner construction must not be altered.
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation, aggregating several performance measures.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
# Record the package versions used for this run.
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
| /models/openml_climate-model-simulation-crashes/classification_outcome/0bf1651fbab35c0e1febdf3bfb1546ae/code.R | no_license | pysiakk/CaseStudies2019S | R | false | false | 706 | r | #:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "climate-model-simulation-crashes")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "outcome")
lrn = makeLearner("classif.kknn", par.vals = list(), predict.type = "prob")
#:# hash
#:# 0bf1651fbab35c0e1febdf3bfb1546ae
hash <- digest(list(task, lrn))
hash
#:# audit
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is_.R
\name{is_character}
\alias{is_character}
\title{Element-wise wrapper for is.character}
\usage{
is_character(x)
}
\arguments{
\item{x}{a vector or object}
}
\value{
a logical vector
}
\description{
Element-wise wrapper for is.character
}
| /man/is_character.Rd | no_license | oucru-biostats/Range306 | R | false | true | 321 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is_.R
\name{is_character}
\alias{is_character}
\title{Element-wise wrapper for is.character}
\usage{
is_character(x)
}
\arguments{
\item{x}{a vector or object}
}
\value{
a logical vector
}
\description{
Element-wise wrapper for is.character
}
|
#' @slot x numeric vector containing allocations for each player
| /CoopGame/man-roxygen/slot/x.R | no_license | anwanjohannes/CoopGame | R | false | false | 66 | r | #' @slot x numeric vector containing allocations for each player
|
# making norming dists
# proc_ace_t3 is "wide" output data from proc_by_module() called on Jessica Younger's prepared trialwise data
# (the object is created elsewhere; this script only summarizes it)
proc_ace_t3_wide = full_join(proc_ace_t3$ALL_OTHER_DATA,
                             filter(proc_ace_t3$FILTER, FILTER.distractors == 0)) %>%
  distinct() %>%
  # BOXED.score: percent RT slowing from 4- to 12-item conjunction displays,
  # shifted so that 100 means "no slowing" (larger = more slowing)
  mutate(BOXED.score = (((BOXED.rt_mean.conjunction_12 - BOXED.rt_mean.conjunction_4) / BOXED.rt_mean.conjunction_4) * 100) + 100)
# Per-grade subsets used to build grade-specific norm tables
proc_ace_t3_4 = proc_ace_t3_wide %>%
  filter(grade == 4)
proc_ace_t3_6 = proc_ace_t3_wide %>%
  filter(grade == 6)
proc_ace_t3_8 = proc_ace_t3_wide %>%
  filter(grade == 8)
# One norm table per grade: percentiles 0..100 in 1% steps for each measure.
# Span/capacity measures are stored ascending (higher raw score = higher
# percentile); RT-cost measures are stored in decreasing order -- presumably
# because a smaller cost reflects better performance. Confirm against how
# these norm tables are consumed downstream.
ace_t3_norms = vector("list", 0)
for (this.grade in c("fourth", "sixth", "eighth")) {
  data = switch(this.grade,
                fourth = proc_ace_t3_4,
                sixth = proc_ace_t3_6,
                eighth = proc_ace_t3_8)
  ace_t3_norms[[this.grade]] = data.frame(
    BACKWARDSSPATIALSPAN.object_count_span.overall = quantile(data$BACKWARDSSPATIALSPAN.object_count_span.overall, probs = seq(0, 1, .01), na.rm = TRUE),
    SPATIALSPAN.object_count_span.overall = quantile(data$SPATIALSPAN.object_count_span.overall, probs = seq(0, 1, .01), na.rm = TRUE),
    FILTER.k.2 = quantile(data$FILTER.k.2, probs = seq(0, 1, .01), na.rm = TRUE),
    FLANKER.rt_mean.cost = sort(quantile(data$FLANKER.rt_mean.cost, probs = seq(0, 1, .01), na.rm = TRUE), decreasing = TRUE),
    STROOP.rt_mean.cost = sort(quantile(data$STROOP.rt_mean.cost, probs = seq(0, 1, .01), na.rm = TRUE), decreasing = TRUE),
    SAAT.rt_mean.sustained = sort(quantile(data$SAAT.rt_mean.sustained, probs = seq(0, 1, .01), na.rm = TRUE), decreasing = TRUE),
    SAAT.rt_mean.impulsive = sort(quantile(data$SAAT.rt_mean.impulsive, probs = seq(0, 1, .01), na.rm = TRUE), decreasing = TRUE),
    TNT.rt_mean.cost = sort(quantile(data$TNT.rt_mean.cost, probs = seq(0, 1, .01), na.rm = TRUE), decreasing = TRUE),
    TASKSWITCH.rt_mean.cost = sort(quantile(data$TASKSWITCH.rt_mean.cost, probs = seq(0, 1, .01), na.rm = TRUE), decreasing = TRUE),
    BOXED.score = sort(quantile(data$BOXED.score, probs = seq(0, 1, .01), na.rm = TRUE), decreasing = TRUE))
}
| /inst/data-raw/create-norm-percentiles.R | permissive | Mattlk13/aceR | R | false | false | 2,087 | r | # making norming dists
# proc_ace_t1 is "wide" output data from proc_by_module() called on Jessica Younger's prepared trialwise data
proc_ace_t3_wide = full_join(proc_ace_t3$ALL_OTHER_DATA,
filter(proc_ace_t3$FILTER, FILTER.distractors == 0)) %>%
distinct() %>%
mutate(BOXED.score = (((BOXED.rt_mean.conjunction_12 - BOXED.rt_mean.conjunction_4) / BOXED.rt_mean.conjunction_4) * 100) + 100)
proc_ace_t3_4 = proc_ace_t3_wide %>%
filter(grade == 4)
proc_ace_t3_6 = proc_ace_t3_wide %>%
filter(grade == 6)
proc_ace_t3_8 = proc_ace_t3_wide %>%
filter(grade == 8)
ace_t3_norms = vector("list", 0)
for (this.grade in c("fourth", "sixth", "eighth")) {
data = switch(this.grade,
fourth = proc_ace_t3_4,
sixth = proc_ace_t3_6,
eighth = proc_ace_t3_8)
ace_t3_norms[[this.grade]] = data.frame(
BACKWARDSSPATIALSPAN.object_count_span.overall = quantile(data$BACKWARDSSPATIALSPAN.object_count_span.overall, probs = seq(0, 1, .01), na.rm = TRUE),
SPATIALSPAN.object_count_span.overall = quantile(data$SPATIALSPAN.object_count_span.overall, probs = seq(0, 1, .01), na.rm = TRUE),
FILTER.k.2 = quantile(data$FILTER.k.2, probs = seq(0, 1, .01), na.rm = TRUE),
FLANKER.rt_mean.cost = sort(quantile(data$FLANKER.rt_mean.cost, probs = seq(0, 1, .01), na.rm = TRUE), decreasing = TRUE),
STROOP.rt_mean.cost = sort(quantile(data$STROOP.rt_mean.cost, probs = seq(0, 1, .01), na.rm = TRUE), decreasing = TRUE),
SAAT.rt_mean.sustained = sort(quantile(data$SAAT.rt_mean.sustained, probs = seq(0, 1, .01), na.rm = TRUE), decreasing = TRUE),
SAAT.rt_mean.impulsive = sort(quantile(data$SAAT.rt_mean.impulsive, probs = seq(0, 1, .01), na.rm = TRUE), decreasing = TRUE),
TNT.rt_mean.cost = sort(quantile(data$TNT.rt_mean.cost, probs = seq(0, 1, .01), na.rm = TRUE), decreasing = TRUE),
TASKSWITCH.rt_mean.cost = sort(quantile(data$TASKSWITCH.rt_mean.cost, probs = seq(0, 1, .01), na.rm = TRUE), decreasing = TRUE),
BOXED.score = sort(quantile(data$BOXED.score, probs = seq(0, 1, .01), na.rm = TRUE), decreasing = TRUE))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.sphet.R
\name{print.sphet}
\alias{print.sphet}
\title{print method for class sphet}
\usage{
\method{print}{sphet}(x, digits = max(3, getOption("digits") - 3),...)
}
\arguments{
\item{x}{an object of class 'sphet'}
\item{digits}{minimal number of significant digits, see \code{print.default}}
\item{...}{additional arguments to be passed}
}
\description{
Method used to print objects of class \code{'summary.sphet'} and \code{'sphet'}
}
\details{
The summary function summary.sphet returns an object of class 'sphet'
organized in a coefficient matrix.
}
\examples{
library(spdep)
data(columbus)
listw <- nb2listw(col.gal.nb)
res <- spreg(CRIME~HOVAL + INC, data=columbus, listw=listw, model ="sarar")
summary(res)
}
\seealso{
\code{\link{gstslshet}}, \code{\link{stslshac}}
}
\author{
Gianfranco Piras \email{gpiras@mac.com}
}
| /man/print.sphet.Rd | no_license | gpiras/sphet | R | false | true | 915 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.sphet.R
\name{print.sphet}
\alias{print.sphet}
\title{print method for class sphet}
\usage{
\method{print}{sphet}(x, digits = max(3, getOption("digits") - 3),...)
}
\arguments{
\item{x}{an object of class 'sphet'}
\item{digits}{minimal number of significant digits, see \code{print.default}}
\item{...}{additional arguments to be passed}
}
\description{
Method used to print objects of class \code{'summary.sphet'} and \code{'sphet'}
}
\details{
The summary function summary.sphet returns an object of class 'sphet'
organized in a coefficient matrix.
}
\examples{
library(spdep)
data(columbus)
listw <- nb2listw(col.gal.nb)
res <- spreg(CRIME~HOVAL + INC, data=columbus, listw=listw, model ="sarar")
summary(res)
}
\seealso{
\code{\link{gstslshet}}, \code{\link{stslshac}}
}
\author{
Gianfranco Piras \email{gpiras@mac.com}
}
|
# Build the pilot cohort for the 6EJP/6WJP pain-score QI project: identify
# eligible inpatients, pull demographics/diagnoses/meds/pain scores from MBO
# exports, and write de-identified extracts to data/external/pilot/.
library(tidyverse)
library(edwr)
library(lubridate)
library(stringr)
library(icd)

# Directory holding the raw MBO query exports for the pilot.
dir_raw <- "data/raw/pilot1"

# run MBO query
# * Patients - by Discharge Unit
#     - Facility (Curr): HH HERMANN
#     - Nurse Unit (Curr): HH 6EJP;HH 6WJP
#     - Date Only - Admit: 1/29/2018 - 2/28/2018

# Eligible encounters: inpatients discharged within the study window.
# NOTE(review): the query comment above describes an ADMIT window of
# 1/29-2/28, but this filter is on discharge.datetime and excludes 2/28
# itself (strict <) -- confirm intended.
pts <- read_data(dir_raw, "patients", FALSE) %>%
  as.patients() %>%
  filter(
    discharge.datetime >= mdy("1/29/2018", tz = "US/Central"),
    discharge.datetime < mdy("2/28/2018", tz = "US/Central"),
    visit.type == "Inpatient"
  )

# Encounter-id string used to parameterize the follow-up MBO queries below.
mbo_id <- concat_encounters(pts$millennium.id)

# run MBO queries
# * Demographics
# * Diagnosis - ICD-9/10-CM
# * Identifiers - by Millennium Encounter ID
# * Location History
# * Medications - Inpatient - All
# * Pain Scores

# Pilot nurse units.
units <- c("HH 6EJP", "HH 6WJP")

# Fixed seed so the random chart sample is reproducible.
set.seed(77123)

# Adults only; over-sample to 110 so rows lost to missing data below still
# leave ~100 usable patients.
demog <- read_data(dir_raw, "demographics", FALSE) %>%
  as.demographics() %>%
  filter(age >= 18) %>%
  sample_n(110)

id <- read_data(dir_raw, "identifiers", FALSE) %>%
  as.id()

icd <- read_data(dir_raw, "diagnosis", FALSE) %>%
  as.diagnosis()

# Primary final ICD-10 diagnosis per encounter, with a short text description.
primary <- icd %>%
  filter(
    diag.type == "FINAL",
    diag.seq == "Primary"
  ) %>%
  mutate_at("diag.code", as.icd10) %>%
  mutate(
    "icd" = icd_decimal_to_short(diag.code),
    desc = icd_explain_table(icd)$short_desc
  )

# locations <- read_data(dir_raw, "location", FALSE) %>%
#   as.locations() %>%
#   filter(unit.name %in% units) %>%
#   semi_join(demog, by = "millennium.id")

# Pain scores charted while the patient was physically on a pilot unit.
scores <- read_data(dir_raw, "^pain-scores", FALSE) %>%
  as.pain_scores() %>%
  filter(event.location %in% units)

# Reference list of analgesic med names, lower-cased to match MBO output.
pain_meds <- med_lookup("analgesics") %>%
  mutate_at("med.name", str_to_lower)

# Analgesic doses given on a pilot unit; aspirin excluded.
meds <- read_data(dir_raw, "meds-inpt", FALSE) %>%
  as.meds_inpt() %>%
  filter(
    med %in% pain_meds$med.name,
    med != "aspirin",
    med.location %in% units
  )

# Resolve each dose to its originating order: use the parent order id when one
# exists (non-zero), otherwise fall back to the order id itself.
# (funs() is deprecated since dplyr 0.8; formula lambdas are the replacement.)
orders <- meds %>%
  mutate(order_id = order.parent.id) %>%
  mutate_at("order_id", ~ na_if(., 0)) %>%
  mutate_at("order_id", ~ coalesce(., order.id))

mbo_order <- concat_encounters(orders$order_id)

# run MBO query
# * Orders Meds - Details - by Order Id

# Order-level details (frequency, PRN flag), renamed to edwr-style columns.
details <- read_data(dir_raw, "orders", FALSE) %>%
  rename(
    millennium.id = `Encounter Identifier`,
    order_id = `Order Id`,
    freq = Frequency,
    prn = `PRN Indicator`
  )

set.seed(77123)

# Final patient table: sampled adults with both a primary diagnosis and a FIN.
data_patients <- demog %>%
  left_join(primary, by = "millennium.id") %>%
  left_join(id, by = "millennium.id") %>%
  filter(
    !is.na(diag.code),
    !is.na(fin)
  ) %>%
  # sample_n(100) %>%
  select(millennium.id, fin, age, gender, diag.code, desc)

# Med administrations restricted to the final patient set.
data_meds <- orders %>%
  left_join(details, by = c("millennium.id", "order_id")) %>%
  semi_join(data_patients, by = "millennium.id") %>%
  select(millennium.id, med.datetime:route, freq, prn, event.tag)

# Pain scores restricted to the final patient set.
data_scores <- scores %>%
  semi_join(data_patients, by = "millennium.id") %>%
  select(millennium.id:event.result)

write_csv(data_patients, "data/external/pilot/patients.csv")
write_csv(data_meds, "data/external/pilot/meds.csv")
write_csv(data_scores, "data/external/pilot/scores.csv")
| /src/02_find-patients_pilot.R | no_license | bgulbis/pain_scores_qi | R | false | false | 3,198 | r | library(tidyverse)
# Build the pilot cohort for the 6EJP/6WJP pain-score QI project: identify
# eligible inpatients, pull demographics/diagnoses/meds/pain scores from MBO
# exports, and write de-identified extracts to data/external/pilot/.
# NOTE(review): library(tidyverse) must already be attached before this point.
library(edwr)
library(lubridate)
library(stringr)
library(icd)
# Directory holding the raw MBO query exports for the pilot.
dir_raw <- "data/raw/pilot1"
# run MBO query
# * Patients - by Discharge Unit
#     - Facility (Curr): HH HERMANN
#     - Nurse Unit (Curr): HH 6EJP;HH 6WJP
#     - Date Only - Admit: 1/29/2018 - 2/28/2018
# Eligible encounters: inpatients discharged within the study window.
# NOTE(review): the query comment above describes an ADMIT window, but this
# filter is on discharge.datetime and excludes 2/28 itself (strict <) -- confirm.
pts <- read_data(dir_raw, "patients", FALSE) %>%
  as.patients() %>%
  filter(
    discharge.datetime >= mdy("1/29/2018", tz = "US/Central"),
    discharge.datetime < mdy("2/28/2018", tz = "US/Central"),
    visit.type == "Inpatient"
  )
# Encounter-id string used to parameterize the follow-up MBO queries below.
mbo_id <- concat_encounters(pts$millennium.id)
# run MBO queries
# * Demographics
# * Diagnosis - ICD-9/10-CM
# * Identifiers - by Millennium Encounter ID
# * Location History
# * Medications - Inpatient - All
# * Pain Scores
# Pilot nurse units.
units <- c("HH 6EJP", "HH 6WJP")
# Fixed seed so the random chart sample is reproducible.
set.seed(77123)
# Adults only; over-sample to 110 so rows lost to missing data below still
# leave ~100 usable patients.
demog <- read_data(dir_raw, "demographics", FALSE) %>%
  as.demographics() %>%
  filter(age >= 18) %>%
  sample_n(110)
id <- read_data(dir_raw, "identifiers", FALSE) %>%
  as.id()
# NOTE(review): this variable shadows the attached `icd` package name.
icd <- read_data(dir_raw, "diagnosis", FALSE) %>%
  as.diagnosis()
# Primary final ICD-10 diagnosis per encounter, with a short text description.
primary <- icd %>%
  filter(
    diag.type == "FINAL",
    diag.seq == "Primary"
  ) %>%
  mutate_at("diag.code", as.icd10) %>%
  mutate(
    "icd" = icd_decimal_to_short(diag.code),
    desc = icd_explain_table(icd)$short_desc
  )
# locations <- read_data(dir_raw, "location", FALSE) %>%
#   as.locations() %>%
#   filter(unit.name %in% units) %>%
#   semi_join(demog, by = "millennium.id")
# Pain scores charted while the patient was physically on a pilot unit.
scores <- read_data(dir_raw, "^pain-scores", FALSE) %>%
  as.pain_scores() %>%
  filter(event.location %in% units)
# Reference list of analgesic med names, lower-cased to match MBO output.
pain_meds <- med_lookup("analgesics") %>%
  mutate_at("med.name", str_to_lower)
# Analgesic doses given on a pilot unit; aspirin excluded.
meds <- read_data(dir_raw, "meds-inpt", FALSE) %>%
  as.meds_inpt() %>%
  filter(
    med %in% pain_meds$med.name,
    med != "aspirin",
    med.location %in% units
  )
# Resolve each dose to its originating order: use the parent order id when one
# exists (non-zero), otherwise fall back to the order id itself.
# NOTE(review): funs() is deprecated since dplyr 0.8 -- consider ~ lambdas.
orders <- meds %>%
  mutate(order_id = order.parent.id) %>%
  mutate_at("order_id", funs(na_if(., 0))) %>%
  mutate_at("order_id", funs(coalesce(., order.id)))
mbo_order <- concat_encounters(orders$order_id)
# run MBO query
# * Orders Meds - Details - by Order Id
# Order-level details (frequency, PRN flag), renamed to edwr-style columns.
details <- read_data(dir_raw, "orders", FALSE) %>%
  rename(
    millennium.id = `Encounter Identifier`,
    order_id = `Order Id`,
    freq = Frequency,
    prn = `PRN Indicator`
  )
set.seed(77123)
# Final patient table: sampled adults with both a primary diagnosis and a FIN.
data_patients <- demog %>%
  left_join(primary, by = "millennium.id") %>%
  left_join(id, by = "millennium.id") %>%
  filter(
    !is.na(diag.code),
    !is.na(fin)
  ) %>%
  # sample_n(100) %>%
  select(millennium.id, fin, age, gender, diag.code, desc)
# Med administrations restricted to the final patient set.
data_meds <- orders %>%
  left_join(details, by = c("millennium.id", "order_id")) %>%
  semi_join(data_patients, by = "millennium.id") %>%
  select(millennium.id, med.datetime:route, freq, prn, event.tag)
# Pain scores restricted to the final patient set.
data_scores <- scores %>%
  semi_join(data_patients, by = "millennium.id") %>%
  select(millennium.id:event.result)
write_csv(data_patients, "data/external/pilot/patients.csv")
write_csv(data_meds, "data/external/pilot/meds.csv")
write_csv(data_scores, "data/external/pilot/scores.csv")
|
################################################
# Functions for Forecasting Challenge Analyses #
# #
# nicole.nova@stanford.edu #
################################################
# Out-of-sample forecasts.
# Generates 52-week forecast trajectories for four held-out seasons, refitting
# the simplex-projection model with progressively longer training data (one
# extra quarter per season) so each season is forecast using only its past.
#
# Arguments:
#   PR           -- normalized weekly data frame ($date, $cases, driver columns).
#   train.length -- length of the original training dataset, in weeks.
#   ts.dim       -- embedding columns.  NOTE(review): this argument is unused;
#                   the block_lnlp() call below reads the global
#                   `ts.dim.cases.drivers` instead -- confirm intended.
#   target       -- 0 = full 52-week trajectories; 1 = peak week;
#                   2 = peak incidence; anything else = seasonal incidence.
# Returns: list of 4 seasons; each season is a list over the 7 forecasting
#   weeks (0, 4, ..., 24); each of those holds one entry per simplex replicate.
# NOTE(review): also depends on the global `PR.orig` (used to denormalize
#   cases) and on block_lnlp() from the rEDM package.
forecast.test <- function(PR, train.length, ts.dim, target){
  # Edge/outer part of a simplex: number of nearest neighbors to use.
  n_start <- 2
  n_end <- 2
  # Creating simplexes using data from half a year, a year, and 1.5 years ago
  # (lead times, in weeks, between the forecast origin and the season start).
  previous.simplexes <- c(seq(1, 20), seq(50, 59), seq(74, 88))
  # Generate multiple datasets with varying lengths
  quarter <- 13 # weeks
  train.length <- train.length # length of the original training dataset in weeks
  # Create new dataframes with added quarters sequentially thoughout the testing
  # seasons.  PR.0 ... PR.16 are created in this function's environment via assign().
  for (i in 0:16) {
    end <- train.length + quarter*i
    assign(paste0("PR.", i), PR[1:end, ])
  }
  # Set parameters for length of data on the first week in each season (1-4)
  s1 <- length(PR.0$cases) + 1
  s2 <- length(PR.4$cases) + 1
  s3 <- length(PR.8$cases) + 1
  s4 <- length(PR.12$cases) + 1
  # Specify which dataset to use for forecasting and update the attractor for new forecasts
  for (dfNumber in seq(4, 16, 4)) {
    # Look up the season's training data frame by name (PR.4, PR.8, PR.12, PR.16).
    df <- eval(parse(text = paste0("PR.", dfNumber)))
    # Length of new dataframe.  NOTE(review): dfl is never used.
    dfl <- length(df$date)
    # Season number
    season <- dfNumber/4
    # Obtain the denormalized observations for a particular season
    obs.s <- df$cases*sd(PR.orig$cases) + mean(PR.orig$cases)
    start.season <- eval(parse(text = paste0("s", season)))
    obs.s <- obs.s[start.season:length(df$cases)]
    # Initialize forecast results for each forecasting week f_0, f_4, ..., f_24
    sample1 <- NULL
    sample2 <- NULL
    sample3 <- NULL
    sample4 <- NULL
    sample5 <- NULL
    sample6 <- NULL
    sample7 <- NULL
    for (nn in n_start:n_end) {
      for (sf in previous.simplexes) { # Varying over simplex forecasts starting from closest week to season
        # Initialize the forecasts for each seasons on weeks 0, 4, ..., 24
        f_0 <- NULL
        f_4 <- NULL
        f_8 <- NULL
        f_12 <- NULL
        f_16 <- NULL
        f_20 <- NULL
        f_24 <- NULL
        # Add new observations for f_4, f_8, f_12, f_16, f_20, f_24: weeks
        # already observed by each later forecasting week are filled with the
        # true values.
        f_4 <- c(f_4, obs.s[1:4])
        f_8 <- c(f_8, obs.s[1:8])
        f_12 <- c(f_12, obs.s[1:12])
        f_16 <- c(f_16, obs.s[1:16])
        f_20 <- c(f_20, obs.s[1:20])
        f_24 <- c(f_24, obs.s[1:24])
        # Index of the season start within this replicate's prediction vector.
        s <- start.season - sf
        for (forecast.horizon in sf:(51+sf)) { # For each forecast horizon: tp = 1, 2, .., 52
          ssr.cases <- block_lnlp(df, columns = ts.dim.cases.drivers,
                                  target_column = "cases", stats_only = F,
                                  method = "simplex", exclusion_radius = NULL, tp = forecast.horizon,
                                  num_neighbors = nn)
          # NOTE(review): `stats` is never used.
          stats <- ssr.cases[[1]]$stats
          ssr.cases <- ssr.cases[[1]]$model_output
          # Denormalize observations and predictions back to case counts.
          # NOTE(review): ssr.cases.obs is never used.
          ssr.cases.obs <- ssr.cases$obs*sd(PR.orig$cases) + mean(PR.orig$cases)
          ssr.cases.pred <- ssr.cases$pred*sd(PR.orig$cases) + mean(PR.orig$cases)
          f_0 <- c(f_0, ssr.cases.pred[s])
          f_4 <- c(f_4, ssr.cases.pred[s+4])
          f_8 <- c(f_8, ssr.cases.pred[s+8])
          f_12 <- c(f_12, ssr.cases.pred[s+12])
          f_16 <- c(f_16, ssr.cases.pred[s+16])
          f_20 <- c(f_20, ssr.cases.pred[s+20])
          f_24 <- c(f_24, ssr.cases.pred[s+24])
        }
        e <- 52 # weeks in a year
        # Truncate each forecasting week's trajectory to one full season.
        f <- list(f_0[1:e], f_4[1:e], f_8[1:e], f_12[1:e], f_16[1:e], f_20[1:e], f_24[1:e])
        if (target == 0) {
          # These are the n forecasts for each season
          sample1 <- append(sample1, list(f[[1]]))
          sample2 <- append(sample2, list(f[[2]]))
          sample3 <- append(sample3, list(f[[3]]))
          sample4 <- append(sample4, list(f[[4]]))
          sample5 <- append(sample5, list(f[[5]]))
          sample6 <- append(sample6, list(f[[6]]))
          sample7 <- append(sample7, list(f[[7]]))
        } else if (target == 1) {
          # These are the estimates of peak week for each n forecast
          sample1 <- c(sample1, which.max(f[[1]]))
          sample2 <- c(sample2, which.max(f[[2]]))
          sample3 <- c(sample3, which.max(f[[3]]))
          sample4 <- c(sample4, which.max(f[[4]]))
          sample5 <- c(sample5, which.max(f[[5]]))
          sample6 <- c(sample6, which.max(f[[6]]))
          sample7 <- c(sample7, which.max(f[[7]]))
        } else if (target == 2) {
          # These are the estimates of peak incidence for each n forecast
          sample1 <- c(sample1, max(f[[1]]))
          sample2 <- c(sample2, max(f[[2]]))
          sample3 <- c(sample3, max(f[[3]]))
          sample4 <- c(sample4, max(f[[4]]))
          sample5 <- c(sample5, max(f[[5]]))
          sample6 <- c(sample6, max(f[[6]]))
          sample7 <- c(sample7, max(f[[7]]))
        } else {
          # These are the estimates of seasonal incidence for each n forecast
          sample1 <- c(sample1, sum(f[[1]]))
          sample2 <- c(sample2, sum(f[[2]]))
          sample3 <- c(sample3, sum(f[[3]]))
          sample4 <- c(sample4, sum(f[[4]]))
          sample5 <- c(sample5, sum(f[[5]]))
          sample6 <- c(sample6, sum(f[[6]]))
          sample7 <- c(sample7, sum(f[[7]]))
        }
      }
    }
    # Collect forecasts for each forecasting week for each season (1-4)
    assign(paste0("sample.", season), list(sample1, sample2, sample3, sample4, sample5, sample6, sample7) )
  } # End of for loop for each season
  forecast.seasons <- list(sample.1, sample.2, sample.3, sample.4)
  return(forecast.seasons)
}
# Calculate the log-score for the peak-week target of one season.
#
# Arguments:
#   forecast.seasons -- output of forecast.test() with target = 1
#                       (one peak-week estimate per simplex replicate).
#   seasonNumber     -- season (1-4) to score.
#   obs.sx           -- observed weekly incidence for that season.
# Returns: mean log-probability (over the 7 forecasting weeks) that the
#   skew-normal fitted to the replicate peak weeks places on the true peak week.
# Side effect: draws one diagnostic density plot per non-degenerate week.
calc.score.peak.wk <- function(forecast.seasons, seasonNumber, obs.sx) {
  # Number of forecast replicates per forecasting week (length(previous.simplexes)).
  sfMax <- 45
  # Week where the true peak is
  obs <- which.max(obs.sx)
  sx.p_i <- NULL
  for (i in 1:7) { # For each forecasting week 0, 4, ..., 24
    # Generate the distribution based on varying forecasts
    preds <- forecast.seasons[[seasonNumber]][[i]][1:sfMax]
    if (var(preds) == 0) {
      # Degenerate case: all replicates agree; assign probability 1 (log = 0),
      # matching the convention used in calc.score.peak.tot.inc().
      p_i <- 1
    } else {
      # Fit a skew-normal to the replicate peak weeks and evaluate its density
      # on each week of the season.
      fit <- snormFit(preds)$par
      ds <- NULL
      for (wk in 1:52) {
        ds <- c(ds, dsnorm(wk, mean = fit[[1]],
                           sd = fit[[2]],
                           xi = fit[[3]]) )
      }
      # The distribution from all sf forecasts (diagnostic plot).
      plot(dsnorm(seq(0, 52, by = 1), mean = fit[[1]],
                  sd = fit[[2]],
                  xi = fit[[3]]), type="l", ylab = "")
      # Probability assigned to the observed peak week.
      # BUG FIX: the original assigned `p <- ds` unconditionally after the
      # if/else, which clobbered the var == 0 branch and errored with an
      # undefined `ds` when the first iteration had zero variance.
      p_i <- ds[obs]
    }
    sx.p_i <- c(sx.p_i, log(p_i))
  }
  # Season score = mean log-probability across the 7 forecasting weeks.
  score.sx <- mean(sx.p_i)
  return(score.sx)
}
# Calculate the log-score for the peak-incidence or seasonal-incidence target
# of one season.
#
# Arguments:
#   forecast.seasons -- output of forecast.test() with a scalar incidence target
#                       (one estimate per simplex replicate).
#   seasonNumber     -- season (1-4) to score.
#   opt.obs.sx       -- observed target value (used only in the diagnostic plot).
#   binMin, binMax   -- bounds of the scoring bin around the observed value.
#   maxInc           -- x-axis range of the diagnostic plot.
# Returns: mean log-probability (over the 7 forecasting weeks) that the fitted
#   skew-normal places inside [binMin, binMax].
# Side effect: draws one diagnostic density plot per non-degenerate week.
calc.score.peak.tot.inc <- function(forecast.seasons, seasonNumber, opt.obs.sx, binMin, binMax, maxInc) {
  sfMax <- 45 # length(previous.simplexes)
  p <- NULL
  for (i in 1:7) { # For each forecasting week 0, 4, ..., 24
    preds <- forecast.seasons[[seasonNumber]][[i]][1:sfMax]
    if (var(preds) == 0) {
      # Degenerate replicate set: assign probability 1 (log contribution 0).
      p <- c(p, 1)
    } else {
      # Probability mass of the fitted skew-normal inside [binMin, binMax].
      p <- c(p, psnorm(binMax, mean = snormFit(preds)$par[[1]],
                       sd = snormFit(preds)$par[[2]],
                       xi = snormFit(preds)$par[[3]])
             - psnorm(binMin, mean = snormFit(preds)$par[[1]],
                      sd = snormFit(preds)$par[[2]],
                      xi = snormFit(preds)$par[[3]]))
      # Diagnostic plot: fitted density with bin bounds and the observed value.
      plot(dsnorm(seq(0, maxInc, by = 1), mean = snormFit(preds)$par[[1]],
                  sd = snormFit(preds)$par[[2]],
                  xi = snormFit(preds)$par[[3]]), type="l", ylab = "")
      abline(v = c(binMin, opt.obs.sx, binMax))
    }
  }
  inc.score <- mean(log(p))
  return(inc.score)
}
# Plot the forecast trajectory (mean + 95% band) for each forecasting week of a season.
#
# Arguments:
#   forecast.seasons -- output of forecast.test(..., target = 0): nested lists of
#                       52-week trajectories per season / forecasting week / replicate.
#   seasonNumber     -- season (1-4) to plot.
#   obs.sx           -- observed (denormalized) weekly incidence for that season.
#   maxInc           -- y-axis upper limit.
#   fig              -- output subdirectory name under ../output/.
# Side effects: draws one figure per forecasting week and saves it as a PDF.
plot.forecasts <- function(forecast.seasons, seasonNumber, obs.sx, maxInc, fig) {
  sfMax <- 45 # length(previous.simplexes)
  time <- 1:52
  for (i in 1:7) { # For each forecasting week 0, 4, ..., 24
    wk <- seq(0, 24, 4)[[i]]
    # preds[[t]]: distribution of the sfMax replicate forecasts for week t.
    preds <- list()
    for (t in time) {
      time.ds <- NULL # Distribution across sf for a time point
      for (sf in 1:sfMax) { # For each time point obtain ds metrics across sf
        time.ds <- c(time.ds, forecast.seasons[[seasonNumber]][[i]][[sf]][[t]])
      }
      preds[[t]] <- time.ds
    }
    # Skew-normal summary of the replicate distribution at each week.
    # (Removed an unused `med.q` accumulator present in the original.)
    low.q <- NULL
    mean.sn <- NULL
    up.q <- NULL
    for (t in time) {
      # Lower quantile.
      # NOTE(review): xi is capped at 5 here but not for the upper quantile --
      # confirm the asymmetry is intentional.
      low.q <- c(low.q, qsnorm(0.025,
                               mean = snormFit(preds[[t]])$par[[1]],
                               sd = snormFit(preds[[t]])$par[[2]],
                               if (snormFit(preds[[t]])$par[[3]] > 5) {
                                 xi = 5 # Keep large numbers but avoid infinity
                               } else {
                                 xi = snormFit(preds[[t]])$par[[3]]
                               }) )
      # Mean
      mean.sn <- c(mean.sn, snormFit(preds[[t]])$par[[1]] )
      # Upper quantile
      up.q <- c(up.q, qsnorm(0.975,
                             mean = snormFit(preds[[t]])$par[[1]],
                             sd = snormFit(preds[[t]])$par[[2]],
                             xi = snormFit(preds[[t]])$par[[3]]) )
    }
    # Replace undefined band endpoints with 0 so the smoothing and polygon
    # below have finite values.  (Simplified from four redundant if/else
    # blocks whose else branches were no-op self-assignments.)
    if (is.na(low.q[1])) low.q[1] <- 0
    if (is.na(low.q[length(low.q)])) low.q[length(low.q)] <- 0
    if (is.na(up.q[1])) up.q[1] <- 0
    if (is.na(up.q[length(up.q)])) up.q[length(up.q)] <- 0
    # Add observations for a particular forecasting week
    obs <- data.frame(x = time[1:wk], y = obs.sx[1:wk])
    # Smooth the lower band beyond the forecasting week; clamp at 0.
    low <- data.frame(x = time[wk:52], y = low.q[wk:52])
    slow <- as.data.frame(supsmu(low$x, low$y, bass = 2))
    slow$y[slow$y < 0] <- 0
    slow <- slow[-1,]
    lower <- rbind(obs, slow)
    # Smooth the upper band beyond the forecasting week; clamp at 0.
    up <- data.frame(x = time[wk:52], y = up.q[wk:52])
    sup <- as.data.frame(supsmu(up$x, up$y, bass = 2))
    sup$y[sup$y < 0] <- 0
    sup <- sup[-1,]
    upper <- rbind(obs, sup)
    # Average forecast
    m <- data.frame(x = time[wk:52], y = mean.sn[wk:52])
    sm <- as.data.frame(supsmu(m$x, m$y, bass = 2))
    sm$y[sm$y < 0] <- 0
    sm <- sm[-1,]
    avg.sn <- rbind(obs, sm)
    # Set colors
    col_obs <- "black"
    col_pred <- "#00BFC4"
    col_bound <- rgb(col2rgb(col_pred)[1]/255, col2rgb(col_pred)[2]/255, col2rgb(col_pred)[3]/255, 0.4)
    # Plot observations for a season
    par(mar = c(5.1, 6.1, 4.1, 2.1), mgp = c(2, 0.5, 0))
    plot(obs.sx, type = "l", lwd = 2, col = col_obs, xlim = c(0, 52), ylim = c(0, maxInc),
         xlab = "Season week", ylab = "", las = 1, tck = -0.04,
         cex.axis = 1.2, cex.lab = 1.3)
    title(ylab = "Incidence (cases/week)", mgp = c(3, 0.5, 0), cex.lab = 1.3)
    lines(avg.sn$x[(wk+1):52], avg.sn$y[(wk+1):52], type = "l", lwd = 4, col = col_pred)
    polygon(c(upper$x, rev(lower$x) ), c(upper$y, rev(lower$y) ), col = col_bound, border = NA)
    # Mask the seam at the forecasting week and the right edge of the band.
    rect(wk, 0, wk+1, maxInc, col = "white", border = NA)
    rect(52, 0, 53, maxInc, col = "white", border = NA)
    lines(obs.sx, type = "l", lwd = 2, col = col_obs)
    abline(v = wk, lwd = 2, lty = 2) # The forecasting week
    # Save the current device contents to a PDF.
    file_name = paste("../output/", fig,"/forecast_wk_", wk, "_season_", seasonNumber, ".pdf", sep="")
    dev.copy(pdf, file = file_name, width = 4, height = 3)
    dev.off()
  }
}
# Create boxplots of the forecast-target distribution for all 7 forecasting
# weeks of one season, and save the figure as a PDF.
#
# Arguments:
#   forecast.seasons -- output of forecast.test() with a scalar target
#                       (1 = peak week, 2 = peak incidence, else seasonal incidence).
#   seasonNumber     -- season (1-4) to plot.
#   obs.sx           -- observed weekly incidence (used for the "truth" line).
#   fig              -- output subdirectory name under ../output/.
#   target           -- target code; selects clamping, axis limits and labels.
# Side effects: draws and saves one figure.  Uses rsnorm(), so output depends
#   on the RNG state (NOTE(review): no set.seed() here -- confirm the caller
#   handles reproducibility).
boxplot.target <- function(forecast.seasons, seasonNumber, obs.sx, fig, target) {
  # Set colors
  col_pred <- "#00BFC4"
  col_bound <- rgb(col2rgb(col_pred)[1]/255, col2rgb(col_pred)[2]/255, col2rgb(col_pred)[3]/255, 0.4)
  sfMax <- 45 # length(previous.simplexes)
  ds <- list()
  for (i in 1:7) { # For each forecasting week 0, 4, ..., 24
    # Obtain the skew normal distribution based on forecasts
    preds <- forecast.seasons[[seasonNumber]][[i]][1:sfMax]
    ds[[i]] <- rsnorm(10000, mean = snormFit(preds)$par[[1]],
                      sd = snormFit(preds)$par[[2]],
                      xi = snormFit(preds)$par[[3]])
    # Clamp draws to the target's valid range (weeks 1-52, or nonnegative counts).
    if (target == 1) {
      ds[[i]][ds[[i]] < 1] <- 1
      ds[[i]][ds[[i]] > 52] <- 52
    } else {
      ds[[i]][ds[[i]] < 0] <- 0
    }
  }
  # Target-specific parameters
  if (target == 1) {
    targetName <- "peak.wk"
    obs <- which.max(obs.sx)
    y.lim <- c(1, 52)
    y.lab <- "Peak week"
    y.place <- 2  # y-axis title offset (lines)
  } else if (target == 2) {
    targetName <- "peak.inc"
    obs <- max(obs.sx)
    y.lim <- c(0, 400)
    y.lab <- "Peak incidence"
    y.place <- 2.8
  } else {
    targetName <- "tot.inc"
    obs <- sum(obs.sx)
    y.lim <- c(0, 8000)
    y.lab <- "Seasonal incidence"
    y.place <- 3.6
  }
  par(mar = c(5.1, 6.1, 4.1, 2.1), mgp = c(2, 0.5, 0))
  boxplot(ds, col = col_bound, names = c(0, 4, 8, 12, 16, 20, 24), outline = FALSE, ylim = y.lim,
          xlab = "", ylab = "", las = 1, medcol = col_pred, xaxt = "n", yaxt = "n")
  title(xlab = "Forecasting week", mgp = c(2, 0.5, 0), cex.lab = 1.3)
  title(ylab = y.lab, mgp = c(y.place, 0.5, 0), cex.lab = 1.3)
  axis(1, tck = -0.04, cex.axis = 1.2, cex.lab = 1.3, at = 1:7, labels = c(0, 4, 8, 12, 16, 20, 24))
  axis(2, tck = -0.04, cex.axis = 1.2, cex.lab = 1.3, las = 1)
  # Observed value of the target for this season.
  abline(h = obs, lwd = 2, lty = 2)
  file_name = paste("../output/", fig, "/", targetName, "_season_", seasonNumber, ".pdf", sep="")
  dev.copy(pdf, file = file_name, width = 3.5, height = 3.5)
  dev.off()
}
# END OF SCRIPT | /code/forecast_challenge_functions.R | no_license | rafalopespx/EDMdengue | R | false | false | 14,252 | r | ################################################
# Functions for Forecasting Challenge Analyses #
# #
# nicole.nova@stanford.edu #
################################################
# Out-of-sample forecasts.
# Generates 52-week forecast trajectories for four held-out seasons, refitting
# the simplex-projection model with progressively longer training data (one
# extra quarter per season) so each season is forecast using only its past.
#
# Arguments:
#   PR           -- normalized weekly data frame ($date, $cases, driver columns).
#   train.length -- length of the original training dataset, in weeks.
#   ts.dim       -- embedding columns.  NOTE(review): this argument is unused;
#                   the block_lnlp() call below reads the global
#                   `ts.dim.cases.drivers` instead -- confirm intended.
#   target       -- 0 = full 52-week trajectories; 1 = peak week;
#                   2 = peak incidence; anything else = seasonal incidence.
# Returns: list of 4 seasons; each season is a list over the 7 forecasting
#   weeks (0, 4, ..., 24); each of those holds one entry per simplex replicate.
# NOTE(review): also depends on the global `PR.orig` (used to denormalize
#   cases) and on block_lnlp() from the rEDM package.
forecast.test <- function(PR, train.length, ts.dim, target){
  # Edge/outer part of a simplex: number of nearest neighbors to use.
  n_start <- 2
  n_end <- 2
  # Creating simplexes using data from half a year, a year, and 1.5 years ago
  # (lead times, in weeks, between the forecast origin and the season start).
  previous.simplexes <- c(seq(1, 20), seq(50, 59), seq(74, 88))
  # Generate multiple datasets with varying lengths
  quarter <- 13 # weeks
  train.length <- train.length # length of the original training dataset in weeks
  # Create new dataframes with added quarters sequentially thoughout the testing
  # seasons.  PR.0 ... PR.16 are created in this function's environment via assign().
  for (i in 0:16) {
    end <- train.length + quarter*i
    assign(paste0("PR.", i), PR[1:end, ])
  }
  # Set parameters for length of data on the first week in each season (1-4)
  s1 <- length(PR.0$cases) + 1
  s2 <- length(PR.4$cases) + 1
  s3 <- length(PR.8$cases) + 1
  s4 <- length(PR.12$cases) + 1
  # Specify which dataset to use for forecasting and update the attractor for new forecasts
  for (dfNumber in seq(4, 16, 4)) {
    # Look up the season's training data frame by name (PR.4, PR.8, PR.12, PR.16).
    df <- eval(parse(text = paste0("PR.", dfNumber)))
    # Length of new dataframe.  NOTE(review): dfl is never used.
    dfl <- length(df$date)
    # Season number
    season <- dfNumber/4
    # Obtain the denormalized observations for a particular season
    obs.s <- df$cases*sd(PR.orig$cases) + mean(PR.orig$cases)
    start.season <- eval(parse(text = paste0("s", season)))
    obs.s <- obs.s[start.season:length(df$cases)]
    # Initialize forecast results for each forecasting week f_0, f_4, ..., f_24
    sample1 <- NULL
    sample2 <- NULL
    sample3 <- NULL
    sample4 <- NULL
    sample5 <- NULL
    sample6 <- NULL
    sample7 <- NULL
    for (nn in n_start:n_end) {
      for (sf in previous.simplexes) { # Varying over simplex forecasts starting from closest week to season
        # Initialize the forecasts for each seasons on weeks 0, 4, ..., 24
        f_0 <- NULL
        f_4 <- NULL
        f_8 <- NULL
        f_12 <- NULL
        f_16 <- NULL
        f_20 <- NULL
        f_24 <- NULL
        # Add new observations for f_4, f_8, f_12, f_16, f_20, f_24: weeks
        # already observed by each later forecasting week are filled with the
        # true values.
        f_4 <- c(f_4, obs.s[1:4])
        f_8 <- c(f_8, obs.s[1:8])
        f_12 <- c(f_12, obs.s[1:12])
        f_16 <- c(f_16, obs.s[1:16])
        f_20 <- c(f_20, obs.s[1:20])
        f_24 <- c(f_24, obs.s[1:24])
        # Index of the season start within this replicate's prediction vector.
        s <- start.season - sf
        for (forecast.horizon in sf:(51+sf)) { # For each forecast horizon: tp = 1, 2, .., 52
          ssr.cases <- block_lnlp(df, columns = ts.dim.cases.drivers,
                                  target_column = "cases", stats_only = F,
                                  method = "simplex", exclusion_radius = NULL, tp = forecast.horizon,
                                  num_neighbors = nn)
          # NOTE(review): `stats` is never used.
          stats <- ssr.cases[[1]]$stats
          ssr.cases <- ssr.cases[[1]]$model_output
          # Denormalize observations and predictions back to case counts.
          # NOTE(review): ssr.cases.obs is never used.
          ssr.cases.obs <- ssr.cases$obs*sd(PR.orig$cases) + mean(PR.orig$cases)
          ssr.cases.pred <- ssr.cases$pred*sd(PR.orig$cases) + mean(PR.orig$cases)
          f_0 <- c(f_0, ssr.cases.pred[s])
          f_4 <- c(f_4, ssr.cases.pred[s+4])
          f_8 <- c(f_8, ssr.cases.pred[s+8])
          f_12 <- c(f_12, ssr.cases.pred[s+12])
          f_16 <- c(f_16, ssr.cases.pred[s+16])
          f_20 <- c(f_20, ssr.cases.pred[s+20])
          f_24 <- c(f_24, ssr.cases.pred[s+24])
        }
        e <- 52 # weeks in a year
        # Truncate each forecasting week's trajectory to one full season.
        f <- list(f_0[1:e], f_4[1:e], f_8[1:e], f_12[1:e], f_16[1:e], f_20[1:e], f_24[1:e])
        if (target == 0) {
          # These are the n forecasts for each season
          sample1 <- append(sample1, list(f[[1]]))
          sample2 <- append(sample2, list(f[[2]]))
          sample3 <- append(sample3, list(f[[3]]))
          sample4 <- append(sample4, list(f[[4]]))
          sample5 <- append(sample5, list(f[[5]]))
          sample6 <- append(sample6, list(f[[6]]))
          sample7 <- append(sample7, list(f[[7]]))
        } else if (target == 1) {
          # These are the estimates of peak week for each n forecast
          sample1 <- c(sample1, which.max(f[[1]]))
          sample2 <- c(sample2, which.max(f[[2]]))
          sample3 <- c(sample3, which.max(f[[3]]))
          sample4 <- c(sample4, which.max(f[[4]]))
          sample5 <- c(sample5, which.max(f[[5]]))
          sample6 <- c(sample6, which.max(f[[6]]))
          sample7 <- c(sample7, which.max(f[[7]]))
        } else if (target == 2) {
          # These are the estimates of peak incidence for each n forecast
          sample1 <- c(sample1, max(f[[1]]))
          sample2 <- c(sample2, max(f[[2]]))
          sample3 <- c(sample3, max(f[[3]]))
          sample4 <- c(sample4, max(f[[4]]))
          sample5 <- c(sample5, max(f[[5]]))
          sample6 <- c(sample6, max(f[[6]]))
          sample7 <- c(sample7, max(f[[7]]))
        } else {
          # These are the estimates of seasonal incidence for each n forecast
          sample1 <- c(sample1, sum(f[[1]]))
          sample2 <- c(sample2, sum(f[[2]]))
          sample3 <- c(sample3, sum(f[[3]]))
          sample4 <- c(sample4, sum(f[[4]]))
          sample5 <- c(sample5, sum(f[[5]]))
          sample6 <- c(sample6, sum(f[[6]]))
          sample7 <- c(sample7, sum(f[[7]]))
        }
      }
    }
    # Collect forecasts for each forecasting week for each season (1-4)
    assign(paste0("sample.", season), list(sample1, sample2, sample3, sample4, sample5, sample6, sample7) )
  } # End of for loop for each season
  forecast.seasons <- list(sample.1, sample.2, sample.3, sample.4)
  return(forecast.seasons)
}
# Calculate the log-score for the peak-week target of one season.
#
# Arguments:
#   forecast.seasons -- output of forecast.test() with target = 1
#                       (one peak-week estimate per simplex replicate).
#   seasonNumber     -- season (1-4) to score.
#   obs.sx           -- observed weekly incidence for that season.
# Returns: mean log-probability (over the 7 forecasting weeks) that the
#   skew-normal fitted to the replicate peak weeks places on the true peak week.
# Side effect: draws one diagnostic density plot per non-degenerate week.
calc.score.peak.wk <- function(forecast.seasons, seasonNumber, obs.sx) {
  # Number of forecast replicates per forecasting week (length(previous.simplexes)).
  sfMax <- 45
  # Week where the true peak is
  obs <- which.max(obs.sx)
  sx.p_i <- NULL
  for (i in 1:7) { # For each forecasting week 0, 4, ..., 24
    # Generate the distribution based on varying forecasts
    preds <- forecast.seasons[[seasonNumber]][[i]][1:sfMax]
    if (var(preds) == 0) {
      # Degenerate case: all replicates agree; assign probability 1 (log = 0),
      # matching the convention used in calc.score.peak.tot.inc().
      p_i <- 1
    } else {
      # Fit a skew-normal to the replicate peak weeks and evaluate its density
      # on each week of the season.
      fit <- snormFit(preds)$par
      ds <- NULL
      for (wk in 1:52) {
        ds <- c(ds, dsnorm(wk, mean = fit[[1]],
                           sd = fit[[2]],
                           xi = fit[[3]]) )
      }
      # The distribution from all sf forecasts (diagnostic plot).
      plot(dsnorm(seq(0, 52, by = 1), mean = fit[[1]],
                  sd = fit[[2]],
                  xi = fit[[3]]), type="l", ylab = "")
      # Probability assigned to the observed peak week.
      # BUG FIX: the original assigned `p <- ds` unconditionally after the
      # if/else, which clobbered the var == 0 branch and errored with an
      # undefined `ds` when the first iteration had zero variance.
      p_i <- ds[obs]
    }
    sx.p_i <- c(sx.p_i, log(p_i))
  }
  # Season score = mean log-probability across the 7 forecasting weeks.
  score.sx <- mean(sx.p_i)
  return(score.sx)
}
# Calculate the log-score for the peak-incidence or seasonal-incidence target
# of one season.
#
# Arguments:
#   forecast.seasons -- output of forecast.test() with a scalar incidence target
#                       (one estimate per simplex replicate).
#   seasonNumber     -- season (1-4) to score.
#   opt.obs.sx       -- observed target value (used only in the diagnostic plot).
#   binMin, binMax   -- bounds of the scoring bin around the observed value.
#   maxInc           -- x-axis range of the diagnostic plot.
# Returns: mean log-probability (over the 7 forecasting weeks) that the fitted
#   skew-normal places inside [binMin, binMax].
# Side effect: draws one diagnostic density plot per non-degenerate week.
calc.score.peak.tot.inc <- function(forecast.seasons, seasonNumber, opt.obs.sx, binMin, binMax, maxInc) {
  sfMax <- 45 # length(previous.simplexes)
  p <- NULL
  for (i in 1:7) { # For each forecasting week 0, 4, ..., 24
    preds <- forecast.seasons[[seasonNumber]][[i]][1:sfMax]
    if (var(preds) == 0) {
      # Degenerate replicate set: assign probability 1 (log contribution 0).
      p <- c(p, 1)
    } else {
      # Probability mass of the fitted skew-normal inside [binMin, binMax].
      p <- c(p, psnorm(binMax, mean = snormFit(preds)$par[[1]],
                       sd = snormFit(preds)$par[[2]],
                       xi = snormFit(preds)$par[[3]])
             - psnorm(binMin, mean = snormFit(preds)$par[[1]],
                      sd = snormFit(preds)$par[[2]],
                      xi = snormFit(preds)$par[[3]]))
      # Diagnostic plot: fitted density with bin bounds and the observed value.
      plot(dsnorm(seq(0, maxInc, by = 1), mean = snormFit(preds)$par[[1]],
                  sd = snormFit(preds)$par[[2]],
                  xi = snormFit(preds)$par[[3]]), type="l", ylab = "")
      abline(v = c(binMin, opt.obs.sx, binMax))
    }
  }
  inc.score <- mean(log(p))
  return(inc.score)
}
# Plot the forecast trajectory (mean + 95% band) for each forecasting week of a season.
#
# Arguments:
#   forecast.seasons -- output of forecast.test(..., target = 0): nested lists of
#                       52-week trajectories per season / forecasting week / replicate.
#   seasonNumber     -- season (1-4) to plot.
#   obs.sx           -- observed (denormalized) weekly incidence for that season.
#   maxInc           -- y-axis upper limit.
#   fig              -- output subdirectory name under ../output/.
# Side effects: draws one figure per forecasting week and saves it as a PDF.
plot.forecasts <- function(forecast.seasons, seasonNumber, obs.sx, maxInc, fig) {
  sfMax <- 45 # length(previous.simplexes)
  time <- 1:52
  for (i in 1:7) { # For each forecasting week 0, 4, ..., 24
    wk <- seq(0, 24, 4)[[i]]
    # preds[[t]]: distribution of the sfMax replicate forecasts for week t.
    preds <- list()
    for (t in time) {
      time.ds <- NULL # Distribution across sf for a time point
      for (sf in 1:sfMax) { # For each time point obtain ds metrics across sf
        time.ds <- c(time.ds, forecast.seasons[[seasonNumber]][[i]][[sf]][[t]])
      }
      preds[[t]] <- time.ds
    }
    # Skew-normal summary of the replicate distribution at each week.
    # (Removed an unused `med.q` accumulator present in the original.)
    low.q <- NULL
    mean.sn <- NULL
    up.q <- NULL
    for (t in time) {
      # Lower quantile.
      # NOTE(review): xi is capped at 5 here but not for the upper quantile --
      # confirm the asymmetry is intentional.
      low.q <- c(low.q, qsnorm(0.025,
                               mean = snormFit(preds[[t]])$par[[1]],
                               sd = snormFit(preds[[t]])$par[[2]],
                               if (snormFit(preds[[t]])$par[[3]] > 5) {
                                 xi = 5 # Keep large numbers but avoid infinity
                               } else {
                                 xi = snormFit(preds[[t]])$par[[3]]
                               }) )
      # Mean
      mean.sn <- c(mean.sn, snormFit(preds[[t]])$par[[1]] )
      # Upper quantile
      up.q <- c(up.q, qsnorm(0.975,
                             mean = snormFit(preds[[t]])$par[[1]],
                             sd = snormFit(preds[[t]])$par[[2]],
                             xi = snormFit(preds[[t]])$par[[3]]) )
    }
    # Replace undefined band endpoints with 0 so the smoothing and polygon
    # below have finite values.  (Simplified from four redundant if/else
    # blocks whose else branches were no-op self-assignments.)
    if (is.na(low.q[1])) low.q[1] <- 0
    if (is.na(low.q[length(low.q)])) low.q[length(low.q)] <- 0
    if (is.na(up.q[1])) up.q[1] <- 0
    if (is.na(up.q[length(up.q)])) up.q[length(up.q)] <- 0
    # Add observations for a particular forecasting week
    obs <- data.frame(x = time[1:wk], y = obs.sx[1:wk])
    # Smooth the lower band beyond the forecasting week; clamp at 0.
    low <- data.frame(x = time[wk:52], y = low.q[wk:52])
    slow <- as.data.frame(supsmu(low$x, low$y, bass = 2))
    slow$y[slow$y < 0] <- 0
    slow <- slow[-1,]
    lower <- rbind(obs, slow)
    # Smooth the upper band beyond the forecasting week; clamp at 0.
    up <- data.frame(x = time[wk:52], y = up.q[wk:52])
    sup <- as.data.frame(supsmu(up$x, up$y, bass = 2))
    sup$y[sup$y < 0] <- 0
    sup <- sup[-1,]
    upper <- rbind(obs, sup)
    # Average forecast
    m <- data.frame(x = time[wk:52], y = mean.sn[wk:52])
    sm <- as.data.frame(supsmu(m$x, m$y, bass = 2))
    sm$y[sm$y < 0] <- 0
    sm <- sm[-1,]
    avg.sn <- rbind(obs, sm)
    # Set colors
    col_obs <- "black"
    col_pred <- "#00BFC4"
    col_bound <- rgb(col2rgb(col_pred)[1]/255, col2rgb(col_pred)[2]/255, col2rgb(col_pred)[3]/255, 0.4)
    # Plot observations for a season
    par(mar = c(5.1, 6.1, 4.1, 2.1), mgp = c(2, 0.5, 0))
    plot(obs.sx, type = "l", lwd = 2, col = col_obs, xlim = c(0, 52), ylim = c(0, maxInc),
         xlab = "Season week", ylab = "", las = 1, tck = -0.04,
         cex.axis = 1.2, cex.lab = 1.3)
    title(ylab = "Incidence (cases/week)", mgp = c(3, 0.5, 0), cex.lab = 1.3)
    lines(avg.sn$x[(wk+1):52], avg.sn$y[(wk+1):52], type = "l", lwd = 4, col = col_pred)
    polygon(c(upper$x, rev(lower$x) ), c(upper$y, rev(lower$y) ), col = col_bound, border = NA)
    # Mask the seam at the forecasting week and the right edge of the band.
    rect(wk, 0, wk+1, maxInc, col = "white", border = NA)
    rect(52, 0, 53, maxInc, col = "white", border = NA)
    lines(obs.sx, type = "l", lwd = 2, col = col_obs)
    abline(v = wk, lwd = 2, lty = 2) # The forecasting week
    # Save the current device contents to a PDF.
    file_name = paste("../output/", fig,"/forecast_wk_", wk, "_season_", seasonNumber, ".pdf", sep="")
    dev.copy(pdf, file = file_name, width = 4, height = 3)
    dev.off()
  }
}
# Create boxplots of the forecast-target distribution for all 7 forecasting
# weeks of one season, and save the figure as a PDF.
#
# Arguments:
#   forecast.seasons -- output of forecast.test() with a scalar target
#                       (1 = peak week, 2 = peak incidence, else seasonal incidence).
#   seasonNumber     -- season (1-4) to plot.
#   obs.sx           -- observed weekly incidence (used for the "truth" line).
#   fig              -- output subdirectory name under ../output/.
#   target           -- target code; selects clamping, axis limits and labels.
# Side effects: draws and saves one figure.  Uses rsnorm(), so output depends
#   on the RNG state (NOTE(review): no set.seed() here -- confirm the caller
#   handles reproducibility).
boxplot.target <- function(forecast.seasons, seasonNumber, obs.sx, fig, target) {
  # Set colors
  col_pred <- "#00BFC4"
  col_bound <- rgb(col2rgb(col_pred)[1]/255, col2rgb(col_pred)[2]/255, col2rgb(col_pred)[3]/255, 0.4)
  sfMax <- 45 # length(previous.simplexes)
  ds <- list()
  for (i in 1:7) { # For each forecasting week 0, 4, ..., 24
    # Obtain the skew normal distribution based on forecasts
    preds <- forecast.seasons[[seasonNumber]][[i]][1:sfMax]
    ds[[i]] <- rsnorm(10000, mean = snormFit(preds)$par[[1]],
                      sd = snormFit(preds)$par[[2]],
                      xi = snormFit(preds)$par[[3]])
    # Clamp draws to the target's valid range (weeks 1-52, or nonnegative counts).
    if (target == 1) {
      ds[[i]][ds[[i]] < 1] <- 1
      ds[[i]][ds[[i]] > 52] <- 52
    } else {
      ds[[i]][ds[[i]] < 0] <- 0
    }
  }
  # Target-specific parameters
  if (target == 1) {
    targetName <- "peak.wk"
    obs <- which.max(obs.sx)
    y.lim <- c(1, 52)
    y.lab <- "Peak week"
    y.place <- 2  # y-axis title offset (lines)
  } else if (target == 2) {
    targetName <- "peak.inc"
    obs <- max(obs.sx)
    y.lim <- c(0, 400)
    y.lab <- "Peak incidence"
    y.place <- 2.8
  } else {
    targetName <- "tot.inc"
    obs <- sum(obs.sx)
    y.lim <- c(0, 8000)
    y.lab <- "Seasonal incidence"
    y.place <- 3.6
  }
  par(mar = c(5.1, 6.1, 4.1, 2.1), mgp = c(2, 0.5, 0))
  boxplot(ds, col = col_bound, names = c(0, 4, 8, 12, 16, 20, 24), outline = FALSE, ylim = y.lim,
          xlab = "", ylab = "", las = 1, medcol = col_pred, xaxt = "n", yaxt = "n")
  title(xlab = "Forecasting week", mgp = c(2, 0.5, 0), cex.lab = 1.3)
  title(ylab = y.lab, mgp = c(y.place, 0.5, 0), cex.lab = 1.3)
  axis(1, tck = -0.04, cex.axis = 1.2, cex.lab = 1.3, at = 1:7, labels = c(0, 4, 8, 12, 16, 20, 24))
  axis(2, tck = -0.04, cex.axis = 1.2, cex.lab = 1.3, las = 1)
  # Observed value of the target for this season.
  abline(h = obs, lwd = 2, lty = 2)
  file_name = paste("../output/", fig, "/", targetName, "_season_", seasonNumber, ".pdf", sep="")
  dev.copy(pdf, file = file_name, width = 3.5, height = 3.5)
  dev.off()
}
# END OF SCRIPT |
# Rename the columns of the genotype matrix from findiv ids to dbGaP ids and
# write the result back out as a gzipped table.
library(data.table)  # fread
# BUG FIX: the script used dplyr verbs (%>%, select, distinct, left_join)
# without loading dplyr, so it failed when run standalone.
library(dplyr)

# Root directory holding the dox data files.
DATADIR <- Sys.getenv("DOX_DATA")
# DATADIR <- "~/gdrive/dox_data/"

# SNP-by-individual genotype matrix; columns are findiv ids.
genotype <- fread(paste0("zcat < ", DATADIR, "genotype.txt.gz"), data.table = F, header = T)
rownames(genotype) <- genotype$snpid
genotype$snpid <- NULL
genotype <- as.matrix(genotype)

# cell_line -> findiv mapping.
anno_findiv <- read.table("../data/annotation_findiv.txt", header = T, stringsAsFactors = F) %>%
  select(cell_line, findiv) %>%
  distinct()

sample_anno <- read.table(paste0(DATADIR, "annotation.txt"), header = T, stringsAsFactors = F)

# Add the dbGaP id for each cell line so we can translate findiv -> dbgap.
anno_findiv <- sample_anno %>% select(cell_line, dbgap) %>% distinct() %>% left_join(anno_findiv, by = "cell_line")

# Replace findiv column names with the matching dbGaP ids (left_join preserves
# the column order of the genotype matrix).
colnames(genotype) <- data.frame(findiv = as.integer(colnames(genotype))) %>% left_join(anno_findiv, by = "findiv") %>% .$dbgap

# Write the renamed matrix as a gzip-compressed tab-separated file.
gz <- gzfile(paste0(DATADIR, "genotype_dbgap.txt.gz"), "w")
genotype %>% write.table(gz, quote = F, row.names = T, col.names = T, sep = "\t")
close(gz)
| /code/rename_genotype_cols.R | permissive | davidaknowles/dox | R | false | false | 867 | r | require(data.table)
# Rename the columns of the genotype matrix from findiv ids to dbGaP ids and
# write the result back out as a gzipped table.
# BUG FIX: the script used dplyr verbs (%>%, select, distinct, left_join)
# without loading dplyr, so it failed when run standalone.
library(data.table)  # fread (idempotent if already attached)
library(dplyr)

# Root directory holding the dox data files.
DATADIR <- Sys.getenv("DOX_DATA")
# DATADIR <- "~/gdrive/dox_data/"

# SNP-by-individual genotype matrix; columns are findiv ids.
genotype <- fread(paste0("zcat < ", DATADIR, "genotype.txt.gz"), data.table = F, header = T)
rownames(genotype) <- genotype$snpid
genotype$snpid <- NULL
genotype <- as.matrix(genotype)

# cell_line -> findiv mapping.
anno_findiv <- read.table("../data/annotation_findiv.txt", header = T, stringsAsFactors = F) %>%
  select(cell_line, findiv) %>%
  distinct()

sample_anno <- read.table(paste0(DATADIR, "annotation.txt"), header = T, stringsAsFactors = F)

# Add the dbGaP id for each cell line so we can translate findiv -> dbgap.
anno_findiv <- sample_anno %>% select(cell_line, dbgap) %>% distinct() %>% left_join(anno_findiv, by = "cell_line")

# Replace findiv column names with the matching dbGaP ids (left_join preserves
# the column order of the genotype matrix).
colnames(genotype) <- data.frame(findiv = as.integer(colnames(genotype))) %>% left_join(anno_findiv, by = "findiv") %>% .$dbgap

# Write the renamed matrix as a gzip-compressed tab-separated file.
gz <- gzfile(paste0(DATADIR, "genotype_dbgap.txt.gz"), "w")
genotype %>% write.table(gz, quote = F, row.names = T, col.names = T, sep = "\t")
close(gz)
## This is for the third plot, week 1, for Exploratory Data Analysis
## Plot 3: the three energy sub-metering channels over 1-2 Feb 2007, drawn as
## overlaid line series with a legend, then copied to plot3.png.
# NOTE(review): hard-coded user-specific working directory; prefer a relative
# path or a configurable data location.
setwd("C:/Users/aamar/Desktop")
library(dplyr)
# Bug fix: na.strings was accidentally placed INSIDE the colClasses vector
# (misplaced parenthesis), so "?" was treated as an extra column class and
# never as the NA marker, which broke parsing of the 7 numeric columns.
hpc <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                  colClasses = c("character", "character", rep("numeric", 7)),
                  na.strings = "?")
# Keep only the two days of interest, then build a proper datetime column.
hpcx <- filter(hpc, Date == "2/2/2007" | Date == "1/2/2007")
hpcx$Date <- as.Date(hpcx$Date, format = "%d/%m/%Y")
hpcx$DateTime <- as.POSIXct(paste(hpcx$Date, hpcx$Time))
# English day-of-week labels on the x axis regardless of system locale.
Sys.setlocale("LC_TIME", "English")
## 3
# Set up an empty canvas first (type = "n"), then add one line per sub-meter.
plot(x = hpcx$DateTime,
     y = as.numeric(as.character(hpcx$Sub_metering_1)),
     type = "n",
     xlab = "",
     ylab = "Energy sub metering")
# Bug fix: type = "line" is not a documented plot type (R truncates it to its
# first character with a warning); the correct value for a line plot is "l".
points(hpcx$DateTime, as.numeric(as.character(hpcx$Sub_metering_1)),
       type = "l", col = "black")
points(hpcx$DateTime, as.numeric(as.character(hpcx$Sub_metering_2)),
       type = "l", col = "red")
points(hpcx$DateTime, as.numeric(as.character(hpcx$Sub_metering_3)),
       type = "l", col = "blue")
# Legend labels are simply the names of the three sub-metering columns
# (columns 7-9); renamed from 'legend' to avoid shadowing the legend() function.
legend_labels <- colnames(hpcx)[7:9]
legend("topright",
       legend = legend_labels,
       col = c("black", "red", "blue"),
       lty = c(1, 1, 1))
dev.copy(png, 'plot3.png')
dev.off() | /Exploratory Data Analysis Week 1/plot3.R | no_license | aamari94/datasciencecoursera | R | false | false | 1,348 | r | ## This is for the third plot, week 1, for Exploratory Data Analysis
# NOTE(review): duplicate copy of plot3.R (header comment fused into the
# preceding metadata row). Draws the three energy sub-metering channels for
# 1-2 Feb 2007 as overlaid line series and copies the screen device to PNG.
# NOTE(review): hard-coded user-specific working directory.
setwd("C:/Users/aamar/Desktop")
library(dplyr)
# NOTE(review): a misplaced parenthesis puts na.strings = "?" INSIDE the
# colClasses vector, so read.table never receives na.strings and "?" values
# break parsing of the 7 numeric columns -- confirm and move it out of c().
hpc <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
colClasses = c("character","character",
rep("numeric",7),
na.strings = "?"))
# Keep only the two days of interest, then build a proper datetime column.
hpcx <- filter(hpc, Date == "2/2/2007" | Date == "1/2/2007")
hpcx$Date <- as.Date(hpcx$Date, format = "%d/%m/%Y")
hpcx$DateTime <- as.POSIXct(paste(hpcx$Date, hpcx$Time))
# English day-of-week labels on the x axis regardless of system locale.
Sys.setlocale("LC_TIME", "English")
## 3
# Empty canvas first ("n" is the positional 'type' argument), lines added below.
plot(x = hpcx$DateTime,
y = as.numeric(as.character(hpcx$Sub_metering_1)),
"n",
xlab = "",
ylab = "Energy sub metering")
# NOTE(review): type = "line" is not a valid plot type; R truncates it to
# "l" with a warning. The documented value is "l".
points(hpcx$DateTime, as.numeric(as.character(hpcx$Sub_metering_1)),
type = "line", col = "black")
points(hpcx$DateTime, as.numeric(as.character(hpcx$Sub_metering_2)),
type = "line", col = "red")
points(hpcx$DateTime, as.numeric(as.character(hpcx$Sub_metering_3)),
type = "line", col = "blue")
# Legend labels are the names of columns 7-9 (the sub-metering columns).
# NOTE(review): this local 'legend' shadows the legend() function name.
legend <- c(paste(as.character(colnames(hpcx[7]))),
paste(as.character(colnames(hpcx[8]))),
paste(as.character(colnames(hpcx[9]))))
legend("topright",
legend = legend,
col = c("black", "red", "blue"),
lty = c(1, 1, 1))
dev.copy(png,'plot3.png')
dev.off()
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/constants.R
\docType{data}
\name{AF_PER_CFS_DAY}
\alias{AF_PER_CFS_DAY}
\alias{CFS_DAY_PER_AF}
\alias{CFS_PER_MGD}
\alias{MGD_PER_CFS}
\title{Hydrologic constants}
\format{An object of class \code{numeric} of length 1.}
\usage{
AF_PER_CFS_DAY
}
\description{
used for converting between flows in (k)cfs and volumes in (K)AF or between cfs and MGD
}
\note{
acre-feet to CFS-day is an exact conversion: there are 43560 cubic feet per acre-foot and 86400 seconds in a day, so simplifying the ratio gives 24/12.1 (equivalently 240/121)
}
\author{
Evan Heisman
}
\keyword{datasets}
| /man/AF_PER_CFS_DAY.Rd | permissive | eheisman/hydroutils | R | false | true | 628 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/constants.R
\docType{data}
\name{AF_PER_CFS_DAY}
\alias{AF_PER_CFS_DAY}
\alias{CFS_DAY_PER_AF}
\alias{CFS_PER_MGD}
\alias{MGD_PER_CFS}
\title{Hydrologic constants}
\format{An object of class \code{numeric} of length 1.}
\usage{
AF_PER_CFS_DAY
}
\description{
used for converting between flows in (k)cfs and volumes in (K)AF or between cfs and MGD
}
\note{
acre-feet to CFS-day is an exact conversion as cubic feet per acre-foot is 43560, seconds in a day is 86400, simplifing the conversion gets 24/12.1
}
\author{
Evan Heisman
}
\keyword{datasets}
|
# Figure 5 part 1: correlation of normalized celltypes across patients,
# heatmaps of patient frequencies ordered by LCAM score
library(matrixStats)
library(seriation)
library(RColorBrewer)
# Generates figures 5a-5d and S5A: per-patient cell-type composition analysis
# across three scRNA-seq datasets (Mt. Sinai, Lambrechts, Zilionis).
# Side effects: reads .rd/.csv inputs from data/ and tables/, writes PNG
# panels under output/figures/, and rm()'s the large loaded objects as it goes.
# NOTE(review): bluered() (used below) comes from the 'gplots' package, which
# is not among the library() calls at the top of this file -- confirm it is
# attached elsewhere before running.
figure_5a <- function(){

load("data/lung_ldm.rd")
sample_annots <- read.csv("tables/table_s2.csv",r=1,h=1,stringsAsFactors = F)
annots_list <- read.csv("tables/annots_list.csv",r=1,h=1,stringsAsFactors = F)

# MNP sub-lineages are normalized within their own lineage rather than a
# broader compartment.
annots_list$norm_group[annots_list$lineage=="MNP"] <- "MNP"

## This part of the code figures out the right cluster order, and plots the correlation matrix, using only the V2 beads
#####

# Restrict to V2-chemistry, bead-prepped tumor cells from the Sinai dataset.
cell_mask <- names(lung_ldm$dataset$cell_to_sample)[
sample_annots[lung_ldm$dataset$cell_to_sample,"library_chemistry"]=="V2" &
sample_annots[lung_ldm$dataset$cell_to_sample,"prep"]=="beads" &
sample_annots[lung_ldm$dataset$cell_to_sample,"tissue"]=="Tumor"
]

# Patient x sub-lineage counts; drop the first (unannotated) column, then
# convert to within-patient frequencies.
tab <- table(sample_annots[lung_ldm$dataset$cell_to_sample[cell_mask],"patient_ID"],
annots_list[lung_ldm$dataset$cell_to_cluster[cell_mask],"sub_lineage"])
tab <- tab[,-1]
tab <- tab/rowSums(tab)

# Re-normalize each sub-lineage within its compartment (norm_group) so
# frequencies sum to 1 inside each compartment per patient.
for(norm_group in c("T","B&plasma","MNP","lin_neg")){
tab_tmp <- tab[,annots_list$norm_group[match(colnames(tab),annots_list$sub_lineage)]==norm_group]
tab_tmp <- tab_tmp / rowSums(tab_tmp)
tab[,colnames(tab_tmp)] <- tab_tmp
}

# Cell-type x cell-type Spearman correlation across patients (log10 with a
# 1e-2 pseudocount to tame zeros).
clust_cor <- cor(log10(1e-2+tab),method="spearman")

# LCAM = the three cell types defining the "LCAMhi" axis.
LCAM <- c("T_activated","IgG","MoMac-II")
LCAM_score <- rowSums(log(tab[,LCAM]+1e-2))
#clust_ord <- order(cor(tab,LCAM_score[rownames(tab)],method="spearman")[,1])
# Order clusters by optimal-leaf-ordering seriation of the correlation matrix.
clust_ord <- get_order(seriate(cor(tab,method="spearman")),method="OLO")

# Clip correlations to +/- thresh and map to 1..50 color-bin indices.
mat <- clust_cor[clust_ord,clust_ord]
thresh <- 0.7
mat[mat > thresh] <- thresh
mat[mat < -thresh] <- -thresh
mat <- mat/thresh
mat <- round((mat+1)/2*49)+1
#diag(mat) <- max(mat)+1
mat <- mat[,ncol(mat):1]

pal_cor <- colorRampPalette(rev(brewer.pal(11,"PiYG")))
col <- c(pal_cor(50)[min(mat):max(mat)])

#plotting 5a
png(file.path("output/figures/figure_5a.png"),height=3,width=3.5,pointsize=5,res=300,units="in",bg="transparent")
layout(matrix(1:2,nrow=1),widths=c(10,2))
par(pin=c(1.5,1.5),mar=c(2,2,2,2),oma=c(5,5,5,5))
image(mat,col=col,xaxt="n",yaxt="n")
box()
mtext(side=1,at=seq(0,1,1/(nrow(mat)-1)),rownames(mat),las=2,line=0.25)
mtext(side=2,at=seq(0,1,1/(nrow(mat)-1)),rev(rownames(mat)),las=2,line=0.25)
mtext("Celltype frequency spearman cor.",line=0.5,cex=2)
# Color-scale strip for the correlation heatmap.
par(pin=c(0.125,1))
image(t(1:100),col=pal_cor(100),xaxt="n",yaxt="n")
mtext(paste(c("<",">"),thresh),at=c(0,1),side=4,las=2,line=0.5)
box()
dev.off()

# Figure 5d: per-patient LCAMhi vs LCAMlo scores (tumor samples only),
# colored by source dataset.
LCAM_score <- rowSums(log10(tab[,LCAM]+1e-2))
resting_clusts <- c("B","AM","cDC2","AZU1_mac","Tcm/naive_II","cDC1")
resting_score <- rowSums(log10(tab[,resting_clusts]+1e-2))
tumor_names <- names(LCAM_score)[grepl("Tumor",names(LCAM_score))]
# 1 = Mt. Sinai, 2 = Lambrechts, 3 = Zilionis.
col <- 1+as.numeric(grepl("Lambrechts",tumor_names))+2*as.numeric(grepl("zilionis",tumor_names))

png("output/figures/figure_5d.png",height=2,width=1.84,units="in",res=300,pointsize=5)
par(mar=c(5,5,5,1))
plot(10^LCAM_score[grepl("Tumor",names(LCAM_score))],10^resting_score[grepl("Tumor",names(LCAM_score))],log="xy",
col=col,pch=16,cex=1.5,xaxt="n",yaxt="n",xlab="",ylab="")
mtext("LCAMhi vs. LCAMlo scores",cex=1.7,line=0.1)
legend(c("Dataset:","Mt. Sinai","Lambrechts","Zilionis"),x="bottomleft",pch=16,col=c(0,1:3))
mtext(side=1,"LCAMhi score",line=3,cex=1.5)
mtext(side=2,"LCAMlo score",line=3,cex=1.5)
axis(side=1,at=c(1e-5,1e-3,1e-1))
axis(side=2,at=c(1e-9,1e-7,1e-5))
dev.off()

###### Figure 5B, C, D and S5A
# Rebuild the sample x sub-lineage table using ALL cells (tumor and normal)
# from all three datasets; rm() each large object after its cells are tallied.
tab <- table(apply(sample_annots[lung_ldm$dataset$cell_to_sample,c("patient_ID","tissue")],1,paste,collapse=" "),
annots_list[lung_ldm$dataset$cell_to_cluster,"sub_lineage"])
rm("lung_ldm")
load("data/lambrechts_ldm_200519.rd")
tab <- rbind(tab,table(apply(sample_annots[lambrechts_ldm$dataset$cell_to_sample,c("patient_ID","tissue")],1,paste,collapse=" "),
annots_list[lambrechts_ldm$dataset$cell_to_cluster,"sub_lineage"]))
rm("lambrechts_ldm")
load("data/zilionis_ldm.rd")
tab <- rbind(tab,table(apply(sample_annots[lung_ldm_zili$dataset$cell_to_sample,c("patient_ID","tissue")],1,paste,collapse=" "),
annots_list[lung_ldm_zili$dataset$cell_to_cluster,"sub_lineage"]))
rm("lung_ldm_zili")
tab <- tab[,-1]
tab <- tab/rowSums(tab)
# Keep the un-normalized frequencies for the lineage-level panel (figure 5c).
tab_raw <- tab

# Same within-compartment normalization as above.
for(norm_group in c("T","B&plasma","MNP","lin_neg")){
tab_tmp <- tab[,annots_list$norm_group[match(colnames(tab),annots_list$sub_lineage)]==norm_group]
tab_tmp <- tab_tmp / rowSums(tab_tmp)
tab[,colnames(tab_tmp)] <- tab_tmp
}

# Order patients by LCAMhi-minus-LCAMlo score, separately for tumor and
# normal samples.
LCAM_score <- rowSums(log(tab[,LCAM]+1e-2))
resting_clusts <- c("B","AM","cDC2","AZU1_mac","Tcm/naive_II","cDC1")
resting_score <- rowSums(log(tab[,resting_clusts]+1e-2))
pat_ord_tumor <- order((LCAM_score-resting_score)[grep("Tumor",rownames(tab),v=T)])
pat_ord_normal <- order((LCAM_score-resting_score)[grep("Normal",rownames(tab),v=T)])

# plot highlighted clusters
# Figure 5b: log2 fold-change of each highlighted cluster's frequency over
# its across-sample mean, clipped to +/- 2, normal lung beside tumor.
tab_tumor <- tab[grep("Tumor",rownames(tab)),]
tab_normal <- tab[grep("Normal",rownames(tab)),]

mat_tumor <- t(tab_tumor[pat_ord_tumor,rev(c(resting_clusts,LCAM))])
# Match each tumor sample's patient to its normal-lung sample (if present).
norm_ord <- match(unlist(lapply(strsplit(colnames(mat_tumor)," "),function(x){x[1]})),
unlist(lapply(strsplit(rownames(tab_normal)," "),function(x){x[1]})))
mat_normal <- t(tab_normal[norm_ord,rev(c(resting_clusts,LCAM))])
mat_normal <- mat_normal[,!is.na(colnames(mat_normal))]

clust.means <- rowMeans(cbind(mat_normal,mat_tumor))

mat_normal <- log2((1e-2+mat_normal)/(1e-2+clust.means))
mat_tumor <- log2((1e-2+mat_tumor)/(1e-2+clust.means))

# Clip to +/- thresh and bin into 1..50 for the bluered color scale.
thresh <- 2
mat_normal[mat_normal < -thresh] <- -thresh
mat_normal[mat_normal > thresh] <- thresh
mat_tumor[mat_tumor < -thresh] <- -thresh
mat_tumor[mat_tumor > thresh] <- thresh

mat_normal <- mat_normal/thresh
mat_tumor <- mat_tumor/thresh

mat_normal <- round((mat_normal + 1)/2*49)+1
mat_tumor <- round((mat_tumor+1)/2*49)+1

col_normal <- bluered(50)[min(mat_normal):max(mat_normal)]
col_tumor <- bluered(50)[min(mat_normal):max(mat_normal)]

# Horizontal green divider between LCAM and resting cluster rows.
h <- seq(-0.5,8.5)[4]/8

# Dataset color bars drawn beneath each heatmap.
col_dataset_normal <- array(1,ncol(mat_normal)) + as.numeric(grepl("Lambrechts",colnames(mat_normal)))
col_dataset_tumor <- array(1,ncol(mat_tumor)) + as.numeric(grepl("Lambrechts",colnames(mat_tumor)))+2*as.numeric(grepl("zilionis",colnames(mat_tumor)))

png(file.path("output/figures/figure_5b.png"),height=2.7,width=4.76,pointsize=5,res=300,units="in",bg="transparent")
par(oma=c(5,5,5,5))
layout(matrix(1:6,nrow=2,ncol=3,byrow = T),widths = c(10,10,3),heights=c(6,2.5))
image(t(mat_normal),col=col_normal,xaxt="n",yaxt="n")
mtext(rownames(mat_normal),side=2,at=seq(0,1,1/(nrow(mat_normal)-1)),las=2,line=0.25)
mtext(lapply(strsplit(colnames(mat_normal)," "),function(x){x[1]}),side=1,at=seq(0,1,1/(ncol(mat_normal)-1)),las=2,line=0.25,cex=0.7)
mtext("nLung samples",cex=2,line=0.5)
abline(h=h,col="green",lwd=2)
box()
segments(y0=c(-0.5,2.5,8.5)/8,y1=c(-0.5,2.5,8.5)/8,x0=23.5/23,x1=1.25,xpd=NA,lty=2)
image(t(mat_tumor),col=col_tumor,xaxt="n",yaxt="n")
mtext(lapply(strsplit(colnames(mat_tumor)," "),function(x){x[1]}),side=1,at=seq(0,1,1/(ncol(mat_tumor)-1)),las=2,line=0.25,cex=0.7)
mtext("Tumor samples",cex=2,line=0.5)
abline(h=h,col="green",lwd=2)
box()
#par(pin=c(0.125,0.5))
image(t(1:100),col=bluered(100),xaxt="n",yaxt="n"); box()
mtext("Norm. frequency",line=0.5)
mtext(side=4,at=c(0,1),paste(c("< -",">"),thresh,sep=""),line=0.25,las=2)
# NOTE(review): par(pin=c()) passes NULL, which queries rather than sets the
# plot dimensions -- confirm this no-op is intended.
par(pin=c())
image(as.matrix(col_dataset_normal),col=c(1,2),xaxt="n",yaxt="n")
abline(v=seq(-.5/length(col_dataset_normal),1+.5/(length(col_dataset_normal)+1),(1+2*.5/length(col_dataset_normal))/length(col_dataset_normal)),col="white",lwd=0.2)
box()
image(as.matrix(col_dataset_tumor),col=c(1,2,3),xaxt="n",yaxt="n")
abline(v=seq(-.5/length(col_dataset_tumor),1+.5/(length(col_dataset_tumor)+1),(1+2*.5/length(col_dataset_tumor))/length(col_dataset_tumor)),col="white",lwd=0.2)
box()
dev.off()

### show all clusters in supp:
# Figure S5A: same fold-change heatmaps but across ALL clusters in the
# seriation order computed earlier; NA cells (missing normal samples) are
# painted mid-scale (bin 25).
tab_tumor <- tab[grep("Tumor",rownames(tab)),]
tab_normal <- tab[grep("Normal",rownames(tab)),]

mat_tumor <- t(tab_tumor[pat_ord_tumor,rev(clust_ord)])
norm_ord <- match(unlist(lapply(strsplit(colnames(mat_tumor)," "),function(x){x[1]})),
unlist(lapply(strsplit(rownames(tab_normal)," "),function(x){x[1]})))
mat_normal <- t(tab_normal[norm_ord,rev(clust_ord)])
mat_normal <- mat_normal[,!is.na(colnames(mat_normal))]

clust.means <- rowMeans(cbind(mat_normal,mat_tumor),na.rm=T)

mat_normal <- log2((1e-2+mat_normal)/(1e-2+clust.means))
mat_tumor <- log2((1e-2+mat_tumor)/(1e-2+clust.means))

thresh <- 2
mat_normal[mat_normal < -thresh] <- -thresh
mat_normal[mat_normal > thresh] <- thresh
mat_tumor[mat_tumor < -thresh] <- -thresh
mat_tumor[mat_tumor > thresh] <- thresh

mat_normal <- mat_normal/thresh
mat_tumor <- mat_tumor/thresh

mat_normal <- round((mat_normal + 1)/2*49)+1
mat_tumor <- round((mat_tumor+1)/2*49)+1
mat_normal[is.na(mat_normal)] <- 25
mat_tumor[is.na(mat_tumor)] <- 25

col_normal <- bluered(50)[min(mat_normal):max(mat_normal)]
col_tumor <- bluered(50)[min(mat_normal):max(mat_normal)]

h <- seq(-0.5,8.5)[4]/8

plot_me <- function(){
par(oma=c(5,5,5,5))
layout(matrix(1:3,nrow=1),widths = c(10,10,3))
image(t(mat_normal),col=col_normal,xaxt="n",yaxt="n")
mtext(rownames(mat_normal),side=2,at=seq(0,1,1/(nrow(mat_normal)-1)),las=2,line=0.25,cex=0.5)
mtext(lapply(strsplit(colnames(mat_normal)," "),function(x){x[1]}),side=1,at=seq(0,1,1/(ncol(mat_normal)-1)),las=2,line=0.25,cex=0.6)
mtext("nLung samples",cex=2,line=0.5)
box()
image(t(mat_tumor),col=col_tumor,xaxt="n",yaxt="n")
mtext(lapply(strsplit(colnames(mat_tumor)," "),function(x){x[1]}),side=1,at=seq(0,1,1/(ncol(mat_tumor)-1)),las=2,line=0.25,cex=0.6)
mtext("Tumor samples",cex=2,line=0.5)
box()
par(pin=c(0.125,0.5))
image(t(1:100),col=bluered(100),xaxt="n",yaxt="n"); box()
mtext("Norm. frequency",line=0.5)
mtext(side=4,at=c(0,1),paste(c("< -",">"),thresh,sep=""),line=0.25,las=2)
}

png(file.path("output/figures/figure_s5a.png"),height=2,width=4.76,pointsize=5,res=500,units="in",bg="transparent")
plot_me()
dev.off()


# Plot heatmap by lineage
# Figure 5c: collapse raw (pre-normalization) sub-lineage frequencies into
# major lineages and repeat the fold-change heatmap at lineage level.
lin_ord <- rev(c("NK","T","MNP","pDC","B&plasma","mast"))
tab <- tab_raw
lin_tab <- matrix(NA,nrow=nrow(tab),ncol=length(lin_ord),dimnames=list(rownames(tab),lin_ord))
for(lin in lin_ord){
lin_tab[,lin] <- rowSums(tab[,annots_list$lineage[match(colnames(tab),annots_list$sub_lineage)]==lin,drop=F])
}
tab <- lin_tab

tab_tumor <- tab[grep("Tumor",rownames(tab)),]
tab_normal <- tab[grep("Normal",rownames(tab)),]

mat_tumor <- t(tab_tumor[pat_ord_tumor,])
norm_ord <- match(unlist(lapply(strsplit(colnames(mat_tumor)," "),function(x){x[1]})),
unlist(lapply(strsplit(rownames(tab_normal)," "),function(x){x[1]})))
mat_normal <- t(tab_normal[norm_ord,])
mat_normal <- mat_normal[,!is.na(colnames(mat_normal))]

clust.means <- rowMeans(cbind(mat_normal,mat_tumor),na.rm=T)

mat_normal <- log2((1e-2+mat_normal)/(1e-2+clust.means))
mat_tumor <- log2((1e-2+mat_tumor)/(1e-2+clust.means))

thresh <- 2
mat_normal[mat_normal < -thresh] <- -thresh
mat_normal[mat_normal > thresh] <- thresh
mat_tumor[mat_tumor < -thresh] <- -thresh
mat_tumor[mat_tumor > thresh] <- thresh

mat_normal <- mat_normal/thresh
mat_tumor <- mat_tumor/thresh

mat_normal <- round((mat_normal + 1)/2*49)+1
mat_tumor <- round((mat_tumor+1)/2*49)+1
mat_normal[is.na(mat_normal)] <- 25
mat_tumor[is.na(mat_tumor)] <- 25

col_normal <- bluered(50)[min(mat_normal):max(mat_normal)]
col_tumor <- bluered(50)[min(mat_normal):max(mat_normal)]

h <- seq(-0.5,8.5)[4]/8

plot_me <- function(){
par(oma=c(5,5,5,5))
# NOTE(review): matrix(1:3,nrow=2) recycles 1:3 into a 2x2 matrix (with a
# warning); the analogous panel above uses nrow=1 -- confirm intended layout.
layout(matrix(1:3,nrow=2),widths = c(10,10,3))
image(t(mat_normal),col=col_normal,xaxt="n",yaxt="n")
mtext(rownames(mat_normal),side=2,at=seq(0,1,1/(nrow(mat_normal)-1)),las=2,line=0.25,cex=1)
mtext(lapply(strsplit(colnames(mat_normal)," "),function(x){x[1]}),side=1,at=seq(0,1,1/(ncol(mat_normal)-1)),las=2,line=0.25,cex=0.6)
#mtext("nLung samples",cex=2,line=0.5)
box()
image(t(mat_tumor),col=col_tumor,xaxt="n",yaxt="n")
mtext(lapply(strsplit(colnames(mat_tumor)," "),function(x){x[1]}),side=1,at=seq(0,1,1/(ncol(mat_tumor)-1)),las=2,line=0.25,cex=0.6)
#mtext("Tumor samples",cex=2,line=0.5)
box()
par(pin=c(0.125,0.5))
image(t(1:100),col=bluered(100),xaxt="n",yaxt="n"); box()
mtext("Norm. frequency",line=0.5)
mtext(side=4,at=c(0,1),paste(c("< -",">"),thresh,sep=""),line=0.25,las=2)
}

png("output/figures/figure_5c.png",height=1.5,width=4.76,pointsize=5,res=500,units="in",bg="transparent")
plot_me()
dev.off()
} | /figure_scripts/figure_5abcd_s5a.R | no_license | leaderam/Leader_CITEseq_scripts | R | false | false | 12,593 | r |
# Figure 5 part 1: correlation of normalized celltypes across patients,
# heatmaps of patient frequencies ordered by LCAM score
library(matrixStats)
library(seriation)
library(RColorBrewer)
figure_5a <- function(){
load("data/lung_ldm.rd")
sample_annots <- read.csv("tables/table_s2.csv",r=1,h=1,stringsAsFactors = F)
annots_list <- read.csv("tables/annots_list.csv",r=1,h=1,stringsAsFactors = F)
annots_list$norm_group[annots_list$lineage=="MNP"] <- "MNP"
## This part of the code figures out the right cluster order, and plots the correlation matrix, using only the V2 beads
#####
cell_mask <- names(lung_ldm$dataset$cell_to_sample)[
sample_annots[lung_ldm$dataset$cell_to_sample,"library_chemistry"]=="V2" &
sample_annots[lung_ldm$dataset$cell_to_sample,"prep"]=="beads" &
sample_annots[lung_ldm$dataset$cell_to_sample,"tissue"]=="Tumor"
]
tab <- table(sample_annots[lung_ldm$dataset$cell_to_sample[cell_mask],"patient_ID"],
annots_list[lung_ldm$dataset$cell_to_cluster[cell_mask],"sub_lineage"])
tab <- tab[,-1]
tab <- tab/rowSums(tab)
for(norm_group in c("T","B&plasma","MNP","lin_neg")){
tab_tmp <- tab[,annots_list$norm_group[match(colnames(tab),annots_list$sub_lineage)]==norm_group]
tab_tmp <- tab_tmp / rowSums(tab_tmp)
tab[,colnames(tab_tmp)] <- tab_tmp
}
clust_cor <- cor(log10(1e-2+tab),method="spearman")
LCAM <- c("T_activated","IgG","MoMac-II")
LCAM_score <- rowSums(log(tab[,LCAM]+1e-2))
#clust_ord <- order(cor(tab,LCAM_score[rownames(tab)],method="spearman")[,1])
clust_ord <- get_order(seriate(cor(tab,method="spearman")),method="OLO")
mat <- clust_cor[clust_ord,clust_ord]
thresh <- 0.7
mat[mat > thresh] <- thresh
mat[mat < -thresh] <- -thresh
mat <- mat/thresh
mat <- round((mat+1)/2*49)+1
#diag(mat) <- max(mat)+1
mat <- mat[,ncol(mat):1]
pal_cor <- colorRampPalette(rev(brewer.pal(11,"PiYG")))
col <- c(pal_cor(50)[min(mat):max(mat)])
#plotting 5a
png(file.path("output/figures/figure_5a.png"),height=3,width=3.5,pointsize=5,res=300,units="in",bg="transparent")
layout(matrix(1:2,nrow=1),widths=c(10,2))
par(pin=c(1.5,1.5),mar=c(2,2,2,2),oma=c(5,5,5,5))
image(mat,col=col,xaxt="n",yaxt="n")
box()
mtext(side=1,at=seq(0,1,1/(nrow(mat)-1)),rownames(mat),las=2,line=0.25)
mtext(side=2,at=seq(0,1,1/(nrow(mat)-1)),rev(rownames(mat)),las=2,line=0.25)
mtext("Celltype frequency spearman cor.",line=0.5,cex=2)
par(pin=c(0.125,1))
image(t(1:100),col=pal_cor(100),xaxt="n",yaxt="n")
mtext(paste(c("<",">"),thresh),at=c(0,1),side=4,las=2,line=0.5)
box()
dev.off()
LCAM_score <- rowSums(log10(tab[,LCAM]+1e-2))
resting_clusts <- c("B","AM","cDC2","AZU1_mac","Tcm/naive_II","cDC1")
resting_score <- rowSums(log10(tab[,resting_clusts]+1e-2))
tumor_names <- names(LCAM_score)[grepl("Tumor",names(LCAM_score))]
col <- 1+as.numeric(grepl("Lambrechts",tumor_names))+2*as.numeric(grepl("zilionis",tumor_names))
png("output/figures/figure_5d.png",height=2,width=1.84,units="in",res=300,pointsize=5)
par(mar=c(5,5,5,1))
plot(10^LCAM_score[grepl("Tumor",names(LCAM_score))],10^resting_score[grepl("Tumor",names(LCAM_score))],log="xy",
col=col,pch=16,cex=1.5,xaxt="n",yaxt="n",xlab="",ylab="")
mtext("LCAMhi vs. LCAMlo scores",cex=1.7,line=0.1)
legend(c("Dataset:","Mt. Sinai","Lambrechts","Zilionis"),x="bottomleft",pch=16,col=c(0,1:3))
mtext(side=1,"LCAMhi score",line=3,cex=1.5)
mtext(side=2,"LCAMlo score",line=3,cex=1.5)
axis(side=1,at=c(1e-5,1e-3,1e-1))
axis(side=2,at=c(1e-9,1e-7,1e-5))
dev.off()
###### Figure 5B, C, D and S5A
tab <- table(apply(sample_annots[lung_ldm$dataset$cell_to_sample,c("patient_ID","tissue")],1,paste,collapse=" "),
annots_list[lung_ldm$dataset$cell_to_cluster,"sub_lineage"])
rm("lung_ldm")
load("data/lambrechts_ldm_200519.rd")
tab <- rbind(tab,table(apply(sample_annots[lambrechts_ldm$dataset$cell_to_sample,c("patient_ID","tissue")],1,paste,collapse=" "),
annots_list[lambrechts_ldm$dataset$cell_to_cluster,"sub_lineage"]))
rm("lambrechts_ldm")
load("data/zilionis_ldm.rd")
tab <- rbind(tab,table(apply(sample_annots[lung_ldm_zili$dataset$cell_to_sample,c("patient_ID","tissue")],1,paste,collapse=" "),
annots_list[lung_ldm_zili$dataset$cell_to_cluster,"sub_lineage"]))
rm("lung_ldm_zili")
tab <- tab[,-1]
tab <- tab/rowSums(tab)
tab_raw <- tab
for(norm_group in c("T","B&plasma","MNP","lin_neg")){
tab_tmp <- tab[,annots_list$norm_group[match(colnames(tab),annots_list$sub_lineage)]==norm_group]
tab_tmp <- tab_tmp / rowSums(tab_tmp)
tab[,colnames(tab_tmp)] <- tab_tmp
}
LCAM_score <- rowSums(log(tab[,LCAM]+1e-2))
resting_clusts <- c("B","AM","cDC2","AZU1_mac","Tcm/naive_II","cDC1")
resting_score <- rowSums(log(tab[,resting_clusts]+1e-2))
pat_ord_tumor <- order((LCAM_score-resting_score)[grep("Tumor",rownames(tab),v=T)])
pat_ord_normal <- order((LCAM_score-resting_score)[grep("Normal",rownames(tab),v=T)])
# plot highlighted clusters
tab_tumor <- tab[grep("Tumor",rownames(tab)),]
tab_normal <- tab[grep("Normal",rownames(tab)),]
mat_tumor <- t(tab_tumor[pat_ord_tumor,rev(c(resting_clusts,LCAM))])
norm_ord <- match(unlist(lapply(strsplit(colnames(mat_tumor)," "),function(x){x[1]})),
unlist(lapply(strsplit(rownames(tab_normal)," "),function(x){x[1]})))
mat_normal <- t(tab_normal[norm_ord,rev(c(resting_clusts,LCAM))])
mat_normal <- mat_normal[,!is.na(colnames(mat_normal))]
clust.means <- rowMeans(cbind(mat_normal,mat_tumor))
mat_normal <- log2((1e-2+mat_normal)/(1e-2+clust.means))
mat_tumor <- log2((1e-2+mat_tumor)/(1e-2+clust.means))
thresh <- 2
mat_normal[mat_normal < -thresh] <- -thresh
mat_normal[mat_normal > thresh] <- thresh
mat_tumor[mat_tumor < -thresh] <- -thresh
mat_tumor[mat_tumor > thresh] <- thresh
mat_normal <- mat_normal/thresh
mat_tumor <- mat_tumor/thresh
mat_normal <- round((mat_normal + 1)/2*49)+1
mat_tumor <- round((mat_tumor+1)/2*49)+1
col_normal <- bluered(50)[min(mat_normal):max(mat_normal)]
col_tumor <- bluered(50)[min(mat_normal):max(mat_normal)]
h <- seq(-0.5,8.5)[4]/8
col_dataset_normal <- array(1,ncol(mat_normal)) + as.numeric(grepl("Lambrechts",colnames(mat_normal)))
col_dataset_tumor <- array(1,ncol(mat_tumor)) + as.numeric(grepl("Lambrechts",colnames(mat_tumor)))+2*as.numeric(grepl("zilionis",colnames(mat_tumor)))
png(file.path("output/figures/figure_5b.png"),height=2.7,width=4.76,pointsize=5,res=300,units="in",bg="transparent")
par(oma=c(5,5,5,5))
layout(matrix(1:6,nrow=2,ncol=3,byrow = T),widths = c(10,10,3),heights=c(6,2.5))
image(t(mat_normal),col=col_normal,xaxt="n",yaxt="n")
mtext(rownames(mat_normal),side=2,at=seq(0,1,1/(nrow(mat_normal)-1)),las=2,line=0.25)
mtext(lapply(strsplit(colnames(mat_normal)," "),function(x){x[1]}),side=1,at=seq(0,1,1/(ncol(mat_normal)-1)),las=2,line=0.25,cex=0.7)
mtext("nLung samples",cex=2,line=0.5)
abline(h=h,col="green",lwd=2)
box()
segments(y0=c(-0.5,2.5,8.5)/8,y1=c(-0.5,2.5,8.5)/8,x0=23.5/23,x1=1.25,xpd=NA,lty=2)
image(t(mat_tumor),col=col_tumor,xaxt="n",yaxt="n")
mtext(lapply(strsplit(colnames(mat_tumor)," "),function(x){x[1]}),side=1,at=seq(0,1,1/(ncol(mat_tumor)-1)),las=2,line=0.25,cex=0.7)
mtext("Tumor samples",cex=2,line=0.5)
abline(h=h,col="green",lwd=2)
box()
#par(pin=c(0.125,0.5))
image(t(1:100),col=bluered(100),xaxt="n",yaxt="n"); box()
mtext("Norm. frequency",line=0.5)
mtext(side=4,at=c(0,1),paste(c("< -",">"),thresh,sep=""),line=0.25,las=2)
par(pin=c())
image(as.matrix(col_dataset_normal),col=c(1,2),xaxt="n",yaxt="n")
abline(v=seq(-.5/length(col_dataset_normal),1+.5/(length(col_dataset_normal)+1),(1+2*.5/length(col_dataset_normal))/length(col_dataset_normal)),col="white",lwd=0.2)
box()
image(as.matrix(col_dataset_tumor),col=c(1,2,3),xaxt="n",yaxt="n")
abline(v=seq(-.5/length(col_dataset_tumor),1+.5/(length(col_dataset_tumor)+1),(1+2*.5/length(col_dataset_tumor))/length(col_dataset_tumor)),col="white",lwd=0.2)
box()
dev.off()
### show all clusters in supp:
tab_tumor <- tab[grep("Tumor",rownames(tab)),]
tab_normal <- tab[grep("Normal",rownames(tab)),]
mat_tumor <- t(tab_tumor[pat_ord_tumor,rev(clust_ord)])
norm_ord <- match(unlist(lapply(strsplit(colnames(mat_tumor)," "),function(x){x[1]})),
unlist(lapply(strsplit(rownames(tab_normal)," "),function(x){x[1]})))
mat_normal <- t(tab_normal[norm_ord,rev(clust_ord)])
mat_normal <- mat_normal[,!is.na(colnames(mat_normal))]
clust.means <- rowMeans(cbind(mat_normal,mat_tumor),na.rm=T)
mat_normal <- log2((1e-2+mat_normal)/(1e-2+clust.means))
mat_tumor <- log2((1e-2+mat_tumor)/(1e-2+clust.means))
thresh <- 2
mat_normal[mat_normal < -thresh] <- -thresh
mat_normal[mat_normal > thresh] <- thresh
mat_tumor[mat_tumor < -thresh] <- -thresh
mat_tumor[mat_tumor > thresh] <- thresh
mat_normal <- mat_normal/thresh
mat_tumor <- mat_tumor/thresh
mat_normal <- round((mat_normal + 1)/2*49)+1
mat_tumor <- round((mat_tumor+1)/2*49)+1
mat_normal[is.na(mat_normal)] <- 25
mat_tumor[is.na(mat_tumor)] <- 25
col_normal <- bluered(50)[min(mat_normal):max(mat_normal)]
col_tumor <- bluered(50)[min(mat_normal):max(mat_normal)]
h <- seq(-0.5,8.5)[4]/8
plot_me <- function(){
par(oma=c(5,5,5,5))
layout(matrix(1:3,nrow=1),widths = c(10,10,3))
image(t(mat_normal),col=col_normal,xaxt="n",yaxt="n")
mtext(rownames(mat_normal),side=2,at=seq(0,1,1/(nrow(mat_normal)-1)),las=2,line=0.25,cex=0.5)
mtext(lapply(strsplit(colnames(mat_normal)," "),function(x){x[1]}),side=1,at=seq(0,1,1/(ncol(mat_normal)-1)),las=2,line=0.25,cex=0.6)
mtext("nLung samples",cex=2,line=0.5)
box()
image(t(mat_tumor),col=col_tumor,xaxt="n",yaxt="n")
mtext(lapply(strsplit(colnames(mat_tumor)," "),function(x){x[1]}),side=1,at=seq(0,1,1/(ncol(mat_tumor)-1)),las=2,line=0.25,cex=0.6)
mtext("Tumor samples",cex=2,line=0.5)
box()
par(pin=c(0.125,0.5))
image(t(1:100),col=bluered(100),xaxt="n",yaxt="n"); box()
mtext("Norm. frequency",line=0.5)
mtext(side=4,at=c(0,1),paste(c("< -",">"),thresh,sep=""),line=0.25,las=2)
}
png(file.path("output/figures/figure_s5a.png"),height=2,width=4.76,pointsize=5,res=500,units="in",bg="transparent")
plot_me()
dev.off()
# Plot heatmap by lineage
lin_ord <- rev(c("NK","T","MNP","pDC","B&plasma","mast"))
tab <- tab_raw
lin_tab <- matrix(NA,nrow=nrow(tab),ncol=length(lin_ord),dimnames=list(rownames(tab),lin_ord))
for(lin in lin_ord){
lin_tab[,lin] <- rowSums(tab[,annots_list$lineage[match(colnames(tab),annots_list$sub_lineage)]==lin,drop=F])
}
tab <- lin_tab
tab_tumor <- tab[grep("Tumor",rownames(tab)),]
tab_normal <- tab[grep("Normal",rownames(tab)),]
mat_tumor <- t(tab_tumor[pat_ord_tumor,])
norm_ord <- match(unlist(lapply(strsplit(colnames(mat_tumor)," "),function(x){x[1]})),
unlist(lapply(strsplit(rownames(tab_normal)," "),function(x){x[1]})))
mat_normal <- t(tab_normal[norm_ord,])
mat_normal <- mat_normal[,!is.na(colnames(mat_normal))]
clust.means <- rowMeans(cbind(mat_normal,mat_tumor),na.rm=T)
mat_normal <- log2((1e-2+mat_normal)/(1e-2+clust.means))
mat_tumor <- log2((1e-2+mat_tumor)/(1e-2+clust.means))
thresh <- 2
mat_normal[mat_normal < -thresh] <- -thresh
mat_normal[mat_normal > thresh] <- thresh
mat_tumor[mat_tumor < -thresh] <- -thresh
mat_tumor[mat_tumor > thresh] <- thresh
mat_normal <- mat_normal/thresh
mat_tumor <- mat_tumor/thresh
mat_normal <- round((mat_normal + 1)/2*49)+1
mat_tumor <- round((mat_tumor+1)/2*49)+1
mat_normal[is.na(mat_normal)] <- 25
mat_tumor[is.na(mat_tumor)] <- 25
col_normal <- bluered(50)[min(mat_normal):max(mat_normal)]
col_tumor <- bluered(50)[min(mat_normal):max(mat_normal)]
## NOTE(review): `h` appears unused by the plotting code below -- confirm
## before removing.
h <- seq(-0.5,8.5)[4]/8
## Draw the two heatmaps (normal on the left, tumor on the right) plus a
## narrow colour key, in a 2-row layout with wide outer margins for labels.
plot_me <- function(){
par(oma=c(5,5,5,5))
layout(matrix(1:3,nrow=2),widths = c(10,10,3))
## Panel 1: normal samples; lineage names on the left, patient ids below.
image(t(mat_normal),col=col_normal,xaxt="n",yaxt="n")
mtext(rownames(mat_normal),side=2,at=seq(0,1,1/(nrow(mat_normal)-1)),las=2,line=0.25,cex=1)
mtext(lapply(strsplit(colnames(mat_normal)," "),function(x){x[1]}),side=1,at=seq(0,1,1/(ncol(mat_normal)-1)),las=2,line=0.25,cex=0.6)
#mtext("nLung samples",cex=2,line=0.5)
box()
## Panel 2: tumor samples, column-matched to panel 1.
image(t(mat_tumor),col=col_tumor,xaxt="n",yaxt="n")
mtext(lapply(strsplit(colnames(mat_tumor)," "),function(x){x[1]}),side=1,at=seq(0,1,1/(ncol(mat_tumor)-1)),las=2,line=0.25,cex=0.6)
#mtext("Tumor samples",cex=2,line=0.5)
box()
## Panel 3: colour key, labelled with the capping threshold at both ends.
par(pin=c(0.125,0.5))
image(t(1:100),col=bluered(100),xaxt="n",yaxt="n"); box()
mtext("Norm. frequency",line=0.5)
mtext(side=4,at=c(0,1),paste(c("< -",">"),thresh,sep=""),line=0.25,las=2)
}
## Render the figure to disk at 500 dpi with a transparent background.
png("output/figures/figure_5c.png",height=1.5,width=4.76,pointsize=5,res=500,units="in",bg="transparent")
plot_me()
dev.off()
} |
# Tests for factor_aweek(): conversion of aweek vectors to factors whose
# levels cover the full contiguous range of weeks.
context("factorisation tests")

test_that("factor_aweek will reject non-aweek objects", {
  # A plain character string is not an aweek object and must error.
  expect_error(factor_aweek("2018-W10-1"), "x must be an 'aweek' object")
})

test_that("factor_aweek accounts for edge weeks", {
  # The same weeks supplied with first/last days swapped must produce
  # identical levels spanning the whole W08..W11 range.
  last_first <- get_aweek(c(8, 11), year = 2019, day = c(7, 1))
  first_last <- get_aweek(c(8, 11), year = 2019, day = c(1, 7))
  expected   <- c("2019-W08", "2019-W09", "2019-W10", "2019-W11")
  expect_identical(levels(factor_aweek(last_first)), expected)
  expect_identical(levels(factor_aweek(first_last)), expected)
})

test_that("factor_aweek accounts for edge days across years", {
  # The level range must bridge a year boundary (2015-W53 .. 2016-W02).
  last_first <- get_aweek(c(53, 02), year = 2015:2016, day = c(7, 1))
  first_last <- get_aweek(c(53, 02), year = 2015:2016, day = c(1, 7))
  expected   <- c("2015-W53", "2016-W01", "2016-W02")
  expect_identical(levels(factor_aweek(last_first)), expected)
  expect_identical(levels(factor_aweek(first_last)), expected)
})
| /tests/testthat/test-factor_aweek.R | permissive | reconhub/aweek | R | false | false | 941 | r | context("factorisation tests")
## Duplicate listing of test-factor_aweek.R (aweek package test suite).
test_that("factor_aweek will reject non-aweek objects", {
## A plain character string is not an aweek object and must error.
expect_error(factor_aweek("2018-W10-1"), "x must be an 'aweek' object")
})
test_that("factor_aweek accounts for edge weeks", {
## Same weeks with first/last days swapped should yield identical levels.
w1 <- get_aweek(c(8, 11), year = 2019, day = c(7, 1))
w2 <- get_aweek(c(8, 11), year = 2019, day = c(1, 7))
f1 <- factor_aweek(w1)
f2 <- factor_aweek(w2)
expect_identical(levels(f1), c("2019-W08", "2019-W09", "2019-W10", "2019-W11"))
expect_identical(levels(f2), c("2019-W08", "2019-W09", "2019-W10", "2019-W11"))
})
test_that("factor_aweek accounts for edge days across years", {
## Levels must bridge the 2015/2016 year boundary.
w3 <- get_aweek(c(53, 02), year = 2015:2016, day = c(7, 1))
w4 <- get_aweek(c(53, 02), year = 2015:2016, day = c(1, 7))
f3 <- factor_aweek(w3)
f4 <- factor_aweek(w4)
expect_identical(levels(f3), c("2015-W53", "2016-W01", "2016-W02"))
expect_identical(levels(f4), c("2015-W53", "2016-W01", "2016-W02"))
})
|
### process_WEF.R:
### Do not run stand-alone - source from main tr_data_prep.Rmd for TourismRecreation.
###
### reformat and add rgn_ids to World Economic Forum (WEF) data
###
### Provenance:
### Jun2015 Casey O'Hara - updated for 2015, removed gapfilling, set up for .csv instead of .pdf
### Mar2014 JStewartLowndes; updated from 'clean_WEF.R' by JStewart in May 2013
### May2013 'clean_WEF.R' by JStewart
###
### Data:
### TTCI: Travel and Tourism competitiveness:
### * download .xlsx: http://www3.weforum.org/docs/TT15/WEF_TTCR_Dataset_2015.xlsx
### * note: only 2015 is represented here.
### * read report online: http://reports.weforum.org/travel-and-tourism-competitiveness-report-2015/
### * table 1: http://reports.weforum.org/travel-and-tourism-competitiveness-report-2015/
### index-results-the-travel-tourism-competitiveness-index-ranking-2015/
###
### GCI: Global Competitiveness (not used in 2015 for TR goal; see data_prep_GCI.R in globalprep/WEF-Economics)
### * download .xlsx: http://www3.weforum.org/docs/GCR2014-15/GCI_Dataset_2006-07-2014-15.xlsx
### * note: contains data for each year from 2006/2007 to 2014/2015
### * read report: http://reports.weforum.org/global-competitiveness-report-2014-2015/
### * table 3 in this .pdf: http://reports.weforum.org/global-competitiveness-report-2014-2015/
### wp-content/blogs.dir/54/mp/files/pages/files/tables3-7-wef-globalcompetitivenessreport-2014-15-2.pdf
###
### read in individual files
### call name_to_rgn() from ohicore
##############################################################################=
### WEF TTCI formatting ----
##############################################################################=
# read in files
## `dir_wef` is supplied by the calling document (tr_data_prep.Rmd).
ttci_raw <- read.csv(dir_wef,
skip = 3, check.names = FALSE, stringsAsFactors = FALSE)
### NOTE: check.names = FALSE because of Cote d'Ivoire has an accent circonflex over the 'o' (probably other issues in there too)
## Drop columns whose header is empty (spreadsheet-export artifacts).
ttci <- ttci_raw[ , names(ttci_raw) != '']
### first row is index scores for 2015.
### After column 150, a bunch of unnamed columns that throw errors
## Reshape to long form: one (year, country, score) row per observation.
## NOTE(review): the Series filter keeps "Global Competitiveness Index"
## rows although the surrounding comments describe TTCI data -- confirm
## against the structure of WEF_TTCR_Dataset_2015.xlsx.
ttci <- ttci %>%
filter(Series == "Global Competitiveness Index") %>%
filter(Attribute == "Value") %>%
select(-(1:2), -(4:8), year = Edition) %>%
gather(country, value, -year) %>%
mutate(score = as.numeric(value)) %>%
select(year, country, score)
## Harmonise country names that name_2_rgn() would otherwise not match.
ttci <- ttci %>%
mutate(country = as.character(country)) %>%
mutate(country = ifelse(country == "Congo, Democratic Rep.", "Democratic Republic of the Congo", country)) %>%
mutate(country = ifelse(country == "Côte d'Ivoire", "Ivory Coast", country))
## Attach OHI region ids (name_2_rgn() comes from ohicore).
ttci_rgn <- name_2_rgn(df_in = ttci,
fld_name='country',
flds_unique=c('country','year'))
ttci_rgn <- ttci_rgn %>%
arrange(country, year) %>%
select(rgn_id, rgn_name, year, score)
## Quick sanity check of the mapped output.
head(ttci_rgn, 10)
# rgn_id year score rgn_name
# 1 14 2015 4.35 Taiwan
# 2 15 2015 3.63 Philippines
# 3 16 2015 4.98 Australia
# 4 20 2015 4.37 South Korea
# 5 24 2015 3.24 Cambodia
# 6 25 2015 4.26 Thailand
# 7 31 2015 4.00 Seychelles
# 8 37 2015 3.90 Mauritius
# 9 40 2015 3.80 Sri Lanka
# 10 41 2015 2.81 Mozambique
### Save TTCI data file
write_csv(ttci_rgn, 'intermediate/wef_ttci.csv')
| /globalprep/tr/v2018/R/process_WEF.R | no_license | OHI-Science/ohiprep_v2018 | R | false | false | 3,473 | r | ### process_WEF.R:
### Do not run stand-alone - source from main tr_data_prep.Rmd for TourismRecreation.
###
### reformat and add rgn_ids to World Economic Forum (WEF) data
###
### Provenance:
### Jun2015 Casey O'Hara - updated for 2015, removed gapfilling, set up for .csv instead of .pdf
### Mar2014 JStewartLowndes; updated from 'clean_WEF.R' by JStewart in May 2013
### May2013 'clean_WEF.R' by JStewart
###
### Data:
### TTCI: Travel and Tourism competitiveness:
### * download .xlsx: http://www3.weforum.org/docs/TT15/WEF_TTCR_Dataset_2015.xlsx
### * note: only 2015 is represented here.
### * read report online: http://reports.weforum.org/travel-and-tourism-competitiveness-report-2015/
### * table 1: http://reports.weforum.org/travel-and-tourism-competitiveness-report-2015/
### index-results-the-travel-tourism-competitiveness-index-ranking-2015/
###
### GCI: Global Competitiveness (not used in 2015 for TR goal; see data_prep_GCI.R in globalprep/WEF-Economics)
### * download .xlsx: http://www3.weforum.org/docs/GCR2014-15/GCI_Dataset_2006-07-2014-15.xlsx
### * note: contains data for each year from 2006/2007 to 2014/2015
### * read report: http://reports.weforum.org/global-competitiveness-report-2014-2015/
### * table 3 in this .pdf: http://reports.weforum.org/global-competitiveness-report-2014-2015/
### wp-content/blogs.dir/54/mp/files/pages/files/tables3-7-wef-globalcompetitivenessreport-2014-15-2.pdf
###
### read in individual files
### call name_to_rgn() from ohicore
##############################################################################=
### WEF TTCI formatting ----
##############################################################################=
# read in files
## Duplicate listing of process_WEF.R; `dir_wef` is supplied by the
## calling document (tr_data_prep.Rmd).
ttci_raw <- read.csv(dir_wef,
skip = 3, check.names = FALSE, stringsAsFactors = FALSE)
### NOTE: check.names = FALSE because of Cote d'Ivoire has an accent circonflex over the 'o' (probably other issues in there too)
## Drop columns whose header is empty (spreadsheet-export artifacts).
ttci <- ttci_raw[ , names(ttci_raw) != '']
### first row is index scores for 2015.
### After column 150, a bunch of unnamed columns that throw errors
## Reshape to long form: one (year, country, score) row per observation.
## NOTE(review): the Series filter keeps "Global Competitiveness Index"
## rows although the surrounding comments describe TTCI data -- confirm.
ttci <- ttci %>%
filter(Series == "Global Competitiveness Index") %>%
filter(Attribute == "Value") %>%
select(-(1:2), -(4:8), year = Edition) %>%
gather(country, value, -year) %>%
mutate(score = as.numeric(value)) %>%
select(year, country, score)
## Harmonise country names that name_2_rgn() would otherwise not match.
ttci <- ttci %>%
mutate(country = as.character(country)) %>%
mutate(country = ifelse(country == "Congo, Democratic Rep.", "Democratic Republic of the Congo", country)) %>%
mutate(country = ifelse(country == "Côte d'Ivoire", "Ivory Coast", country))
## Attach OHI region ids (name_2_rgn() comes from ohicore).
ttci_rgn <- name_2_rgn(df_in = ttci,
fld_name='country',
flds_unique=c('country','year'))
ttci_rgn <- ttci_rgn %>%
arrange(country, year) %>%
select(rgn_id, rgn_name, year, score)
## Quick sanity check of the mapped output.
head(ttci_rgn, 10)
# rgn_id year score rgn_name
# 1 14 2015 4.35 Taiwan
# 2 15 2015 3.63 Philippines
# 3 16 2015 4.98 Australia
# 4 20 2015 4.37 South Korea
# 5 24 2015 3.24 Cambodia
# 6 25 2015 4.26 Thailand
# 7 31 2015 4.00 Seychelles
# 8 37 2015 3.90 Mauritius
# 9 40 2015 3.80 Sri Lanka
# 10 41 2015 2.81 Mozambique
### Save TTCI data file
write_csv(ttci_rgn, 'intermediate/wef_ttci.csv')
|
## SNVs_local.R -- count base-substitution calls per sample in TAA regions.
##
## For every VCF-like file in `inputDir` this script
##   1. reads the file twice: as text to recover the "#CHROM ..." header
##      (read.table drops it as a comment) and as a table for the data,
##   2. keeps PASS variants only,
##   3. reduces each genotype field ("GT:...") to its GT component,
##   4. tabulates, per sample, how many PASS calls fall into each of the
##      12 single-base substitution classes (REF>ALT),
## and stores one matrix (substitution x sample) per file in
## `listOfResults`, keyed by the file name without its extension.
## Note: in the study the focus is on C>T and G>A artefacts.
inputDir="local/TAAs/"
## The 12 possible single-base substitutions (REF>ALT).
subst <- c("C>T","G>A","C>A","G>T","C>G","G>C","A>C","T>G","A>G","T>C","A>T","T>A")
files <- list.files(path = inputDir)
listOfResults <- list()
for (k in seq_along(files)) {
  path <- paste0(inputDir, files[k])
  ## Read once as text to locate the "#CHROM" header line, once as a table.
  raw_lines <- readLines(path)
  vcf <- read.table(path, stringsAsFactors = FALSE)
  header <- raw_lines[grep("#CHROM", raw_lines)[1]]
  names(vcf) <- unlist(strsplit(header, "\t"))
  ## Keep PASS variants only.
  vcf <- vcf[vcf$FILTER == "PASS", , drop = FALSE]
  ## Genotype columns start at 10 in the standard VCF layout.  Keep only
  ## the GT field (everything before the first ":").  This replaces the
  ## original cell-by-cell double loop with one vectorised sub() per column.
  sample_cols <- seq(10, ncol(vcf))
  vcf[sample_cols] <- lapply(vcf[sample_cols], function(col) sub(":.*", "", col))
  n <- length(sample_cols)
  result <- matrix(0, nrow = length(subst), ncol = n,
                   dimnames = list(subst, names(vcf)[sample_cols]))
  ## Count, per sample, PASS calls of each substitution class that were
  ## actually genotyped (neither hom-ref "0/0" nor missing "./.").
  for (i in seq_len(n)) {
    gt <- vcf[[sample_cols[i]]]
    called <- gt != "0/0" & gt != "./."
    for (j in seq_along(subst)) {
      ra <- unlist(strsplit(subst[j], split = ">"))
      result[j, i] <- sum(vcf$REF == ra[1] & vcf$ALT == ra[2] & called,
                          na.rm = TRUE)
    }
  }
  ## Key results by file name without extension.
  gene <- strsplit(files[k], split = ".", fixed = TRUE)[[1]][1]
  listOfResults[[gene]] <- result
  # Use View() to see a particular table from the list
  # write.csv(result, paste("Local_",gene,".csv",sep = ""))
}
| /SNVs_calling/SNVs_local.R | no_license | disolis/suitability_of_FFPE_RNAseq_for_TAA_identification | R | false | false | 2,086 | r | inputDir="local/TAAs/"
## Information about the number of artefacts detected in TAAs regions for FFPE/FF pairs
## Note: in the study we focuse on C>T and G>A artefacts
## Duplicate listing of SNVs_local.R (the dump repeats the file).
## NOTE(review): relies on `inputDir` being defined on an earlier line.
files <- list.files(path = inputDir, pattern = NULL, all.files = FALSE,
full.names = FALSE, recursive = FALSE,
ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
## The 12 possible single-base substitutions (REF>ALT).
subst <- c("C>T","G>A","C>A","G>T","C>G","G>C","A>C","T>G","A>G","T>C","A>T","T>A")
## One substitution-by-sample count matrix per input file.
listOfResults <- list()
for (k in 1:length(files)) {
gene <- files[k]
# read two times the vcf file, first for the columns names, second for the data
tmp_vcf<-readLines(paste(inputDir,files[k],sep = ""))
tmp_vcf_data<-read.table(paste(inputDir,files[k],sep = ""), stringsAsFactors = FALSE)
# filter for the columns names
## Drop everything after the "#CHROM" header line; its last retained line
## supplies the column names.
tmp_vcf<-tmp_vcf[-(grep("#CHROM",tmp_vcf)+1):-(length(tmp_vcf))]
vcf_names<-unlist(strsplit(tmp_vcf[length(tmp_vcf)],"\t"))
names(tmp_vcf_data)<-vcf_names
## Keep PASS variants only.
tmp_vcf_data <- tmp_vcf_data[which(tmp_vcf_data$FILTER=="PASS"),]
## Reduce each genotype field ("GT:...") to its GT component, cell by cell.
for (i in 1:nrow(tmp_vcf_data)) {
for (j in 10:ncol(tmp_vcf_data)) {
cell <- tmp_vcf_data[i,j]
Genotype <- unlist(strsplit(cell,split = ":"))[1]
tmp_vcf_data[i,j] <- Genotype
}
}
## Sample columns start at 10 in the standard VCF layout.
n <- ncol(tmp_vcf_data)-9
result <- matrix(0, nrow = length(subst), ncol = n)
colnames(result) <- colnames(tmp_vcf_data)[10:ncol(tmp_vcf_data)]
rownames(result)<-subst
## Count, per sample, PASS calls of each substitution class that were
## actually genotyped (neither hom-ref "0/0" nor missing "./.").
for (i in 1:n) {
for (j in 1:length(subst)) {
R<- unlist(strsplit(subst[j],split = ">"))[1]
A<- unlist(strsplit(subst[j],split = ">"))[2]
l<-length(which(tmp_vcf_data$REF==R
& tmp_vcf_data$ALT==A
& tmp_vcf_data[,i+9] != "0/0"
& tmp_vcf_data[,i+9] != "./."))
result[j,i]<-l
}
}
## Key results by file name without extension.
gene <- strsplit(unlist(gene),split = ".",fixed = T)[[1]][1]
listOfResults[[gene]] <- result
# Use View() to see a particular table from the list
# write.csv(result, paste("Local_",gene,".csv",sep = ""))
}
library(ggmap)
library(ggplot2)
library(rjson)
library(jsonlite)
library(RCurl)
library(leaflet)
# Fetching All Durham Data
## NOTE(review): st_read() below is provided by the sf package, which is
## not loaded by this script -- confirm library(sf) is attached elsewhere.
## NOTE(review): `res` and `tract` used in the ggplot() calls are not
## defined in this script -- presumably created by earlier code; verify.
#1. Zoning Data
base_url1= "https://opendata.arcgis.com/datasets/3dbb7dea6cc14544ad302061809df597_12.geojson"
## Read the zoning polygons and preview the ZONE_CODE attribute.
zoning<-st_read("https://opendata.arcgis.com/datasets/3dbb7dea6cc14544ad302061809df597_12.geojson")
plot(zoning["ZONE_CODE"])
ggplot() +
geom_sf(data = res) +
geom_sf(data=zoning, aes(fill=ZONE_CODE),alpha=I(0.5))
#2. TIER
#https://live-durhamnc.opendata.arcgis.com/datasets/development-tiers?geometry=-79.731%2C35.857%2C-77.997%2C36.245&selectedAttribute=TYPE
base_url2 = "https://opendata.arcgis.com/datasets/02e611b671f64310b7b2a420e67238c3_5.geojson"
## Development-tier polygons, overlaid with tract boundaries in red.
dev_tiers <- st_read("https://opendata.arcgis.com/datasets/02e611b671f64310b7b2a420e67238c3_5.geojson")
plot(dev_tiers["TYPE"])
ggplot() +
geom_sf(data = res) +
geom_sf(data=dev_tiers, aes(fill=TYPE),alpha=I(0.5))+
geom_sf(data = tract, fill = NA, color = "red")
#notes:cn stands for The Compact Neighborhood Tier
#3. Building Permits
#https://durham.municipal.codes/UDO/4.1.1
#https://live-durhamnc.opendata.arcgis.com/datasets/all-building-permits-1/data?geometry=-79.725%2C35.858%2C-77.991%2C36.246
## URL recorded only; the permits layer is not downloaded here.
base_url3="https://opendata.arcgis.com/datasets/147b91c0ff5c4c03931e9b3580026065_12.geojson"
#4.Parcels
#https://live-durhamnc.opendata.arcgis.com/datasets/parcels
## URL recorded only; the parcels layer is not downloaded here.
base_url4="https://opendata.arcgis.com/datasets/9cde87b4bac348faa1332997093654bb_0.geojson"
base_url4="https://opendata.arcgis.com/datasets/9cde87b4bac348faa1332997093654bb_0.geojson" | /Codes/Fetching Durham Data.R | permissive | nusharama/DurhamUpzoning | R | false | false | 1,468 | r | library(ggmap)
library(ggplot2)
library(rjson)
library(jsonlite)
library(RCurl)
library(leaflet)
# Fetching All Durham Data
## Duplicate listing of "Fetching Durham Data.R" (the dump repeats it).
## NOTE(review): st_read() is from the sf package, which is never loaded;
## `res` and `tract` used below are also undefined here -- verify.
#1. Zoning Data
base_url1= "https://opendata.arcgis.com/datasets/3dbb7dea6cc14544ad302061809df597_12.geojson"
## Read the zoning polygons and preview the ZONE_CODE attribute.
zoning<-st_read("https://opendata.arcgis.com/datasets/3dbb7dea6cc14544ad302061809df597_12.geojson")
plot(zoning["ZONE_CODE"])
ggplot() +
geom_sf(data = res) +
geom_sf(data=zoning, aes(fill=ZONE_CODE),alpha=I(0.5))
#2. TIER
#https://live-durhamnc.opendata.arcgis.com/datasets/development-tiers?geometry=-79.731%2C35.857%2C-77.997%2C36.245&selectedAttribute=TYPE
base_url2 = "https://opendata.arcgis.com/datasets/02e611b671f64310b7b2a420e67238c3_5.geojson"
## Development-tier polygons, overlaid with tract boundaries in red.
dev_tiers <- st_read("https://opendata.arcgis.com/datasets/02e611b671f64310b7b2a420e67238c3_5.geojson")
plot(dev_tiers["TYPE"])
ggplot() +
geom_sf(data = res) +
geom_sf(data=dev_tiers, aes(fill=TYPE),alpha=I(0.5))+
geom_sf(data = tract, fill = NA, color = "red")
#notes:cn stands for The Compact Neighborhood Tier
#3. Building Permits
#https://durham.municipal.codes/UDO/4.1.1
#https://live-durhamnc.opendata.arcgis.com/datasets/all-building-permits-1/data?geometry=-79.725%2C35.858%2C-77.991%2C36.246
base_url3="https://opendata.arcgis.com/datasets/147b91c0ff5c4c03931e9b3580026065_12.geojson"
#4.Parcels
#https://live-durhamnc.opendata.arcgis.com/datasets/parcels
base_url4="https://opendata.arcgis.com/datasets/9cde87b4bac348faa1332997093654bb_0.geojson"
## Plot method for "dpm" objects (Dirichlet-process Weibull fits).
##
## Draws three base-graphics panels in sequence -- survival, density and
## hazard -- each with dashed pointwise credible limits.  When
## simultaneous bands are available (computed at fit time,
## x$simultaneous == TRUE) or requested via `simultaneous`, dotted
## simultaneous bands are added, derived from the stored posterior draw
## matrices via confband().
##
## Arguments:
##   x            fitted object carrying predtime, the point estimates
##                Spred/dpred/hpred, pointwise limits *predl/*predu,
##                posterior draw matrices S/d/h, and the level alpha.
##   simultaneous logical; force computation of simultaneous bands even
##                if they were not requested at fit time.
##   ...          unused; kept for S3 compatibility with plot().
plot.dpm <- function(x, simultaneous = FALSE, ...) {
  pointwise_only <- !x$simultaneous && !simultaneous
  ## --- Survival: curves are anchored at (0, 1), since S(0) = 1. -------
  if (pointwise_only) {
    plot(c(0, x$predtime), c(1, x$Spred), type = "l", lwd = 3, main = "Survival",
         ylab = "", xlab = "Time", ylim = c(min(x$Spredl, na.rm = TRUE), 1))
  } else {
    x$Sbandl <- confband(x$alpha, x$S)[1, ]
    x$Sbandu <- confband(x$alpha, x$S)[2, ]
    plot(c(0, x$predtime), c(1, x$Spred), type = "l", lwd = 3, main = "Survival",
         ylab = "", xlab = "Time", ylim = c(min(x$Sbandl, na.rm = TRUE), 1))
    lines(c(0, x$predtime), c(1, x$Sbandu), lty = 3, lwd = 3)
    lines(c(0, x$predtime), c(1, x$Sbandl), lty = 3, lwd = 3)
  }
  lines(c(0, x$predtime), c(1, x$Spredu), lty = 2, lwd = 3)
  lines(c(0, x$predtime), c(1, x$Spredl), lty = 2, lwd = 3)
  ## --- Density: anchored at (0, 0). -----------------------------------
  if (pointwise_only) {
    plot(c(0, x$predtime), c(0, x$dpred), type = "l", lwd = 3, main = "Density",
         ylab = "", xlab = "Time", ylim = c(0, max(x$dpredu, na.rm = TRUE)))
  } else {
    x$dbandl <- confband(x$alpha, x$d)[1, ]
    x$dbandu <- confband(x$alpha, x$d)[2, ]
    plot(c(0, x$predtime), c(0, x$dpred), type = "l", lwd = 3, main = "Density",
         ylab = "", xlab = "Time", ylim = c(0, max(x$dbandu, na.rm = TRUE)))
    lines(c(0, x$predtime), c(0, x$dbandu), lty = 3, lwd = 3)
    lines(c(0, x$predtime), c(0, x$dbandl), lty = 3, lwd = 3)
  }
  lines(c(0, x$predtime), c(0, x$dpredu), lty = 2, lwd = 3)
  lines(c(0, x$predtime), c(0, x$dpredl), lty = 2, lwd = 3)
  ## --- Hazard: drawn over predtime only (no anchor point). ------------
  ## Fix: the original drew the pointwise limits twice in the
  ## pointwise-only branch (once inside the branch and once after it);
  ## they are now drawn exactly once, mirroring the panels above.
  if (pointwise_only) {
    plot(x$predtime, x$hpred, type = "l", lwd = 3, main = "Hazard",
         ylab = "", xlab = "Time",
         ylim = c(min(x$hpredl, na.rm = TRUE), max(x$hpredu, na.rm = TRUE)))
  } else {
    x$hbandl <- confband(x$alpha, x$h)[1, ]
    x$hbandu <- confband(x$alpha, x$h)[2, ]
    plot(x$predtime, x$hpred, type = "l", lwd = 3, main = "Hazard",
         ylab = "", xlab = "Time",
         ylim = c(min(x$hbandl, na.rm = TRUE), max(x$hbandu, na.rm = TRUE)))
    lines(x$predtime, x$hbandu, lty = 3, lwd = 3)
    lines(x$predtime, x$hbandl, lty = 3, lwd = 3)
  }
  lines(x$predtime, x$hpredu, lty = 2, lwd = 3)
  lines(x$predtime, x$hpredl, lty = 2, lwd = 3)
}
## Plot method for "dpmcomp" objects (competing-risks DP-Weibull fits).
## Draws three panels -- cumulative incidence, subdistribution density
## and subdistribution hazard -- with event 1 in red and event 2 in blue.
## Dashed lines are pointwise credible limits; dotted lines (when
## x$simultaneous is TRUE or `simultaneous` is requested) are
## simultaneous bands computed from the posterior draws via confband().
plot.dpmcomp<-function(x,simultaneous=FALSE,...){
## --- Cumulative incidence functions, anchored at (0, 0). -------------
if((x$simultaneous==FALSE)&(simultaneous==FALSE)){
plot(c(0,x$predtime),c(0,x$CIF1.est),type="l",col="red",lwd=3,main="Cumulative Incidence Functions",ylab="",xlab="Time",
ylim=c(0,max(c(x$CIF1u,x$CIF2u),na.rm=TRUE)))
lines(c(0,x$predtime),c(0,x$CIF2.est),lwd=3,col="blue")
## NOTE(review): the legend sits "topleft" here but "bottomright" in the
## band branch below -- confirm the asymmetry is intentional.
legend("topleft",c("Event 1", "Event 2"), lwd=c(3,3), lty=c(1,1), col=c("red", "blue"))
}else{
x$CIF1bandl<-confband(x$alpha,x$CIF1)[1,]
x$CIF1bandu<-confband(x$alpha,x$CIF1)[2,]
x$CIF2bandl<-confband(x$alpha,x$CIF2)[1,]
x$CIF2bandu<-confband(x$alpha,x$CIF2)[2,]
plot(c(0,x$predtime),c(0,x$CIF1.est),type="l",col="red",lwd=3,main="Cumulative Incidence Functions",ylab="",xlab="Time",
ylim=c(0,max(c(x$CIF1bandu,x$CIF2bandu),na.rm=TRUE)))
lines(c(0,x$predtime),c(0,x$CIF1bandu),lty=3,lwd=3,col="red")
lines(c(0,x$predtime),c(0,x$CIF1bandl),lty=3,lwd=3,col="red")
lines(c(0,x$predtime),c(0,x$CIF2bandu),lty=3,lwd=3,col="blue")
lines(c(0,x$predtime),c(0,x$CIF2bandl),lty=3,lwd=3,col="blue")
legend("bottomright",c("Event 1", "Event 2"), lwd=c(3,3), lty=c(1,1), col=c("red", "blue"))
}
## Event-2 estimate plus pointwise limits for both events.  (In the
## pointwise branch above the CIF2 curve was already drawn once, so it
## is over-plotted here a second time -- visually harmless.)
lines(c(0,x$predtime),c(0,x$CIF2.est),lwd=3,col="blue")
lines(c(0,x$predtime),c(0,x$CIF1u),lty=2,lwd=3,col="red")
lines(c(0,x$predtime),c(0,x$CIF1l),lty=2,lwd=3,col="red")
lines(c(0,x$predtime),c(0,x$CIF2u),lty=2,lwd=3,col="blue")
lines(c(0,x$predtime),c(0,x$CIF2l),lty=2,lwd=3,col="blue")
## --- Subdistribution densities, anchored at (0, 0). ------------------
if((x$simultaneous==FALSE)&(simultaneous==FALSE)){
plot(c(0,x$predtime),c(0,x$d1.est),type="l",col="red",lwd=3,main="Subdistribution Density Functions",ylab="",xlab="Time",
ylim=c(0,max(c(x$d1u,x$d2u),na.rm=TRUE)))
}else{
x$d1bandl<-confband(x$alpha,x$d1)[1,]
x$d1bandu<-confband(x$alpha,x$d1)[2,]
x$d2bandl<-confband(x$alpha,x$d2)[1,]
x$d2bandu<-confband(x$alpha,x$d2)[2,]
plot(c(0,x$predtime),c(0,x$d1.est),type="l",col="red",lwd=3,main="Subdistribution Density Functions",ylab="",xlab="Time",
ylim=c(0,max(c(x$d1bandu,x$d2bandu),na.rm=TRUE)))
lines(c(0,x$predtime),c(0,x$d1bandu),lty=3,lwd=3,col="red")
lines(c(0,x$predtime),c(0,x$d1bandl),lty=3,lwd=3,col="red")
lines(c(0,x$predtime),c(0,x$d2bandu),lty=3,lwd=3,col="blue")
lines(c(0,x$predtime),c(0,x$d2bandl),lty=3,lwd=3,col="blue")
}
lines(c(0,x$predtime),c(0,x$d2.est),lwd=3,col="blue")
lines(c(0,x$predtime),c(0,x$d1u),lty=2,lwd=3,col="red")
lines(c(0,x$predtime),c(0,x$d1l),lty=2,lwd=3,col="red")
lines(c(0,x$predtime),c(0,x$d2u),lty=2,lwd=3,col="blue")
lines(c(0,x$predtime),c(0,x$d2l),lty=2,lwd=3,col="blue")
## --- Subdistribution hazards, drawn over predtime only. --------------
if((x$simultaneous==FALSE)&(simultaneous==FALSE)){
plot(x$predtime,x$h1.est,type="l",col="red",lwd=3,main="Subdistribution Hazard Functions",ylab="",xlab="Time",
ylim=c(min(c(x$h1l,x$h2l),na.rm=TRUE),max(c(x$h1u,x$h2u),na.rm=TRUE)))
}else{
x$h1bandl<-confband(x$alpha,x$h1)[1,]
x$h1bandu<-confband(x$alpha,x$h1)[2,]
x$h2bandl<-confband(x$alpha,x$h2)[1,]
x$h2bandu<-confband(x$alpha,x$h2)[2,]
plot(x$predtime,x$h1.est,type="l",col="red",lwd=3,main="Subdistribution Hazard Functions",ylab="",xlab="Time",
ylim=c(min(c(x$h1bandl,x$h2bandl),na.rm=TRUE),max(c(x$h1bandu,x$h2bandu),na.rm=TRUE)))
lines(x$predtime,x$h1bandu,lty=3,lwd=3,col="red")
lines(x$predtime,x$h1bandl,lty=3,lwd=3,col="red")
lines(x$predtime,x$h2bandu,lty=3,lwd=3,col="blue")
lines(x$predtime,x$h2bandl,lty=3,lwd=3,col="blue")
}
lines(x$predtime,x$h1u,lty=2,lwd=3,col="red")
lines(x$predtime,x$h1l,lty=2,lwd=3,col="red")
lines(x$predtime,x$h2.est,lwd=3,col="blue")
lines(x$predtime,x$h2u,lty=2,lwd=3,col="blue")
lines(x$predtime,x$h2l,lty=2,lwd=3,col="blue")
}
## Plot method for "ddp" objects: time-varying (log) hazard ratios under
## a dependent Dirichlet process survival fit.  One panel per covariate,
## showing the point estimate (solid), pointwise credible limits (dashed)
## and, when available or requested, simultaneous bands (dotted).
##
## Arguments:
##   x            fitted object carrying predtime, loghr.est/loghrl/loghru
##                (one row per covariate), posterior draw matrix loghr,
##                covnames, xlevels, the design matrix x$x, the scaling
##                vector xscale, and the band level alpha.
##   simultaneous logical; compute simultaneous bands on demand when they
##                were not requested at fit time.
##   exp          logical; plot on the hazard-ratio scale (exp of the
##                log-HR) instead of the log scale.
##   ...          unused; kept for S3 compatibility with plot().
plot.ddp <- function(x, simultaneous = FALSE, exp = FALSE, ...) {
  ## Human-readable label for a covariate.  Model-matrix names such as
  ## "factor(Stage)II" are rewritten as "Stage=II vs Stage= I", where the
  ## reference is the first level recorded in x$xlevels.  (This logic was
  ## previously duplicated four times inline.)
  cov_label <- function(raw) {
    if (!grepl("factor", raw, fixed = TRUE)) {
      return(raw)
    }
    term <- sub(").*", ")", raw)          # e.g. "factor(Stage)"
    ref  <- unlist(x$xlevels[term])[1]    # reference (first) factor level
    raw  <- gsub("factor[(]", "", raw)
    raw  <- gsub("[)]", "=", raw)
    term <- gsub("factor[(]", "", term)
    term <- gsub("[)]", "=", term)
    paste(raw, "vs", paste(term, ref))
  }
  ## One panel: estimate (solid), pointwise limits (dashed) and, when
  ## supplied, the simultaneous band (dotted).
  draw_panel <- function(est, lo, hi, ylim, title, band_lo = NULL, band_hi = NULL) {
    plot(x$predtime, est, type = "l", lwd = 3, main = title,
         ylab = "", xlab = "Time", ylim = ylim)
    lines(x$predtime, lo, lty = 2, lwd = 3)
    lines(x$predtime, hi, lty = 2, lwd = 3)
    if (!is.null(band_lo)) {
      lines(x$predtime, band_lo, lty = 3, lwd = 3)
      lines(x$predtime, band_hi, lty = 3, lwd = 3)
    }
  }
  ## Simultaneous bands on the log-HR scale, rescaled back to the original
  ## covariate units; computed on demand when not stored at fit time.
  if (!x$simultaneous && simultaneous) {
    x$loghrbandl <- matrix(confband(x$alpha, x$loghr)[1, ], byrow = TRUE, nrow = ncol(x$x)) / x$xscale
    x$loghrbandu <- matrix(confband(x$alpha, x$loghr)[2, ], byrow = TRUE, nrow = ncol(x$x)) / x$xscale
    if (exp) {
      x$hrbandl <- exp(x$loghrbandl)
      x$hrbandu <- exp(x$loghrbandu)
    }
  }
  pointwise_only <- !x$simultaneous && !simultaneous
  if (exp) {
    ## Hazard-ratio scale: exponentiate estimates and limits once, outside
    ## the per-covariate loop (they do not depend on the covariate index).
    x$hr.est <- exp(x$loghr.est)
    x$hrl <- exp(x$loghrl)
    x$hru <- exp(x$loghru)
    if (!pointwise_only) {
      x$hr <- exp(x$loghr)
    }
  }
  ## Common y-range across covariates (as in the original: from the
  ## pointwise limits when only those are drawn, otherwise from the full
  ## posterior draw matrix).  Hoisted out of the loop: loop-invariant.
  if (exp) {
    if (pointwise_only) {
      ylim <- c(min(x$hrl, na.rm = TRUE), max(x$hru, na.rm = TRUE))
    } else {
      ylim <- c(min(x$hr, na.rm = TRUE), max(x$hr, na.rm = TRUE))
    }
  } else {
    if (pointwise_only) {
      ylim <- c(min(x$loghrl, na.rm = TRUE), max(x$loghru, na.rm = TRUE))
    } else {
      ylim <- c(min(x$loghr, na.rm = TRUE), max(x$loghr, na.rm = TRUE))
    }
  }
  for (i in seq_len(nrow(x$loghr.est))) {
    lab <- cov_label(x$covnames[i])
    if (exp) {
      draw_panel(x$hr.est[i, ], x$hrl[i, ], x$hru[i, ], ylim,
                 paste("Hazard Ratio over Time for Covariate ", lab, sep = ""),
                 if (pointwise_only) NULL else x$hrbandl[i, ],
                 if (pointwise_only) NULL else x$hrbandu[i, ])
    } else {
      draw_panel(x$loghr.est[i, ], x$loghrl[i, ], x$loghru[i, ], ylim,
                 paste("Log Hazard Ratio over Time for Covariate ", lab, sep = ""),
                 if (pointwise_only) NULL else x$loghrbandl[i, ],
                 if (pointwise_only) NULL else x$loghrbandu[i, ])
    }
  }
}
## Plot method for "ddpcomp" objects: time-varying (log) subdistribution
## hazard ratios for event 1 under a dependent Dirichlet process
## competing-risks fit.  One panel per covariate: point estimate (solid),
## pointwise credible limits (dashed) and, when available or requested,
## simultaneous bands (dotted).
##
## Arguments:
##   x            fitted object carrying predtime, loghr.est/loghrl/loghru
##                (one row per covariate), posterior draw matrix loghr,
##                covnames, xlevels, the design matrix x$x, the scaling
##                vector xscale, and the band level alpha.
##   simultaneous logical; compute simultaneous bands on demand when they
##                were not requested at fit time.
##   exp          logical; plot on the hazard-ratio scale instead of log.
##   ...          unused; kept for S3 compatibility with plot().
plot.ddpcomp <- function(x, simultaneous = FALSE, exp = FALSE, ...) {
  ## Human-readable covariate label: "factor(V)lvl" -> "V=lvl vs V= ref",
  ## with the reference taken as the first level stored in x$xlevels.
  cov_label <- function(raw) {
    if (!grepl("factor", raw, fixed = TRUE)) {
      return(raw)
    }
    term <- sub(").*", ")", raw)          # e.g. "factor(Stage)"
    ref  <- unlist(x$xlevels[term])[1]    # reference (first) factor level
    raw  <- gsub("factor[(]", "", raw)
    raw  <- gsub("[)]", "=", raw)
    term <- gsub("factor[(]", "", term)
    term <- gsub("[)]", "=", term)
    paste(raw, "vs", paste(term, ref))
  }
  ## One panel: estimate (solid), pointwise limits (dashed) and, when
  ## supplied, the simultaneous band (dotted).
  draw_panel <- function(est, lo, hi, ylim, title, band_lo = NULL, band_hi = NULL) {
    plot(x$predtime, est, type = "l", lwd = 3, main = title,
         ylab = "", xlab = "Time", ylim = ylim)
    lines(x$predtime, lo, lty = 2, lwd = 3)
    lines(x$predtime, hi, lty = 2, lwd = 3)
    if (!is.null(band_lo)) {
      lines(x$predtime, band_lo, lty = 3, lwd = 3)
      lines(x$predtime, band_hi, lty = 3, lwd = 3)
    }
  }
  ## Simultaneous bands on the log scale, rescaled back to the original
  ## covariate units; computed on demand when not stored at fit time.
  if (!x$simultaneous && simultaneous) {
    x$loghrbandl <- matrix(confband(x$alpha, x$loghr)[1, ], byrow = TRUE, nrow = ncol(x$x)) / x$xscale
    x$loghrbandu <- matrix(confband(x$alpha, x$loghr)[2, ], byrow = TRUE, nrow = ncol(x$x)) / x$xscale
    if (exp) {
      x$hrbandl <- exp(x$loghrbandl)
      x$hrbandu <- exp(x$loghrbandu)
    }
  }
  pointwise_only <- !x$simultaneous && !simultaneous
  if (exp) {
    ## Ratio scale: exponentiate once, outside the per-covariate loop.
    x$hr.est <- exp(x$loghr.est)
    x$hrl <- exp(x$loghrl)
    x$hru <- exp(x$loghru)
    if (!pointwise_only) {
      x$hr <- exp(x$loghr)
    }
  }
  ## Common y-range across covariates: pointwise limits when only those
  ## are drawn, otherwise the full posterior draw matrix.
  if (exp) {
    if (pointwise_only) {
      ylim <- c(min(x$hrl, na.rm = TRUE), max(x$hru, na.rm = TRUE))
    } else {
      ylim <- c(min(x$hr, na.rm = TRUE), max(x$hr, na.rm = TRUE))
    }
  } else {
    if (pointwise_only) {
      ylim <- c(min(x$loghrl, na.rm = TRUE), max(x$loghru, na.rm = TRUE))
    } else {
      ylim <- c(min(x$loghr, na.rm = TRUE), max(x$loghr, na.rm = TRUE))
    }
  }
  for (i in seq_len(nrow(x$loghr.est))) {
    lab <- cov_label(x$covnames[i])
    if (exp) {
      ## Bug fix: with simultaneous bands the original titled these
      ## panels "Log Subdistribution Hazard Ratio ..." although the
      ## exponentiated (ratio-scale) curves are drawn; the title now
      ## matches the pointwise exp branch.
      draw_panel(x$hr.est[i, ], x$hrl[i, ], x$hru[i, ], ylim,
                 paste("Subdistribution Hazard Ratio of \n Covariate ", lab, " for Event 1", sep = ""),
                 if (pointwise_only) NULL else x$hrbandl[i, ],
                 if (pointwise_only) NULL else x$hrbandu[i, ])
    } else {
      draw_panel(x$loghr.est[i, ], x$loghrl[i, ], x$loghru[i, ], ylim,
                 paste("Log Subdistribution Hazard Ratio of \n Covariate ", lab, " for Event 1", sep = ""),
                 if (pointwise_only) NULL else x$loghrbandl[i, ],
                 if (pointwise_only) NULL else x$loghrbandu[i, ])
    }
  }
}
## Plot method for "predddpcomp" prediction objects.  For each new
## observation (one row of the prediction matrices) three panels are
## drawn for event 1 -- the cumulative incidence function, the
## cause-specific density, and the subdistribution hazard -- each with
## dashed pointwise credible limits.  CIF and density curves are anchored
## at (0, 0); the hazard is drawn over x$tpred only.
plot.predddpcomp <- function(x, ...) {
  t0 <- c(0, x$tpred)
  ## Shared y-ranges so every observation's panels are comparable.
  cif_ylim <- c(min(x$Fpredl, na.rm = TRUE), max(x$Fpredu, na.rm = TRUE))
  den_ylim <- c(min(x$dpredl, na.rm = TRUE), max(x$dpredu, na.rm = TRUE))
  haz_ylim <- c(min(x$hpredl, na.rm = TRUE), max(x$hpredu, na.rm = TRUE))
  for (i in seq_len(nrow(x$Fpred))) {
    ## Cumulative incidence function.
    plot(t0, c(0, x$Fpred[i, ]),
         main = paste("Cumulative Incidence Function Estimate\n with New Data ", i, " for Event 1", sep = ""),
         type = "l", lwd = 3, xlab = "Time", ylab = "", ylim = cif_ylim)
    lines(t0, c(0, x$Fpredl[i, ]), lwd = 3, lty = 2)
    lines(t0, c(0, x$Fpredu[i, ]), lwd = 3, lty = 2)
    ## Cause-specific density.
    plot(t0, c(0, x$dpred[i, ]),
         main = paste("Cause-specific Density Estimate\n with New Data ", i, " for Event 1", sep = ""),
         type = "l", lwd = 3, xlab = "Time", ylab = "", ylim = den_ylim)
    lines(t0, c(0, x$dpredl[i, ]), lwd = 3, lty = 2)
    lines(t0, c(0, x$dpredu[i, ]), lwd = 3, lty = 2)
    ## Subdistribution hazard.
    plot(x$tpred, x$hpred[i, ],
         main = paste("Subdistribution Hazard Estimate\n with New Data ", i, " for Event 1", sep = ""),
         type = "l", lwd = 3, xlab = "Time", ylab = "", ylim = haz_ylim)
    lines(x$tpred, x$hpredl[i, ], lwd = 3, lty = 2)
    lines(x$tpred, x$hpredu[i, ], lwd = 3, lty = 2)
  }
}
## Plot method for "predddp" prediction objects.  For each new
## observation (one row of the prediction matrices) three panels are
## drawn -- survival, density, hazard -- each with dashed pointwise
## credible limits.  Survival is anchored at (0, 1) and density at
## (0, 0); the hazard is drawn over x$tpred only.
plot.predddp <- function(x, ...) {
  t0 <- c(0, x$tpred)
  ## Shared y-ranges so every observation's panels are comparable.
  surv_ylim <- c(min(x$Spredl, na.rm = TRUE), max(x$Spredu, na.rm = TRUE))
  dens_ylim <- c(min(x$dpredl, na.rm = TRUE), max(x$dpredu, na.rm = TRUE))
  haz_ylim  <- c(min(x$hpredl, na.rm = TRUE), max(x$hpredu, na.rm = TRUE))
  for (i in seq_len(nrow(x$Spred))) {
    ## Survival curve.
    plot(t0, c(1, x$Spred[i, ]),
         main = paste("Survival Estimate with New Data ", i, sep = ""),
         type = "l", lwd = 3, xlab = "Time", ylab = "", ylim = surv_ylim)
    lines(t0, c(1, x$Spredl[i, ]), lwd = 3, lty = 2)
    lines(t0, c(1, x$Spredu[i, ]), lwd = 3, lty = 2)
    ## Density curve.
    plot(t0, c(0, x$dpred[i, ]),
         main = paste("Density Estimate with New Data ", i, sep = ""),
         type = "l", lwd = 3, xlab = "Time", ylab = "", ylim = dens_ylim)
    lines(t0, c(0, x$dpredl[i, ]), lwd = 3, lty = 2)
    lines(t0, c(0, x$dpredu[i, ]), lwd = 3, lty = 2)
    ## Hazard curve.
    plot(x$tpred, x$hpred[i, ],
         main = paste("Hazard Estimate with New Data ", i, sep = ""),
         type = "l", lwd = 3, xlab = "Time", ylab = "", ylim = haz_ylim)
    lines(x$tpred, x$hpredl[i, ], lwd = 3, lty = 2)
    lines(x$tpred, x$hpredu[i, ], lwd = 3, lty = 2)
  }
}
| /DPWeibull/R/plot.R | no_license | akhikolla/TestedPackages-NoIssues | R | false | false | 17,023 | r |
## Duplicate listing of plot.dpm (DPWeibull/R/plot.R).  Draws survival,
## density and hazard panels with dashed pointwise credible limits and,
## when x$simultaneous is TRUE or `simultaneous` is requested, dotted
## simultaneous bands computed from the posterior draws via confband().
plot.dpm<-function(x,simultaneous=FALSE,...){
## Survival panel, anchored at (0, 1) since S(0) = 1.
if((x$simultaneous==FALSE)&(simultaneous==FALSE)){
plot(c(0,x$predtime),c(1,x$Spred),type="l",lwd=3,main="Survival",ylab="",xlab="Time",ylim=c(min(x$Spredl,na.rm=TRUE),1))
}else{
x$Sbandl<-confband(x$alpha,x$S)[1,]
x$Sbandu<-confband(x$alpha,x$S)[2,]
plot(c(0,x$predtime),c(1,x$Spred),type="l",lwd=3,main="Survival",ylab="",xlab="Time",ylim=c(min(x$Sbandl,na.rm=TRUE),1))
lines(c(0,x$predtime),c(1,x$Sbandu),lty=3,lwd=3)
lines(c(0,x$predtime),c(1,x$Sbandl),lty=3,lwd=3)
}
lines(c(0,x$predtime),c(1,x$Spredu),lty=2,lwd=3)
lines(c(0,x$predtime),c(1,x$Spredl),lty=2,lwd=3)
## Density panel, anchored at (0, 0).
if((x$simultaneous==FALSE)&(simultaneous==FALSE)){
plot(c(0,x$predtime),c(0,x$dpred),type="l",lwd=3,main="Density",ylab="",xlab="Time",ylim=c(0,max(x$dpredu,na.rm=TRUE)))
}else{
x$dbandl<-confband(x$alpha,x$d)[1,]
x$dbandu<-confband(x$alpha,x$d)[2,]
plot(c(0,x$predtime),c(0,x$dpred),type="l",lwd=3,main="Density",ylab="",xlab="Time",ylim=c(0,max(x$dbandu,na.rm=TRUE)))
lines(c(0,x$predtime),c(0,x$dbandu),lty=3,lwd=3)
lines(c(0,x$predtime),c(0,x$dbandl),lty=3,lwd=3)
}
lines(c(0,x$predtime),c(0,x$dpredu),lty=2,lwd=3)
lines(c(0,x$predtime),c(0,x$dpredl),lty=2,lwd=3)
## Hazard panel, drawn over predtime only.  Note: in the pointwise
## branch the dashed limits are drawn here AND again after the if/else,
## so they are over-plotted twice (visually harmless).
if((x$simultaneous==FALSE)&(simultaneous==FALSE)){
plot(x$predtime,x$hpred,type="l",lwd=3,main="Hazard",ylab="",xlab="Time",ylim=c(min(x$hpredl,na.rm=TRUE),max(x$hpredu,na.rm=TRUE)))
lines(x$predtime,x$hpredu,lty=2,lwd=3)
lines(x$predtime,x$hpredl,lty=2,lwd=3)
}else{
x$hbandl<-confband(x$alpha,x$h)[1,]
x$hbandu<-confband(x$alpha,x$h)[2,]
plot(x$predtime,x$hpred,type="l",lwd=3,main="Hazard",ylab="",xlab="Time",ylim=c(min(x$hbandl,na.rm=TRUE),max(x$hbandu,na.rm=TRUE)))
lines(x$predtime,x$hbandu,lty=3,lwd=3)
lines(x$predtime,x$hbandl,lty=3,lwd=3)
}
lines(x$predtime,x$hpredu,lty=2,lwd=3)
lines(x$predtime,x$hpredl,lty=2,lwd=3)
}
# S3 plot method for 'dpmcomp' (competing risks) fits.
# Draws, for the two competing events (event 1 red, event 2 blue):
# cumulative incidence functions, subdistribution densities and
# subdistribution hazards, with pointwise limits (dashed) and optional
# simultaneous bands (dotted) computed via confband() on posterior draws.
# Called for its plotting side effects.
plot.dpmcomp<-function(x,simultaneous=FALSE,...){
# Cumulative incidence functions; curves start at (t = 0, CIF = 0).
if((x$simultaneous==FALSE)&(simultaneous==FALSE)){
plot(c(0,x$predtime),c(0,x$CIF1.est),type="l",col="red",lwd=3,main="Cumulative Incidence Functions",ylab="",xlab="Time",
ylim=c(0,max(c(x$CIF1u,x$CIF2u),na.rm=TRUE)))
# NOTE(review): the CIF2 curve is drawn again after the if/else (harmless).
lines(c(0,x$predtime),c(0,x$CIF2.est),lwd=3,col="blue")
legend("topleft",c("Event 1", "Event 2"), lwd=c(3,3), lty=c(1,1), col=c("red", "blue"))
}else{
x$CIF1bandl<-confband(x$alpha,x$CIF1)[1,]
x$CIF1bandu<-confband(x$alpha,x$CIF1)[2,]
x$CIF2bandl<-confband(x$alpha,x$CIF2)[1,]
x$CIF2bandu<-confband(x$alpha,x$CIF2)[2,]
plot(c(0,x$predtime),c(0,x$CIF1.est),type="l",col="red",lwd=3,main="Cumulative Incidence Functions",ylab="",xlab="Time",
ylim=c(0,max(c(x$CIF1bandu,x$CIF2bandu),na.rm=TRUE)))
lines(c(0,x$predtime),c(0,x$CIF1bandu),lty=3,lwd=3,col="red")
lines(c(0,x$predtime),c(0,x$CIF1bandl),lty=3,lwd=3,col="red")
lines(c(0,x$predtime),c(0,x$CIF2bandu),lty=3,lwd=3,col="blue")
lines(c(0,x$predtime),c(0,x$CIF2bandl),lty=3,lwd=3,col="blue")
# NOTE(review): legend position differs between branches ("topleft" above,
# "bottomright" here) -- confirm whether this asymmetry is intentional.
legend("bottomright",c("Event 1", "Event 2"), lwd=c(3,3), lty=c(1,1), col=c("red", "blue"))
}
# Point estimates and pointwise limits, drawn in both cases.
lines(c(0,x$predtime),c(0,x$CIF2.est),lwd=3,col="blue")
lines(c(0,x$predtime),c(0,x$CIF1u),lty=2,lwd=3,col="red")
lines(c(0,x$predtime),c(0,x$CIF1l),lty=2,lwd=3,col="red")
lines(c(0,x$predtime),c(0,x$CIF2u),lty=2,lwd=3,col="blue")
lines(c(0,x$predtime),c(0,x$CIF2l),lty=2,lwd=3,col="blue")
# Subdistribution density functions.
if((x$simultaneous==FALSE)&(simultaneous==FALSE)){
plot(c(0,x$predtime),c(0,x$d1.est),type="l",col="red",lwd=3,main="Subdistribution Density Functions",ylab="",xlab="Time",
ylim=c(0,max(c(x$d1u,x$d2u),na.rm=TRUE)))
}else{
x$d1bandl<-confband(x$alpha,x$d1)[1,]
x$d1bandu<-confband(x$alpha,x$d1)[2,]
x$d2bandl<-confband(x$alpha,x$d2)[1,]
x$d2bandu<-confband(x$alpha,x$d2)[2,]
plot(c(0,x$predtime),c(0,x$d1.est),type="l",col="red",lwd=3,main="Subdistribution Density Functions",ylab="",xlab="Time",
ylim=c(0,max(c(x$d1bandu,x$d2bandu),na.rm=TRUE)))
lines(c(0,x$predtime),c(0,x$d1bandu),lty=3,lwd=3,col="red")
lines(c(0,x$predtime),c(0,x$d1bandl),lty=3,lwd=3,col="red")
lines(c(0,x$predtime),c(0,x$d2bandu),lty=3,lwd=3,col="blue")
lines(c(0,x$predtime),c(0,x$d2bandl),lty=3,lwd=3,col="blue")
}
lines(c(0,x$predtime),c(0,x$d2.est),lwd=3,col="blue")
lines(c(0,x$predtime),c(0,x$d1u),lty=2,lwd=3,col="red")
lines(c(0,x$predtime),c(0,x$d1l),lty=2,lwd=3,col="red")
lines(c(0,x$predtime),c(0,x$d2u),lty=2,lwd=3,col="blue")
lines(c(0,x$predtime),c(0,x$d2l),lty=2,lwd=3,col="blue")
# Subdistribution hazard functions (no t = 0 point prepended).
if((x$simultaneous==FALSE)&(simultaneous==FALSE)){
plot(x$predtime,x$h1.est,type="l",col="red",lwd=3,main="Subdistribution Hazard Functions",ylab="",xlab="Time",
ylim=c(min(c(x$h1l,x$h2l),na.rm=TRUE),max(c(x$h1u,x$h2u),na.rm=TRUE)))
}else{
x$h1bandl<-confband(x$alpha,x$h1)[1,]
x$h1bandu<-confband(x$alpha,x$h1)[2,]
x$h2bandl<-confband(x$alpha,x$h2)[1,]
x$h2bandu<-confband(x$alpha,x$h2)[2,]
plot(x$predtime,x$h1.est,type="l",col="red",lwd=3,main="Subdistribution Hazard Functions",ylab="",xlab="Time",
ylim=c(min(c(x$h1bandl,x$h2bandl),na.rm=TRUE),max(c(x$h1bandu,x$h2bandu),na.rm=TRUE)))
lines(x$predtime,x$h1bandu,lty=3,lwd=3,col="red")
lines(x$predtime,x$h1bandl,lty=3,lwd=3,col="red")
lines(x$predtime,x$h2bandu,lty=3,lwd=3,col="blue")
lines(x$predtime,x$h2bandl,lty=3,lwd=3,col="blue")
}
lines(x$predtime,x$h1u,lty=2,lwd=3,col="red")
lines(x$predtime,x$h1l,lty=2,lwd=3,col="red")
lines(x$predtime,x$h2.est,lwd=3,col="blue")
lines(x$predtime,x$h2u,lty=2,lwd=3,col="blue")
lines(x$predtime,x$h2l,lty=2,lwd=3,col="blue")
}
plot.ddp <- function(x, simultaneous = FALSE, exp = FALSE, ...) {
  # S3 plot method for 'ddp' (dependent Dirichlet process survival) fits.
  # Plots the (log) hazard ratio over time for each covariate, with pointwise
  # limits (dashed, lty = 2) and optional simultaneous bands (dotted, lty = 3).
  #   x            fitted object carrying posterior draws (loghr), pointwise
  #                estimates (loghr.est) and limits (loghrl/loghru), covariate
  #                names (covnames), factor levels (xlevels), prediction grid
  #                (predtime), band level (alpha), design matrix (x) and
  #                scaling factors (xscale).
  #   simultaneous if TRUE and the fit was not run with simultaneous bands,
  #                compute simultaneous confidence bands here via confband().
  #   exp          if TRUE, plot hazard ratios (exponentiated scale) instead
  #                of log hazard ratios.
  # Called for its plotting side effects.
  #
  # Helper: turn a covariate name such as "factor(Stage)2" into a readable
  # label "Stage=2 vs Stage=1" using the reference level stored in x$xlevels.
  # (This logic was previously duplicated in all four plotting branches.)
  covariate_label <- function(padName) {
    if (grepl("factor", padName, fixed = TRUE)) {
      factorName <- sub(").*", ")", padName)        # e.g. "factor(Stage)"
      reference <- unlist(x$xlevels[factorName])[1]  # first level = reference
      padName <- gsub("factor[(]", "", padName)
      padName <- gsub("[)]", "=", padName)
      factorName <- gsub("factor[(]", "", factorName)
      factorName <- gsub("[)]", "=", factorName)
      reference <- paste(factorName, reference)
      padName <- paste(padName, "vs", reference)
    }
    padName
  }
  if ((x$simultaneous == FALSE) & (simultaneous == TRUE)) {
    # Simultaneous bands on the log scale, rescaled to original covariate units.
    x$loghrbandl <- matrix(confband(x$alpha, x$loghr)[1, ], byrow = TRUE, nrow = ncol(x$x)) / x$xscale
    x$loghrbandu <- matrix(confband(x$alpha, x$loghr)[2, ], byrow = TRUE, nrow = ncol(x$x)) / x$xscale
    if (exp == TRUE) {
      x$hrbandl <- exp(x$loghrbandl)
      x$hrbandu <- exp(x$loghrbandu)
    }
  }
  if ((x$simultaneous == FALSE) & (simultaneous == FALSE)) {
    # Pointwise intervals only.
    if (exp == FALSE) {
      for (i in seq_len(nrow(x$loghr.est))) {
        ytop <- max(x$loghru, na.rm = TRUE)
        ybot <- min(x$loghrl, na.rm = TRUE)
        padName <- covariate_label(x$covnames[i])
        plot(x$predtime, x$loghr.est[i, ], type = "l", lwd = 3,
             main = paste("Log Hazard Ratio over Time for Covariate ", padName, sep = ""),
             ylab = "", xlab = "Time", ylim = c(ybot, ytop))
        lines(x$predtime, x$loghrl[i, ], lty = 2, lwd = 3)
        lines(x$predtime, x$loghru[i, ], lty = 2, lwd = 3)
      }
    } else {
      x$hr.est <- exp(x$loghr.est)
      x$hrl <- exp(x$loghrl)
      x$hru <- exp(x$loghru)
      for (i in seq_len(nrow(x$loghr.est))) {
        ytop <- max(x$hru, na.rm = TRUE)
        ybot <- min(x$hrl, na.rm = TRUE)
        padName <- covariate_label(x$covnames[i])
        plot(x$predtime, x$hr.est[i, ], type = "l", lwd = 3,
             main = paste("Hazard Ratio over Time for Covariate ", padName, sep = ""),
             ylab = "", xlab = "Time", ylim = c(ybot, ytop))
        lines(x$predtime, x$hrl[i, ], lty = 2, lwd = 3)
        lines(x$predtime, x$hru[i, ], lty = 2, lwd = 3)
      }
    }
  } else {
    # Pointwise intervals (dashed) plus simultaneous bands (dotted).
    if (exp == FALSE) {
      for (i in seq_len(nrow(x$loghr.est))) {
        # y-range spans all posterior draws so the wider band fits on screen.
        ytop <- max(x$loghr, na.rm = TRUE)
        ybot <- min(x$loghr, na.rm = TRUE)
        padName <- covariate_label(x$covnames[i])
        plot(x$predtime, x$loghr.est[i, ], type = "l", lwd = 3,
             main = paste("Log Hazard Ratio over Time for Covariate ", padName, sep = ""),
             ylab = "", xlab = "Time", ylim = c(ybot, ytop))
        lines(x$predtime, x$loghrl[i, ], lty = 2, lwd = 3)
        lines(x$predtime, x$loghru[i, ], lty = 2, lwd = 3)
        lines(x$predtime, x$loghrbandl[i, ], lty = 3, lwd = 3)
        lines(x$predtime, x$loghrbandu[i, ], lty = 3, lwd = 3)
      }
    } else {
      x$hr <- exp(x$loghr)
      x$hr.est <- exp(x$loghr.est)
      x$hrl <- exp(x$loghrl)
      x$hru <- exp(x$loghru)
      for (i in seq_len(nrow(x$loghr.est))) {
        ytop <- max(x$hr, na.rm = TRUE)
        ybot <- min(x$hr, na.rm = TRUE)
        padName <- covariate_label(x$covnames[i])
        plot(x$predtime, x$hr.est[i, ], type = "l", lwd = 3,
             main = paste("Hazard Ratio over Time for Covariate ", padName, sep = ""),
             ylab = "", xlab = "Time", ylim = c(ybot, ytop))
        lines(x$predtime, x$hrl[i, ], lty = 2, lwd = 3)
        lines(x$predtime, x$hru[i, ], lty = 2, lwd = 3)
        lines(x$predtime, x$hrbandl[i, ], lty = 3, lwd = 3)
        lines(x$predtime, x$hrbandu[i, ], lty = 3, lwd = 3)
      }
    }
  }
}
plot.ddpcomp <- function(x, simultaneous = FALSE, exp = FALSE, ...) {
  # S3 plot method for 'ddpcomp' (competing-risks dependent Dirichlet
  # process) fits. Plots the (log) subdistribution hazard ratio for event 1
  # over time for each covariate, with pointwise limits (dashed) and optional
  # simultaneous bands (dotted).
  #   x            fitted object (loghr draws, loghr.est, loghrl/loghru,
  #                covnames, xlevels, predtime, alpha, design matrix x, xscale).
  #   simultaneous compute simultaneous bands here when the fit itself was not
  #                run with simultaneous bands.
  #   exp          plot hazard ratios (exponentiated) instead of log ratios.
  # Called for its plotting side effects.
  #
  # Helper: turn a covariate name such as "factor(Stage)2" into a readable
  # label "Stage=2 vs Stage=1" using the reference level stored in x$xlevels.
  # (Previously duplicated in all four plotting branches.)
  covariate_label <- function(padName) {
    if (grepl("factor", padName, fixed = TRUE)) {
      factorName <- sub(").*", ")", padName)        # e.g. "factor(Stage)"
      reference <- unlist(x$xlevels[factorName])[1]  # first level = reference
      padName <- gsub("factor[(]", "", padName)
      padName <- gsub("[)]", "=", padName)
      factorName <- gsub("factor[(]", "", factorName)
      factorName <- gsub("[)]", "=", factorName)
      reference <- paste(factorName, reference)
      padName <- paste(padName, "vs", reference)
    }
    padName
  }
  if ((x$simultaneous == FALSE) & (simultaneous == TRUE)) {
    # Simultaneous bands on the log scale, rescaled to original covariate units.
    x$loghrbandl <- matrix(confband(x$alpha, x$loghr)[1, ], byrow = TRUE, nrow = ncol(x$x)) / x$xscale
    x$loghrbandu <- matrix(confband(x$alpha, x$loghr)[2, ], byrow = TRUE, nrow = ncol(x$x)) / x$xscale
    if (exp == TRUE) {
      x$hrbandl <- exp(x$loghrbandl)
      x$hrbandu <- exp(x$loghrbandu)
    }
  }
  if ((x$simultaneous == FALSE) & (simultaneous == FALSE)) {
    # Pointwise intervals only.
    if (exp == FALSE) {
      for (i in seq_len(nrow(x$loghr.est))) {
        ytop <- max(x$loghru, na.rm = TRUE)
        ybot <- min(x$loghrl, na.rm = TRUE)
        padName <- covariate_label(x$covnames[i])
        plot(x$predtime, x$loghr.est[i, ], type = "l", lwd = 3,
             main = paste("Log Subdistribution Hazard Ratio of \n Covariate ", padName, " for Event 1", sep = ""),
             ylab = "", xlab = "Time", ylim = c(ybot, ytop))
        lines(x$predtime, x$loghrl[i, ], lty = 2, lwd = 3)
        lines(x$predtime, x$loghru[i, ], lty = 2, lwd = 3)
      }
    } else {
      x$hr.est <- exp(x$loghr.est)
      x$hrl <- exp(x$loghrl)
      x$hru <- exp(x$loghru)
      for (i in seq_len(nrow(x$loghr.est))) {
        ytop <- max(x$hru, na.rm = TRUE)
        ybot <- min(x$hrl, na.rm = TRUE)
        padName <- covariate_label(x$covnames[i])
        plot(x$predtime, x$hr.est[i, ], type = "l", lwd = 3,
             main = paste("Subdistribution Hazard Ratio of \n Covariate ", padName, " for Event 1", sep = ""),
             ylab = "", xlab = "Time", ylim = c(ybot, ytop))
        lines(x$predtime, x$hrl[i, ], lty = 2, lwd = 3)
        lines(x$predtime, x$hru[i, ], lty = 2, lwd = 3)
      }
    }
  } else {
    # Pointwise intervals (dashed) plus simultaneous bands (dotted).
    if (exp == FALSE) {
      for (i in seq_len(nrow(x$loghr.est))) {
        # y-range spans all posterior draws so the wider band fits on screen.
        ytop <- max(x$loghr, na.rm = TRUE)
        ybot <- min(x$loghr, na.rm = TRUE)
        padName <- covariate_label(x$covnames[i])
        plot(x$predtime, x$loghr.est[i, ], type = "l", lwd = 3,
             main = paste("Log Subdistribution Hazard Ratio of \n Covariate ", padName, " for Event 1", sep = ""),
             ylab = "", xlab = "Time", ylim = c(ybot, ytop))
        lines(x$predtime, x$loghrl[i, ], lty = 2, lwd = 3)
        lines(x$predtime, x$loghru[i, ], lty = 2, lwd = 3)
        lines(x$predtime, x$loghrbandl[i, ], lty = 3, lwd = 3)
        lines(x$predtime, x$loghrbandu[i, ], lty = 3, lwd = 3)
      }
    } else {
      x$hr <- exp(x$loghr)
      x$hr.est <- exp(x$loghr.est)
      x$hrl <- exp(x$loghrl)
      x$hru <- exp(x$loghru)
      for (i in seq_len(nrow(x$loghr.est))) {
        ytop <- max(x$hr, na.rm = TRUE)
        ybot <- min(x$hr, na.rm = TRUE)
        padName <- covariate_label(x$covnames[i])
        # BUG FIX: this title previously read "Log Subdistribution Hazard
        # Ratio" although the exponentiated ratio (hr.est) is plotted here;
        # it now matches the non-simultaneous exp = TRUE branch.
        plot(x$predtime, x$hr.est[i, ], type = "l", lwd = 3,
             main = paste("Subdistribution Hazard Ratio of \n Covariate ", padName, " for Event 1", sep = ""),
             ylab = "", xlab = "Time", ylim = c(ybot, ytop))
        lines(x$predtime, x$hrl[i, ], lty = 2, lwd = 3)
        lines(x$predtime, x$hru[i, ], lty = 2, lwd = 3)
        lines(x$predtime, x$hrbandl[i, ], lty = 3, lwd = 3)
        lines(x$predtime, x$hrbandu[i, ], lty = 3, lwd = 3)
      }
    }
  }
}
plot.predddpcomp <- function(x, ...) {
  # S3 plot method for competing-risks DDP predictions at new covariate
  # values. For every row of new data it draws the cumulative incidence
  # function, the cause-specific density and the subdistribution hazard
  # for event 1, each with pointwise limits (dashed).
  # Called for its plotting side effects.
  tgrid <- c(0, x$tpred)  # CIF and density curves start at t = 0
  for (obs in seq_len(nrow(x$Fpred))) {
    plot(tgrid, c(0, x$Fpred[obs, ]),
         main = paste("Cumulative Incidence Function Estimate\n with New Data ", obs, " for Event 1", sep = ""),
         type = "l", lwd = 3, xlab = "Time", ylab = "",
         ylim = c(min(x$Fpredl, na.rm = TRUE), max(x$Fpredu, na.rm = TRUE)))
    lines(tgrid, c(0, x$Fpredl[obs, ]), lwd = 3, lty = 2)
    lines(tgrid, c(0, x$Fpredu[obs, ]), lwd = 3, lty = 2)
    plot(tgrid, c(0, x$dpred[obs, ]),
         main = paste("Cause-specific Density Estimate\n with New Data ", obs, " for Event 1", sep = ""),
         type = "l", lwd = 3, xlab = "Time", ylab = "",
         ylim = c(min(x$dpredl, na.rm = TRUE), max(x$dpredu, na.rm = TRUE)))
    lines(tgrid, c(0, x$dpredl[obs, ]), lwd = 3, lty = 2)
    lines(tgrid, c(0, x$dpredu[obs, ]), lwd = 3, lty = 2)
    # Hazard is drawn on the prediction grid only (no t = 0 point).
    plot(x$tpred, x$hpred[obs, ],
         main = paste("Subdistribution Hazard Estimate\n with New Data ", obs, " for Event 1", sep = ""),
         type = "l", lwd = 3, xlab = "Time", ylab = "",
         ylim = c(min(x$hpredl, na.rm = TRUE), max(x$hpredu, na.rm = TRUE)))
    lines(x$tpred, x$hpredl[obs, ], lwd = 3, lty = 2)
    lines(x$tpred, x$hpredu[obs, ], lwd = 3, lty = 2)
  }
}
plot.predddp <- function(x, ...) {
  # S3 plot method for DDP survival predictions at new covariate values.
  # For every row of new data it draws the predicted survival, density and
  # hazard curves, each with pointwise limits (dashed).
  # Called for its plotting side effects.
  tgrid <- c(0, x$tpred)
  for (obs in seq_len(nrow(x$Spred))) {
    # Survival curve starts at S(0) = 1.
    plot(tgrid, c(1, x$Spred[obs, ]),
         main = paste("Survival Estimate with New Data ", obs, sep = ""),
         type = "l", lwd = 3, xlab = "Time", ylab = "",
         ylim = c(min(x$Spredl, na.rm = TRUE), max(x$Spredu, na.rm = TRUE)))
    lines(tgrid, c(1, x$Spredl[obs, ]), lwd = 3, lty = 2)
    lines(tgrid, c(1, x$Spredu[obs, ]), lwd = 3, lty = 2)
    # Density curve starts at f(0) = 0.
    plot(tgrid, c(0, x$dpred[obs, ]),
         main = paste("Density Estimate with New Data ", obs, sep = ""),
         type = "l", lwd = 3, xlab = "Time", ylab = "",
         ylim = c(min(x$dpredl, na.rm = TRUE), max(x$dpredu, na.rm = TRUE)))
    lines(tgrid, c(0, x$dpredl[obs, ]), lwd = 3, lty = 2)
    lines(tgrid, c(0, x$dpredu[obs, ]), lwd = 3, lty = 2)
    # Hazard is drawn on the prediction grid only (no t = 0 point).
    plot(x$tpred, x$hpred[obs, ],
         main = paste("Hazard Estimate with New Data ", obs, sep = ""),
         type = "l", lwd = 3, xlab = "Time", ylab = "",
         ylim = c(min(x$hpredl, na.rm = TRUE), max(x$hpredu, na.rm = TRUE)))
    lines(x$tpred, x$hpredl[obs, ], lwd = 3, lty = 2)
    lines(x$tpred, x$hpredu[obs, ], lwd = 3, lty = 2)
  }
}
|
library(glmnet)
# Load the cervix training set: column 1 is the response; columns 4 onward
# are the predictors (columns 2-3 are skipped -- presumably identifier
# columns; confirm against the CSV header).
mydata = read.table("../../../../TrainingSet/FullSet/Lasso/cervix.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fixed seed so the cross-validation fold assignment is reproducible.
set.seed(123)
# 10-fold CV elastic net with alpha = 0.03 (i.e. close to ridge), gaussian
# family, model selected by mean absolute error; predictors used as-is
# (standardize = FALSE).
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.03,family="gaussian",standardize=FALSE)
# Append the fitted glmnet path summary to the run's log file.
sink('./cervix_016.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Lasso/cervix/cervix_016.R | no_license | esbgkannan/QSMART | R | false | false | 345 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Lasso/cervix.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.03,family="gaussian",standardize=FALSE)
sink('./cervix_016.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
# use gamma approximation based test rather than permutation test for HSIC association.
library(assist)
library(psych)
library(dHSIC)
library(fANCOVA)
library(entropy)
# conduct permutation test to determine direction
# Bivariate causal-direction test for continuous X, Y using additive-noise
# models (fitted in both directions by fit_both_dir_continuous) plus a
# permutation test on the difference/ratio of the two directional p-values.
# Returns a list with:
#   dir             decision code: 1 = X -> Y, -1 = Y -> X, 0 = no decision;
#                   tie/constant-fit cases are remapped by the arithmetic on
#                   the constant_fit indicators below.
#   P_X2Y, P_Y2X    residual-independence p-values for the two directions.
#   P_no_causation  permutation p-value of the causation test.
#   P_ind           marginal independence p-value between X and Y.
#   constant_fit_*  1 if the fitted regression function is essentially constant.
permANM<-function(X,Y,number_of_permutations=5000,level=0.05,
fit.method=c("ssr","B.spline","loess"),
measurement=c("difference","ratio"),score=c("HSIC","Entropy-empirical")){
# Resolve choice arguments left at their full default vectors to the default
# option (an unspecified argument still has its original vector length).
if (length(fit.method)==3){
fit.method="ssr"
}
if (length(measurement)==2){
measurement="difference"
}
if (length(score)==2){
score="HSIC"
}
# Fit the ANM in both directions on the observed data.
fit.output=fit_both_dir_continuous(X,Y,fit.method=fit.method,score=score,level=level)
P_X2Y=fit.output$p_val_fw
P_Y2X=fit.output$p_val_bw
constant_fit_X2Y=fit.output$constant_fit_fw
constant_fit_Y2X=fit.output$constant_fit_bw
# Observed causation measure: absolute difference or (>= 1) ratio of the
# two directional p-values.
if (measurement=="difference"){
diff_obs=P_Y2X-P_X2Y
obs_measure=abs(diff_obs)
}else{
ratio_obs=P_Y2X/P_X2Y
obs_measure=ifelse(ratio_obs>1,ratio_obs,1/ratio_obs)
}
# P value of causation test by permutation test based on difference
# NOTE(review): the c() initialisation below is dead -- both vectors are
# overwritten from matrix_perm a few lines later.
perm_X2Y<-perm_Y2X<-c()
sizeY=length(Y)
# Build the permutation null by refitting with Y randomly permuted.
permutation_output=lapply(1:number_of_permutations, function(permutation){
Y_perm=sample(Y,sizeY)
fit_both_dir_continuous(X,Y_perm,fit.method=fit.method,score=score,level=level)})
# Each fit unlists to 7 values (see the list returned by
# fit_both_dir_continuous); columns ncol-4 and ncol-3 are p_val_fw / p_val_bw.
matrix_perm=matrix(unlist(permutation_output),nrow=number_of_permutations,byrow=TRUE)
ncol_mat=ncol(matrix_perm)
perm_X2Y=matrix_perm[,ncol_mat-4]
perm_Y2X=matrix_perm[,ncol_mat-3]
if (measurement=="difference"){
diff_perm<-perm_Y2X-perm_X2Y
perm_measures=abs(diff_perm)
}else{
perm_measures<-perm_Y2X/perm_X2Y
perm_measures[perm_measures<1]=(1/perm_measures)[perm_measures<1]
}
# Permutation p-value: fraction of null measures at least as extreme.
Pc=sum(perm_measures>=obs_measure)/number_of_permutations
# Initial decision: 0 if causation not significant or both directions
# rejected; 1 if X -> Y favoured (larger forward p-value), -1 if Y -> X,
# 2 marks an exact tie.
dir=ifelse((Pc>=level)|(P_X2Y<level & P_Y2X<level),0,ifelse(P_X2Y>P_Y2X,1,ifelse(P_X2Y<P_Y2X,-1,2)))
# Demote decisions whose winning direction has a constant fitted function:
# dir == 1 becomes 0 when the forward fit is constant; dir == -1 becomes 0
# when the backward fit is constant; the tie code 2 is remapped by the
# constant-fit combination.
dir=ifelse(dir==2,dir-3*constant_fit_X2Y-1*constant_fit_Y2X+2*constant_fit_X2Y*constant_fit_Y2X,
ifelse(dir==1,dir-constant_fit_X2Y,ifelse(dir==-1,dir+constant_fit_Y2X,dir)))
list(dir=dir,P_X2Y=P_X2Y,P_Y2X=P_Y2X,P_no_causation=Pc,P_ind=fit.output$p_val_ind,
constant_fit_X2Y=constant_fit_X2Y,constant_fit_Y2X=constant_fit_Y2X)
}
# fit both direction for contiANM
fit_both_dir_continuous <- function(x, y, m = c(2, 3, 4), N = 40, limnla = c(-10, 3),
                                    fit.method, score, level) {
  # Fit the additive-noise model in the forward (x -> y) and backward
  # (y -> x) directions via contiANM and also test marginal independence
  # between x and y with a gamma-approximated dHSIC test.
  # Returns a list with the directional statistics/p-values, the
  # constant-fit indicators and the independence p-value.
  if (length(m) == 3) {
    m <- 2  # default spline order (cubic) when none was chosen
  }
  fw <- contiANM(x, y, m = m, N = N, limnla = limnla, fit.method = fit.method, score = score, level = level)
  bw <- contiANM(y, x, m = m, N = N, limnla = limnla, fit.method = fit.method, score = score, level = level)
  p_val_ind <- dhsic.test(x, y, method = "gamma")$p.value
  list(statistic_fw = fw$statistic, statistic_bw = bw$statistic,
       p_val_fw = fw$pvalue, p_val_bw = bw$pvalue,
       constant_fit_fw = fw$constant_fit, constant_fit_bw = bw$constant_fit,
       p_val_ind = p_val_ind)
}
# continuous ANM
contiANM <- function(x, y, m, N, limnla, fit.method, score, level) {
  # Fit the additive-noise model y = f(x) + eps in one direction and score
  # independence between the regressor and the held-out residuals.
  #   m          spline order for fit.method = "ssr": 2 = cubic, 3 = quintic,
  #              4 = septic.
  #   N          number of knots for fit.method = "B.spline".
  #   limnla     search range for log10(n * lambda) passed to assist::ssr.
  #   fit.method "ssr", "B.spline" or "loess".
  #   score      "HSIC" (gamma-approximated dhsic.test) or
  #              "Entropy-empirical" (spacing entropy of x.test and eps).
  #   level      significance level for the constant-fit check.
  # Returns list(statistic, pvalue, constant_fit); constant_fit is 1 when the
  # fitted curve is statistically indistinguishable from a straight line
  # with zero slope mattering (slope p-value > level), else 0.
  if (length(x) != length(y)) {
    stop("lengths of x and y do not match")
  }
  n.sub <- length(x)
  # Hold out every 5th observation for the independence test.
  test.index <- seq(1, n.sub, 5)
  if (fit.method == "ssr") {
    # Shift to non-negative values for the smoothing-spline kernels.
    x <- x - min(x)
    y <- y - min(y)
  }
  x.train <- x[-test.index]
  y.train <- y[-test.index]
  x.test <- x[test.index]
  y.test <- y[test.index]
  ##### B spline #####
  if (fit.method == "B.spline") {
    BS <- smooth.spline(x.train, y.train, nknots = N)
    fitted <- predict(BS, x.test)$y
    eps <- y.test - fitted
    new.x.train <- x.train[order(x.train)]
    fitted.new.train <- predict(BS, new.x.train)$y
  }
  ##### smoothing splines #####
  if (fit.method == "ssr") {
    if (m == 2) {
      B <- ssr(y.train ~ x.train, rk = cubic2(x.train), limnla = limnla) # based on classic polynomials
    } else if (m == 3) {
      B <- ssr(y.train ~ x.train + I(x.train^2), rk = quintic2(x.train), limnla = limnla)
    } else if (m == 4) {
      B <- ssr(y.train ~ x.train + I(x.train^2) + I(x.train^3), rk = septic2(x.train), limnla = limnla)
    }
    fitted <- predict(B, data.frame(x.train = x.test), pstd = FALSE)$fit
    eps <- y.test - fitted
    new.x.train <- x.train[order(x.train)]
    fitted.new.train <- B$fit[order(x.train)]
  }
  ##### LOESS #####
  if (fit.method == "loess") {
    new.x.train <- x.train[order(x.train)]
    new.y.train <- y.train[order(x.train)]
    # Train the span parameter by AICC.
    # NOTE(review): loess.as takes (x, y); passing new.y.train first looks
    # like the arguments are swapped -- confirm against fANCOVA docs.
    span <- (loess.as(new.y.train, new.x.train)$pars)$span
    cars.lo <- loess(new.y.train ~ new.x.train, span = span)
    fitted <- predict(cars.lo, x.test)
    eps <- y.test - fitted
    fitted.new.train <- cars.lo$fitted
  }
  ########################
  # Constant-fit check: slope p-value of a straight line through the fitted
  # curve. suppressWarnings() replaces the old options(warn=-1)/options(warn=0)
  # pair, which clobbered the caller's warning setting instead of restoring it.
  p_constant <- suppressWarnings(
    summary(lm(fitted.new.train ~ new.x.train))$coefficients[2, 4]
  )
  constant_fit <- ifelse(p_constant > level, 1, 0)
  # Drop test points whose prediction is NA (loess cannot extrapolate);
  # x.test must be filtered before eps to reuse the same NA mask.
  x.test <- x.test[!is.na(eps)]
  eps <- eps[!is.na(eps)]
  # Two scores.
  if (score == "HSIC") {
    # Call dhsic.test once and reuse the result (it was previously invoked
    # twice with identical arguments for p.value and statistic).
    hsic <- dhsic.test(x.test, eps, method = "gamma")
    pvalue <- hsic$p.value
    statistic <- hsic$statistic
  } else if (score == "Entropy-empirical") {
    statistic <- EdS_entropy(x.test) + EdS_entropy(eps)
    pvalue <- 1 / statistic
  }
  list(statistic = statistic, pvalue = pvalue, constant_fit = constant_fit)
}
EdS_entropy <- function(x) {
  # Spacing-based (Edgeworth/Vasicek-style) empirical entropy estimate:
  # mean of log spacings of the sorted sample, bias-corrected by
  # -digamma(1) + digamma(N). Zero spacings (ties) contribute 0, as before.
  #   x  numeric vector.
  # Fixes: the old sapply over 1:(N-1) broke for N == 1 (1:0 indexes x[2],
  # producing NA) and was type-unstable; diff() on the sorted sample is
  # vectorized and returns numeric(0) for N == 1 (result NaN, no bad index).
  x <- sort(x)
  N <- length(x)
  spacings <- diff(x)  # non-negative since x is sorted
  log.spacings <- ifelse(spacings != 0, log(spacings), 0)
  mean(log.spacings) - digamma(1) + digamma(N)
}
| /contiANM_2.1.R | no_license | jiaorong007/Bivariate-Causal-Discovery | R | false | false | 6,226 | r | # use gamma approximation based test rather than permutation test for HSIC association.
library(assist)
library(psych)
library(dHSIC)
library(fANCOVA)
library(entropy)
# conduct permutation test to determine direction
# Bivariate causal-direction test for continuous X, Y using additive-noise
# models (fitted in both directions by fit_both_dir_continuous) plus a
# permutation test on the difference/ratio of the two directional p-values.
# Returns a list with:
#   dir             decision code: 1 = X -> Y, -1 = Y -> X, 0 = no decision;
#                   tie/constant-fit cases are remapped by the arithmetic on
#                   the constant_fit indicators below.
#   P_X2Y, P_Y2X    residual-independence p-values for the two directions.
#   P_no_causation  permutation p-value of the causation test.
#   P_ind           marginal independence p-value between X and Y.
#   constant_fit_*  1 if the fitted regression function is essentially constant.
permANM<-function(X,Y,number_of_permutations=5000,level=0.05,
fit.method=c("ssr","B.spline","loess"),
measurement=c("difference","ratio"),score=c("HSIC","Entropy-empirical")){
# Resolve choice arguments left at their full default vectors to the default
# option (an unspecified argument still has its original vector length).
if (length(fit.method)==3){
fit.method="ssr"
}
if (length(measurement)==2){
measurement="difference"
}
if (length(score)==2){
score="HSIC"
}
# Fit the ANM in both directions on the observed data.
fit.output=fit_both_dir_continuous(X,Y,fit.method=fit.method,score=score,level=level)
P_X2Y=fit.output$p_val_fw
P_Y2X=fit.output$p_val_bw
constant_fit_X2Y=fit.output$constant_fit_fw
constant_fit_Y2X=fit.output$constant_fit_bw
# Observed causation measure: absolute difference or (>= 1) ratio of the
# two directional p-values.
if (measurement=="difference"){
diff_obs=P_Y2X-P_X2Y
obs_measure=abs(diff_obs)
}else{
ratio_obs=P_Y2X/P_X2Y
obs_measure=ifelse(ratio_obs>1,ratio_obs,1/ratio_obs)
}
# P value of causation test by permutation test based on difference
# NOTE(review): the c() initialisation below is dead -- both vectors are
# overwritten from matrix_perm a few lines later.
perm_X2Y<-perm_Y2X<-c()
sizeY=length(Y)
# Build the permutation null by refitting with Y randomly permuted.
permutation_output=lapply(1:number_of_permutations, function(permutation){
Y_perm=sample(Y,sizeY)
fit_both_dir_continuous(X,Y_perm,fit.method=fit.method,score=score,level=level)})
# Each fit unlists to 7 values (see the list returned by
# fit_both_dir_continuous); columns ncol-4 and ncol-3 are p_val_fw / p_val_bw.
matrix_perm=matrix(unlist(permutation_output),nrow=number_of_permutations,byrow=TRUE)
ncol_mat=ncol(matrix_perm)
perm_X2Y=matrix_perm[,ncol_mat-4]
perm_Y2X=matrix_perm[,ncol_mat-3]
if (measurement=="difference"){
diff_perm<-perm_Y2X-perm_X2Y
perm_measures=abs(diff_perm)
}else{
perm_measures<-perm_Y2X/perm_X2Y
perm_measures[perm_measures<1]=(1/perm_measures)[perm_measures<1]
}
# Permutation p-value: fraction of null measures at least as extreme.
Pc=sum(perm_measures>=obs_measure)/number_of_permutations
# Initial decision: 0 if causation not significant or both directions
# rejected; 1 if X -> Y favoured (larger forward p-value), -1 if Y -> X,
# 2 marks an exact tie.
dir=ifelse((Pc>=level)|(P_X2Y<level & P_Y2X<level),0,ifelse(P_X2Y>P_Y2X,1,ifelse(P_X2Y<P_Y2X,-1,2)))
# Demote decisions whose winning direction has a constant fitted function:
# dir == 1 becomes 0 when the forward fit is constant; dir == -1 becomes 0
# when the backward fit is constant; the tie code 2 is remapped by the
# constant-fit combination.
dir=ifelse(dir==2,dir-3*constant_fit_X2Y-1*constant_fit_Y2X+2*constant_fit_X2Y*constant_fit_Y2X,
ifelse(dir==1,dir-constant_fit_X2Y,ifelse(dir==-1,dir+constant_fit_Y2X,dir)))
list(dir=dir,P_X2Y=P_X2Y,P_Y2X=P_Y2X,P_no_causation=Pc,P_ind=fit.output$p_val_ind,
constant_fit_X2Y=constant_fit_X2Y,constant_fit_Y2X=constant_fit_Y2X)
}
# fit both direction for contiANM
fit_both_dir_continuous <- function(x, y, m = c(2, 3, 4), N = 40, limnla = c(-10, 3),
                                    fit.method, score, level) {
  # Fit the additive-noise model in the forward (x -> y) and backward
  # (y -> x) directions via contiANM and also test marginal independence
  # between x and y with a gamma-approximated dHSIC test.
  # Returns a list with the directional statistics/p-values, the
  # constant-fit indicators and the independence p-value.
  if (length(m) == 3) {
    m <- 2  # default spline order (cubic) when none was chosen
  }
  fw <- contiANM(x, y, m = m, N = N, limnla = limnla, fit.method = fit.method, score = score, level = level)
  bw <- contiANM(y, x, m = m, N = N, limnla = limnla, fit.method = fit.method, score = score, level = level)
  p_val_ind <- dhsic.test(x, y, method = "gamma")$p.value
  list(statistic_fw = fw$statistic, statistic_bw = bw$statistic,
       p_val_fw = fw$pvalue, p_val_bw = bw$pvalue,
       constant_fit_fw = fw$constant_fit, constant_fit_bw = bw$constant_fit,
       p_val_ind = p_val_ind)
}
# continuous ANM
contiANM <- function(x, y, m, N, limnla, fit.method, score, level) {
  # Fit the additive-noise model y = f(x) + eps in one direction and score
  # independence between the regressor and the held-out residuals.
  #   m          spline order for fit.method = "ssr": 2 = cubic, 3 = quintic,
  #              4 = septic.
  #   N          number of knots for fit.method = "B.spline".
  #   limnla     search range for log10(n * lambda) passed to assist::ssr.
  #   fit.method "ssr", "B.spline" or "loess".
  #   score      "HSIC" (gamma-approximated dhsic.test) or
  #              "Entropy-empirical" (spacing entropy of x.test and eps).
  #   level      significance level for the constant-fit check.
  # Returns list(statistic, pvalue, constant_fit); constant_fit is 1 when the
  # slope p-value of a linear fit to the fitted curve exceeds level, else 0.
  if (length(x) != length(y)) {
    stop("lengths of x and y do not match")
  }
  n.sub <- length(x)
  # Hold out every 5th observation for the independence test.
  test.index <- seq(1, n.sub, 5)
  if (fit.method == "ssr") {
    # Shift to non-negative values for the smoothing-spline kernels.
    x <- x - min(x)
    y <- y - min(y)
  }
  x.train <- x[-test.index]
  y.train <- y[-test.index]
  x.test <- x[test.index]
  y.test <- y[test.index]
  ##### B spline #####
  if (fit.method == "B.spline") {
    BS <- smooth.spline(x.train, y.train, nknots = N)
    fitted <- predict(BS, x.test)$y
    eps <- y.test - fitted
    new.x.train <- x.train[order(x.train)]
    fitted.new.train <- predict(BS, new.x.train)$y
  }
  ##### smoothing splines #####
  if (fit.method == "ssr") {
    if (m == 2) {
      B <- ssr(y.train ~ x.train, rk = cubic2(x.train), limnla = limnla) # based on classic polynomials
    } else if (m == 3) {
      B <- ssr(y.train ~ x.train + I(x.train^2), rk = quintic2(x.train), limnla = limnla)
    } else if (m == 4) {
      B <- ssr(y.train ~ x.train + I(x.train^2) + I(x.train^3), rk = septic2(x.train), limnla = limnla)
    }
    fitted <- predict(B, data.frame(x.train = x.test), pstd = FALSE)$fit
    eps <- y.test - fitted
    new.x.train <- x.train[order(x.train)]
    fitted.new.train <- B$fit[order(x.train)]
  }
  ##### LOESS #####
  if (fit.method == "loess") {
    new.x.train <- x.train[order(x.train)]
    new.y.train <- y.train[order(x.train)]
    # Train the span parameter by AICC.
    # NOTE(review): loess.as takes (x, y); passing new.y.train first looks
    # like the arguments are swapped -- confirm against fANCOVA docs.
    span <- (loess.as(new.y.train, new.x.train)$pars)$span
    cars.lo <- loess(new.y.train ~ new.x.train, span = span)
    fitted <- predict(cars.lo, x.test)
    eps <- y.test - fitted
    fitted.new.train <- cars.lo$fitted
  }
  ########################
  # Constant-fit check: slope p-value of a straight line through the fitted
  # curve. suppressWarnings() replaces the old options(warn=-1)/options(warn=0)
  # pair, which clobbered the caller's warning setting instead of restoring it.
  p_constant <- suppressWarnings(
    summary(lm(fitted.new.train ~ new.x.train))$coefficients[2, 4]
  )
  constant_fit <- ifelse(p_constant > level, 1, 0)
  # Drop test points whose prediction is NA (loess cannot extrapolate);
  # x.test must be filtered before eps to reuse the same NA mask.
  x.test <- x.test[!is.na(eps)]
  eps <- eps[!is.na(eps)]
  # Two scores.
  if (score == "HSIC") {
    # Call dhsic.test once and reuse the result (it was previously invoked
    # twice with identical arguments for p.value and statistic).
    hsic <- dhsic.test(x.test, eps, method = "gamma")
    pvalue <- hsic$p.value
    statistic <- hsic$statistic
  } else if (score == "Entropy-empirical") {
    statistic <- EdS_entropy(x.test) + EdS_entropy(eps)
    pvalue <- 1 / statistic
  }
  list(statistic = statistic, pvalue = pvalue, constant_fit = constant_fit)
}
EdS_entropy <- function(x) {
  # Spacing-based (Edgeworth/Vasicek-style) empirical entropy estimate:
  # mean of log spacings of the sorted sample, bias-corrected by
  # -digamma(1) + digamma(N). Zero spacings (ties) contribute 0, as before.
  #   x  numeric vector.
  # Fixes: the old sapply over 1:(N-1) broke for N == 1 (1:0 indexes x[2],
  # producing NA) and was type-unstable; diff() on the sorted sample is
  # vectorized and returns numeric(0) for N == 1 (result NaN, no bad index).
  x <- sort(x)
  N <- length(x)
  spacings <- diff(x)  # non-negative since x is sorted
  log.spacings <- ifelse(spacings != 0, log(spacings), 0)
  mean(log.spacings) - digamma(1) + digamma(N)
}
|
#Name: Data exploration for w271 Final Project.
#Date: November 2, 2016
#Author: Nick Chen, Johnny Yeo, Rama Thamman, David Skarbrevik
library(xlsx)
library(ggplot2)
library(lmtest)
library(car)
library(sandwich)
#Research Question:
# Do people who live in cultures that drink more wine and eat more dessert live longer lives on average?
# Perhaps a better question: Is alcohol consumption a valid proxy/indirect indicator of health in a country?
# Justification of using our dataset: Because our data has many indicators of health (GNP, life expectancy, obesity, calorie intake, etc.) as well as data on the amount of aclohol consumption of many countries, this is a great dataset to test if alcohol consumption is a valid indirect proxy for health in the way that GNP is.
### Dataset Used: World Nutrition Data
#. Dataset has one observation per country, but more than 50 countries
#. We don't have a lot of information on how this data was acquired, but a way to validate it is by crosschecking with another more documented dataset
#(http://data.worldbank.org/data-catalog/health-nutrition-and-population-statistics).
#. Check that total calorie consumption is greater than or equal to the sum of calories in all of the individual food categories.
##Motivation/Thoughts about this research:
#. Some research has seemed to show that moderate wine consumption seems to be associated with a lower incidence of heart disease and potentially
#longer life spans (http://www.telegraph.co.uk/news/health/11066516/Glass-of-wine-with-dinner-helps-you-live-longer-scientists-claim.html).
#. Critiques of this research are that the wine's effect on health outcomes is not a causal effect, but instead that the true underlying effect is more related to stronger social ties and wine consumption is simply associated with having a strong social network and taking more meals with family and friends.
#. The idea behind the main research question is to investigate the idea that cultures where people take more meals with family and friends have better health outcomes.
#. We will use wine consumption and potentially sugar consumption in some form to serve as proxy variables for eating more family meals or meals with friends.
#. The idea behind this is that you are more likely to drink wine or eat dessert with your meal when you are eating with friends or family.
#. Other research has indicated that strong social networks are an important factor in living a healthy life (http://uncnews.unc.edu/2016/01/04/social-networks-as-important-as-exercise-and-diet-across-the-span-of-our-lives/).
##Exploratory Data Analysis, Domain Research, and Potential Model
#. Look at correlations between wine / alcohol and sugar consumption and life expectancy
#. Check that the data makes sense
#. Will want to consider the possibility of nonlinear effects of wine / alcohol and sugar consumption on life expectancy at birth
#. Control for obesity prevalence
#. Consider interaction term between wine / alcohol consumption and sugar consumption
#. Perhaps one or the other could have a negative effect individually, but moderate consumption of both may be an even better proxy for taking more meals with family and friends than moderate consumption of one or the other
#Load the data
#setwd('~/Desktop/UC Berkeley/Applied Regression and Time Series Analysis/Lab 3/Health and Diet Data/')
#setwd('C:/Users/rthamman/Dropbox (Personal)/Berkeley/Courses/W271/Labs/Lab 3/Git/Data')
#setwd('~/Documents/MIDS/w271/w271_final_proj/W271_Lab3/Data')
#getwd()
# Main diet/nutrition dataset: one row per country.
diet.data <- read.csv("diet-forcsv - Sheet 1.csv")
# Third-party figures used to cross-check the main dataset (presumably the
# World Bank data referenced in the header -- confirm); the mapping sheet
# links country names across the two sources.
data.validation.country.mapping <- read.xlsx("Data from Third Parties for Validation.xlsx", sheetName = "Country_Mapping")
data.validation.life_expect <- read.xlsx("Data from Third Parties for Validation.xlsx", sheetName = "Life Expectancy")
data.validation.growth_rate <- read.xlsx("Data from Third Parties for Validation.xlsx", sheetName = "Population Growth Rate")
#*************************************
#Missing values check
# Per-column NA counts (vapply keeps the return type stable, unlike sapply).
na.check <- vapply(diet.data, function(col) sum(is.na(col)), integer(1))
if (sum(na.check) == 0) {
  cat("No NA values in this data.")
} else {
  # BUG FIX: a bare `na.check` inside a braced block is not auto-printed in R,
  # so the per-column NA counts were silently discarded; print() shows them.
  print(na.check)
  cat("There are a total of", sum(na.check), "NAs in the data.")
}
cat("Number of rows: ", nrow(diet.data))
cat("Number of complete cases: ", nrow(diet.data[complete.cases(diet.data), ]))
#There are no missing values
#Univariate EDA
#Wine consumption
#Summary statistics for variables of interest
summary(diet.data$Wine..kcal.day.)
sum(diet.data$Wine..kcal.day. == 0)
#There are 32 countries with zero wine consumption. This could be because of bottom coding to cover for null values.
wine.hist <- ggplot(data = diet.data, aes(x = Wine..kcal.day.))
wine.hist + geom_histogram(fill = "navy", colour = "white") + ggtitle("Histogram of Wine Calories per Day") + labs(y = "Number of Countries")
#life expectancy
summary(diet.data$Life.expectancy.at.birth..years..both.sexes)
#The life expectancy variable shows a negative skew (because no one lives to be 160).
life.expect.all.hist <- ggplot(data = diet.data, aes(x = Life.expectancy.at.birth..years..both.sexes))
life.expect.all.hist + geom_histogram(fill = "navy", colour = "white") + ggtitle("Histogram of Life Expectancy at Birth") + labs(y = "Number of Countries")
#Alcoholic beverages calories per day
summary(diet.data$Alcoholic.Beverages..kcal.day.)
sum(diet.data$Alcoholic.Beverages..kcal.day. == 0)
#Like wine, there are a lot of countries with zero or very little consumption of alcoholic beverages.
Alcoholic.bevs.cals.hist <- ggplot(data = diet.data, aes(x = Alcoholic.Beverages..kcal.day.))
Alcoholic.bevs.cals.hist + geom_histogram(fill = "navy", colour = "white") + ggtitle("Histogram of Alchoholic Beverages Calories per Day") + labs(y = "Number of Countries")
#GNP per capita
summary(diet.data$Gross.national.income.per.capita..PPP.international...)
#GNP histogram
GNP.hist <- ggplot(data = diet.data, aes(x = Gross.national.income.per.capita..PPP.international...))
GNP.hist + geom_histogram(fill = "navy", colour = "white") + ggtitle("Histogram of GNP") + labs(y = "Number of Countries")
#*************************************
#Multivariate EDA
#Correlations
#correlation between wine consumption and alcoholic beverage consumption per day and life expectancy at birth is quite large at .496.
#Correlations of wine and alcohol consumption with life expectancy at birth.
with(diet.data, cor(Wine..kcal.day., Life.expectancy.at.birth..years..both.sexes))
with(diet.data, cor(Alcoholic.Beverages..kcal.day., Life.expectancy.at.birth..years..both.sexes))
#look at correlation between wine / alcohol consumption and GNP to see if the above result appears to be a result of income
#There are very high correlations between wine and alcohol consumption with GNP, both being above 0.6.
with(diet.data, cor(Gross.national.income.per.capita..PPP.international..., Wine..kcal.day.))
with(diet.data, cor(Gross.national.income.per.capita..PPP.international..., Alcoholic.Beverages..kcal.day.))
#diet.data$Alcoholic.Beverages..kcal.day. > 0
wine.gnp.scatter <- ggplot(
  data = diet.data,
  aes(x = Gross.national.income.per.capita..PPP.international..., y = Wine..kcal.day.)
)
wine.gnp.scatter +
  geom_point(colour = "navy") +
  labs(title = "Scatterplot of GNP and Wine Consumption per Day")
#further analysis of correlation between wine / alcohol consumption and life expectancy at birth
#Identify countries with "outlier-level" wine consumption and inspect their life expectancy.
#Fix: select the wine column by name instead of the magic index 52 (robust to column
#reordering; column 52 was the wine column, as the names below confirm).
i <- which(names(diet.data) == "Wine..kcal.day.")
#wine.box$stats[5] is the boxplot's upper whisker. (Typo "Consumtion" fixed in title.)
wine.box <- boxplot(diet.data[i], main = "Boxplot of Wine Consumption (kcal/day)")
#Small working frame: wine consumption, country name, life expectancy.
df <- cbind(diet.data[i], diet.data$Countries, diet.data$Life.expectancy.at.birth..years..both.sexes)
names(df) <- c("wine_consumption", "countries", "life_expectancy")
#Order by the column vector itself rather than the one-column data frame.
ordered_df <- df[order(df$wine_consumption), ]
#Countries whose wine consumption exceeds the boxplot's upper whisker.
ordered_df[ordered_df$wine_consumption > wine.box$stats[5], ]
#Given the boxplot, these are the countries with "outlier-level" wine consumption, and their life expectancy.
#Every country with high wine consumption has a life expectancy of over 70.
#It is important to also notice, however, that all of these countries (minus Argentina) are a part of Europe,
#where wine consumption is on average higher than the rest of the world.
#Given these results, despite the high correlation, it's hard to tell whether we see any good indication that greater wine consumption leads to longer life.
#*************************************
#Data validation
#Merge country code with validation datasets.
#Attach the main-dataset country name to each validation table so both can be joined to diet.data.
#NOTE(review): merge() defaults to an inner join (all = FALSE), so countries missing from the
#mapping tables are silently dropped from diet.data below — confirm this is intended.
data.validation.growth_rate <- merge(data.validation.growth_rate, data.validation.country.mapping[,c("Population.Growth.Rate", "Country_main_dataset")], by.x = "Country.Name", by.y = "Population.Growth.Rate")
data.validation.life_expect <- merge(data.validation.life_expect, data.validation.country.mapping[,c("Country_Life_Expectancy", "Country_main_dataset")], by.x = "Country", by.y = "Country_Life_Expectancy")
#Merge validating data into the main country dataset (growth rates first, then life expectancy).
diet.data <- merge(diet.data, data.validation.growth_rate[,c("Country_main_dataset", "Growth_rate_2000", "Growth_rate_2005", "Growth_rate_2010")], by.x = "Countries", by.y = "Country_main_dataset")
diet.data <- merge(diet.data, data.validation.life_expect[,c("Country_main_dataset", "Life_Expectancy")], by.x = "Countries", by.y = "Country_main_dataset")
#Now compare data validation sources to main dataset
#Life expectancy
#Percent difference between the original life expectancy and the validation value,
#expressed relative to the original.
diet.data$Life_Expectancy_pct_diff <- with(
  diet.data,
  (Life.expectancy.at.birth..years..both.sexes - Life_Expectancy) / Life.expectancy.at.birth..years..both.sexes
)
summary(diet.data$Life_Expectancy_pct_diff)
#Compare the two distributions and the percent differences visually.
hist(diet.data$Life_Expectancy, main = "Data Validation Life Expectancy Distribution")
hist(diet.data$Life.expectancy.at.birth..years..both.sexes, main = "Data Validation Original Life Expectancy Distribution")
hist(diet.data$Life_Expectancy_pct_diff, main = "Percent Difference Life Expectancy")
#Life expectancy in the original dataset appears to be systematically lower than the 2016 life expectancies downloaded from the CIA factbook.
#This makes sense given that we believe the life expectancy in the original data to be from an earlier period, likely 2000 - 2005 based on the other variables, and that we expect life expectancy to increase over time.
#Gather the growth-rate columns and compute the percent difference of the original
#growth rate against each known-year growth rate (relative to the original).
Growth.rate.examination <- diet.data[, c("Countries", "Growth_rate_2000", "Growth_rate_2005", "Growth_rate_2010", "Population.annual.growth.rate....")]
Growth.rate.examination$Growth_rate_pct_diff_2000 <- with(Growth.rate.examination, (Population.annual.growth.rate.... - Growth_rate_2000) / Population.annual.growth.rate....)
Growth.rate.examination$Growth_rate_pct_diff_2005 <- with(Growth.rate.examination, (Population.annual.growth.rate.... - Growth_rate_2005) / Population.annual.growth.rate....)
Growth.rate.examination$Growth_rate_pct_diff_2010 <- with(Growth.rate.examination, (Population.annual.growth.rate.... - Growth_rate_2010) / Population.annual.growth.rate....)
#Summary statistics of each growth rate
summary(Growth.rate.examination$Population.annual.growth.rate....)
summary(Growth.rate.examination$Growth_rate_2000)
summary(Growth.rate.examination$Growth_rate_2005)
summary(Growth.rate.examination$Growth_rate_2010)
#Histograms of percent difference with each known year growth rate
summary(Growth.rate.examination$Growth_rate_pct_diff_2000)
hist(Growth.rate.examination$Growth_rate_pct_diff_2000, main = "Histogram of Growth Rate % Diff with 2000 Growth Rate")
hist(Growth.rate.examination$Growth_rate_pct_diff_2005, main = "Histogram of Growth Rate % Diff with 2005 Growth Rate")
hist(Growth.rate.examination$Growth_rate_pct_diff_2010, main = "Histogram of Growth Rate % Diff with 2010 Growth Rate")
#Histograms of each growth rate
hist(Growth.rate.examination$Population.annual.growth.rate...., main = "Histogram of Original Growth Rate")
hist(Growth.rate.examination$Growth_rate_2000, main = "Histogram of 2000 Growth Rate")
hist(Growth.rate.examination$Growth_rate_2005, main = "Histogram of 2005 Growth Rate")
hist(Growth.rate.examination$Growth_rate_2010, main = "Histogram of 2010 Growth Rate")
#Correlation between main dataset growth rate and year 2000 growth rate
#Fix: the code previously correlated against Growth_rate_2010, contradicting this
#comment and the year-2000 conclusion drawn below.
cor(Growth.rate.examination$Population.annual.growth.rate...., Growth.rate.examination$Growth_rate_2000)
#The population growth rate distribution from the original dataset looks the most similar to the 2000 population growth rate.
#This makes sense and is a good sign of data validation given that other variables appear to be measures of this time period.
#***************************
#Model building - Test H0: Average daily Wine / Alcohol consumption has no impact on life expectancy.
#Model 1 - parsimonious model - life expectancy ~ wine
#Start with a simple linear regression and build up from there comparing models along the way.
#Model 1: simple linear regression of life expectancy at birth on wine calories per day.
wine.model.1 <- lm(Life.expectancy.at.birth..years..both.sexes ~ Wine..kcal.day., data = diet.data)
summary(wine.model.1)
#Standard lm diagnostic plots (residuals vs. fitted, Q-Q, scale-location, leverage).
plot(wine.model.1)
#Breusch-Pagan test for heteroskedastic errors (lmtest package).
bptest(wine.model.1)
#Durbin-Watson test for correlated errors (car package).
durbinWatsonTest(wine.model.1)
#Look at coefficient estimates with heteroskedasticity robust standard errors because the Breusch-Pagan test has a marginally significant result suggesting that heteroskedasticity of errors may be a problem.
coeftest(wine.model.1, vcov = vcovHC)
#Comment on parsimonious model.
#The first model shows that wine consumption at the country level has quite a strong relationship with life expectancy.
#The coefficient estimate for wine is .195 which is statistically significant at the p < .001 level. The statistical significance of the estimate holds when heteroskedasticity robust standard errors are used.
#The wine consumption variable is measured in calories, so the interpretation of this coefficient is that one additional calorie of wine consumption per day across the population is associated with a 0.19 year increase in life expectancy.
#A glass of wine has about 120 calories so this coefficient indicates that on average a population that drinks one additional glass of wine per day is expected to have a life expectancy of about 22.8 years greater, all else equal.
#However, this interpretation relies on the assumption that there is a linear relationship between average wine consumption and population life expectancy which may or may not be true.
#The diagnostic residuals vs. fitted values plot shows that heteroskedasticity may be a problem. Part of this result is caused by the fact that there are so many countries where average wine consumption is zero.
#As a result, we may want to use the generalized alcohol consumption variable that has fewer observations of zero.
#The Breusch pagan test confirms that heteroskedasticity of errors is borderline problematic.
#The Durbin Watson test also gives a statistically significant result which means we should reject the null hypothesis of the test that the errors are not correlated. This is a bit of a strange result that we may want to look into further.
#Our theoretical foundation could also support the use of the generalized alcohol consumption variable as the main independent variable in the model as it may be able to extend our hypothesis to cultures where wine consumption is not common, but instead other alcoholic beverages are consumed at group meals.
#Despite the statistically significant coefficient estimate, there is by no means any evidence of any causal relationship between wine consumption and life expectancy at this point.
#It is interesting to see that there is a relationship of some sort between the two variables, but this could be just a result of both variables being affected by a third variable, or simply a phenomenon due to chance, or any other reasonable scenario that can be thought up at this point.
#Model 1.1 - Sensitivity analysis - Healthy life expectancy ~ wine
#This analysis is to test if Healthy life expectancy is a proxy for Life expectancy
#There is a high correlation between Healthy life expectancy and Life expectancy at birth
#High correlation between healthy life expectancy and life expectancy at birth.
cor(diet.data$Healthy.life.expectancy..HALE..at.birth..years..both.sexes, diet.data$Life.expectancy.at.birth..years..both.sexes)
#Start with a simple linear regression and build up from there comparing models along the way.
#Fix: reference the response through the data argument (consistent with every other model
#in this file) instead of mixing diet.data$ on the left-hand side with data = diet.data.
wine.model.1.1 <- lm(Healthy.life.expectancy..HALE..at.birth..years..both.sexes ~ Wine..kcal.day., data = diet.data)
summary(wine.model.1.1)
#Standard lm diagnostic plots.
plot(wine.model.1.1)
#Breusch-Pagan test for heteroskedastic errors; Durbin-Watson test for correlated errors.
bptest(wine.model.1.1)
durbinWatsonTest(wine.model.1.1)
#Comment on using Healthy life expectancy instead of Life expectancy
#Outcome of the analysis is very similar to Model #1. This validates that the Healthy life expectancy and Life expectancy data are consistent.
#Model 2 - parsimonious model using alcohol consumption- life expectancy ~ alcohol
#Model 2: simple linear regression of life expectancy on alcoholic-beverage calories per day.
alc.model.1 <- lm(Life.expectancy.at.birth..years..both.sexes ~ Alcoholic.Beverages..kcal.day., data = diet.data)
summary(alc.model.1)
#Standard lm diagnostic plots.
plot(alc.model.1)
#Breusch-Pagan test for heteroskedastic errors (lmtest package).
bptest(alc.model.1)
#Durbin-Watson test for correlated errors (car package).
durbinWatsonTest(alc.model.1)
#Look at coefficient estimates with heteroskedasticity robust standard errors because the Breusch-Pagan test has a marginally significant result suggesting that heteroskedasticity of errors may be a problem.
coeftest(alc.model.1, vcov = vcovHC)
#Comment on the second parsimonious model.
#The coefficient estimate for alcohol consumption is .065 indicating that for a country where average daily alcohol consumption across the population is 1 calorie higher is expected to have a higher life expectancy by .065 years, holding all else equal.
#This coefficient is statistically significant at p < .001 level using heteroskedasticity robust errors.
#Again, the diagnostic residuals vs. fitted values plot shows that heteroskedasticity may continue to be a problem.
#The Breusch-Pagan test however yields a non-statistically significant result which means that we fail to reject the null hypothesis that the variance of the errors is stable across levels of fitted values.
#The Durbin-Watson test again shows that the errors are correlated. We should be sure to keep an eye on this after adding controls to the model.
#Model 3 - alcohol consumption with control for GNP - life expectancy ~ alcohol + GNP
#Model 3: life expectancy on alcohol consumption, controlling for GNP per capita.
alc.model.2 <- lm(Life.expectancy.at.birth..years..both.sexes ~ Alcoholic.Beverages..kcal.day. + Gross.national.income.per.capita..PPP.international..., data = diet.data)
summary(alc.model.2)
#Standard lm diagnostic plots.
plot(alc.model.2)
#Breusch-Pagan test for heteroskedastic errors; Durbin-Watson test for correlated errors.
bptest(alc.model.2)
durbinWatsonTest(alc.model.2)
#Look at coefficient estimates with heteroskedasticity robust standard errors because the Breusch-Pagan test has a marginally significant result suggesting that heteroskedasticity of errors may be a problem.
coeftest(alc.model.2, vcov = vcovHC)
#Comment on the model including a wealth control.
#This model drastically changes the impact of alcoholic beverage consumption on life expectancy.
#The coefficient estimate of the impact of alcoholic beverage consumption on life expectancy decreases to .006 and is no longer statistically significant.
#Heteroskedasticity of errors and correlation continue to be a problem.
#The residuals vs. fitted plot also seems to show a violation of the zero conditional mean assumption.
#The presence of heteroskedasticity of errors and a violation of the zero conditional mean assumption may indicate a non-linear relationship in the population.
#Including wealth as a control seems to have pulled away what had seemed to be a strong linear relationship between alcohol consumption and life expectancy.
#This is a reasonable result, as it seems that wealth would be a key driver for both alcohol consumption and life expectancy.
#Adding the wealth control, therefore, reveals that alcohol consumption in and of itself may not be as strongly related with life expectancy as previously suspected.
#Model 4 - non-linear alcohol consumption with control for GNP - life expectancy ~ alcohol^2 + alcohol + GNP
#Model 4: adds a quadratic alcohol term; I() protects ^ from formula interpretation.
alc.model.3 <- lm(Life.expectancy.at.birth..years..both.sexes ~ I(Alcoholic.Beverages..kcal.day.^2) + Alcoholic.Beverages..kcal.day. + Gross.national.income.per.capita..PPP.international..., data = diet.data)
summary(alc.model.3)
#Standard lm diagnostic plots.
plot(alc.model.3)
#Breusch-Pagan test for heteroskedastic errors; Durbin-Watson test for correlated errors.
bptest(alc.model.3)
durbinWatsonTest(alc.model.3)
#Look at coefficient estimates with heteroskedasticity robust standard errors because the Breusch-Pagan test has a marginally significant result suggesting that heteroskedasticity of errors may be a problem.
coeftest(alc.model.3, vcov = vcovHC)
#Comment on model including non-linear effect of alcohol consumption
#Including a non-linear effect of alcohol consumption in the model does not improve the problems with heteroskedasticity and correlation of errors.
#The alcoholic beverage consumption coefficient estimates are still not significant.
#The residuals vs. fitted values plot shows heteroskedasticity of errors and the Breusch-Pagan test confirms the errors are heteroskedastic.
#Therefore, we need to be sure to use heteroskedasticity robust standard errors to assess statistical significance of the coefficient estimates in the model.
#The Durbin-Watson test shows that correlation remains a problem
##NOTE FOR NEXT TEAM MEMBER TO PICK UP ANALYSIS - WHAT TO DO ABOUT CORRELATION.
#Model 5 - log transformation of alcohol consumption with control for GNP - life expectancy ~ log(alcohol) + GNP
#First, remove observations of zero alcoholic beverage consumption so can implement a log transformation.
#Drop zero-consumption countries: log() requires strictly positive values.
#diet.data.2 is reused by Model 6 below.
diet.data.2 <- diet.data[diet.data$Alcoholic.Beverages..kcal.day. > 0, ]
#Model 5: life expectancy on log(alcohol consumption), controlling for GNP.
alc.model.4 <- lm(Life.expectancy.at.birth..years..both.sexes ~ log(Alcoholic.Beverages..kcal.day.) + Gross.national.income.per.capita..PPP.international..., data = diet.data.2)
summary(alc.model.4)
#Standard lm diagnostic plots.
plot(alc.model.4)
#Breusch-Pagan test for heteroskedastic errors; Durbin-Watson test for correlated errors.
bptest(alc.model.4)
durbinWatsonTest(alc.model.4)
#Look at coefficient estimates with heteroskedasticity robust standard errors because the Breusch-Pagan test has a marginally significant result suggesting that heteroskedasticity of errors may be a problem.
coeftest(alc.model.4, vcov = vcovHC)
#Comment on the model
#Including a log transformation for alcoholic beverage consumption does not fix the problem of heteroskedasticity of errors as evidenced by the residuals vs. fitted values plot and the Breusch Pagan test.
#The Durbin Watson test also shows that correlation of errors remains a problem.
#Model 6 - log transformation of alcohol consumption with control for log transformation of GNP - life expectancy ~ log(alcohol) + log(GNP)
#Estimate the model
#Model 6: log-log specification — both alcohol consumption and GNP are positively skewed.
alc.model.5 <- lm(Life.expectancy.at.birth..years..both.sexes ~ log(Alcoholic.Beverages..kcal.day.) + log(Gross.national.income.per.capita..PPP.international...), data = diet.data.2)
summary(alc.model.5)
#Standard lm diagnostic plots.
plot(alc.model.5)
#Breusch-Pagan test for heteroskedastic errors; Durbin-Watson test for correlated errors.
bptest(alc.model.5)
durbinWatsonTest(alc.model.5)
#Look at coefficient estimates with heteroskedasticity robust standard errors because the Breusch-Pagan test has a marginally significant result suggesting that heteroskedasticity of errors may be a problem.
coeftest(alc.model.5, vcov = vcovHC)
#Comment on the model.
#Using a log transformation on GNP and alcohol consumption makes sense because each of these variables is positively skewed.
#After making these transformations, the Durbin-Watson test shows that correlated errors seems to have been solved.
#The residuals vs. fitted values plot shows that heteroskedasticity of errors continues to be a problem. The Breusch-Pagan test confirms this result.
#Using heteroskedasticity robust standard errors, the coefficients are both statistically significant.
#The coefficient estimate on log(alcohol consumption) is -1.271 which is statistically significant (p = .012 using heteroskedasticity robust errors).
#The interpretation of this coefficient is that a one percent increase in alcohol consumption corresponds with a decrease of 1.271 years in life expectancy while holding GNP equal.
#While this model outputs a statistically significant coefficient estimate of alcohol consumption, now its relationship with life expectancy is reversed, making it fairly suspect that there is a real meaningful relationship between the two variables.
#In contrast, the wealth control, the GNP, still retained a similar relationship with life expectancy, which is consistent with its coefficient estimates in previous models.
#Conclusion
#TBD
# Pros
# TBD
#Cons:
# Data Collection Constraints:
#. Health expectancy estimates based on self-reported health status information are generally not comparable across countries due to differences in survey instruments
#and cultural differences in reporting of health.
#. Comparability problems with self-report health status data relate not only to differences in survey design and methods, but more fundamentally to
#unmeasured differences in expectations and norms for health references
#. The meaning that different populations attach to the labels used for each of the response categories, such as mild, moderate or severe, in self-reported questions can vary greatly.
#. Calculation of healthy life expectancy at birth is based on age-specific death rates for a particular calendar period together with severity-adjusted health state prevalences by age.
# Data Collection Constraint Mitigation:
#. To mitigate the risk, data is validated against another datasource (http://data.worldbank.org/data-catalog/health-nutrition-and-population-statistics). Analysis is outlined in Data Validation Section above.
# Control Variables:
#. We expect positive linear relationship between wine consumption and life expectancy only to a certain extent, beyond that there will be other negative implications.
#We need a control variable to balance that out. For example, a variable that captures negative impact on life expectancy when more calories are consumed.
#create interesting subset to data
#Build a subset of health-indicator variables for an extended model.
DavidSubset <- diet.data[, c("Countries", "Alcoholic.Beverages..kcal.day.", "Gross.national.income.per.capita..PPP.international...", "Life.expectancy.at.birth..years..both.sexes", "Systolic.blood.pressure..adults.aged.15.and.above..men..mmHg.", "Obesity.prevalence..men....", "Mean.total.cholesterol..men..mg.dl...2005")]
summary(DavidSubset)
#Shorter working names for the selected columns.
colnames(DavidSubset) <- c("Countries", "alcohol_consumption", "GNP_capita", "life_expectancy", "blood_pressure", "obesity_pcnt", "cholesterol_mean_total")
#Distributions of the added health indicators.
hist(DavidSubset$cholesterol_mean_total, breaks = 30)
hist(DavidSubset$blood_pressure, breaks = 30)
hist(DavidSubset$obesity_pcnt, breaks = 30)
with(DavidSubset, cor(alcohol_consumption, GNP_capita))
alcohol.gnp.scatter <- ggplot(
  data = DavidSubset,
  aes(x = GNP_capita, y = alcohol_consumption)
)
alcohol.gnp.scatter +
  geom_point(colour = "navy") +
  labs(title = "Scatterplot of GNP and Alcohol Consumption per Day")
#Echo the full subset (prints every row).
DavidSubset
# added a few health indicator variables
davidmodel <- lm(life_expectancy ~ GNP_capita + blood_pressure + obesity_pcnt + alcohol_consumption + cholesterol_mean_total, data = DavidSubset)
summary(davidmodel)
# Alcohol is strongly linked to GNP but strongly not linked to life expectancy... which is interesting.
# Alcohol consumption may be a good proxy for being a wealthy country, thus it is a good indication of healthy life or life expectancy.
# This is maybe similar to the idea of looking at the size of a country's entertainment industry as a proxy for its health/success/happiness.
#(dataset-join metadata row, commented out so the file parses as R)
#| /R Scripts/w271 Final Project_v1.1.R | no_license | dskarbrevik/Geographic-Regression-Analysis | R | false | false | 27,374 | r |
#Date: November 2, 2016
#Author: Nick Chen, Johnny Yeo, Rama Thamman, David Skarbrevik
library(xlsx)
library(ggplot2)
library(lmtest)
library(car)
library(sandwich)
#Research Question:
# Do people who live in cultures that drink more wine and eat more dessert live longer lives on average?
# Perhaps a better question: Is alcohol consumption a valid proxy/indirect indicator of health in a country?
# Justification of using our dataset: Because our data has many indicators of health (GNP, life expectancy, obesity, calorie intake, etc.) as well as data on the amount of alcohol consumption of many countries, this is a great dataset to test if alcohol consumption is a valid indirect proxy for health in the way that GNP is.
### Dataset Used: World Nutrition Data
#. Dataset has one "observation per country, but more than 50 countries
#. We don't have a lot of information on how this data was acquired, but a way to validate it is by crosschecking with another more documented dataset
#(http://data.worldbank.org/data-catalog/health-nutrition-and-population-statistics).
#. Check that total calorie consumption is greater than or equal to the sum of calories in all of the individual food categories.
##Motivation/Thoughts about this research:
#. Some research has seemed to show that moderate wine consumption seems to be associated with a lower incidence of heart disease and potentially
#longer life spans (http://www.telegraph.co.uk/news/health/11066516/Glass-of-wine-with-dinner-helps-you-live-longer-scientists-claim.html).
#. Critiques of this research are that the wine's effect on health outcomes is not a causal effect, but instead that the true underlying effect is more related to stronger social ties and wine consumption is simply associated with having a strong social network and taking more meals with family and friends.
#. The idea behind the main research question is to investigate the idea that cultures where people take more meals with family and friends have better health outcomes.
#. We will use wine consumption and potentially sugar consumption in some form to serve as proxy variables for eating more family meals or meals with friends.
#. The idea behind this is that you are more likely to drink wine or eat dessert with your meal when you are eating with friends or family.
#. Other research has indicated that strong social networks are an important factor in living a healthy life (http://uncnews.unc.edu/2016/01/04/social-networks-as-important-as-exercise-and-diet-across-the-span-of-our-lives/).
##Exploratory Data Analysis, Domain Research, and Potential Model
#. Look at correlations between wine / alcohol and sugar consumption and life expectancy
#. Check that the data makes sense
#. Will want to consider the possibility of nonlinear effects of wine / alcohol and sugar consumption on life expectancy at birth
#. Control for obesity prevalence
#. Consider interaction term between wine / alcohol consumption and sugar consumption
#. Perhaps one or the other could have a negative effect individually, but moderate consumption of both may be an even better proxy for taking more meals with family and friends than moderate consumption of one or the other
#Load the data
#setwd('~/Desktop/UC Berkeley/Applied Regression and Time Series Analysis/Lab 3/Health and Diet Data/')
#setwd('C:/Users/rthamman/Dropbox (Personal)/Berkeley/Courses/W271/Labs/Lab 3/Git/Data')
#setwd('~/Documents/MIDS/w271/w271_final_proj/W271_Lab3/Data')
#getwd()
#Load the main diet/health dataset and the third-party validation workbook.
#Paths are relative to the working directory (see the commented setwd() calls above).
diet.data <- read.csv("diet-forcsv - Sheet 1.csv")
#read.xlsx is from the xlsx package; each sheet holds one validation table.
data.validation.country.mapping <- read.xlsx("Data from Third Parties for Validation.xlsx", sheetName = "Country_Mapping")
data.validation.life_expect <- read.xlsx("Data from Third Parties for Validation.xlsx", sheetName = "Life Expectancy")
data.validation.growth_rate <- read.xlsx("Data from Third Parties for Validation.xlsx", sheetName = "Population Growth Rate")
#*************************************
#Missing values check
#Count NA values per column; vapply is type-stable (sapply's return type varies with input).
na.check <- vapply(diet.data, function(x) sum(is.na(x)), numeric(1))
if (sum(na.check) == 0) {
  cat("No NA values in this data.\n")
} else {
  #Fix: a bare `na.check` inside a braced block is never auto-printed at top level,
  #so the per-column NA counts were silently discarded; print() them explicitly.
  print(na.check)
  cat("There are a total of", sum(na.check), "NAs in the data.\n")
}
#Row count vs. complete-case count (newlines added so the messages do not run together).
cat("Number of rows: ", nrow(diet.data), "\n")
cat("Number of complete cases: ", nrow(diet.data[complete.cases(diet.data), ]), "\n")
#There are no missing values
#Univariate EDA
#Wine consumption
#Summary statistics for variables of interest
#Distribution of wine calories per day, plus count of zero-consumption countries.
summary(diet.data$Wine..kcal.day.)
sum(diet.data$Wine..kcal.day. == 0)
#There are 32 countries with zero wine consumption. This could be because of bottom coding to cover for null values.
wine.hist <- ggplot(data = diet.data, aes(x = Wine..kcal.day.))
wine.hist +
  geom_histogram(fill = "navy", colour = "white") +
  labs(title = "Histogram of Wine Calories per Day", y = "Number of Countries")
#life expectancy
#Distribution of life expectancy at birth (negatively skewed).
summary(diet.data$Life.expectancy.at.birth..years..both.sexes)
#The life expectancy variable shows a negative skew (because no one lives to be 160).
life.expect.all.hist <- ggplot(
  data = diet.data,
  aes(x = Life.expectancy.at.birth..years..both.sexes)
)
life.expect.all.hist +
  geom_histogram(fill = "navy", colour = "white") +
  labs(title = "Histogram of Life Expectancy at Birth", y = "Number of Countries")
#Alcoholic beverages calories per day
#Distribution of alcoholic-beverage calories per day, plus count of zero-consumption countries.
summary(diet.data$Alcoholic.Beverages..kcal.day.)
sum(diet.data$Alcoholic.Beverages..kcal.day. == 0)
#Like wine, there are a lot of countries with zero or very little consumption of alcoholic beverages.
Alcoholic.bevs.cals.hist <- ggplot(data = diet.data, aes(x = Alcoholic.Beverages..kcal.day.))
#Fix: plot title previously misspelled "Alchoholic".
Alcoholic.bevs.cals.hist + geom_histogram(fill = "navy", colour = "white") + ggtitle("Histogram of Alcoholic Beverages Calories per Day") + labs(y = "Number of Countries")
#GNP per capita
#Distribution of gross national income per capita (PPP).
summary(diet.data$Gross.national.income.per.capita..PPP.international...)
#GNP histogram
GNP.hist <- ggplot(
  data = diet.data,
  aes(x = Gross.national.income.per.capita..PPP.international...)
)
GNP.hist +
  geom_histogram(fill = "navy", colour = "white") +
  labs(title = "Histogram of GNP", y = "Number of Countries")
#*************************************
#Multivariate EDA
#Correlations
#correlation between wine consumption and alcoholic beverage consumption per day and life expectancy at birth is quite large at .496.
#Correlations of wine and alcohol consumption with life expectancy at birth.
with(diet.data, cor(Wine..kcal.day., Life.expectancy.at.birth..years..both.sexes))
with(diet.data, cor(Alcoholic.Beverages..kcal.day., Life.expectancy.at.birth..years..both.sexes))
#look at correlation between wine / alcohol consumption and GNP to see if the above result appears to be a result of income
#There are very high correlations between wine and alcohol consumption with GNP, both being above 0.6.
with(diet.data, cor(Gross.national.income.per.capita..PPP.international..., Wine..kcal.day.))
with(diet.data, cor(Gross.national.income.per.capita..PPP.international..., Alcoholic.Beverages..kcal.day.))
#diet.data$Alcoholic.Beverages..kcal.day. > 0
wine.gnp.scatter <- ggplot(
  data = diet.data,
  aes(x = Gross.national.income.per.capita..PPP.international..., y = Wine..kcal.day.)
)
wine.gnp.scatter +
  geom_point(colour = "navy") +
  labs(title = "Scatterplot of GNP and Wine Consumption per Day")
#further analysis of correlation between wine / alcohol consumption and life expectancy at birth
#Identify countries with "outlier-level" wine consumption and inspect their life expectancy.
#Fix: select the wine column by name instead of the magic index 52 (robust to column
#reordering; column 52 was the wine column, as the names below confirm).
i <- which(names(diet.data) == "Wine..kcal.day.")
#wine.box$stats[5] is the boxplot's upper whisker. (Typo "Consumtion" fixed in title.)
wine.box <- boxplot(diet.data[i], main = "Boxplot of Wine Consumption (kcal/day)")
#Small working frame: wine consumption, country name, life expectancy.
df <- cbind(diet.data[i], diet.data$Countries, diet.data$Life.expectancy.at.birth..years..both.sexes)
names(df) <- c("wine_consumption", "countries", "life_expectancy")
#Order by the column vector itself rather than the one-column data frame.
ordered_df <- df[order(df$wine_consumption), ]
#Countries whose wine consumption exceeds the boxplot's upper whisker.
ordered_df[ordered_df$wine_consumption > wine.box$stats[5], ]
#Given the boxplot, these are the countries with "outlier-level" wine consumption, and their life expectancy.
#Every country with high wine consumption has a life expectancy of over 70.
#It is important to also notice, however, that all of these countries (minus Argentina) are a part of Europe,
#where wine consumption is on average higher than the rest of the world.
#Given these results, despite the high correlation, it's hard to tell whether we see any good indication that greater wine consumption leads to longer life.
#*************************************
#Data validation
#Merge country code with validation datasets.
#Attach the main-dataset country name to each validation table so both can be joined to diet.data.
#NOTE(review): merge() defaults to an inner join (all = FALSE), so countries missing from the
#mapping tables are silently dropped from diet.data below — confirm this is intended.
data.validation.growth_rate <- merge(data.validation.growth_rate, data.validation.country.mapping[,c("Population.Growth.Rate", "Country_main_dataset")], by.x = "Country.Name", by.y = "Population.Growth.Rate")
data.validation.life_expect <- merge(data.validation.life_expect, data.validation.country.mapping[,c("Country_Life_Expectancy", "Country_main_dataset")], by.x = "Country", by.y = "Country_Life_Expectancy")
#Merge validating data into the main country dataset (growth rates first, then life expectancy).
diet.data <- merge(diet.data, data.validation.growth_rate[,c("Country_main_dataset", "Growth_rate_2000", "Growth_rate_2005", "Growth_rate_2010")], by.x = "Countries", by.y = "Country_main_dataset")
diet.data <- merge(diet.data, data.validation.life_expect[,c("Country_main_dataset", "Life_Expectancy")], by.x = "Countries", by.y = "Country_main_dataset")
#Now compare data validation sources to main dataset
#Life expectancy
#Percent difference between the original life expectancy and the validation value,
#expressed relative to the original.
diet.data$Life_Expectancy_pct_diff <- with(
  diet.data,
  (Life.expectancy.at.birth..years..both.sexes - Life_Expectancy) / Life.expectancy.at.birth..years..both.sexes
)
summary(diet.data$Life_Expectancy_pct_diff)
#Compare the two distributions and the percent differences visually.
hist(diet.data$Life_Expectancy, main = "Data Validation Life Expectancy Distribution")
hist(diet.data$Life.expectancy.at.birth..years..both.sexes, main = "Data Validation Original Life Expectancy Distribution")
hist(diet.data$Life_Expectancy_pct_diff, main = "Percent Difference Life Expectancy")
#Life expectancy in the original dataset appears to be systematically lower than the 2016 life expectancies downloaded from the CIA factbook.
#This makes sense given that we believe the life expectancy in the original data to be from an earlier period, likely 2000 - 2005 based on the other variables, and that we expect life expectancy to increase over time.
#Gather the growth-rate columns and compute the percent difference of the original
#growth rate against each known-year growth rate (relative to the original).
Growth.rate.examination <- diet.data[, c("Countries", "Growth_rate_2000", "Growth_rate_2005", "Growth_rate_2010", "Population.annual.growth.rate....")]
Growth.rate.examination$Growth_rate_pct_diff_2000 <- with(Growth.rate.examination, (Population.annual.growth.rate.... - Growth_rate_2000) / Population.annual.growth.rate....)
Growth.rate.examination$Growth_rate_pct_diff_2005 <- with(Growth.rate.examination, (Population.annual.growth.rate.... - Growth_rate_2005) / Population.annual.growth.rate....)
Growth.rate.examination$Growth_rate_pct_diff_2010 <- with(Growth.rate.examination, (Population.annual.growth.rate.... - Growth_rate_2010) / Population.annual.growth.rate....)
#Summary statistics of each growth rate
summary(Growth.rate.examination$Population.annual.growth.rate....)
summary(Growth.rate.examination$Growth_rate_2000)
summary(Growth.rate.examination$Growth_rate_2005)
summary(Growth.rate.examination$Growth_rate_2010)
#Histograms of percent difference with each known year growth rate
summary(Growth.rate.examination$Growth_rate_pct_diff_2000)
hist(Growth.rate.examination$Growth_rate_pct_diff_2000, main = "Histogram of Growth Rate % Diff with 2000 Growth Rate")
hist(Growth.rate.examination$Growth_rate_pct_diff_2005, main = "Histogram of Growth Rate % Diff with 2005 Growth Rate")
hist(Growth.rate.examination$Growth_rate_pct_diff_2010, main = "Histogram of Growth Rate % Diff with 2010 Growth Rate")
#Histograms of each growth rate
hist(Growth.rate.examination$Population.annual.growth.rate...., main = "Histogram of Original Growth Rate")
hist(Growth.rate.examination$Growth_rate_2000, main = "Histogram of 2000 Growth Rate")
hist(Growth.rate.examination$Growth_rate_2005, main = "Histogram of 2005 Growth Rate")
hist(Growth.rate.examination$Growth_rate_2010, main = "Histogram of 2010 Growth Rate")
#Correlation between main dataset growth rate and year 2000 growth rate
#Fix: the code previously correlated against Growth_rate_2010, contradicting this
#comment and the year-2000 conclusion drawn below.
cor(Growth.rate.examination$Population.annual.growth.rate...., Growth.rate.examination$Growth_rate_2000)
#The population growth rate distribution from the original dataset looks the most similar to the 2000 population growth rate.
#This makes sense and is a good sign of data validation given that other variables appear to be measures of this time period.
#***************************
#Model building - Test H0: Average daily Wine / Alcohol consumption has no impact on life expectancy.
#Model 1 - parsimonious model - life expectancy ~ wine
#Start with a simple linear regression and build up from there comparing models along the way.
wine.model.1 <- lm(Life.expectancy.at.birth..years..both.sexes ~ Wine..kcal.day., data = diet.data)
summary(wine.model.1)
plot(wine.model.1)
bptest(wine.model.1)
durbinWatsonTest(wine.model.1)
#Look at coefficient estimates with heteroskedasticity robust standard errors because the Breusch-Pagan test has a marginally significant result suggesting that heteroskedasticity of errors may be a problem.
coeftest(wine.model.1, vcov = vcovHC)
#Comment on parsimonious model.
#The first model shows that wine consumption at the country level has quite a strong relationship with life expectancy.
#The coefficient estimate for wine is .195 which is statistically significant at the p < .001 level. The statistical significance of the estimate holds when heteroskedasticity robust standard errors are used.
#The wine consumption variable is measured in calories, so the interpretation of this coefficient is that one additional calorie of wine consumption per day across the population is associated with a 0.19 year increase in life expectancy.
#A glass of wine has about 120 calories so this coefficient indicates that on average a population that drinks one additional glass of wine per day is expected to have a life expectancy of about 22.8 years greater, all else equal.
#However, this interpretation relies on the assumption that there is a linear relationship between average wine consumption and population life expectancy which may or may not be true.
#The diagnostic residuals vs. fitted values plot shows that heteroskedasticity may be a problem. Part of this result is caused by the fact that there are so many countries where average wine consumption is zero.
#As a result, we may want to use the generalized alcohol consumption variable that has fewer observations of zero.
#The Breusch-Pagan test confirms that heteroskedasticity of errors is borderline problematic.
#The Durbin-Watson test also gives a statistically significant result which means we should reject the null hypothesis of the test that the errors are not correlated. This is a bit of a strange result that we may want to look into further.
#Our theoretical foundation could also support the use of the generalized alcohol consumption variable as the main independent variable in the model as it may be able to extend our hypothesis to cultures where wine consumption is not common, but instead other alcoholic beverages are consumed at group meals.
#Despite the statistically significant coefficient estimate, there is by no means any evidence of any causal relationship between wine consumption and life expectancy at this point.
#It is interesting to see that there is a relationship of some sort between the two variables, but this could be just a result of two variables both affected by a third variable, or simply a phenomenon due to chance, or any other reasonable scenario that can be thought up at this point.
#Model 1.1 - Sensitivity analysis - Healthy life expectancy ~ wine
#This analysis is to test if Healthy life expectancy is a proxy for Life expectancy
#There is a high correlation between Healthy life expectancy and Life expectancy at birth
cor(diet.data$Healthy.life.expectancy..HALE..at.birth..years..both.sexes, diet.data$Life.expectancy.at.birth..years..both.sexes)
#Start with a simple linear regression and build up from there comparing models along the way.
#NOTE(review): the response below is written as diet.data$... even though data = diet.data is supplied; lm() tolerates this, but a bare column name would be cleaner.
wine.model.1.1 <- lm(diet.data$Healthy.life.expectancy..HALE..at.birth..years..both.sexes ~ Wine..kcal.day., data = diet.data)
summary(wine.model.1.1)
plot(wine.model.1.1)
bptest(wine.model.1.1)
durbinWatsonTest(wine.model.1.1)
#Comment on using Healthy life expectancy instead of Life expectancy
#Outcome of the analysis is very similar to Model #1. This validates that the Healthy life expectancy and Life expectancy data are consistent.
#Model 2 - parsimonious model using alcohol consumption- life expectancy ~ alcohol
alc.model.1 <- lm(Life.expectancy.at.birth..years..both.sexes ~ Alcoholic.Beverages..kcal.day., data = diet.data)
summary(alc.model.1)
plot(alc.model.1)
bptest(alc.model.1)
durbinWatsonTest(alc.model.1)
#Look at coefficient estimates with heteroskedasticity robust standard errors because the Breusch-Pagan test has a marginally significant result suggesting that heteroskedasticity of errors may be a problem.
coeftest(alc.model.1, vcov = vcovHC)
#Comment on the second parsimonious model.
#The coefficient estimate for alcohol consumption is .065 indicating that a country where average daily alcohol consumption across the population is 1 calorie higher is expected to have a higher life expectancy by .065 years, holding all else equal.
#This coefficient is statistically significant at p < .001 level using heteroskedasticity robust errors.
#Again, the diagnostic residuals vs. fitted values plot shows that heteroskedasticity may continue to be a problem.
#The Breusch-Pagan test however yields a non-statistically significant result which means that we fail to reject the null hypothesis that the variance of the errors is stable across levels of fitted values.
#The Durbin-Watson test again shows that the errors are correlated. We should be sure to keep an eye on this after adding controls to the model.
#Model 3 - alcohol consumption with control for GNP - life expectancy ~ alcohol + GNP
alc.model.2 <- lm(Life.expectancy.at.birth..years..both.sexes ~ Alcoholic.Beverages..kcal.day. + Gross.national.income.per.capita..PPP.international..., data = diet.data)
summary(alc.model.2)
plot(alc.model.2)
bptest(alc.model.2)
durbinWatsonTest(alc.model.2)
#Look at coefficient estimates with heteroskedasticity robust standard errors because the Breusch-Pagan test has a marginally significant result suggesting that heteroskedasticity of errors may be a problem.
coeftest(alc.model.2, vcov = vcovHC)
#Comment on the model including a wealth control.
#This model drastically changes the impact of alcoholic beverage consumption on life expectancy.
#The coefficient estimate of the impact of alcoholic beverage consumption on life expectancy decreases to .006 and is no longer statistically significant.
#Heteroskedasticity of errors and correlation continue to be a problem.
#The residuals vs. fitted plot also seems to show a violation of the zero conditional mean assumption.
#The presence of heteroskedasticity of errors and a violation of the zero conditional mean assumption may indicate a non-linear relationship in the population.
#Including wealth as a control seems to have pulled away what had seemed to be a strong linear relationship between alcohol consumption and life expectancy.
#This is a reasonable result, as it seems that wealth would be a key driver for both alcohol consumption and life expectancy.
#Adding the wealth control, therefore, reveals that alcohol consumption in and of itself may not be as strongly related to life expectancy as previously suspected.
#Model 4 - non-linear alcohol consumption with control for GNP - life expectancy ~ alcohol^2 + alcohol + GNP
alc.model.3 <- lm(Life.expectancy.at.birth..years..both.sexes ~ I(Alcoholic.Beverages..kcal.day.^2) + Alcoholic.Beverages..kcal.day. + Gross.national.income.per.capita..PPP.international..., data = diet.data)
summary(alc.model.3)
plot(alc.model.3)
bptest(alc.model.3)
durbinWatsonTest(alc.model.3)
#Look at coefficient estimates with heteroskedasticity robust standard errors because the Breusch-Pagan test has a marginally significant result suggesting that heteroskedasticity of errors may be a problem.
coeftest(alc.model.3, vcov = vcovHC)
#Comment on model including non-linear effect of alcohol consumption
#Including a non-linear effect of alcohol consumption in the model does not improve the problems with heteroskedasticity and correlation of errors.
#The alcoholic beverage consumption coefficient estimates are still not significant.
#The residuals vs. fitted values plot shows heteroskedasticity of errors and the Breusch-Pagan test confirms the errors are heteroskedastic.
#Therefore, we need to be sure to use heteroskedasticity robust standard errors to assess statistical significance of the coefficient estimates in the model.
#The Durbin-Watson test shows that correlation remains a problem
##NOTE FOR NEXT TEAM MEMBER TO PICK UP ANALYSIS - WHAT TO DO ABOUT CORRELATION.
#Model 5 - log transformation of alcohol consumption with control for GNP - life expectancy ~ log(alcohol) + GNP
#First, remove observations of zero alcoholic beverage consumption so we can implement a log transformation.
diet.data.2 <- diet.data[diet.data$Alcoholic.Beverages..kcal.day. > 0, ]
#Estimate the model
alc.model.4 <- lm(Life.expectancy.at.birth..years..both.sexes ~ log(Alcoholic.Beverages..kcal.day.) + Gross.national.income.per.capita..PPP.international..., data = diet.data.2)
summary(alc.model.4)
plot(alc.model.4)
bptest(alc.model.4)
durbinWatsonTest(alc.model.4)
#Look at coefficient estimates with heteroskedasticity robust standard errors because the Breusch-Pagan test has a marginally significant result suggesting that heteroskedasticity of errors may be a problem.
coeftest(alc.model.4, vcov = vcovHC)
#Comment on the model
#Including a log transformation for alcoholic beverage consumption does not fix the problem of heteroskedasticity of errors as evidenced by the residuals vs. fitted values plot and the Breusch-Pagan test.
#The Durbin-Watson test also shows that correlation of errors remains a problem.
#Model 6 - log transformation of alcohol consumption with control for log transformation of GNP - life expectancy ~ log(alcohol) + log(GNP)
#Estimate the model
alc.model.5 <- lm(Life.expectancy.at.birth..years..both.sexes ~ log(Alcoholic.Beverages..kcal.day.) + log(Gross.national.income.per.capita..PPP.international...), data = diet.data.2)
summary(alc.model.5)
plot(alc.model.5)
bptest(alc.model.5)
durbinWatsonTest(alc.model.5)
#Look at coefficient estimates with heteroskedasticity robust standard errors because the Breusch-Pagan test has a marginally significant result suggesting that heteroskedasticity of errors may be a problem.
coeftest(alc.model.5, vcov = vcovHC)
#Comment on the model.
#Using a log transformation on GNP and alcohol consumption makes sense because each of these variables is positively skewed.
#After making these transformations, the Durbin-Watson test shows that the correlated errors problem seems to have been solved.
#The residuals vs. fitted values plot shows that heteroskedasticity of errors continues to be a problem. The Breusch-Pagan test confirms this result.
#Using heteroskedasticity robust standard errors, the coefficients are both statistically significant.
#The coefficient estimate on log(alcohol consumption) is -1.271 which is statistically significant (p = .012 using heteroskedasticity robust errors).
#In a log-level model, a one percent increase in alcohol consumption corresponds with a decrease of roughly 1.271/100 = .013 years in life expectancy while holding GNP equal (the coefficient itself is the effect of a one-unit change in log consumption).
#While this model outputs a statistically significant coefficient estimate of alcohol consumption, now its relationship with life expectancy is reversed, making it fairly suspect that there is a real meaningful relationship between the two variables.
#In contrast, the wealth control, the GNP, still retained a similar relationship with life expectancy, which is consistent with its coefficient estimates in previous models.
#Conclusion
#TBD
# Pros
# TBD
#Cons:
# Data Collection Constraints:
#. Health expectancy estimates based on self-reported health status information are generally not comparable across countries due to differences in survey instruments
#and cultural differences in reporting of health.
#. Comparability problems with self-report health status data relate not only to differences in survey design and methods, but more fundamentally to
#unmeasured differences in expectations and norms for health references
#. The meaning that different populations attach to the labels used for each of the response categories, such as mild, moderate or severe, in self-reported questions can vary greatly.
#. Calculation of healthy life expectancy at birth is based on age-specific death rates for a particular calendar period together with severity-adjusted health state prevalences by age.
# Data Collection Constraint Mitigation:
#. To mitigate the risk, data is validated against another data source (http://data.worldbank.org/data-catalog/health-nutrition-and-population-statistics). Analysis is outlined in the Data Validation section above.
# Control Variables:
#. We expect positive linear relationship between wine consumption and life expectancy only to a certain extent, beyond that there will be other negative implications.
#We need a control variable to balance that out. For example, a variable that captures negative impact on life expectancy when more calories are consumed.
#Create a subset of the data adding a few health indicator variables
DavidSubset = diet.data[, c("Countries", "Alcoholic.Beverages..kcal.day.", "Gross.national.income.per.capita..PPP.international...", "Life.expectancy.at.birth..years..both.sexes", "Systolic.blood.pressure..adults.aged.15.and.above..men..mmHg.", "Obesity.prevalence..men....", "Mean.total.cholesterol..men..mg.dl...2005")]
summary(DavidSubset)
#Give the columns short, readable names
colnames(DavidSubset) = c("Countries", "alcohol_consumption", "GNP_capita", "life_expectancy", "blood_pressure", "obesity_pcnt", "cholesterol_mean_total")
hist(DavidSubset$cholesterol_mean_total, breaks=30)
hist(DavidSubset$blood_pressure, breaks=30)
hist(DavidSubset$obesity_pcnt, breaks=30)
cor(DavidSubset$alcohol_consumption, DavidSubset$GNP_capita)
alcohol.gnp.scatter <- ggplot(data = DavidSubset, aes(x = GNP_capita, y = alcohol_consumption))
alcohol.gnp.scatter + geom_point(colour = "navy") + ggtitle("Scatterplot of GNP and Alcohol Consumption per Day")
#Print the full subset for inspection
DavidSubset
# Model life expectancy with the added health indicator variables as controls
davidmodel = lm(life_expectancy ~ GNP_capita + blood_pressure + obesity_pcnt + alcohol_consumption + cholesterol_mean_total, data = DavidSubset)
summary(davidmodel)
# Alcohol is strongly linked to GNP but not linked to life expectancy... which is interesting.
# Alcohol consumption may be a good proxy for being a wealthy country, thus it is a good indication of healthy life or life expectancy.
# This is maybe similar to the idea of looking at the size of a country's entertainment industry as a proxy for its health/success/happiness.
|
massage.labels = function(gsd.labels,labels.table="massage_labels.txt",final.cleanup=TRUE){
  # Rewrite raw GSD labels into pretty display labels using a lookup table.
  #
  # labels.table: path to either an Excel workbook (detected by "xls" in the
  #   file name) or a tab-delimited text file, with columns gsd_label,
  #   pretty_label and type. type "replace" substitutes the whole label
  #   whenever gsd_label matches (as a regex); type "gsub" does a literal
  #   substring substitution.
  # final.cleanup: if TRUE, apply a few cosmetic fixes (spacing, slashes,
  #   backticks) after the substitutions.
  # Returns a character vector the same length as gsd.labels.
  gsd.labels = as.character(gsd.labels)
  if(grepl("xls",labels.table)){
    library(XLConnect)
    # BUG FIX: this branch previously loaded into massage_labels.df
    # (underscores) but then used massage.labels.df (dots), which errored,
    # and it ignored the labels.table argument in favor of a hard-coded path.
    massage.labels.wb = loadWorkbook(labels.table)
    massage.labels.df = readWorksheet(massage.labels.wb,1)
    massage.labels.df$gsd_label[is.na(massage.labels.df$gsd_label)]=""
    massage.labels.df$pretty_label[is.na(massage.labels.df$pretty_label)]=""
    massage.labels.df$type[is.na(massage.labels.df$type)]="gsub"
  }else{
    # BUG FIX: read the file named by labels.table instead of a hard-coded path.
    massage.labels.df = read.table(labels.table,as.is=T,sep="\t",header=T,stringsAsFactors=F,allowEscapes=T)
  }
  # Labels may carry a second line (after "\n"); massage only the first line
  # and re-attach the second line at the end.
  # NOTE(review): this assumes every label has a second line whenever any
  # label does -- ragged inputs would recycle in rbind; confirm with callers.
  multiline = length(grep("\n",gsd.labels))!=0
  if(multiline){
    split.labels = do.call("rbind",strsplit(gsd.labels,"\n"))
    line2 = split.labels[,2]
    gsd.labels = split.labels[,1]
  }
  for(i in seq_len(nrow(massage.labels.df))){
    if(massage.labels.df$type[i]=="replace"){
      # Replace the whole label wherever the pattern occurs in it.
      labels.ind = regexpr(massage.labels.df$gsd_label[i],gsd.labels)>0
      gsd.labels[labels.ind] = massage.labels.df$pretty_label[i]
    }else if(massage.labels.df$type[i]=="gsub"){
      gsd.labels = try(gsub(massage.labels.df$gsd_label[i],massage.labels.df$pretty_label[i],gsd.labels,fixed=TRUE))
      # Drop into the debugger on a bad substitution (interactive debugging aid).
      if(inherits(gsd.labels,"try-error")) browser()
    }
  }
  if(final.cleanup){
    gsd.labels = gsub("\\["," \\[",gsd.labels)   # space before "["
    gsd.labels = gsub("\\\\","/",gsd.labels)     # backslashes -> forward slashes
    gsd.labels = gsub("//","/",gsd.labels)       # collapse doubled slashes
    gsd.labels = gsub("`","",gsd.labels)         # strip backticks
    gsd.labels = gsub("^ ","",gsd.labels)        # strip a leading space
    gsd.labels = gsub("  "," ",gsd.labels)       # collapse double spaces
  }
  if(multiline){
    gsd.labels = paste(gsd.labels,line2,sep="\n")
  }
  return(gsd.labels)
}
| /R/massage_labels.R | no_license | tbstockton/gisdtR | R | false | false | 2,012 | r | massage.labels = function(gsd.labels,labels.table="massage_labels.txt",final.cleanup=TRUE){
# Read in an Excel speadsheet with a gsd_label column that indicates the label to change
# to the pretty_label column
# gsd.labels = names(gsd.df)
gsd.labels = as.character(gsd.labels)
if(grepl("xls",labels.table)){
# library(xlsx)
# massage.labels.df = read.xlsx("massage_labels.xlsx",sheetIndex=1)
library(XLConnect)
massage_labels.wb = loadWorkbook("massage_labels.xlsx")
massage_labels.df = readWorksheet(massage_labels.wb,1)
massage.labels.df$gsd_label[is.na(massage.labels.df$gsd_label)]=""
massage.labels.df$pretty_label[is.na(massage.labels.df$pretty_label)]=""
massage.labels.df$type[is.na(massage.labels.df$type)]="gsub"
}else{
massage.labels.df = read.table("massage_labels.txt",as.is=T,sep="\t",header=T,stringsAsFactors=F,allowEscapes=T)
}
multiline = length(grep("\n",gsd.labels))!=0
if(multiline){
line2 = do.call("rbind",strsplit(gsd.labels,"\n"))[,2]
gsd.labels = do.call("rbind",strsplit(gsd.labels,"\n"))[,1]
}
for(i in 1:nrow(massage.labels.df)){
if(massage.labels.df$type[i]=="replace"){
labels.ind = regexpr(massage.labels.df$gsd_label[i],gsd.labels)>0
gsd.labels[labels.ind] = massage.labels.df$pretty_label[i]
}else if(massage.labels.df$type[i]=="gsub"){
# i = 1
gsd.labels = try(gsub(massage.labels.df$gsd_label[i],massage.labels.df$pretty_label[i],gsd.labels,fixed=TRUE))
if(inherits(gsd.labels,"try-error")) browser()
}
}
if(final.cleanup){
# gsd.labels = gsub("([[:upper:]])"," \\1",gsd.labels,perl=TRUE)
gsd.labels = gsub("\\["," \\[",gsd.labels)
gsd.labels = gsub("\\\\","/",gsd.labels)
gsd.labels = gsub("//","/",gsd.labels)
gsd.labels = gsub("`","",gsd.labels)
gsd.labels = gsub("^ ","",gsd.labels)
gsd.labels = gsub(" "," ",gsd.labels)
}
if(multiline){
gsd.labels = paste(gsd.labels,line2,sep="\n")
}
return(gsd.labels)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pwOmics_vizualisation_functions.R
\name{plotConsensusProfiles}
\alias{plotConsensusProfiles}
\title{Plot consensus graph profiles of static consensus molecules.}
\usage{
plotConsensusProfiles(consensusGraphs, data_omics, subsel = TRUE, ...)
}
\arguments{
\item{consensusGraphs}{result from static analysis: consensus graph generated
by staticConsensusNet function.}
\item{data_omics}{OmicsData object.}
\item{subsel}{character vector of selected consensus molecules for plotting;
if TRUE all consensus molecules are plotted}
\item{...}{further plotting/legend parameters.}
}
\value{
pdf file in current working directory.
}
\description{
Consensus graph profiles of static consensus molecules
plotted as heatmap to pdf file stored in current working directory.
}
\examples{
\dontrun{
data(OmicsExampleData)
data_omics = readOmics(tp_prots = c(0.25, 1, 4, 8, 13, 18, 24),
tp_genes = c(1, 4, 8, 13, 18, 24), OmicsExampleData,
PWdatabase = c("biocarta", "kegg", "nci", "reactome"),
TFtargetdatabase = c("userspec"))
data_omics = readPhosphodata(data_omics,
phosphoreg = system.file("extdata", "phospho_reg_table.txt",
package = "pwOmics.newupdown"))
data_omics = readTFdata(data_omics,
TF_target_path = system.file("extdata", "TF_targets.txt",
package = "pwOmics.newupdown"))
data_omics_plus = readPWdata(data_omics,
loadgenelists = system.file("extdata/Genelists", package = "pwOmics.newupdown"))
}
\dontrun{
data_omics_plus = identifyPR(data_omics_plus)
setwd(system.file("extdata/Genelists", package = "pwOmics.newupdown"))
data_omics = identifyPWs(data_omics_plus)
data_omics = identifyTFs(data_omics)
data_omics = identifyRsofTFs(data_omics,
noTFs_inPW = 1, order_neighbors = 10)
data_omics = identifyPWTFTGs(data_omics)
statConsNet = staticConsensusNet(data_omics)
plotConsensusProfiles(statConsNet, data_omics, subsel = TRUE)
}
}
\keyword{manip}
| /man/plotConsensusProfiles.Rd | no_license | MarenS2/pwOmics | R | false | true | 1,946 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pwOmics_vizualisation_functions.R
\name{plotConsensusProfiles}
\alias{plotConsensusProfiles}
\title{Plot consensus graph profiles of static consensus molecules.}
\usage{
plotConsensusProfiles(consensusGraphs, data_omics, subsel = TRUE, ...)
}
\arguments{
\item{consensusGraphs}{result from static analysis: consensus graph generated
by staticConsensusNet function.}
\item{data_omics}{OmicsData object.}
\item{subsel}{character vector of selected consensus molecules for plotting;
if TRUE all consensus molecules are plotted}
\item{...}{further plotting/legend parameters.}
}
\value{
pdf file in current working directory.
}
\description{
Consensus graph profiles of static consensus molecules
plotted as heatmap to pdf file stored in current working directory.
}
\examples{
\dontrun{
data(OmicsExampleData)
data_omics = readOmics(tp_prots = c(0.25, 1, 4, 8, 13, 18, 24),
tp_genes = c(1, 4, 8, 13, 18, 24), OmicsExampleData,
PWdatabase = c("biocarta", "kegg", "nci", "reactome"),
TFtargetdatabase = c("userspec"))
data_omics = readPhosphodata(data_omics,
phosphoreg = system.file("extdata", "phospho_reg_table.txt",
package = "pwOmics.newupdown"))
data_omics = readTFdata(data_omics,
TF_target_path = system.file("extdata", "TF_targets.txt",
package = "pwOmics.newupdown"))
data_omics_plus = readPWdata(data_omics,
loadgenelists = system.file("extdata/Genelists", package = "pwOmics.newupdown"))
}
\dontrun{
data_omics_plus = identifyPR(data_omics_plus)
setwd(system.file("extdata/Genelists", package = "pwOmics.newupdown"))
data_omics = identifyPWs(data_omics_plus)
data_omics = identifyTFs(data_omics)
data_omics = identifyRsofTFs(data_omics,
noTFs_inPW = 1, order_neighbors = 10)
data_omics = identifyPWTFTGs(data_omics)
statConsNet = staticConsensusNet(data_omics)
plotConsensusProfiles(statConsNet, data_omics, subsel = TRUE)
}
}
\keyword{manip}
|
#This script assumes that the library "sqldf" has been installed
# Run the following script to install the sqldf library
# install.packages("sqldf") #uncomment and run this
#Load the sqldf library
library(sqldf)
#Read only the rows for 1 Feb 2007 and 2 Feb 2007; the Date column is stored as d/m/Y, so these are '1/2/2007' and '2/2/2007'.
#Filtering in the SQL query avoids loading the full dataset into a data frame and subsetting it afterwards.
df<-read.csv.sql("household_power_consumption.txt","select * from file where date in ('1/2/2007','2/2/2007')",header=TRUE,sep=";")
#Add the column "DateTime" by combining the date and time columns together
df$DateTime <- strptime(paste(df$Date, df$Time), format="%d/%m/%Y %H:%M:%S")
#Plot Global Active Power (kilowatts) over time as a line chart
plot(df$DateTime, df$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
#Copy the image and save it to a 480x480 PNG file
dev.copy(png, file="plot2.png", height=480, width=480)
dev.off() | /Plot 2.R | no_license | rahultrips/ExData_Plotting1 | R | false | false | 866 | r | #This script assumes that the library "sqldf" has been installed
# Run the following script to install the sqldf library
# install.packages("sqldf") #uncomment and run this
#Load the sqldf library
library(sqldf)
#Read only the rows for 1 Feb 2007 and 2 Feb 2007; the Date column is stored as d/m/Y, so these are '1/2/2007' and '2/2/2007'.
#Filtering in the SQL query avoids loading the full dataset into a data frame and subsetting it afterwards.
df<-read.csv.sql("household_power_consumption.txt","select * from file where date in ('1/2/2007','2/2/2007')",header=TRUE,sep=";")
#Add the column "DateTime" by combining the date and time columns together
df$DateTime <- strptime(paste(df$Date, df$Time), format="%d/%m/%Y %H:%M:%S")
#Plot Global Active Power (kilowatts) over time as a line chart
plot(df$DateTime, df$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
#Copy the image and save it to a 480x480 PNG file
dev.copy(png, file="plot2.png", height=480, width=480)
dev.off() |
# Load the scoring dictionary; the CSV ships without a header row.
dic <- read.csv("R/dictionary.csv", header = FALSE)
# Column 1 is the word, column 2 its count, then grade-level columns g1..g13.
colnames(dic) <- c("word", "count", paste0("g", 1:13))
# Make sure the word column is plain character (not factor, as under
# pre-4.0 read.csv defaults).
dic$word <- as.character(dic$word)
| /R/dictionary.R | no_license | JaehyunSong/JPreadable | R | false | false | 251 | r | #Dictionalyの呼び出し
# Load the scoring dictionary; the CSV ships without a header row.
dic <- read.csv("R/dictionary.csv", header = FALSE)
# Column 1 is the word, column 2 its count, then grade-level columns g1..g13.
colnames(dic) <- c("word", "count", "g1", "g2", "g3", "g4", "g5", "g6",
                   "g7", "g8", "g9", "g10", "g11", "g12", "g13")
# Make sure the word column is plain character (not factor, as under
# pre-4.0 read.csv defaults).
dic$word <- as.character(dic$word)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_grattan_colours.R
\docType{data}
\name{grattan_lightred8}
\alias{grattan_lightred8}
\title{'}
\format{
An object of class \code{character} of length 1.
}
\usage{
grattan_lightred8
}
\description{
'
}
\keyword{datasets}
| /man/grattan_lightred8.Rd | permissive | MattCowgill/grattantheme | R | false | true | 304 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_grattan_colours.R
\docType{data}
\name{grattan_lightred8}
\alias{grattan_lightred8}
\title{'}
\format{
An object of class \code{character} of length 1.
}
\usage{
grattan_lightred8
}
\description{
'
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Zalpha_rsq_over_expected.R
\name{Zalpha_rsq_over_expected}
\alias{Zalpha_rsq_over_expected}
\title{Runs the Zalpha function on the r-squared values over the expected r-squared values for the region}
\usage{
Zalpha_rsq_over_expected(
pos,
ws,
x,
dist,
LDprofile_bins,
LDprofile_rsq,
minRandL = 4,
minRL = 25,
X = NULL
)
}
\arguments{
\item{pos}{A numeric vector of SNP locations}
\item{ws}{The window size which the \eqn{Z_{\alpha}^{r^2/E[r^2]}}{Zalpha} statistic will be calculated over. This should be on the same scale as the \code{pos} vector.}
\item{x}{A matrix of SNP values. Columns represent chromosomes; rows are SNP locations. Hence, the number of rows should equal the length of the \code{pos} vector. SNPs should all be biallelic.}
\item{dist}{A numeric vector of genetic distances (e.g. cM, LDU). This should be the same length as \code{pos}.}
\item{LDprofile_bins}{A numeric vector containing the lower bound of the bins used in the LD profile. These should be of equal size.}
\item{LDprofile_rsq}{A numeric vector containing the expected \eqn{r^2}{r^2} values for the corresponding bin in the LD profile. Must be between 0 and 1.}
\item{minRandL}{Minimum number of SNPs in each set R and L for the statistic to be calculated. Default is 4.}
\item{minRL}{Minimum value for the product of the set sizes for R and L. Default is 25.}
\item{X}{Optional. Specify a region of the chromosome to calculate \eqn{Z_{\alpha}^{r^2/E[r^2]}}{Zalpha} for in the format \code{c(startposition, endposition)}. The start position and the end position should be within the extremes of the positions given in the \code{pos} vector. If not supplied, the function will calculate \eqn{Z_{\alpha}^{r^2/E[r^2]}}{Zalpha} for every SNP in the \code{pos} vector.}
}
\value{
A list containing the SNP positions and the \eqn{Z_{\alpha}^{r^2/E[r^2]}}{Zalpha} values for those SNPs
}
\description{
Returns a \eqn{Z_{\alpha}^{r^2/E[r^2]}}{Zalpha} value for each SNP location supplied to the function, based on
the expected \eqn{r^2} values given an LD profile and genetic distances.
For more information about the \eqn{Z_{\alpha}^{r^2/E[r^2]}}{Zalpha} statistic, please see Jacobs (2016).
The \eqn{Z_{\alpha}^{r^2/E[r^2]}}{Zalpha} statistic is defined as:
\deqn{{Z_{\alpha}^{r^2/E[r^2]}}=\frac{{|L| \choose 2}^{-1}\sum_{i,j \in L}r^2_{i,j}/E[r^2_{i,j}] + {|R| \choose 2}^{-1}\sum_{i,j \in R}r^2_{i,j}/E[r^2_{i,j}]}{2}}
where \code{|L|} and \code{|R|} are the number of SNPs to the left and right of the current locus within the given window \code{ws}, \eqn{r^2}{r^2} is equal to
the squared correlation between a pair of SNPs, and \eqn{E[r^2]}{E[r^2]} is equal to the expected squared correlation between a pair of SNPs, given an LD profile.
}
\details{
The LD profile describes the expected correlation between SNPs at a given genetic distance, generated using simulations or
real data. Care should be taken to utilise an LD profile that is representative of the population in question. The LD
profile should consist of evenly sized bins of distances (for example 0.0001 cM per bin), where the value given is the (inclusive) lower
bound of the bin. Ideally, an LD profile would be generated using data from a null population with no selection, however one can be generated
using this data. See the \code{\link{create_LDprofile}} function for more information on how to create an LD profile.
}
\examples{
## load the snps and LDprofile example datasets
data(snps)
data(LDprofile)
## run Zalpha_rsq_over_expected over all the SNPs with a window size of 3000 bp
Zalpha_rsq_over_expected(snps$bp_positions,3000,as.matrix(snps[,3:12]),snps$cM_distances,
LDprofile$bin,LDprofile$rsq)
## only return results for SNPs between locations 600 and 1500 bp
Zalpha_rsq_over_expected(snps$bp_positions,3000,as.matrix(snps[,3:12]),snps$cM_distances,
LDprofile$bin,LDprofile$rsq,X=c(600,1500))
}
\references{
Jacobs, G.S., T.J. Sluckin, and T. Kivisild, \emph{Refining the Use of Linkage Disequilibrium as a Robust Signature of Selective Sweeps.} Genetics, 2016. \strong{203}(4): p. 1807
}
\seealso{
\code{\link{create_LDprofile}}
}
| /man/Zalpha_rsq_over_expected.Rd | permissive | chorscroft/zalpha | R | false | true | 4,203 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Zalpha_rsq_over_expected.R
\name{Zalpha_rsq_over_expected}
\alias{Zalpha_rsq_over_expected}
\title{Runs the Zalpha function on the r-squared values over the expected r-squared values for the region}
\usage{
Zalpha_rsq_over_expected(
pos,
ws,
x,
dist,
LDprofile_bins,
LDprofile_rsq,
minRandL = 4,
minRL = 25,
X = NULL
)
}
\arguments{
\item{pos}{A numeric vector of SNP locations}
\item{ws}{The window size which the \eqn{Z_{\alpha}^{r^2/E[r^2]}}{Zalpha} statistic will be calculated over. This should be on the same scale as the \code{pos} vector.}
\item{x}{A matrix of SNP values. Columns represent chromosomes; rows are SNP locations. Hence, the number of rows should equal the length of the \code{pos} vector. SNPs should all be biallelic.}
\item{dist}{A numeric vector of genetic distances (e.g. cM, LDU). This should be the same length as \code{pos}.}
\item{LDprofile_bins}{A numeric vector containing the lower bound of the bins used in the LD profile. These should be of equal size.}
\item{LDprofile_rsq}{A numeric vector containing the expected \eqn{r^2}{r^2} values for the corresponding bin in the LD profile. Must be between 0 and 1.}
\item{minRandL}{Minimum number of SNPs in each set R and L for the statistic to be calculated. Default is 4.}
\item{minRL}{Minimum value for the product of the set sizes for R and L. Default is 25.}
\item{X}{Optional. Specify a region of the chromosome to calculate \eqn{Z_{\alpha}^{r^2/E[r^2]}}{Zalpha} for in the format \code{c(startposition, endposition)}. The start position and the end position should be within the extremes of the positions given in the \code{pos} vector. If not supplied, the function will calculate \eqn{Z_{\alpha}^{r^2/E[r^2]}}{Zalpha} for every SNP in the \code{pos} vector.}
}
\value{
A list containing the SNP positions and the \eqn{Z_{\alpha}^{r^2/E[r^2]}}{Zalpha} values for those SNPs
}
\description{
Returns a \eqn{Z_{\alpha}^{r^2/E[r^2]}}{Zalpha} value for each SNP location supplied to the function, based on
the expected \eqn{r^2} values given an LD profile and genetic distances.
For more information about the \eqn{Z_{\alpha}^{r^2/E[r^2]}}{Zalpha} statistic, please see Jacobs (2016).
The \eqn{Z_{\alpha}^{r^2/E[r^2]}}{Zalpha} statistic is defined as:
\deqn{{Z_{\alpha}^{r^2/E[r^2]}}=\frac{{|L| \choose 2}^{-1}\sum_{i,j \in L}r^2_{i,j}/E[r^2_{i,j}] + {|R| \choose 2}^{-1}\sum_{i,j \in R}r^2_{i,j}/E[r^2_{i,j}]}{2}}
where \code{|L|} and \code{|R|} are the number of SNPs to the left and right of the current locus within the given window \code{ws}, \eqn{r^2}{r^2} is equal to
the squared correlation between a pair of SNPs, and \eqn{E[r^2]}{E[r^2]} is equal to the expected squared correlation between a pair of SNPs, given an LD profile.
}
\details{
The LD profile describes the expected correlation between SNPs at a given genetic distance, generated using simulations or
real data. Care should be taken to utilise an LD profile that is representative of the population in question. The LD
profile should consist of evenly sized bins of distances (for example 0.0001 cM per bin), where the value given is the (inclusive) lower
bound of the bin. Ideally, an LD profile would be generated using data from a null population with no selection; however, one can be generated
using this data. See the \code{\link{create_LDprofile}} function for more information on how to create an LD profile.
}
\examples{
## load the snps and LDprofile example datasets
data(snps)
data(LDprofile)
## run Zalpha_rsq_over_expected over all the SNPs with a window size of 3000 bp
Zalpha_rsq_over_expected(snps$bp_positions,3000,as.matrix(snps[,3:12]),snps$cM_distances,
LDprofile$bin,LDprofile$rsq)
## only return results for SNPs between locations 600 and 1500 bp
Zalpha_rsq_over_expected(snps$bp_positions,3000,as.matrix(snps[,3:12]),snps$cM_distances,
LDprofile$bin,LDprofile$rsq,X=c(600,1500))
}
\references{
Jacobs, G.S., T.J. Sluckin, and T. Kivisild, \emph{Refining the Use of Linkage Disequilibrium as a Robust Signature of Selective Sweeps.} Genetics, 2016. \strong{203}(4): p. 1807
}
\seealso{
\code{\link{create_LDprofile}}
}
|
#' Calculate site index using site tools
#'
#' @description This function calculates site index based on bored age (\code{boredAge}), tree height (\code{height}),
#' species (\code{species}) and region (\code{ICRegion}) using site tools program. This function
#' is equivalent to sindex_httoage.sas.
#'
#' @param boredAge numeric, Age at bored height.
#' @param height numeric, Total tree height.
#' @param species character, Species code, must be consistent with the species code in site tools, which can be converted
#' from the original species code by using \code{\link{siteToolsSpeciesConvertor}}.
#' @param ICRegion character, Must be either \code{I} (interior) or \code{C} (coastal).
#' IC regions can be derived using \code{\link{BEC2IC}}.
#' @param ageType numeric, Must be either \code{0} or \code{1}. \code{0} stands for total age, for which site index is
#' calculated for 50 years of total tree age. While \code{1} stands for breast height age, for which
#' site index is calculated for 50 year old at breast height.
#' @param estimateMethod numeric, Defines how the site tools estimate site index. Valued as \code{0} and \code{1},
#' \code{0} is iterative, while \code{1} is directive. Default is \code{1}, which is directive.
#' @param siteToolsDLLPath character, Path to \code{SINDEX33.DLL}
#' @param sasExePath character, Path to sas executable, i.e., \code{sas.exe}.
#'
#'
#' @return Site index
#'
#' @importFrom data.table ':=' data.table
#'
#'
#' @export
#' @docType methods
#' @rdname SiteTools_HTBoredAge2SI
#'
#' @author Yong Luo
#'
# S4 generic declaration; the numeric/character-signature method below
# supplies the implementation.
setGeneric("SiteTools_HTBoredAge2SI",
function(boredAge, height, species, ICRegion, ageType,
estimateMethod, siteToolsDLLPath, sasExePath) {
standardGeneric("SiteTools_HTBoredAge2SI")
})
#' @rdname SiteTools_HTBoredAge2SI
# Method implementation: builds a data.table of tree records, remaps each
# species/region pair to a Site Tools species index, derives the site and
# growth-intercept (GI) curves, then computes site index from height and
# bored age via the ST_* wrappers (defined elsewhere in this package) around
# SINDEX33.DLL.
setMethod(
"SiteTools_HTBoredAge2SI",
signature = c(boredAge = "numeric",
height = "numeric",
species = "character",
ICRegion = "character",
ageType = "numeric",
estimateMethod = "numeric",
siteToolsDLLPath = "character",
sasExePath = "character"),
definition = function(boredAge, height, species, ICRegion, ageType,
estimateMethod, siteToolsDLLPath, sasExePath){
# uniObs records the caller's input order so the result can be returned in
# that same order at the end.
worktable <- data.table(uniObs = 1:max(length(boredAge), length(height)),
age = boredAge, height, ageType,
speciesFRED = species, BEC_I_C = ICRegion)
# Map species code + interior/coastal region to the Site Tools species
# index. Rows with SI_SP < 0 are excluded by every filter below, so a
# negative index evidently flags an unmappable species.
worktable[, SI_SP := ST_SpecRemap(species = speciesFRED, ICRegion = BEC_I_C,
siteToolsDLLPath = siteToolsDLLPath,
sasExePath = sasExePath)]
# Look up the default site curve and growth-intercept curve references.
worktable[SI_SP >= 0, ':='(SITE_CURVE = ST_DefCurve(siteIndexRef = SI_SP,
siteToolsDLLPath = siteToolsDLLPath,
sasExePath = sasExePath),
GRTH_CURVE = ST_DefGICurve(siteIndexRef = SI_SP,
siteToolsDLLPath = siteToolsDLLPath,
sasExePath = sasExePath))]
# Site index from height + bored age on the site curve.
# NOTE(review): ST_HTAgeToSI is invoked twice with identical arguments
# (once for $error, once for $output), doubling the external call --
# confirm whether the result could be computed once and reused.
worktable[SI_SP >= 0, ':='(SI_ERR = ST_HTAgeToSI(curveRef = SITE_CURVE,
boredAge = age,
ageType = ageType,
height = height,
estimateMethod = estimateMethod,
siteToolsDLLPath = siteToolsDLLPath,
sasExePath = sasExePath)$error,
SI_TREE = ST_HTAgeToSI(curveRef = SITE_CURVE,
boredAge = age,
ageType = ageType,
height = height,
estimateMethod = estimateMethod,
siteToolsDLLPath = siteToolsDLLPath,
sasExePath = sasExePath)$output)]
# A negative error code means the estimate failed; blank it out.
worktable[SI_SP >= 0 & SI_ERR < 0, SI_TREE := as.numeric(NA)]
# For young trees (age <= 50) also compute a growth-intercept estimate...
worktable[SI_SP >= 0 & age <= 50, ':='(GI_ERR = ST_HTAgeToSI(curveRef = GRTH_CURVE,
boredAge = age,
ageType = ageType,
height = height,
estimateMethod = estimateMethod,
siteToolsDLLPath = siteToolsDLLPath,
sasExePath = sasExePath)$error,
SI_GI = ST_HTAgeToSI(curveRef = GRTH_CURVE,
boredAge = age,
ageType = ageType,
height = height,
estimateMethod = estimateMethod,
siteToolsDLLPath = siteToolsDLLPath,
sasExePath = sasExePath)$output)]
# ...and prefer it over the site-curve estimate whenever it succeeded.
worktable[SI_SP >= 0 & age <= 50 & GI_ERR >= 0,
SI_TREE := SI_GI]
# Return site index values in the original input order.
return(worktable[order(uniObs),]$SI_TREE)
}) | /R/SiteTools_HTBoredAge2SI.R | permissive | Miss-White/BCForestGroundSample | R | false | false | 6,154 | r | #' Calculate site index using site tools
#'
#' @description This function calculates site index based on bored age (\code{boredAge}), tree height (\code{height}),
#' species (\code{species}) and region (\code{ICRegion}) using site tools program. This function
#' is equivalent to sindex_httoage.sas.
#'
#' @param boredAge numeric, Age at bored height.
#' @param height numeric, Total tree height.
#' @param species character, Species code, must be consistent with the species code in site tools, which can be converted
#' from the original species code by using \code{\link{siteToolsSpeciesConvertor}}.
#' @param ICRegion character, Must be either \code{I} (interior) or \code{C} (coastal).
#' IC regions can be derived using \code{\link{BEC2IC}}.
#' @param ageType numeric, Must be either \code{0} or \code{1}. \code{0} stands for total age, for which site index is
#' calculated for 50 years of total tree age. While \code{1} stands for breast height age, for which
#' site index is calculated for 50 year old at breast height.
#' @param estimateMethod numeric, Defines how the site tools estimate site index. Valued as \code{0} and \code{1},
#' \code{0} is iterative, while \code{1} is directive. Default is \code{1}, which is directive.
#' @param siteToolsDLLPath character, Path to \code{SINDEX33.DLL}
#' @param sasExePath character, Path to sas executable, i.e., \code{sas.exe}.
#'
#'
#' @return Site index
#'
#' @importFrom data.table ':=' data.table
#'
#'
#' @export
#' @docType methods
#' @rdname SiteTools_HTBoredAge2SI
#'
#' @author Yong Luo
#'
# Duplicate copy of the SiteTools_HTBoredAge2SI generic above (dataset artifact).
setGeneric("SiteTools_HTBoredAge2SI",
function(boredAge, height, species, ICRegion, ageType,
estimateMethod, siteToolsDLLPath, sasExePath) {
standardGeneric("SiteTools_HTBoredAge2SI")
})
#' @rdname SiteTools_HTBoredAge2SI
# Duplicate copy of the SiteTools_HTBoredAge2SI method above (dataset
# artifact); see the first copy for the fully commented version.
setMethod(
"SiteTools_HTBoredAge2SI",
signature = c(boredAge = "numeric",
height = "numeric",
species = "character",
ICRegion = "character",
ageType = "numeric",
estimateMethod = "numeric",
siteToolsDLLPath = "character",
sasExePath = "character"),
definition = function(boredAge, height, species, ICRegion, ageType,
estimateMethod, siteToolsDLLPath, sasExePath){
worktable <- data.table(uniObs = 1:max(length(boredAge), length(height)),
age = boredAge, height, ageType,
speciesFRED = species, BEC_I_C = ICRegion)
worktable[, SI_SP := ST_SpecRemap(species = speciesFRED, ICRegion = BEC_I_C,
siteToolsDLLPath = siteToolsDLLPath,
sasExePath = sasExePath)]
worktable[SI_SP >= 0, ':='(SITE_CURVE = ST_DefCurve(siteIndexRef = SI_SP,
siteToolsDLLPath = siteToolsDLLPath,
sasExePath = sasExePath),
GRTH_CURVE = ST_DefGICurve(siteIndexRef = SI_SP,
siteToolsDLLPath = siteToolsDLLPath,
sasExePath = sasExePath))]
worktable[SI_SP >= 0, ':='(SI_ERR = ST_HTAgeToSI(curveRef = SITE_CURVE,
boredAge = age,
ageType = ageType,
height = height,
estimateMethod = estimateMethod,
siteToolsDLLPath = siteToolsDLLPath,
sasExePath = sasExePath)$error,
SI_TREE = ST_HTAgeToSI(curveRef = SITE_CURVE,
boredAge = age,
ageType = ageType,
height = height,
estimateMethod = estimateMethod,
siteToolsDLLPath = siteToolsDLLPath,
sasExePath = sasExePath)$output)]
# Negative error codes invalidate the site-curve estimate.
worktable[SI_SP >= 0 & SI_ERR < 0, SI_TREE := as.numeric(NA)]
worktable[SI_SP >= 0 & age <= 50, ':='(GI_ERR = ST_HTAgeToSI(curveRef = GRTH_CURVE,
boredAge = age,
ageType = ageType,
height = height,
estimateMethod = estimateMethod,
siteToolsDLLPath = siteToolsDLLPath,
sasExePath = sasExePath)$error,
SI_GI = ST_HTAgeToSI(curveRef = GRTH_CURVE,
boredAge = age,
ageType = ageType,
height = height,
estimateMethod = estimateMethod,
siteToolsDLLPath = siteToolsDLLPath,
sasExePath = sasExePath)$output)]
# Growth-intercept estimate preferred for young trees when it succeeds.
worktable[SI_SP >= 0 & age <= 50 & GI_ERR >= 0,
SI_TREE := SI_GI]
return(worktable[order(uniObs),]$SI_TREE)
}) |
# Count upward from 1 to 10, printing every value after the first
# (i.e. 2 through 10); i is left equal to 10 afterwards.
i <- 0
while (TRUE) {
  i <- i + 1
  if (i > 1) {
    print(i)
  }
  if (i == 10) {
    break
  }
}
| /test.R | no_license | lesics/BMMA | R | false | false | 81 | r | i = 0
# Duplicate copy of the counting loop above (dataset artifact); its `i = 0`
# initialiser is fused into the metadata line above. Prints 2 through 10.
repeat {
i = i+1
# skip the very first value (i == 1)
if (i > 1) {print(i)}
# stop once the counter reaches 10
if(i == 10) {
break
}
}
|
rankall <- function(outcome, num = "best") {
  ## Return, for every state, the hospital holding rank `num` for the given
  ## outcome ("heart attack", "heart failure" or "pneumonia"), based on the
  ## 30-day mortality columns of outcome-of-care-measures.csv (read from the
  ## working directory). `num` may be "best", "worst", or a whole-number
  ## rank; a state with fewer hospitals than `num` gets NA.
  ## Read outcome data; keep hospital name, state, and the three death rates.
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  data <- data[c(2, 7, 11, 17, 23)]
  names(data) <- c("name", "state", "heart attack", "heart failure", "pneumonia")
  ## Validate the outcome string.
  outcomes <- c("heart attack", "heart failure", "pneumonia")
  if (!(outcome %in% outcomes)) stop("invalid outcome")
  ## Validate num: "best", "worst", or a whole number.
  ## (Fix: the original evaluated `num %% 1`, which raised a type error
  ## instead of signalling "invalid num" for arbitrary non-numeric values.)
  if (!(identical(num, "best") || identical(num, "worst") ||
        (is.numeric(num) && num %% 1 == 0))) {
    stop("invalid num")
  }
  ## Keep only rows with data for the requested outcome.
  data <- data[data[outcome] != "Not Available", ]
  ## Sort by hospital name first, then by rate; order() is stable, so rate
  ## ties end up broken alphabetically.
  data[outcome] <- as.data.frame(sapply(data[outcome], as.numeric))
  data <- data[order(data$name, decreasing = FALSE), ]
  data <- data[order(data[outcome], decreasing = FALSE), ]
  ## Helper: name of the hospital of rank n within state s.
  getHospByRank <- function(df, s, n) {
    df <- df[df$state == s, ]
    vals <- df[, outcome]
    if (n == "best") {
      rowNum <- which.min(vals)
    } else if (n == "worst") {
      rowNum <- which.max(vals)
    } else {
      rowNum <- n # out-of-range ranks index past the end and yield NA
    }
    df[rowNum, ]$name
  }
  ## One row per state: build the rows in a list and bind once.
  ## (Fix: the original grew the data frame with rbind() inside the loop,
  ## which is quadratic in the number of states.)
  states <- unique(data[, "state"])
  if (length(states) == 0) {
    return(data.frame(hospital = character(), state = character()))
  }
  rows <- lapply(states, function(st) {
    data.frame(hospital = getHospByRank(data, st, num), state = st)
  })
  newdata <- do.call(rbind, rows)
  ## Return a data frame ordered by the (abbreviated) state name.
  newdata[order(newdata$state, decreasing = FALSE), ]
}
| /programming-assignment-3/rankall.R | permissive | rajasekaronline/coursera-r-programming | R | false | false | 1,827 | r | rankall <- function(outcome, num = "best") {
## Duplicate copy of rankall()'s body (dataset artifact); the function header
## is fused into the metadata line above. See the first copy for the
## reviewed/fixed version.
## Read outcome data
data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
data <- data[c(2, 7, 11, 17, 23)]
names(data)[1] <- "name"
names(data)[2] <- "state"
names(data)[3] <- "heart attack"
names(data)[4] <- "heart failure"
names(data)[5] <- "pneumonia"
## Validate the outcome string
outcomes = c("heart attack", "heart failure", "pneumonia")
if( outcome %in% outcomes == FALSE ) stop("invalid outcome")
## Validate the num value
## NOTE(review): `num%%1` raises a type error (instead of "invalid num")
## when num is a non-numeric value other than "best"/"worst".
if( num != "best" && num != "worst" && num%%1 != 0 ) stop("invalid num")
## Grab only rows with data in our outcome
data <- data[data[outcome] != 'Not Available', ]
## Order the data by name, then by rate (stable sort breaks ties alphabetically)
data[outcome] <- as.data.frame(sapply(data[outcome], as.numeric))
data <- data[order(data$name, decreasing = FALSE), ]
data <- data[order(data[outcome], decreasing = FALSE), ]
## Helper function to process the num argument
getHospByRank <- function(df, s, n) {
df <- df[df$state==s, ]
vals <- df[, outcome]
if( n == "best" ) {
rowNum <- which.min(vals)
} else if( n == "worst" ) {
rowNum <- which.max(vals)
} else {
rowNum <- n
}
df[rowNum, ]$name
}
## For each state, find the hospital of the given rank
states <- data[, 2]
states <- unique(states)
newdata <- data.frame("hospital"=character(), "state"=character())
for(st in states) {
hosp <- getHospByRank(data, st, num)
newdata <- rbind(newdata, data.frame(hospital=hosp, state=st))
}
## Return a data frame with the hospital names and the (abbreviated) state name
newdata <- newdata[order(newdata['state'], decreasing = FALSE), ]
newdata
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BIKM1_LBM_Poisson-class.R
\name{summary,BIKM1_LBM_Poisson-method}
\alias{summary,BIKM1_LBM_Poisson-method}
\title{Summary method for a BIKM1_LBM_Poisson object}
\usage{
\S4method{summary}{BIKM1_LBM_Poisson}(object, ...)
}
\arguments{
\item{object}{in the summary method, a BIKM1_LBM_Poisson object}
\item{...}{in the summary method, additional parameters (ignored)}
}
\description{
Produce a summary of the information contained in a \code{BIKM1_LBM_Poisson} object
}
\examples{
\donttest{require(bikm1)
J=200
K=120
h=3
l=2
theta=list()
theta$rho_h=1/h *matrix(1,h,1)
theta$tau_l=1/l *matrix(1,l,1)
theta$gamma_hl=matrix(c(1, 6,4, 1, 7, 1),ncol=2)
data=PoissonBlocRnd(J,K,theta)
res=BIKM1_LBM_Poisson(data$x,4,4,4,init_choice='random')
summary(res)}
}
| /man/summary-BIKM1_LBM_Poisson-method.Rd | no_license | cran/bikm1 | R | false | true | 820 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BIKM1_LBM_Poisson-class.R
\name{summary,BIKM1_LBM_Poisson-method}
\alias{summary,BIKM1_LBM_Poisson-method}
\title{Summary method for a BIKM1_LBM_Poisson object}
\usage{
\S4method{summary}{BIKM1_LBM_Poisson}(object, ...)
}
\arguments{
\item{object}{in the summary method, a BIKM1_LBM_Poisson object}
\item{...}{in the summary method, additional parameters (ignored)}
}
\description{
Produce a summary of the information contained in a \code{BIKM1_LBM_Poisson} object
}
\examples{
\donttest{require(bikm1)
J=200
K=120
h=3
l=2
theta=list()
theta$rho_h=1/h *matrix(1,h,1)
theta$tau_l=1/l *matrix(1,l,1)
theta$gamma_hl=matrix(c(1, 6,4, 1, 7, 1),ncol=2)
data=PoissonBlocRnd(J,K,theta)
res=BIKM1_LBM_Poisson(data$x,4,4,4,init_choice='random')
summary(res)}
}
|
stdNorm <- function(Y, hstd, fncf='mghtsConfig.R') {
  # Normalize each test's rows by the standard deviation of that test's
  # values pooled across all patients ("self normalizing").
  #
  # Y    -- named list of per-patient matrices, with tests in the rows.
  # hstd -- hash (reference semantics): the per-test SDs written into it here
  #         remain visible to the caller as a side effect.
  # fncf -- config script; expected to define the global `tests` vector
  #         iterated over below.
  #
  # Returns a copy of Y with each test row divided by its pooled SD.
  source(fncf)
  library(hash)
  cat(sprintf('self normalizing\n'))
  V <- Y
  for (test in tests) {
    # Pool this test's values across patients in one pass.
    # (Fix: the original grew `vt` with c() inside a loop, which is
    # quadratic; unlist(lapply(...)) concatenates in the same order.)
    vt <- unlist(lapply(names(V), function(pt) V[[pt]][test, ]))
    # na.rm = TRUE spelled out (T is reassignable, so `na.rm=T` is unsafe).
    hstd[[test]] <- sd(vt, na.rm = TRUE)
  }
  # Scale every patient's row for each test by the pooled SD.
  for (pt in names(V)) {
    for (test in tests) {
      V[[pt]][test, ] <- V[[pt]][test, ] / hstd[[test]]
    }
  }
  cat(sprintf('done\n'))
  return(V)
}
| /Project_1/Baseline/code/stdNorm.R | no_license | yuhang-lin/CSC791-User-Adaptive-ML | R | false | false | 481 | r | stdNorm <- function(Y, hstd, fncf='mghtsConfig.R') {
# Duplicate copy of stdNorm()'s body (dataset artifact); the function header
# is fused into the metadata line above. See the first copy for the reviewed
# version.
source(fncf)
library(hash)
cat(sprintf('self normalizing\n'))
V = Y
# Pool each test's values across all patients and record the SD in hstd
# (hash objects have reference semantics, so this is visible to the caller).
for (test in tests) {
vt = c()
for (pt in names(V)) {
vt = c(vt, V[[pt]][test,])
}
hstd[[test]] = sd(vt, na.rm=T)
}
# Divide every patient's row for each test by that pooled SD.
for (pt in names(V)) {
for (test in tests) {
V[[pt]][test,] = V[[pt]][test,] / hstd[[test]]
}
}
cat(sprintf('done\n'))
return(V)
}
|
# Shiny app dependencies and the pre-computed n-gram frequency tables used by
# NextWord(); the tables are loaded once at app startup.
suppressPackageStartupMessages(library(tm))
suppressPackageStartupMessages(library(stringr))
suppressPackageStartupMessages(library(shiny))
suppressPackageStartupMessages(library(shinythemes))
suppressPackageStartupMessages(library(markdown))
# NOTE(review): the lookups below assume word columns named first/second/third
# and the predicted-word column at positions 6/5/4 for quad/tri/bi-grams
# respectively -- confirm against the scripts that build these .RData files.
quadGram <- readRDS(file="./grams/quadGram.RData");
triGram <- readRDS(file="./grams/triGram.RData");
biGram <- readRDS(file="./grams/biGram.RData");
# next word predictor function
# Back-off n-gram predictor: try the last three cleaned words against the
# quadgram table, recursing down to trigrams and then bigrams on a miss.
# The top (up to 3) candidate words are written to the global `Pred` via <<-
# rather than returned; callers read `Pred` after the call.
NextWord <- function(userInput) {
# Lower-case, strip punctuation/numbers, then split into individual words.
inputClean <- removeNumbers(removePunctuation(tolower(userInput)))
inputClean <- strsplit(inputClean, " ")[[1]]
if (length(inputClean)>= 3)
{
# Keep only the last three words; on a quadgram miss, back off to the
# last two words (recursive call below).
inputClean <- tail(inputClean,3)
if (identical(as.character(head(quadGram[quadGram$first == inputClean[1] & quadGram$second == inputClean[2] & quadGram$third == inputClean[3], 6],1)), character(0)))
{NextWord(paste(inputClean[2],inputClean[3],sep=" "))}
else {Pred <<- as.character(head(quadGram[quadGram$first == inputClean[1] & quadGram$second == inputClean[2] & quadGram$third == inputClean[3], 6],3))
#Predx <<- as.character(tail(head(quadGram[quadGram$first == inputClean[1] & quadGram$second == inputClean[2] & quadGram$third == inputClean[3], 6],4),3))
}
}
else if (length(inputClean) == 2)
{
# Trigram lookup; back off to the last word on a miss.
if (identical(as.character(head(triGram[triGram$first == inputClean[1] & triGram$second == inputClean[2], 5],1)), character(0)))
{ NextWord( inputClean[2]) }
else {Pred <<- as.character(head(triGram[triGram$first == inputClean[1] & triGram$second == inputClean[2], 5],3))
#Predx <<- as.character(tail(head(triGram[triGram$first == inputClean[1] & triGram$second == inputClean[2], 5],4),3))
}
}
else if (length(inputClean) <= 1)
{
# Bigram lookup; if even that misses, echo the user's input unchanged.
if (identical(as.character(head(biGram[biGram$first == inputClean[1], 4],1)) , character(0)))
{Pred <<-userInput
#Predx <<- c("please", "will", "can")
}
else {Pred <<- as.character(head(biGram[biGram$first == inputClean[1],4],3))
#Predx <<- as.character(tail(head(biGram[biGram$first == inputClean[1],4],4),3))
}
}
} | /prompty_2_0/util/NextWordx.R | no_license | shirshendu-nandy/prompty-word-predictor | R | false | false | 2,465 | r | suppressPackageStartupMessages(library(tm))
# Duplicate copy of the NextWord predictor above (dataset artifact); the
# initial library(tm) line is fused into the metadata line above. See the
# first copy for the commented version.
suppressPackageStartupMessages(library(stringr))
suppressPackageStartupMessages(library(shiny))
suppressPackageStartupMessages(library(shinythemes))
suppressPackageStartupMessages(library(markdown))
quadGram <- readRDS(file="./grams/quadGram.RData");
triGram <- readRDS(file="./grams/triGram.RData");
biGram <- readRDS(file="./grams/biGram.RData");
# next word predictor function
NextWord <- function(userInput) {
inputClean <- removeNumbers(removePunctuation(tolower(userInput)))
inputClean <- strsplit(inputClean, " ")[[1]]
if (length(inputClean)>= 3)
{
inputClean <- tail(inputClean,3)
if (identical(as.character(head(quadGram[quadGram$first == inputClean[1] & quadGram$second == inputClean[2] & quadGram$third == inputClean[3], 6],1)), character(0)))
{NextWord(paste(inputClean[2],inputClean[3],sep=" "))}
else {Pred <<- as.character(head(quadGram[quadGram$first == inputClean[1] & quadGram$second == inputClean[2] & quadGram$third == inputClean[3], 6],3))
#Predx <<- as.character(tail(head(quadGram[quadGram$first == inputClean[1] & quadGram$second == inputClean[2] & quadGram$third == inputClean[3], 6],4),3))
}
}
else if (length(inputClean) == 2)
{
if (identical(as.character(head(triGram[triGram$first == inputClean[1] & triGram$second == inputClean[2], 5],1)), character(0)))
{ NextWord( inputClean[2]) }
else {Pred <<- as.character(head(triGram[triGram$first == inputClean[1] & triGram$second == inputClean[2], 5],3))
#Predx <<- as.character(tail(head(triGram[triGram$first == inputClean[1] & triGram$second == inputClean[2], 5],4),3))
}
}
else if (length(inputClean) <= 1)
{
if (identical(as.character(head(biGram[biGram$first == inputClean[1], 4],1)) , character(0)))
{Pred <<-userInput
#Predx <<- c("please", "will", "can")
}
else {Pred <<- as.character(head(biGram[biGram$first == inputClean[1],4],3))
#Predx <<- as.character(tail(head(biGram[biGram$first == inputClean[1],4],4),3))
}
}
}
######
## Assemble the Extended Data supplementary tables: export the DE and GO
## result tables, build a SFARI-overlap membership matrix, write the
## Harmonizome sheets, and zip everything placed under supp_tables/.
## All paths are relative to the project root; run the script from there.
library(readxl)
library(readr)
library(xlsx)
dir.create("supp_tables/")
## main tables: input CSVs (odd indices = DE tables, even = GO tables)
files = c(Bulk_DE = "bulk_analysis_genotype/tables/all_genes_voom_sva_trkB_CST.csv",
Bulk_GO = "bulk_analysis_genotype/tables/bulk_genotype_GOsets_hypergeo_Gene-p005.csv",
IPvsInput_DE = "ip_vs_input_analysis/tables/all_genes_voom_CST_IPvsInput_lmer.csv",
IPvsInput_GO = "ip_vs_input_analysis/tables/GO_voomBonf_IPvsInput_signed.csv",
IP_Geno_DE = "ip_analysis_genotype/tables/mutantVsWT_statistics_all.csv.gz",
IP_Geno_GO = "ip_analysis_genotype/tables/mutVsWt_GO_FDR05.csv")
## output basenames, positionally matched to `files`
fileOut = c("Extended_Data_Figure1-1", "Extended_Data_Figure1-2",
"Extended_Data_Figure2-1", "Extended_Data_Figure2-3",
"Extended_Data_Figure3-1", "Extended_Data_Figure3-2")
## de lists: subset the DE tables to the published columns
deColnames = c("Symbol", "logFC", "t", "P.Value",
"adj.P.Val", "B", "gene_type","EntrezID",
"AveExpr","Length", "ensemblID")
for(i in c(1,3,5)) {
x = read.csv(files[i], row.names=1,as.is=TRUE)
x = x[,deColnames]
write.csv(x, paste0("supp_tables/", fileOut[i], ".csv"),row.names=FALSE)
}
## GO lists are copied through unchanged
for(i in c(2,4,6)) {
x = read.csv(files[i], as.is=TRUE)
write.csv(x, paste0("supp_tables/", fileOut[i], ".csv"),row.names=FALSE)
}
#####################
## sfari stuff: each .rda provides humanSFARI and mouseSFARI data frames
sfariRdas = c(Bulk = "bulk_analysis_genotype/tables/SFARI_annotated_results.rda",
IPvsInput = "ip_vs_input_analysis/tables/SFARI_annotated_results.rda",
IP_Geno = "ip_analysis_genotype/tables/SFARI_annotated_results.rda")
## load in
humanSfariList = lapply(sfariRdas, function(x) {
load(x)
humanSFARI
})
mouseSfariList = lapply(sfariRdas, function(x) {
load(x)
mouseSFARI
})
## filter by project: per-analysis significance flags
## (bulk: FDR < 0.1; IP vs input: Bonferroni < 0.05 and logFC > 0;
## IP genotype: FDR < 0.05)
humanSfariList$Bulk$Sig = humanSfariList$Bulk$adj.P.Val < 0.1
mouseSfariList$Bulk$Sig = mouseSfariList$Bulk$adj.P.Val < 0.1
humanSfariList$IPvsInput$Sig = humanSfariList$IPvsInput$Bonf < 0.05 &
humanSfariList$IPvsInput$logFC > 0
mouseSfariList$IPvsInput$Sig = mouseSfariList$IPvsInput$Bonf < 0.05 &
mouseSfariList$IPvsInput$logFC > 0
humanSfariList$IP_Geno$Sig = humanSfariList$IP_Geno$adj.P.Val < 0.05
mouseSfariList$IP_Geno$Sig = mouseSfariList$IP_Geno$adj.P.Val < 0.05
## merge human and mouse lists, tagging names with the organism
names(humanSfariList) = paste0(names(humanSfariList), "_human")
names(mouseSfariList) = paste0(names(mouseSfariList), "_mouse")
sfariList = c(humanSfariList, mouseSfariList)
sfariList = sfariList[c(4,1,5,2,6,3)] ## reorder into mouse/human pairs per analysis
## get unique genes significant in any analysis
sigList = lapply(sfariList, function(x) x[x$Sig,])
sapply(sigList,nrow)
uGenes = unique(unlist(sapply(sigList, function(x) x$gencodeID)))
length(uGenes)
## write out the gene-by-analysis membership matrix, symbols in column 1
sfariMat = sapply(sigList, function(x) uGenes %in% x$gencodeID)
rownames(sfariMat) = uGenes
sfariDat = as.data.frame(sfariMat)
geneSym = do.call("rbind", lapply(sigList, function(x) x[,c("gencodeID", "Symbol")]))
sfariDat$Symbol = geneSym$Symbol[match(rownames(sfariDat), geneSym$gencodeID)]
sfariDat = sfariDat[,c(7,1:6)]
write.csv(sfariDat, "supp_tables/Extended_Data_Figure3-3.csv")
#######################
#### Harmonizome data #
#######################
harmFiles = c(Bulk = "bulk_analysis_genotype/tables/Harmonizome_CTD-Dx_CST_bulk_effects.csv",
IPvsInput = "ip_vs_input_analysis/tables/Harmonizome_CTD-Dx_CST_effects.csv",
IP_Geno = "ip_analysis_genotype/tables/Harmonizome_CST_IP-Genotype_effects.csv")
harmList = lapply(harmFiles, read.csv,as.is=TRUE, row.names=1)
## one sheet per analysis (append=TRUE adds sheets to the same workbook)
for(i in seq(along=harmList)) {
write.xlsx(harmList[[i]], file = "supp_tables/Extended_Data_Figure3-4.xlsx",
sheetName = names(harmList)[i],append=TRUE)
}
###############
## zip everything
zip("maynardKardian_CST_Extended-Data.zip",
files = list.files("supp_tables", full.names=TRUE)) | /make_supp_table_excel.R | no_license | LieberInstitute/cst_trap_seq | R | false | false | 3,620 | r | ######
## Duplicate copy of the supplementary-table script above (dataset artifact);
## its leading "######" and trailing list.files() lines are fused into the
## surrounding metadata lines. See the first copy for the commented version.
library(readxl)
library(readr)
library(xlsx)
dir.create("supp_tables/")
## main tables
files = c(Bulk_DE = "bulk_analysis_genotype/tables/all_genes_voom_sva_trkB_CST.csv",
Bulk_GO = "bulk_analysis_genotype/tables/bulk_genotype_GOsets_hypergeo_Gene-p005.csv",
IPvsInput_DE = "ip_vs_input_analysis/tables/all_genes_voom_CST_IPvsInput_lmer.csv",
IPvsInput_GO = "ip_vs_input_analysis/tables/GO_voomBonf_IPvsInput_signed.csv",
IP_Geno_DE = "ip_analysis_genotype/tables/mutantVsWT_statistics_all.csv.gz",
IP_Geno_GO = "ip_analysis_genotype/tables/mutVsWt_GO_FDR05.csv")
fileOut = c("Extended_Data_Figure1-1", "Extended_Data_Figure1-2",
"Extended_Data_Figure2-1", "Extended_Data_Figure2-3",
"Extended_Data_Figure3-1", "Extended_Data_Figure3-2")
## de lists
deColnames = c("Symbol", "logFC", "t", "P.Value",
"adj.P.Val", "B", "gene_type","EntrezID",
"AveExpr","Length", "ensemblID")
for(i in c(1,3,5)) {
x = read.csv(files[i], row.names=1,as.is=TRUE)
x = x[,deColnames]
write.csv(x, paste0("supp_tables/", fileOut[i], ".csv"),row.names=FALSE)
}
## GO lists
for(i in c(2,4,6)) {
x = read.csv(files[i], as.is=TRUE)
write.csv(x, paste0("supp_tables/", fileOut[i], ".csv"),row.names=FALSE)
}
#####################
## sfari stuff
sfariRdas = c(Bulk = "bulk_analysis_genotype/tables/SFARI_annotated_results.rda",
IPvsInput = "ip_vs_input_analysis/tables/SFARI_annotated_results.rda",
IP_Geno = "ip_analysis_genotype/tables/SFARI_annotated_results.rda")
## load in
humanSfariList = lapply(sfariRdas, function(x) {
load(x)
humanSFARI
})
mouseSfariList = lapply(sfariRdas, function(x) {
load(x)
mouseSFARI
})
## filter by project
humanSfariList$Bulk$Sig = humanSfariList$Bulk$adj.P.Val < 0.1
mouseSfariList$Bulk$Sig = mouseSfariList$Bulk$adj.P.Val < 0.1
humanSfariList$IPvsInput$Sig = humanSfariList$IPvsInput$Bonf < 0.05 &
humanSfariList$IPvsInput$logFC > 0
mouseSfariList$IPvsInput$Sig = mouseSfariList$IPvsInput$Bonf < 0.05 &
mouseSfariList$IPvsInput$logFC > 0
humanSfariList$IP_Geno$Sig = humanSfariList$IP_Geno$adj.P.Val < 0.05
mouseSfariList$IP_Geno$Sig = mouseSfariList$IP_Geno$adj.P.Val < 0.05
## merge
names(humanSfariList) = paste0(names(humanSfariList), "_human")
names(mouseSfariList) = paste0(names(mouseSfariList), "_mouse")
sfariList = c(humanSfariList, mouseSfariList)
sfariList = sfariList[c(4,1,5,2,6,3)] ## reorder
## get unique genes
sigList = lapply(sfariList, function(x) x[x$Sig,])
sapply(sigList,nrow)
uGenes = unique(unlist(sapply(sigList, function(x) x$gencodeID)))
length(uGenes)
## write out
sfariMat = sapply(sigList, function(x) uGenes %in% x$gencodeID)
rownames(sfariMat) = uGenes
sfariDat = as.data.frame(sfariMat)
geneSym = do.call("rbind", lapply(sigList, function(x) x[,c("gencodeID", "Symbol")]))
sfariDat$Symbol = geneSym$Symbol[match(rownames(sfariDat), geneSym$gencodeID)]
sfariDat = sfariDat[,c(7,1:6)]
write.csv(sfariDat, "supp_tables/Extended_Data_Figure3-3.csv")
#######################
#### Harmonizome data #
#######################
harmFiles = c(Bulk = "bulk_analysis_genotype/tables/Harmonizome_CTD-Dx_CST_bulk_effects.csv",
IPvsInput = "ip_vs_input_analysis/tables/Harmonizome_CTD-Dx_CST_effects.csv",
IP_Geno = "ip_analysis_genotype/tables/Harmonizome_CST_IP-Genotype_effects.csv")
harmList = lapply(harmFiles, read.csv,as.is=TRUE, row.names=1)
for(i in seq(along=harmList)) {
write.xlsx(harmList[[i]], file = "supp_tables/Extended_Data_Figure3-4.xlsx",
sheetName = names(harmList)[i],append=TRUE)
}
###############
## zip everything
zip("maynardKardian_CST_Extended-Data.zip",
files = list.files("supp_tables", full.names=TRUE)) |
#' Optimal currency and load
#'
#' Calculates the optimal load, and the currency value realised at that load,
#' for a single cell \code{u} of a scenario. Returns NA values if the cell's
#' patch-level arguments are NA. This is the "workhorse" function of CPForage.
#'
#' @param u Address (single index) of the cell in \code{scenario} to calculate
#' @param scenario Scenario to use (a list holding \code{nests} and \code{world})
#'
#' @return A list with \code{optimL} (optimal load), \code{optimCurr}
#'   (currency value at that load) and \code{S} (competition term) for cell
#'   \code{u}
#'
#' @examples
#'
optimLoadCurr <- function(u,scenario){
  # Goal: optimize load L to produce the greatest summed currency in cell u.
  nests <- scenario$nests # Unpack the scenario
  world <- scenario$world
  # Argument checking:
  # If nest-level arguments are NA or missing, throw an error.
  if(any(lengths(nests)==0|is.na(nests))){
    stop('Nest-level arguments are NA or length==0')
  }
  # If any required arguments are missing from nests, report which ones.
  argNames <- c("xloc","yloc","n","whatCurr","sol","eps","L_max","v","betaVal","p_i",
                "h","c_f","c_i","H","d","L","curr")
  if(any(!argNames %in% names(nests))){
    stop('Nest-level arguments are missing for: ',
         paste(names(nests)[!argNames %in% names(nests)],collapse=', '))
  }
  # Exactly one cell must be provided per call.
  if(length(u)>1) {
    stop('Too many cells provided. Use lapply to pass cells. e.g. lapply(use,optimLoadCurr,scenario=scenario)')
  }
  # If cell u has no foragers, return NA for L, 0 for curr, and 1 for S.
  if(nests$n[u]<1){
    return(list('optimL'=NA,'optimCurr'=0,'S'=1)) # No competition in unoccupied cells
  }
  # Arguments to feed to optimize(), which optimize() forwards to curr().
  # NOTE(review): `nests$beta` relies on R's partial matching of `$` to reach
  # the `betaVal` element required above -- fragile; confirm whether it
  # should read `nests$betaVal` explicitly.
  arglist=list(L_max_i=nests$L_max,n_i=nests$n[u],
               h_i=nests$h[u],
               p_i=nests$p_i,
               d_i=nests$d[u],
               v_i=nests$v,
               beta_i=nests$beta,
               H_i=nests$H,
               c_i=nests$c_i,
               c_f=nests$c_f,
               whatCurr_i=nests$whatCurr,
               mu=world$mu[u],l=world$l[u],e=world$e[u],NumFls=world$flDens[u],
               f_i=world$f[u],forageType=world$forageType,
               alphaVal=world$alphaVal[u])
  # Nest-level arguments (one for each nest involved)
  nestArgs <- arglist[c("L_max_i","n_i","p_i","f_i","d_i","v_i",
                        "beta_i","H_i","c_i","c_f","whatCurr_i","forageType")]
  # Patch-level arguments (only one for the patch)
  patchArgs <- arglist[c('mu','e','NumFls','l','h_i','alphaVal')]
  # Are any nest-level arguments NA or nonexistent?
  # (Fix: the offending names are selected with the logical vector itself;
  # the original subscripted with the scalar any(...), which reported the
  # wrong argument name in the error message.)
  if(any(lengths(nestArgs)==0|is.na(nestArgs))){
    stop('Nest-level arguments ',
         paste(names(nestArgs)[lengths(nestArgs)==0|is.na(nestArgs)],collapse=', '),
         ' are NA or length==0. Are all dimensions equal?')
  }
  # Are any patch-level arguments nonexistent?
  if(any(lengths(patchArgs)==0)) {
    stop('Patch-level arguments ',
         paste(names(patchArgs)[lengths(patchArgs)==0],collapse=', '),
         ' are missing (length==0). Are all dimensions equal?')
  }
  # If anything in the patch-level argument list is NA or <=0, return NA
  # values -- indicates a worthless patch.
  if(any(is.na(patchArgs)|patchArgs<=0)) {
    return(list('optimL'=NA,'optimCurr'= switch(nests$whatCurr,eff=-1,rat=-Inf),'S'=NA))
  }
  # L value and maximized currency value -- tolerance needs to be < ~1e-7.
  optimL <- do.call(optimize,c(list(f=curr,interval=c(0,nests$L_max),maximum=TRUE,tol=1e-10),arglist))
  # Best currency given the optimum load, and the S-value for the cell.
  # NOTE: this works for both solitary and social foragers, because it
  # calculates (currency | n); n is dealt with elsewhere.
  currencyS <- do.call(curr,c(list(L=optimL$maximum,sumAll=FALSE),arglist)) # Named vector of currency and S-values
  optimCurr <- currencyS[[1]]
  S <- currencyS[[2]]
  # Return all results together in one list
  resultList <- list('optimL'=optimL$maximum,'optimCurr'=optimCurr,'S'=S)
  return(resultList)
}
| /R/optimLoadCurr.R | no_license | samuelVJrobinson/CPForage | R | false | false | 3,833 | r | #' Optimal currency and load
#'
#' Function to calculate optimal currency and load in cell u. Returns NA values
#' if arguments to cells are NA. This is the "workhorse" function of CPForage.
#'
#' @param u Address of cells in Scenario to calculate
#' @param scenario Scenario to use
#'
#' @return List of optimal currency and load values for each cell in \code{u}
#'
#' @examples
#'
optimLoadCurr <- function(u,scenario){
# Duplicate copy of optimLoadCurr() above (dataset artifact); see the first
# copy for the review notes and the corrected error messages.
#Goal: Optimize a vector of L values to produce greatest summed currency in cell u
nests <- scenario$nests #Unpacks scenario
world <- scenario$world
#Argument checking:
#If nest-level arguments are NA or missing, throw an error
if(any(lengths(nests)==0|is.na(nests))){
stop('Nest-level arguments are NA or length==0')
}
#If any arguments are missing from nests
argNames <- c("xloc","yloc","n","whatCurr","sol","eps","L_max","v","betaVal","p_i",
"h","c_f","c_i","H","d","L","curr")
if(any(!argNames %in% names(nests))){
stop('Nest-level arguments are missing for: ',paste(names(nests)[!argNames %in% names(nests)],collapse=', '))
}
#If there are too many cells provided
if(length(u)>1) {
stop('Too many cells provided. Use lapply to pass cells. e.g. lapply(use,optimLoadCurr,scenario=scenario)')
}
#If cell u has no foragers, returns NA values for L, 0 for curr, and 1 for S
if(nests$n[u]<1){
return(list('optimL'=NA,'optimCurr'=0,'S'=1)) #No competition in unoccupied cells
}
#Arguments to feed to optim, which optim feeds to curr
arglist=list(L_max_i=nests$L_max,n_i=nests$n[u],
h_i=nests$h[u],
p_i=nests$p_i,
d_i=nests$d[u],
v_i=nests$v,
beta_i=nests$beta,
H_i=nests$H,
c_i=nests$c_i,
c_f=nests$c_f,
whatCurr_i=nests$whatCurr,
mu=world$mu[u],l=world$l[u],e=world$e[u],NumFls=world$flDens[u],
f_i=world$f[u],forageType=world$forageType,
alphaVal=world$alphaVal[u])
#Nest-level arguments (one for each nest involved)
nestArgs <- arglist[c("L_max_i","n_i","p_i","f_i","d_i","v_i",
"beta_i","H_i","c_i","c_f","whatCurr_i","forageType")]
#Patch-level arguments (only one for the patch)
patchArgs <- arglist[c('mu','e','NumFls','l','h_i','alphaVal')]
#Are any nest-level arguments NA or nonexistant?
if(any(lengths(nestArgs)==0|is.na(nestArgs))){
stop('Nest-level arguments ',paste(names(nestArgs)[any(lengths(nestArgs)==0|is.na(nestArgs))]),
' are NA or length==0. Are all dimensions equal?')
}
#Are any patch-level arguments nonexistent?
if(any(lengths(patchArgs)==0)) {
stop('Patch-level arguments ',paste(names(patchArgs)[any(lengths(patchArgs)==0|is.na(patchArgs))]),
' are missing (length==0). Are all dimensions equal?')
}
#If anything in the patch-level argument list is NA or <=0, returns NA values - indicates worthless patch
if(any(is.na(patchArgs)|patchArgs<=0)) {
return(list('optimL'=NA,'optimCurr'= switch(nests$whatCurr,eff=-1,rat=-Inf),'S'=NA))
}
startL <- 0 #Use zero as the starting value for load
#L value and maximized currency value - tolerance needs to be < ~1e-7
optimL <- do.call(optimize,c(list(f=curr,interval=c(0,nests$L_max),maximum=TRUE,tol=1e-10),arglist))
#Best currency given optimum load, and S-value for the cell
#NOTE: this works for both solitary and social, because it calculates (currency | n); n is dealt with elsewhere
currencyS <- do.call(curr,c(list(L=optimL$maximum,sumAll=FALSE),arglist)) #Named vector of currency and S-values
optimCurr <- currencyS[[1]]
S <- currencyS[[2]]
# Return all results together in one list
resultList <- list('optimL'=optimL$maximum,'optimCurr'=optimCurr,'S'=S)
return(resultList)
}
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{effectTerms}
\alias{effectTerms}
\title{Extract the main effects and interactions from an aov object}
\usage{
effectTerms(aov.obj, strata.to.extract = names(summary(aov.obj)))
}
\arguments{
\item{aov.obj}{An object of class aov representing an ANOVA calculation}
\item{strata.to.extract}{(vector) A vector of the names of the error strata from which to extract effects. As elsewhere, note that these are the names embedded in the aov summary. It may be best to extract them from a use of the \code{\link{errorTerms}} function.}
}
\value{
data.frame
}
\description{
Extract the main effects and interactions from an aov object
}
\examples{
data(EBR.Table.18.25)
aov.EBR.Table.18.25 <- ezANOVA.EBR.Table.18.25$aov
effectTerms(aov.EBR.Table.18.25)
}
\seealso{
Other AggregatingErrorTerms: \code{\link{aggregateErrorTerms}};
\code{\link{errorTermRatios}}; \code{\link{errorTerms}};
\code{\link{useAggregateErrorTerms}}
}
| /man/effectTerms.Rd | no_license | spock74/repsych | R | false | false | 983 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{effectTerms}
\alias{effectTerms}
\title{Extract the main effects and interactions from an aov object}
\usage{
effectTerms(aov.obj, strata.to.extract = names(summary(aov.obj)))
}
\arguments{
\item{aov.obj}{An object of class aov representing an ANOVA calculation}
\item{strata.to.extract}{(vector) A vector of the names of the error strata from which to extract effects. As elsewhere, note that these are the names embedded in the aov summary. It may be best to extract them from a use of the \code{\link{errorTerms}} function.}
}
\value{
data.frame
}
\description{
Extract the main effects and interactions from an aov object
}
\examples{
data(EBR.Table.18.25)
aov.EBR.Table.18.25 <- ezANOVA.EBR.Table.18.25$aov
effectTerms(aov.EBR.Table.18.25)
}
\seealso{
Other AggregatingErrorTerms: \code{\link{aggregateErrorTerms}};
\code{\link{errorTermRatios}}; \code{\link{errorTerms}};
\code{\link{useAggregateErrorTerms}}
}
|
#' Describe iChart
#'
#' This function returns a tabular summary of trial and prescreening counts for each participant and condition.
#'
#' @description \code{describeiChart()} provides a quick check of the number of trials
#' for each participant and condition. It is also useful for checking the effects of any data
#' filtering.
#' @param iChart A data frame created by the \code{readiChart()} function.
#' @export
#' @examples
#' \dontrun{d <- describeiChart(iChart)}
describeiChart <- function(iChart) {
  # Per-cell (participant x condition) counts of trials removed by
  # prescreening, zero-filled so every cell is represented.
  prescreen_counts <- iChart %>%
    dplyr::filter(Prescreen.Notes != "good_trial") %>%
    dplyr::count(Sub.Num, Condition) %>%
    dplyr::rename(n_trials_prescreened = n) %>%
    tidyr::complete(Sub.Num, Condition,
                    fill = list(n_trials_prescreened = 0))
  # Per-cell counts of all trials, prescreened or not.
  trial_counts <- iChart %>%
    dplyr::count(Sub.Num, Condition) %>%
    dplyr::rename(n_trials = n)
  if (nrow(prescreen_counts) > 0) {
    message("There are prescreened out trials in the dataset, returning trial counts with prescreening information for each participant and condition")
    trial_counts %>%
      dplyr::left_join(prescreen_counts, by = c("Sub.Num", "Condition")) %>%
      dplyr::mutate(n_good_trials = n_trials - n_trials_prescreened)
  } else {
    message("There are no trials to prescreen out, returning trial counts for each participant and condition")
    trial_counts
  }
}
| /R/describeiChart.R | no_license | kemacdonald/iChartAnalyzeR | R | false | false | 1,387 | r | #' Describe iChart
#'
#' This function returns a tabular summary of trial and prescreening counts for each participant and condition.
#'
#' @description \code{describeiChart()} provides a quick check of the number of trials
#' for each participant and condition. It is also useful for checking the effects of any data
#' filtering.
#' @param iChart A data frame created by the \code{readiChart()} function.
#' @export
#' @examples
#' \dontrun{d <- describeiChart(iChart)}
describeiChart <- function(iChart) {
  # Per-cell (participant x condition) counts of trials removed by
  # prescreening, zero-filled so every cell is represented.
  prescreen_counts <- iChart %>%
    dplyr::filter(Prescreen.Notes != "good_trial") %>%
    dplyr::count(Sub.Num, Condition) %>%
    dplyr::rename(n_trials_prescreened = n) %>%
    tidyr::complete(Sub.Num, Condition,
                    fill = list(n_trials_prescreened = 0))
  # Per-cell counts of all trials, prescreened or not.
  trial_counts <- iChart %>%
    dplyr::count(Sub.Num, Condition) %>%
    dplyr::rename(n_trials = n)
  if (nrow(prescreen_counts) > 0) {
    message("There are prescreened out trials in the dataset, returning trial counts with prescreening information for each participant and condition")
    trial_counts %>%
      dplyr::left_join(prescreen_counts, by = c("Sub.Num", "Condition")) %>%
      dplyr::mutate(n_good_trials = n_trials - n_trials_prescreened)
  } else {
    message("There are no trials to prescreen out, returning trial counts for each participant and condition")
    trial_counts
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VUS.R
\name{VUS}
\alias{VUS}
\title{Volume under the ROC surface}
\usage{
VUS(y, fx)
}
\arguments{
\item{y}{a vector of realized categories.}
\item{fx}{a vector of predicted values of the ranking function f.}
}
\value{
The implemented algorithm is based on Waegeman, De Baets and Boullart (2008). A list of length two is returned, containing the following components:
\item{val}{volume under the ROC surface}
\item{count}{counts the number of observations falling into each category}
}
\description{
This function computes the volume under the ROC surface (VUS) for a vector of realisations \code{y} (i.e. realised categories) and a vector of predictions \code{fx} (i.e. values of a ranking function f) for the purpose of assessing the discriminatory power in a multi-class classification problem. This is achieved by counting the number of r-tuples that are correctly ranked by the ranking function f. Thereby, r is the number of classes of the response variable \code{y}.
}
\examples{
VUS(rep(1:5,each=3),c(3,3,3,rep(2:5,each=3)))
}
\references{
Waegeman W., De Baets B., Boullart L., 2008. On the scalability of ordered multi-class ROC analysis. Computational Statistics & Data Analysis 52, 3371-3388.
}
| /VUROCS/man/VUS.Rd | no_license | akhikolla/TestedPackages-NoIssues | R | false | true | 1,290 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VUS.R
\name{VUS}
\alias{VUS}
\title{Volume under the ROC surface}
\usage{
VUS(y, fx)
}
\arguments{
\item{y}{a vector of realized categories.}
\item{fx}{a vector of predicted values of the ranking function f.}
}
\value{
The implemented algorithm is based on Waegeman, De Baets and Boullart (2008). A list of length two is returned, containing the following components:
\item{val}{volume under the ROC surface}
\item{count}{counts the number of observations falling into each category}
}
\description{
This function computes the volume under the ROC surface (VUS) for a vector of realisations \code{y} (i.e. realised categories) and a vector of predictions \code{fx} (i.e. values of a ranking function f) for the purpose of assessing the discriminatory power in a multi-class classification problem. This is achieved by counting the number of r-tuples that are correctly ranked by the ranking function f. Thereby, r is the number of classes of the response variable \code{y}.
}
\examples{
VUS(rep(1:5,each=3),c(3,3,3,rep(2:5,each=3)))
}
\references{
Waegeman W., De Baets B., Boullart L., 2008. On the scalability of ordered multi-class ROC analysis. Computational Statistics & Data Analysis 52, 3371-3388.
}
|
# THIS CODE CAN BE DELETED ON 7/1/2021 -- USED TO CHECK RE-FACTOR FOR TRANSITION TO TABLEAU
# QC tables ------------------------------------------------------------------
# No messages = No issues!
rm(list = ls())
# Print a flagged data frame when it is non-empty; stay silent otherwise.
# `name` labels the check in the console output; `qc_df` holds the offending
# rows (zero rows means the check passed).
QC <- function(name, qc_df) {
  if (nrow(qc_df) == 0) {
    return(invisible(NULL))
  }
  cat("CHECK ", name)
  print(qc_df)
}
# Define appKey and years ----------------------------------------------------
#appKey = "hc_ins"; years = 1996:2018;
#appKey = "hc_pmed"; years = 1996:2018;
#appKey = "hc_use"; years = c(1996, 2000, 2002, 2005, 2015:2018);
#appKey = "hc_cond_icd9"; years = 1996:2015;
#appKey = "hc_cond_icd10"; years = 2016:2018;
#appKey = "hc_care_access"; years = 2002:2018; ## !! 2018 is tricky...
#appKey = "hc_care_diab"; years = 2002:2018;
#appKey = "hc_care_qual"; years = 2002:2017;
# year = 2014
# year = 2018
# Lookup table: the data years available for each summary-table app.
app_years <- list(
"hc_use" = c(1996, 2000, 2002, 2005, 2015:2018),
"hc_ins" = 1996:2018,
"hc_pmed" = 1996:2018,
"hc_care_access" = 2002:2018,
"hc_care_diab" = 2002:2018,
"hc_care_qual" = 2002:2017,
"hc_cond_icd9" = 1996:2015,
"hc_cond_icd10" = 2016:2018)
# Loop through years ---------------------------------------------------------
# For every app/year pair, read the refactored CSV ("formatted_tables") and
# the pre-refactor copy ("formatted_tables - Copy") and print the result of
# all_equal() -- TRUE means the refactor reproduced the old output exactly.
# NOTE(review): str_glue (stringr), %>%/as_tibble/mutate (dplyr/tibble) and
# all_equal (dplyr) are used but no library() call appears in this script --
# presumably the tidyverse is attached interactively; confirm before running
# non-interactively.
for(appKey in names(app_years)) { print(appKey)
years = app_years[[appKey]]
for(year in years) { print(year)
options(dplyr.width = Inf)
new <- read.csv(str_glue("formatted_tables/{appKey}/DY{year}.csv"))
orig <- read.csv(str_glue("formatted_tables - Copy/{appKey}/DY{year}.csv"))
new_tbl <- new %>% as_tibble
orig_tbl <- orig %>% as_tibble
# hc_care_qual relabelled adult_child during the refactor; apply the same
# recode to the old copy so the comparison is apples-to-apples.
if(appKey == "hc_care_qual") {
orig_tbl <- orig_tbl %>%
mutate(
adult_child = replace(adult_child, adult_child == "child", "Children"),
adult_child = replace(adult_child, adult_child == "adult", "Adults"))
}
all_equal(orig_tbl, new_tbl) %>% print
}
}
# OLD CODE FOR PREVIOUS VERSION ----------------------------------------------
# # hc_care -- load subset
# if(appKey %>% startsWith("hc_care")) {
# orig_care <- read.csv(str_glue("formatted_tables - orig/hc_care/DY{year}.csv"))
#
# if(appKey == "hc_care_access") {
# orig <- orig_care %>% filter(grepl("Access", col_group))
# new <- new %>%
# mutate(col_label = gsub( " (2002-2017)","", col_label, fixed = T))
# }
#
# if(appKey == "hc_care_diab") orig <- orig_care %>% filter(grepl("Diabetes", col_group))
# if(appKey == "hc_care_qual") orig <- orig_care %>% filter(grepl("Quality", col_group))
#
# } else {
# orig <- read.csv(str_glue("formatted_tables - orig/{appKey}/DY{year}.csv"))
# }
# Edits
# orig <- orig %>% rename(value = coef)
# # if("row_group" %in% colnames(orig_tbl)) {
# orig_tbl <- orig_tbl %>%
# mutate(row_group = replace(row_group, row_group == "", "(none)"))
# }
# if("col_group" %in% colnames(orig_tbl)) {
# orig_tbl <- orig_tbl %>%
# mutate(col_group = replace(col_group, col_group == "", "(none)"))
# }
#
# if(appKey == "hc_use") {
# new_tbl <- new_tbl %>% filter(!(col_var == row_var & row_var != "ind"))
# }
#
# if(appKey == "hc_care_qual") {
# orig_tbl <- orig_tbl %>%
# separate(col_var, into = c("adult_child", "col_var")) %>%
# separate(col_group, into = c("col_group", "drop"), sep = ":") %>%
# select(-drop)
# }
#
# if(appKey %>% startsWith("hc_care")) {
# orig_tbl <- orig_tbl %>%
# mutate(caption = gsub(" by", ", by", caption))
# }
# # Compare caption only
# cap_vars <- c("adult_child", "stat_var", "row_var", "col_var", "caption")
# captions <- full_join(
# orig_tbl %>% select(one_of(cap_vars)) %>% rename(orig_caption = caption) %>% distinct,
# new_tbl %>% select(one_of(cap_vars)) %>% rename(new_caption = caption) %>% distinct)
#
# QC("caption_diff",captions %>%
# filter(orig_caption != new_caption))
#
# QC("caption_missing", captions %>%
# filter(is.na(orig_caption) | is.na(new_caption)))
#
#
# # # Edits based on apps
# # if(appKey == "hc_pmed") {
# # orig_tbl <- orig_tbl %>%
# # mutate(col_label = "(none)", rowLevels = ifelse(row_var == "RXDRGNAM", toupper(rowLevels), rowLevels))
# # #new_tbl <- new_tbl %>% filter(value != "--")
# # }
#
#
# # Combine and compare
# combined <- full_join(
# orig_tbl %>% rename(value_orig = value, se_orig = se) %>% mutate(in_orig = TRUE),
# new_tbl %>% rename(value_new = value, se_new = se) %>% mutate(in_new = TRUE))
#
# # Check number of rows
# nc <- nrow(combined)
# norig <- nrow(orig_tbl)
# nnew <- nrow(new_tbl)
#
# if(nc != nnew) {
# print("WARNING: DIFFERENT NUMBER OF ROWS")
# }
#
# if(FALSE) {
# # should have 0 rows
# QC("QC1", combined %>%
# filter(is.na(in_orig) | is.na(in_new)) %>%
# count(col_var, row_var, in_orig, in_new))
#
# QC("QC2", combined %>%
# filter(is.na(in_orig) | is.na(in_new)) %>%
# count(stat_var, stat_label, in_orig, in_new))
#
#
# }
#
#
# QC("QC1.5", combined %>%
# filter(is.na(in_orig) | is.na(in_new), value_new != "--")
# )
#
# #
# # combined2 <- combined %>%
# # mutate(
# # value_new = suppressWarnings(as.numeric(value_new)),
# # se_new = suppressWarnings(as.numeric(se_new)),
# #
# # value_new = ifelse(Percent, value_new*100, value_new),
# # se_new = ifelse(Percent, se_new*100, se_new),
# #
# # asterisk_orig = grepl("*", value_orig, fixed = T),
# #
# # orig_num = value_orig %>%
# # gsub(",", "", .) %>%
# # gsub("*","", ., fixed = T) %>%
# # as.numeric,
# #
# # se_num = se_orig %>%
# # gsub(",", "", .) %>%
# # gsub("*", "", ., fixed = T) %>%
# # as.numeric(),
# #
# # n_digits = nchar(word(orig_num, 2, sep = "\\.")),
# # se_digits = nchar(word(se_num, 2, sep = "\\."))) %>%
# #
# # replace_na(list(n_digits = 0, se_digits = 0))
# #
# # combined3 <- combined2 %>%
# # select(-ends_with("label"), -ends_with("group"), -caption) %>%
# # mutate(
# # value_rnd = round(value_new, n_digits),
# # se_rnd = round(se_new, se_digits)
# # )
#
#
# # These are bad:
# QC("QC3", combined3 %>% filter(value_rnd != orig_num))
# QC("QC4", combined3 %>% filter(se_rnd != se_num))
#
# QC("QC5", combined3 %>% filter(is.na(in_new)))
# QC("QC6", combined3 %>% filter(is.na(in_orig) & !is.na(value_new)))
#
#
# # These are also bad:
# QC("QC7", combined3 %>% filter(is.na(value_rnd) & !is.na(orig_num), sample_size >= 60))
# QC("QC8", combined3 %>% filter(!is.na(value_rnd) & is.na(orig_num), sample_size >= 60))
#
# QC("QC9", combined3 %>% filter(is.na(se_rnd) & !is.na(se_num), sample_size >= 60))
# QC("QC10", combined3 %>% filter(!is.na(se_rnd) & is.na(se_num), sample_size >= 60))
#
# QC("QC11", combined3 %>% filter(asterisk == "*" & !asterisk_orig, sample_size >= 60))
# QC("QC12", combined3 %>% filter(asterisk == "" & asterisk_orig, sample_size >= 60))
#
# QC("QC13", combined3 %>% filter(is.na(value_rnd) & !is.na(se_rnd), sample_size >= 60))
# QC("QC14", combined3 %>% filter(!is.na(value_rnd) & is.na(se_rnd), sample_size >= 60))
#
# } # end for year in years
#
#
# # # These are OK:
# # combined3 %>% filter(is.na(value_rnd) & is.na(orig_num))
# # combined3 %>% filter(asterisk == "*")
#
# # View(combined3)
| /qc/UPDATE_qc_formatted.R | permissive | gaybro8777/MEPS-summary-tables | R | false | false | 7,383 | r |
# THIS CODE CAN BE DELETED ON 7/1/2021 -- USED TO CHECK RE-FACTOR FOR TRANSITION TO TABLEAU
# QC tables ------------------------------------------------------------------
# No messages = No issues!
rm(list = ls())
# Print a flagged data frame when it is non-empty; stay silent otherwise.
# `name` labels the check in the console output; `qc_df` holds the offending
# rows (zero rows means the check passed).
QC <- function(name, qc_df) {
  if (nrow(qc_df) == 0) {
    return(invisible(NULL))
  }
  cat("CHECK ", name)
  print(qc_df)
}
# Define appKey and years ----------------------------------------------------
#appKey = "hc_ins"; years = 1996:2018;
#appKey = "hc_pmed"; years = 1996:2018;
#appKey = "hc_use"; years = c(1996, 2000, 2002, 2005, 2015:2018);
#appKey = "hc_cond_icd9"; years = 1996:2015;
#appKey = "hc_cond_icd10"; years = 2016:2018;
#appKey = "hc_care_access"; years = 2002:2018; ## !! 2018 is tricky...
#appKey = "hc_care_diab"; years = 2002:2018;
#appKey = "hc_care_qual"; years = 2002:2017;
# year = 2014
# year = 2018
# Lookup table: the data years available for each summary-table app.
app_years <- list(
"hc_use" = c(1996, 2000, 2002, 2005, 2015:2018),
"hc_ins" = 1996:2018,
"hc_pmed" = 1996:2018,
"hc_care_access" = 2002:2018,
"hc_care_diab" = 2002:2018,
"hc_care_qual" = 2002:2017,
"hc_cond_icd9" = 1996:2015,
"hc_cond_icd10" = 2016:2018)
# Loop through years ---------------------------------------------------------
# For every app/year pair, read the refactored CSV ("formatted_tables") and
# the pre-refactor copy ("formatted_tables - Copy") and print the result of
# all_equal() -- TRUE means the refactor reproduced the old output exactly.
# NOTE(review): str_glue (stringr), %>%/as_tibble/mutate (dplyr/tibble) and
# all_equal (dplyr) are used but no library() call appears in this script --
# presumably the tidyverse is attached interactively; confirm before running
# non-interactively.
for(appKey in names(app_years)) { print(appKey)
years = app_years[[appKey]]
for(year in years) { print(year)
options(dplyr.width = Inf)
new <- read.csv(str_glue("formatted_tables/{appKey}/DY{year}.csv"))
orig <- read.csv(str_glue("formatted_tables - Copy/{appKey}/DY{year}.csv"))
new_tbl <- new %>% as_tibble
orig_tbl <- orig %>% as_tibble
# hc_care_qual relabelled adult_child during the refactor; apply the same
# recode to the old copy so the comparison is apples-to-apples.
if(appKey == "hc_care_qual") {
orig_tbl <- orig_tbl %>%
mutate(
adult_child = replace(adult_child, adult_child == "child", "Children"),
adult_child = replace(adult_child, adult_child == "adult", "Adults"))
}
all_equal(orig_tbl, new_tbl) %>% print
}
}
# OLD CODE FOR PREVIOUS VERSION ----------------------------------------------
# # hc_care -- load subset
# if(appKey %>% startsWith("hc_care")) {
# orig_care <- read.csv(str_glue("formatted_tables - orig/hc_care/DY{year}.csv"))
#
# if(appKey == "hc_care_access") {
# orig <- orig_care %>% filter(grepl("Access", col_group))
# new <- new %>%
# mutate(col_label = gsub( " (2002-2017)","", col_label, fixed = T))
# }
#
# if(appKey == "hc_care_diab") orig <- orig_care %>% filter(grepl("Diabetes", col_group))
# if(appKey == "hc_care_qual") orig <- orig_care %>% filter(grepl("Quality", col_group))
#
# } else {
# orig <- read.csv(str_glue("formatted_tables - orig/{appKey}/DY{year}.csv"))
# }
# Edits
# orig <- orig %>% rename(value = coef)
# # if("row_group" %in% colnames(orig_tbl)) {
# orig_tbl <- orig_tbl %>%
# mutate(row_group = replace(row_group, row_group == "", "(none)"))
# }
# if("col_group" %in% colnames(orig_tbl)) {
# orig_tbl <- orig_tbl %>%
# mutate(col_group = replace(col_group, col_group == "", "(none)"))
# }
#
# if(appKey == "hc_use") {
# new_tbl <- new_tbl %>% filter(!(col_var == row_var & row_var != "ind"))
# }
#
# if(appKey == "hc_care_qual") {
# orig_tbl <- orig_tbl %>%
# separate(col_var, into = c("adult_child", "col_var")) %>%
# separate(col_group, into = c("col_group", "drop"), sep = ":") %>%
# select(-drop)
# }
#
# if(appKey %>% startsWith("hc_care")) {
# orig_tbl <- orig_tbl %>%
# mutate(caption = gsub(" by", ", by", caption))
# }
# # Compare caption only
# cap_vars <- c("adult_child", "stat_var", "row_var", "col_var", "caption")
# captions <- full_join(
# orig_tbl %>% select(one_of(cap_vars)) %>% rename(orig_caption = caption) %>% distinct,
# new_tbl %>% select(one_of(cap_vars)) %>% rename(new_caption = caption) %>% distinct)
#
# QC("caption_diff",captions %>%
# filter(orig_caption != new_caption))
#
# QC("caption_missing", captions %>%
# filter(is.na(orig_caption) | is.na(new_caption)))
#
#
# # # Edits based on apps
# # if(appKey == "hc_pmed") {
# # orig_tbl <- orig_tbl %>%
# # mutate(col_label = "(none)", rowLevels = ifelse(row_var == "RXDRGNAM", toupper(rowLevels), rowLevels))
# # #new_tbl <- new_tbl %>% filter(value != "--")
# # }
#
#
# # Combine and compare
# combined <- full_join(
# orig_tbl %>% rename(value_orig = value, se_orig = se) %>% mutate(in_orig = TRUE),
# new_tbl %>% rename(value_new = value, se_new = se) %>% mutate(in_new = TRUE))
#
# # Check number of rows
# nc <- nrow(combined)
# norig <- nrow(orig_tbl)
# nnew <- nrow(new_tbl)
#
# if(nc != nnew) {
# print("WARNING: DIFFERENT NUMBER OF ROWS")
# }
#
# if(FALSE) {
# # should have 0 rows
# QC("QC1", combined %>%
# filter(is.na(in_orig) | is.na(in_new)) %>%
# count(col_var, row_var, in_orig, in_new))
#
# QC("QC2", combined %>%
# filter(is.na(in_orig) | is.na(in_new)) %>%
# count(stat_var, stat_label, in_orig, in_new))
#
#
# }
#
#
# QC("QC1.5", combined %>%
# filter(is.na(in_orig) | is.na(in_new), value_new != "--")
# )
#
# #
# # combined2 <- combined %>%
# # mutate(
# # value_new = suppressWarnings(as.numeric(value_new)),
# # se_new = suppressWarnings(as.numeric(se_new)),
# #
# # value_new = ifelse(Percent, value_new*100, value_new),
# # se_new = ifelse(Percent, se_new*100, se_new),
# #
# # asterisk_orig = grepl("*", value_orig, fixed = T),
# #
# # orig_num = value_orig %>%
# # gsub(",", "", .) %>%
# # gsub("*","", ., fixed = T) %>%
# # as.numeric,
# #
# # se_num = se_orig %>%
# # gsub(",", "", .) %>%
# # gsub("*", "", ., fixed = T) %>%
# # as.numeric(),
# #
# # n_digits = nchar(word(orig_num, 2, sep = "\\.")),
# # se_digits = nchar(word(se_num, 2, sep = "\\."))) %>%
# #
# # replace_na(list(n_digits = 0, se_digits = 0))
# #
# # combined3 <- combined2 %>%
# # select(-ends_with("label"), -ends_with("group"), -caption) %>%
# # mutate(
# # value_rnd = round(value_new, n_digits),
# # se_rnd = round(se_new, se_digits)
# # )
#
#
# # These are bad:
# QC("QC3", combined3 %>% filter(value_rnd != orig_num))
# QC("QC4", combined3 %>% filter(se_rnd != se_num))
#
# QC("QC5", combined3 %>% filter(is.na(in_new)))
# QC("QC6", combined3 %>% filter(is.na(in_orig) & !is.na(value_new)))
#
#
# # These are also bad:
# QC("QC7", combined3 %>% filter(is.na(value_rnd) & !is.na(orig_num), sample_size >= 60))
# QC("QC8", combined3 %>% filter(!is.na(value_rnd) & is.na(orig_num), sample_size >= 60))
#
# QC("QC9", combined3 %>% filter(is.na(se_rnd) & !is.na(se_num), sample_size >= 60))
# QC("QC10", combined3 %>% filter(!is.na(se_rnd) & is.na(se_num), sample_size >= 60))
#
# QC("QC11", combined3 %>% filter(asterisk == "*" & !asterisk_orig, sample_size >= 60))
# QC("QC12", combined3 %>% filter(asterisk == "" & asterisk_orig, sample_size >= 60))
#
# QC("QC13", combined3 %>% filter(is.na(value_rnd) & !is.na(se_rnd), sample_size >= 60))
# QC("QC14", combined3 %>% filter(!is.na(value_rnd) & is.na(se_rnd), sample_size >= 60))
#
# } # end for year in years
#
#
# # # These are OK:
# # combined3 %>% filter(is.na(value_rnd) & is.na(orig_num))
# # combined3 %>% filter(asterisk == "*")
#
# # View(combined3)
|
# Create data with the randomNames package :
library("randomNames")

# Build a random social network, then draw it with igraph, colouring
# vertices by their edge-betweenness community.
num_of_links <- 100

# Start from 1000 random people; the target column is filled in below.
relations <- data.frame(source = randomNames(1000, which.names = "both"),
                        target = "")
# Duplicate each person a random number of times (1-10) so that some people
# take part in several links.
relations <- relations[rep(seq_len(nrow(relations)),
                           sample(1:10, nrow(relations), replace = TRUE)), ]
# Keep a random subset of links.
relations <- relations[sample(nrow(relations), num_of_links), ]
# Draw targets from the pool of sources, then drop self-links.
relations$target <- sample(relations$source, nrow(relations), replace = TRUE)
relations <- relations[relations[, 1] != relations[, 2], ]

# The table looks like this:
head(relations)

## Plot the graph using the igraph package
library("igraph")
vertices <- data.frame("name" = unique(unlist(relations)))  # node names
g <- graph.data.frame(relations, directed = FALSE, vertices = vertices)  # raw graph
# Betweenness-community membership of each node, used for grouping/colour.
vertices$group <- edge.betweenness.community(g)$membership

png("#86_network-igraph.png", width = 480, height = 480)
plot(g,
     layout = layout.auto,
     vertex.color = vertices$group,  # colour vertices by community
     vertex.label = NA,              # no vertex label (name)
     vertex.size = 5,
     edge.arrow.size = 0.8)
dev.off()
| /OLD_GALLERY_RSCRIPT/#86_network-igraph.R | permissive | holtzy/R-graph-gallery | R | false | false | 1,178 | r | # Create data with the randomNames package :
library("randomNames")

# Build a random social network, then draw it with igraph, colouring
# vertices by their edge-betweenness community.
num_of_links <- 100

# Start from 1000 random people; the target column is filled in below.
relations <- data.frame(source = randomNames(1000, which.names = "both"),
                        target = "")
# Duplicate each person a random number of times (1-10) so that some people
# take part in several links.
relations <- relations[rep(seq_len(nrow(relations)),
                           sample(1:10, nrow(relations), replace = TRUE)), ]
# Keep a random subset of links.
relations <- relations[sample(nrow(relations), num_of_links), ]
# Draw targets from the pool of sources, then drop self-links.
relations$target <- sample(relations$source, nrow(relations), replace = TRUE)
relations <- relations[relations[, 1] != relations[, 2], ]

# The table looks like this:
head(relations)

## Plot the graph using the igraph package
library("igraph")
vertices <- data.frame("name" = unique(unlist(relations)))  # node names
g <- graph.data.frame(relations, directed = FALSE, vertices = vertices)  # raw graph
# Betweenness-community membership of each node, used for grouping/colour.
vertices$group <- edge.betweenness.community(g)$membership

png("#86_network-igraph.png", width = 480, height = 480)
plot(g,
     layout = layout.auto,
     vertex.color = vertices$group,  # colour vertices by community
     vertex.label = NA,              # no vertex label (name)
     vertex.size = 5,
     edge.arrow.size = 0.8)
dev.off()
|
## ANALISIS OF THE SPIN cohort DATA #################
#Load gmodels package
#install.packages(c("gmodels", "rpart", "randomForest", "knitr", "psych", "pgirmess", "Hmisc", "car", "fBasics", "sm", "foreign", "xlsx"))
#install.packages(c("car", "xlsx", "ggplot2", "gridExtra"), repos='http://cran.us.r-project.org')
library(grid)
#library(gmodels)
#library(knitr)
#library(psych)
#library(survival)
#library(Hmisc)
library(car)
#library(stats)
#library(fBasics)
#library(sm)
library(xlsx)
#library(pgirmess)
library(ggplot2)
library(gridExtra)
#library(pROC)
#library(randomForest)
#library(rpart)
#library(rpart.plot)
#library(pwr)
options(scipen=8, warn=-1)
# Draw a plot and stamp a small grey sequence number (or caller-supplied
# text) in its top-right corner, using a global counter.
#
# Args:
#   gg_plot:      plot object handed to plot().
#   top_right:    label text. Defaults to the global `counter`; R evaluates
#                 default arguments lazily, so the default is read only when
#                 textGrob() runs below -- i.e. AFTER the reset/increment --
#                 and therefore shows the post-increment value.
#   resetcounter: if TRUE, restart the global counter at 0 before
#                 incrementing (the stamped number is then 1).
#
# Side effects: draws on the active graphics device and mutates the global
# `counter` via <<-.
printWithNumber = function(gg_plot, top_right=counter, resetcounter=FALSE)
{
plot(gg_plot)
if (resetcounter==TRUE){
counter <<- 0
}
counter <<- counter+1  # advance the global plot counter
label = textGrob(top_right,
x = 0.98, # right side
y = 0.98, # top
just="right",
hjust = NULL,
vjust = 1,
gp=gpar(fontsize=10, col="gray"))
grid.draw(label)
}
#### RETRIEVE DATA and ARRANGE VARIABLES######
#DATA in Santpau
setwd("//dspau.santpau.es/U/CarpUMNeurologia/BaseUdM/Export-analisis/Resultados")
data <- data.frame(read.xlsx ("//dspau.santpau.es/U/CarpUMNeurologia/BaseUdM/Export-analisis/Tablas exportadas/CohBasalGlobal.xlsx", 1))
data1a <- data.frame(read.xlsx ("//dspau.santpau.es/U/CarpUMNeurologia/BaseUdM/Export-analisis/Tablas exportadas/CCoh1aGlobalExportable.xlsx", 1))
data2a <- data.frame(read.xlsx ("//dspau.santpau.es/U/CarpUMNeurologia/BaseUdM/Export-analisis/Tablas exportadas/CCoh2aGlobalExportable.xlsx", 1))
data3a <- data.frame(read.xlsx ("//dspau.santpau.es/U/CarpUMNeurologia/BaseUdM/Export-analisis/Tablas exportadas/CCoh3aGlobalExportable.xlsx", 1))
data4a <- data.frame(read.xlsx ("//dspau.santpau.es/U/CarpUMNeurologia/BaseUdM/Export-analisis/Tablas exportadas/CCoh4aGlobalExportable.xlsx", 1))
#DATA in MACBOOK
#data <- data.frame(read.xlsx ("/Users/Daniel/Google Drive/WORK/ExportacionBaseUdM/CohBasalGlobal.xlsx", 1))
#setwd("/Users/Daniel/Google Drive/WORK/Projectes/02-PROYECTOS ACTIVOS/SPIN cohort/Analisis R")
data$STDIAGCODE <- factor(data$STDIAGCODE)
data1a$STDIAGCODE <- factor(data1a$STDIAGCODE)
data2a$STDIAGCODE <- factor(data2a$STDIAGCODE)
data3a$STDIAGCODE <- factor(data3a$STDIAGCODE)
data4a$STDIAGCODE <- factor(data4a$STDIAGCODE)
data1a$STDIAGCODE1a <- factor(data1a$STDIAGCODE1a)
data2a$STDIAGCODE2a <- factor(data2a$STDIAGCODE2a)
data3a$STDIAGCODE3a <- factor(data3a$STDIAGCODE3a)
data4a$STDIAGCODE4a <- factor(data4a$STDIAGCODE4a)
levels(data$STDIAGCODE) <- c("Control", "SCI", "MCI", "AD", "LBD", "FTD", "Down", "ALS")
levels(data1a$STDIAGCODE) <- c("Control", "SCI", "MCI", "AD", "LBD", "FTD", "Down", "ALS")
levels(data1a$STDIAGCODE1a) <- c("Control", "SCI", "MCI", "AD", "LBD", "FTD", "Down", "ALS")
levels(data2a$STDIAGCODE) <- c("Control", "SCI", "MCI", "AD", "LBD", "FTD", "Down", "ALS")
levels(data2a$STDIAGCODE2a) <- c("Control", "SCI", "MCI", "AD", "LBD", "FTD", "Down", "ALS")
levels(data3a$STDIAGCODE) <- c("Control", "SCI", "MCI", "AD", "LBD", "FTD", "Down", "ALS")
levels(data3a$STDIAGCODE3a) <- c("Control", "SCI", "MCI", "AD", "LBD", "FTD", "Down", "ALS")
levels(data4a$STDIAGCODE) <- c("Control", "SCI", "MCI", "AD", "LBD", "FTD", "Down", "ALS")
levels(data4a$STDIAGCODE4a) <- c("Control", "SCI", "MCI", "AD", "LBD", "FTD", "Down", "ALS")
dx <- levels(data$STDIAGCODE)
#Set up Cut-off point for ABETA42, TAU and P-TAU
# Vectorized replacement of the former row-by-row for loop; ifelse() is
# vectorized and NA propagates identically. As in the original, a value
# exactly at the cut-off falls through to NA -- TODO confirm that is intended
# rather than an off-by-one in the comparisons.
data$ABETA42status <- ifelse(data$CSFABETA42 < 550, "ABETA42pos",
                             ifelse(data$CSFABETA42 > 550, "ABETA42neg", NA))
data$TAUstatus <- ifelse(data$CSFTAU > 350, "TAUpos",
                         ifelse(data$CSFTAU < 350, "TAUneg", NA))
data$PTAUstatus <- ifelse(data$CSFPTAU > 61, "PTAUpos",
                          ifelse(data$CSFPTAU < 61, "PTAUneg", NA))
# Derived CSF biomarker ratios; RatioTauAbeta feeds the RatioAD flag below.
data[["RatioTauAbeta"]] <- data[["CSFTAU"]] / data[["CSFABETA42"]]
data[["RatioTauPtau"]] <- data[["CSFTAU"]] / data[["CSFPTAU"]]
data[["RatioTausappbeta"]] <- data[["CSFTAU"]] / data[["CSFSAPPBETA"]]
data[["RatioPtausappbeta"]] <- data[["CSFPTAU"]] / data[["CSFSAPPBETA"]]
data[["RatioYKLsappbeta"]] <- data[["CSFYKL40"]] / data[["CSFSAPPBETA"]]
data[["RatioAbetasappbeta"]] <- data[["CSFABETA42"]] / data[["CSFSAPPBETA"]]
# AD CSF-profile flag from the tau/abeta42 ratio. Vectorized replacement of
# the former per-row loop; identical results, including NA when the ratio is
# missing.
data$RatioAD <- ifelse(data$RatioTauAbeta > 0.52, "RatioAD+", "RatioAD-")
# APOE4 carrier status. Vectorized replacement of the former per-row loop;
# %in% replaces the chained == comparisons. Output is identical: genotypes
# outside the six listed, and missing genotypes, both end up NA (NA %in% set
# is FALSE, so NA falls through both branches to the NA default).
data$APOE4 <- ifelse(data$APOE %in% c("22", "23", "33"), "APOE4-",
                     ifelse(data$APOE %in% c("24", "34", "44"), "APOE4+", NA))
# Column subsets used to stack one-row-per-visit data below. Years 1 and 3
# have no CSF/MRI visit, so their subsets get NA date columns before rbind.
myvars <- c("DBCODE", "STDIAGCODE", "FUPDATEbasal", "NPSDATE", "CSFDATE", "MRIDATE")
myvars1 <- c("DBCODE", "STDIAGCODE", "FUPDATE1a", "NPSDATE")
myvars2 <- c("DBCODE", "STDIAGCODE", "FUPDATE2a", "NPSDATE", "CSFDATE", "MRIDATE")
myvars3 <- c("DBCODE", "STDIAGCODE", "FUPDATE3a", "NPSDATE")
myvars4 <- c("DBCODE", "STDIAGCODE", "FUPDATE4a", "NPSDATE", "CSFDATE", "MRIDATE")
# Build a long (one row per visit) table of visit dates. Each per-visit
# subset is renamed to the common schema up front, so the NA filter below
# matches an exact column name. The original renamed only after rbind() and
# filtered on longdata$FUPDATE while the column was still called
# "FUPDATEbasal" -- that worked only via $ partial matching and would break
# silently if a second FUPDATE* column ever appeared.
longnames <- c("DBCODE", "STDIAGCODE", "FUPDATE", "NPSDATE", "CSFDATE", "MRIDATE")
subdata <- data[myvars]
names(subdata) <- longnames
subdata$VISIT <- "BASELINE"
subdata1a <- data1a[myvars1]
subdata1a$CSFDATE <- NA  # no CSF visit at year 1
subdata1a$MRIDATE <- NA  # no MRI at year 1
names(subdata1a) <- longnames
subdata1a$VISIT <- "YEAR 1"
subdata2a <- data2a[myvars2]
names(subdata2a) <- longnames
subdata2a$VISIT <- "YEAR 2"
subdata3a <- data3a[myvars3]
subdata3a$CSFDATE <- NA  # no CSF visit at year 3
subdata3a$MRIDATE <- NA  # no MRI at year 3
names(subdata3a) <- longnames
subdata3a$VISIT <- "YEAR 3"
subdata4a <- data4a[myvars4]
names(subdata4a) <- longnames
subdata4a$VISIT <- "YEAR 4"
longdata <- rbind(subdata, subdata1a, subdata2a, subdata3a, subdata4a)
rm(subdata)
rm(subdata1a)
rm(subdata2a)
rm(subdata3a)
rm(subdata4a)
# Keep only visits that actually happened (a follow-up date was recorded).
longdata <- longdata[!is.na(longdata$FUPDATE),]
# Positional relabel; a no-op when the source factors already carry these
# labels (set above), kept for safety.
levels(longdata$STDIAGCODE) <- c("Control", "SCI", "MCI", "AD", "LBD", "FTD", "Down", "ALS")
##TABLE SUMMARY####
# NOTE(review): attach() is an anti-pattern (search-path masking, stale
# copies); kept as-is because later bare-name references in this script may
# rely on the attached frame. Prefer data[, cols] if this is ever reworked.
attach(data)
# Quantitative variables: each becomes 4 rows (spacer/N/mean/SD) in DATATABLE.
quantvariables <-data.frame(AGE, EDUC, MMSE, FCSRTTOTALFREE, FCSRTTOTAL, FCSRTDELTOT, CERADLISTRECALL, DIGDIR, DIGREV, TMTA, TMTB, BNT, REYCOPY, PHONFLU, SEMFLU, VOSP, NPI.Q.TOTAL, CSFABETA42, AB1.42, CSFTAU, CSFPTAU, CSFYKL40, CSFSAPPBETA, CSFNFL, CSFPROGRANULIN)
# Categorical variables: tabulated per diagnostic group further below.
propvariables <- data.frame(SEX, APOE4, RatioAD, ABETA42status, TAUstatus, PTAUstatus, FBPVISUAL, FDGVISUAL, STDIAGCODE)
# Per-diagnosis summary matrix: four rows per quantitative variable
# (spacer, N, mean, SD), one column per diagnostic group.
# Fixes: seq_along()/seq_len-style iteration instead of 1:length() (safe on
# empty input), and a plain "" instead of paste("") for the spacer cell.
DATATABLE <- matrix(nrow=length(quantvariables)*4, ncol=length(dx))
colnames(DATATABLE) <- dx
NAMES <- vector()
for (i in seq_along(quantvariables)) {
  VAR <- quantvariables[i]  # one-column data frame; [cond, ] drops to vector
  NAMES <- c(NAMES, paste("Summary-",names(VAR), sep=""), paste(names(VAR),"-N", sep=""), paste(names(VAR),"-MEAN", sep=""), paste(names(VAR),"-SD", sep=""))
  for (k in seq_along(dx)) {
    DATATABLE[i*4-3,k] <- ""  # spacer row between variables
    DATATABLE[i*4-2,k] <- length(VAR[data$STDIAGCODE==dx[k]&!is.na(VAR),])
    DATATABLE[i*4-1,k] <- round(mean(VAR[data$STDIAGCODE==dx[k],],na.rm=TRUE), digits=1)
    DATATABLE[i*4,k] <- round(sd(VAR[data$STDIAGCODE==dx[k],],na.rm=TRUE), digits=1)
  }
}
row.names(DATATABLE) <- NAMES
colnames(DATATABLE) <- c("Control", "SCI", "MCI", "AD", "LBD", "FTD", "Down", "ALS")
# Sheet-name label recording the export date.
date<-paste("Data updated on", format(Sys.Date(), "%Y-%m-%d"))
# NOTE(review): NAMESprop is initialized but never used in this section.
NAMESprop <- vector()
# Count tables for each categorical variable by diagnostic group, stacked
# into one matrix. table(dx) only seeds rbind(); that seed row -- and the
# trailing STDIAGCODE-vs-itself block produced because STDIAGCODE is the
# last entry of propvariables -- are stripped from PROPTABLE afterwards.
# Fix: seq_along(propvariables) instead of 1:length((propvariables)).
proptable <- table(dx)
for (i in seq_along(propvariables)) {
  VAR <- propvariables[, i]
  a <- table(VAR, propvariables$STDIAGCODE)
  proptable <- rbind(proptable, a)
}
PROPTABLE <- proptable[2:(nrow(proptable)-length(dx)),]
DATATABLE <- rbind(DATATABLE, PROPTABLE)
# Export the summary table; the sheet name carries the export date.
write.xlsx2(DATATABLE, "DATATABLE.xlsx", sheetName=date, showNA=FALSE)
# NOTE(review): "en" is not a portable locale string (works on Windows;
# Unix usually needs e.g. "en_US.UTF-8") -- confirm target platform.
Sys.setlocale(locale = "en")
caption <- paste("Summary data from the SPIN cohort, Hospital de la Santa Creu i Sant Pau, Barcelona\nLast update on", format(Sys.time(), "%A, %B %d %Y at %H:%M"))
# Color palette shared by all figures below.
palettenumber<- "BuPu"
# Empty plot used as a page background for grid.text/grid.draw pages.
blank <- ggplot(data, aes()) +
geom_blank() +
scale_x_continuous(expand=c(0,0)) +
scale_y_continuous(expand=c(0,0))+
theme_minimal()
### SUMMARY PDF ######
######################
pdf("SPINsummary.pdf", 8, 6)
#### >>pdf section 1####
# Title page, then the summary table rendered as tableGrobs, 20 rows per page.
# printWithNumber() is defined earlier in the file -- presumably prints the
# plot with a running page number (resetcounter restarts it); confirm there.
grid.text(label="SPIN cohort",x=0.5, y=0.6,just="centre", gp=gpar(fontsize=24, col="#177db7", fontface="bold"))
grid.text(label="Sant Pau Initiative on Neurodegeneration",x=0.5, y=0.5, gp=gpar(fontsize=14, col="black", fontface="bold"))
grid.text(label=caption, x=0.5, y=0.22, vjust=0, hjust=0.5, gp=gpar(fontsize=9, col="gray", fontface="bold"))
#### >>pdf section 1####
printWithNumber(blank, resetcounter = TRUE)
grid.text(label="1. Summary Table",hjust="centre", vjust="bottom", gp=gpar(fontsize=16, col="#177db7", fontface="bold"))
# NOTE(review): the fixed 20-row slices assume DATATABLE has > 100 rows
# (25 quantitative variables x 4 rows = 100, plus the proportion rows).
printWithNumber(blank)
table1 <- (tableGrob(DATATABLE[1:20,], theme=ttheme_minimal(base_size=7)))
grid.draw(table1)
printWithNumber(blank)
table2 <- (tableGrob(DATATABLE[21:40,], theme=ttheme_minimal(base_size=7)))
grid.draw(table2)
printWithNumber(blank)
table3 <- (tableGrob(DATATABLE[41:60,], theme=ttheme_minimal(base_size=7)))
grid.draw(table3)
printWithNumber(blank)
table4 <- (tableGrob(DATATABLE[61:80,], theme=ttheme_minimal(base_size=7)))
grid.draw(table4)
printWithNumber(blank)
table5 <- (tableGrob(DATATABLE[81:100,], theme=ttheme_minimal(base_size=7)))
grid.draw(table5)
printWithNumber(blank)
table6 <- (tableGrob(DATATABLE[101:nrow(DATATABLE),], theme=ttheme_minimal(base_size=7)))
grid.draw(table6)
#### >>pdf section 2###
printWithNumber(blank)
grid.text(label="2. Cohort Summary Counts - BASELINE",hjust="centre", vjust="bottom", gp=gpar(fontsize=16, col="#177db7", fontface="bold"))
#### COHORT STATUS GRAPHICS ###
### Summary graphs#####
# Dodged bar chart of cohort size by diagnosis, split by sex, drawn over a
# faded total-count bar with count labels. Fix: bare column names inside
# aes() instead of data$... (the ggplot2-documented anti-pattern; the layer
# data already supplies the columns, so results are identical).
cohortstatus <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE),
             fill=SEX)) +
  geom_bar(data=data, aes(x=factor(STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
  geom_bar(colour="black", alpha=0.4, position="dodge")+
  labs(x="", y="Count", title="SPIN Cohort - Count", caption=caption)+
  scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
  scale_fill_brewer(palette=palettenumber)+
  theme_classic()+
  theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
  geom_text(data=data, aes(x=factor(STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.8)
printWithNumber(cohortstatus)
# Boxplot of age by diagnostic group with group means overlaid as text.
# Fixes: bare column names in aes() (not data$...), and deprecated
# show_guide -> show.legend in stat_summary (the rest of this file already
# uses show.legend, so ggplot2 >= 2.0 is in use).
plotages <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE), y=AGE, fill=factor(STDIAGCODE))) +
  geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
  labs(x="",y="Age (years)", title="SPIN Cohort - Age", caption=caption)+
  guides(fill=FALSE)+
  theme_classic()+
  theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  scale_y_continuous(limits=c(0, 110), breaks=seq(0, 100, 20))+
  stat_summary(fun.y=mean, geom="text", show.legend = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(plotages)
# Boxplot of years of education by diagnostic group with group means as text.
# Fixes: bare column names in aes() (not data$...), stray unnamed `legend`
# symbol dropped from aes(), deprecated show_guide -> show.legend.
ploteduc <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE), y=EDUC, fill=factor(STDIAGCODE))) +
  geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
  labs(x="", y="Education (years)", title="SPIN Cohort - Education", caption=caption)+
  guides(fill=FALSE)+
  scale_y_continuous(limits=c(0, 25), breaks=seq(0, 20, 2))+
  theme_classic()+
  theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  stat_summary(fun.y=mean, geom="text", show.legend = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(ploteduc)
# Boxplot of MMSE by diagnostic group with group means as text.
# Fixes: bare column names in aes() (not data$...), stray unnamed `legend`
# symbol dropped from aes(), deprecated show_guide -> show.legend.
plotmmse <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE), y=MMSE, fill=factor(STDIAGCODE))) +
  geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
  labs(x="",y="Mini-Mental State Examination", title="SPIN Cohort - MMSE", caption=caption)+
  guides(fill=FALSE)+
  theme_classic()+
  theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  scale_y_continuous(limits=c(0, 32), breaks=seq(0, 30, 2))+
  stat_summary(fun.y=mean, geom="text", show.legend = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(plotmmse)
# Dodged bar chart of counts by diagnosis split by APOE4 carrier status,
# over a faded total bar. Fix: bare column names inside aes() (not data$...).
cohortapoe <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE),
             fill=APOE4)) +
  geom_bar(data=data, aes(x=factor(STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
  geom_bar(colour="black", alpha=0.4, position="dodge")+
  labs(x="", y="Count", title="SPIN Cohort - APOE4", caption=caption)+
  scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
  scale_fill_brewer(palette=palettenumber)+
  theme_classic()+
  theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
  geom_text(data=data, aes(x=factor(STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.8)
printWithNumber(cohortapoe)
# Dodged bar chart of counts by diagnosis split by ABETA42 status, over a
# faded total bar. Fix: bare column names inside aes() (not data$...).
cohortabetastatus <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE),
             fill=ABETA42status)) +
  geom_bar(data=data, aes(x=factor(STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
  geom_bar(colour="black", alpha=0.4, position="dodge")+
  labs(x="", y="Count", title="SPIN Cohort - ABETA42 status (cut-off=550 pg/ml)", fill="ABETA42", caption=caption)+
  scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
  scale_fill_brewer(palette=palettenumber)+
  theme_classic()+
  theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
  geom_text(data=data, aes(x=factor(STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.8)
printWithNumber(cohortabetastatus)
# Dodged bar chart of counts by diagnosis split by T-TAU status, over a
# faded total bar. Fix: bare column names inside aes() (not data$...).
cohorttaustatus <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE),
             fill=TAUstatus)) +
  geom_bar(data=data, aes(x=factor(STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
  geom_bar(colour="black", alpha=0.4, position="dodge")+
  labs(x="", y="Count", title="SPIN Cohort - T-TAU status (cut-off=350 pg/ml)", fill="T-TAU", caption=caption)+
  scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
  scale_fill_brewer(palette=palettenumber)+
  theme_classic()+
  theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
  geom_text(data=data, aes(x=factor(STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.8)
printWithNumber(cohorttaustatus)
# Dodged bar chart of counts by diagnosis split by P-TAU status, over a
# faded total bar. Fix: bare column names inside aes() (not data$...).
cohortptaustatus <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE),
             fill=PTAUstatus)) +
  geom_bar(data=data, aes(x=factor(STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
  geom_bar(colour="black", alpha=0.4, position="dodge")+
  labs(x="", y="Count", title="SPIN Cohort - P-TAU status (cut-off=61 pg/ml)", fill="P-TAU", caption=caption)+
  scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
  scale_fill_brewer(palette=palettenumber)+
  theme_classic()+
  theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
  geom_text(data=data, aes(x=factor(STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.8)
printWithNumber(cohortptaustatus)
# Inclusion timeline: one point per participant at the baseline visit date,
# one row of points per diagnostic group, colored by group.
cohortinclusion <-
  ggplot(data, aes(y=STDIAGCODE,
                   x=FUPDATEbasal,
                   colour=STDIAGCODE)) +
  geom_point(size=3, alpha=0.7)+
  labs(x="Baseline date",y="Diagnostic group", title="SPIN Cohort - Inclusion", caption=caption, colour="")+
  scale_x_date(date_breaks = "4 months", date_labels = "%b-%Y")+
  theme_classic()+
  theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"), axis.text.x = element_text(face="bold", hjust=0, angle=330))
printWithNumber(cohortinclusion)
# Dodged bar chart of counts by diagnosis split by MRI availability, over a
# faded total bar. Fix: bare column names inside aes() (not data$...).
cohortstatusMRI <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE),
             fill=!is.na(MRIDATE))) +
  geom_bar(data=data, aes(x=factor(STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
  geom_bar(colour="black", alpha=0.4, position="dodge")+
  labs(x="", y="Count", title="SPIN Cohort - MRI Count", fill="MRI is available", caption=caption)+
  scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
  scale_fill_brewer(palette=palettenumber)+
  theme_classic()+
  theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
  geom_text(data=data, aes(x=factor(STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.8)
printWithNumber(cohortstatusMRI)
# Dodged bar chart of counts by diagnosis split by CSF availability, over a
# faded total bar. Fix: bare column names inside aes() (not data$...).
cohortstatusCSF <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE),
             fill=!is.na(CSFDATE))) +
  geom_bar(data=data, aes(x=factor(STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
  geom_bar(colour="black", alpha=0.4, position="dodge")+
  labs(x="", y="Count", title="SPIN Cohort - CSF Count", fill="CSF is available", caption=caption)+
  scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
  scale_fill_brewer(palette=palettenumber)+
  theme_classic()+
  theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
  geom_text(data=data, aes(x=factor(STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.85)
printWithNumber(cohortstatusCSF)
# Dodged bar chart of counts by diagnosis split by FBP-PET availability,
# over a faded total bar. Fix: bare column names inside aes() (not data$...).
cohortstatusFBP <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE),
             fill=!is.na(FBPDATE))) +
  geom_bar(data=data, aes(x=factor(STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
  geom_bar(colour="black", alpha=0.4, position="dodge")+
  labs(x="", y="Count", title="SPIN Cohort - 18F-Florbetapir PET Count", fill="FBP is available", caption=caption)+
  scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
  scale_fill_brewer(palette=palettenumber)+
  theme_classic()+
  theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
  geom_text(data=data, aes(x=factor(STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.80)
printWithNumber(cohortstatusFBP)
# Dodged bar chart of counts by diagnosis split by FDG-PET availability,
# over a faded total bar. Fix: bare column names inside aes() (not data$...).
cohortstatusFDG <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE),
             fill=!is.na(FDGDATE))) +
  geom_bar(data=data, aes(x=factor(STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
  geom_bar(colour="black", alpha=0.4, position="dodge")+
  labs(x="", y="Count", title="SPIN Cohort - 18F-Fluorodeoxyglucose PET Count", fill="FDG is available", caption=caption)+
  scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
  scale_fill_brewer(palette=palettenumber)+
  theme_classic()+
  theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
  geom_text(data=data, aes(x=factor(STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.80)
printWithNumber(cohortstatusFDG)
# Dodged bar chart of counts by diagnosis split by the RatioAD CSF flag,
# over a faded total bar. Fix: the overlay/label layers now use bare column
# names inside aes() (not data$...), matching the main aes().
cohortstatusRatiopos <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE),
             fill=factor(RatioAD)))+
  geom_bar(data=data, aes(x=factor(STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
  geom_bar(colour="black", alpha=0.4, position="dodge")+
  labs(x="", y="Count", title="SPIN Cohort - RatioAD in CSF", fill="RatioAD<0.52\n(AD profile)", caption=caption)+
  scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
  scale_fill_brewer(palette=palettenumber)+
  theme_classic()+
  theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
  geom_text(data=data, aes(x=factor(STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.80)
printWithNumber(cohortstatusRatiopos)
# Dodged bar chart of counts by diagnosis split by visual FBP-PET read,
# over a faded total bar. Fix: overlay/label layers use bare column names
# inside aes() (not data$...).
cohortstatusFBPpos <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE),
             fill=factor(FBPVISUAL)))+
  geom_bar(data=data, aes(x=factor(STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
  geom_bar(colour="black", alpha=0.4, position="dodge")+
  labs(x="", y="Count", title="SPIN Cohort - Visual FBP-PET", fill="Positive FBP", caption=caption)+
  scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
  scale_fill_brewer(palette=palettenumber)+
  theme_classic()+
  theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
  geom_text(data=data, aes(x=factor(STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.80)
printWithNumber(cohortstatusFBPpos)
# Dodged bar chart of counts by diagnosis split by visual FDG-PET read,
# over a faded total bar. Fix: overlay/label layers use bare column names
# inside aes() (not data$...).
cohortstatusFDGpos <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE),
             fill=factor(FDGVISUAL)))+
  geom_bar(data=data, aes(x=factor(STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
  geom_bar(colour="black", alpha=0.4, position="dodge")+
  labs(x="", y="Count", title="SPIN Cohort - Visual FDG-PET", fill="Positive FDG", caption=caption)+
  scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
  scale_fill_brewer(palette=palettenumber)+
  theme_classic()+
  theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
  geom_text(data=data, aes(x=factor(STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.80)
printWithNumber(cohortstatusFDGpos)
printWithNumber(blank)
grid.text(label="3. Longitudinal Follow-Up", hjust="centre", vjust="bottom", gp=gpar(fontsize=16, col="#177db7", fontface="bold"))
# Follow-up counts per visit (longdata, one row per completed visit) drawn
# over the faded baseline totals (data). Fix: layers whose data is `data`
# use bare column names inside aes() (not data$...).
cohortstatusfup <-
  ggplot(data=longdata,
         aes(x=factor(STDIAGCODE),
             fill=factor(VISIT)))+
  geom_bar(data=data, aes(x=factor(STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
  geom_bar(colour="black", alpha=0.4, position="dodge")+
  labs(x="", y="Count", title="SPIN Cohort - Follow-Up Count", fill="", caption=caption)+
  scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
  scale_fill_brewer(palette=palettenumber)+
  theme_classic()+
  theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
  geom_text(data=data, aes(x=factor(STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.8)
printWithNumber(cohortstatusfup)
# Follow-up visits with a neuropsychology assessment, per visit year, over
# faded baseline totals. Fix: layers whose data is `data` use bare column
# names inside aes() (not data$...).
cohortstatusfupnps <-
  ggplot(data=longdata[!is.na(longdata$NPSDATE),],
         aes(x=factor(STDIAGCODE),
             fill=factor(VISIT)))+
  geom_bar(data=data, aes(x=factor(STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
  geom_bar(colour="black", alpha=0.4, position="dodge")+
  labs(x="", y="Count", title="SPIN Cohort - Follow-Up NPS Count", fill="", caption=caption)+
  scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
  scale_fill_brewer(palette=palettenumber)+
  theme_classic()+
  theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
  geom_text(data=data, aes(x=factor(STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.8)
printWithNumber(cohortstatusfupnps)
# Follow-up visits with a CSF sample, per visit year, over faded baseline
# totals. Fix: layers whose data is `data` use bare column names inside
# aes() (not data$...).
cohortstatusfupcsf <-
  ggplot(data=longdata[!is.na(longdata$CSFDATE),],
         aes(x=factor(STDIAGCODE),
             fill=factor(VISIT)))+
  geom_bar(data=data, aes(x=factor(STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
  geom_bar(colour="black", alpha=0.4, position="dodge")+
  labs(x="", y="Count", title="SPIN Cohort - Follow-Up CSF Count", fill="", caption=caption)+
  scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
  scale_fill_brewer(palette=palettenumber)+
  theme_classic()+
  theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
  geom_text(data=data, aes(x=factor(STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.8)
printWithNumber(cohortstatusfupcsf)
# Follow-up visits with an MRI, per visit year, over faded baseline totals.
# Fix: layers whose data is `data` use bare column names inside aes() (not
# data$...). Note the main aes() here uses STDIAGCODE/VISIT without factor(),
# as in the original -- both are already factors.
cohortstatusfupmri <-
  ggplot(data=longdata[!is.na(longdata$MRIDATE),],
         aes(x=STDIAGCODE,
             fill=VISIT))+
  geom_bar(data=data, aes(x=STDIAGCODE, fill="", alpha=0.1), show.legend=FALSE)+
  geom_bar(colour="black", alpha=0.4, position="dodge")+
  labs(x="", y="Count", title="SPIN Cohort - Follow-Up MRI Count", fill="", caption=caption)+
  scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
  scale_fill_brewer(palette=palettenumber)+
  theme_classic()+
  theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
  geom_text(data=data, aes(x=factor(STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.8)
printWithNumber(cohortstatusfupmri)
printWithNumber(blank)
grid.text(label="4. Neuropsychology - BASELINE", hjust="centre", vjust="bottom", gp=gpar(fontsize=16, col="#177db7", fontface="bold"))
# Boxplot of FCSRT total by diagnostic group with group means as text.
# Fixes: bare column names in aes() (not data$...), stray unnamed `legend`
# symbol dropped from aes(), deprecated show_guide -> show.legend.
cohortfcsrttotal <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE), y=FCSRTTOTAL, fill=factor(STDIAGCODE))) +
  geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
  labs(x="",y="FCSRT total", title="SPIN Cohort - FCSRT total", caption=caption)+
  guides(fill=FALSE)+
  theme_classic()+
  theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  #scale_y_continuous(limits=c(0, 2200), breaks=seq(0, 2000, 200))+
  stat_summary(fun.y=mean, geom="text", show.legend = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(cohortfcsrttotal)
# Boxplot of CERAD list recall by diagnostic group with group means as text.
# Fixes: bare column names in aes() (not data$...), stray `legend` dropped,
# deprecated show_guide -> show.legend.
cohortcerad <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE), y=CERADLISTRECALL, fill=factor(STDIAGCODE))) +
  geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
  labs(x="",y="CERAD list recall", title="SPIN Cohort - CERAD list recall", caption=caption)+
  guides(fill=FALSE)+
  theme_classic()+
  theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  #scale_y_continuous(limits=c(0, 2200), breaks=seq(0, 2000, 200))+
  stat_summary(fun.y=mean, geom="text", show.legend = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(cohortcerad)
# Boxplot of forward digit span by diagnostic group with group means as text.
# Fixes: bare column names in aes() (not data$...), stray `legend` dropped,
# deprecated show_guide -> show.legend.
cohortdigdir <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE), y=DIGDIR, fill=factor(STDIAGCODE))) +
  geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
  labs(x="",y="Digits direct", title="SPIN Cohort - Digits direct", caption=caption)+
  guides(fill=FALSE)+
  theme_classic()+
  theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  #scale_y_continuous(limits=c(0, 2200), breaks=seq(0, 2000, 200))+
  stat_summary(fun.y=mean, geom="text", show.legend = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(cohortdigdir)
# Boxplot of backward digit span by diagnostic group with group means as text.
# Fixes: bare column names in aes() (not data$...), stray `legend` dropped,
# deprecated show_guide -> show.legend.
cohortdigrev <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE), y=DIGREV, fill=factor(STDIAGCODE))) +
  geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
  labs(x="",y="Digits reverse", title="SPIN Cohort - Digits reverse", caption=caption)+
  guides(fill=FALSE)+
  theme_classic()+
  theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  #scale_y_continuous(limits=c(0, 2200), breaks=seq(0, 2000, 200))+
  stat_summary(fun.y=mean, geom="text", show.legend = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(cohortdigrev)
# Boxplot of Trail Making Test A by diagnostic group with group means as text.
# Fixes: bare column names in aes() (not data$...), stray `legend` dropped,
# deprecated show_guide -> show.legend.
cohorttmta <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE), y=TMTA, fill=factor(STDIAGCODE))) +
  geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
  labs(x="",y="Trail Making Test A", title="SPIN Cohort - Trail Making Test A", caption=caption)+
  guides(fill=FALSE)+
  theme_classic()+
  theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  #scale_y_continuous(limits=c(0, 2200), breaks=seq(0, 2000, 200))+
  stat_summary(fun.y=mean, geom="text", show.legend = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(cohorttmta)
# Boxplot of Trail Making Test B by diagnostic group with group means as text.
# Fixes: bare column names in aes() (not data$...), stray `legend` dropped,
# deprecated show_guide -> show.legend.
cohorttmtb <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE), y=TMTB, fill=factor(STDIAGCODE))) +
  geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
  labs(x="",y="Trail Making Test B", title="SPIN Cohort - Trail Making Test B", caption=caption)+
  guides(fill=FALSE)+
  theme_classic()+
  theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  #scale_y_continuous(limits=c(0, 2200), breaks=seq(0, 2000, 200))+
  stat_summary(fun.y=mean, geom="text", show.legend = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(cohorttmtb)
# Boxplot of Boston Naming Test by diagnostic group with group means as text.
# Fixes: bare column names in aes() (not data$...), stray `legend` dropped,
# deprecated show_guide -> show.legend.
cohortbnt <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE), y=BNT, fill=factor(STDIAGCODE))) +
  geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
  labs(x="",y="Boston Naming Test (60)", title="SPIN Cohort - Boston Naming Test", caption=caption)+
  guides(fill=FALSE)+
  theme_classic()+
  theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  #scale_y_continuous(limits=c(0, 2200), breaks=seq(0, 2000, 200))+
  stat_summary(fun.y=mean, geom="text", show.legend = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(cohortbnt)
# Boxplot of Rey Complex Figure copy by diagnostic group with group means.
# Fixes: bare column names in aes() (not data$...), stray `legend` dropped,
# deprecated show_guide -> show.legend.
cohortreycopy <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE), y=REYCOPY, fill=factor(STDIAGCODE))) +
  geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
  labs(x="",y="Rey Complex Figure copy", title="SPIN Cohort - Rey Complex Figure copy", caption=caption)+
  guides(fill=FALSE)+
  theme_classic()+
  theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  #scale_y_continuous(limits=c(0, 2200), breaks=seq(0, 2000, 200))+
  stat_summary(fun.y=mean, geom="text", show.legend = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(cohortreycopy)
# Boxplot of phonetic fluency by diagnostic group with group means as text.
# Fixes: bare column names in aes() (not data$...), stray `legend` dropped,
# deprecated show_guide -> show.legend.
cohortPHONFLU <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE), y=PHONFLU, fill=factor(STDIAGCODE))) +
  geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
  labs(x="",y="Phonetic Fluency", title="SPIN Cohort - Phonetic Fluency", caption=caption)+
  guides(fill=FALSE)+
  theme_classic()+
  theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  #scale_y_continuous(limits=c(0, 2200), breaks=seq(0, 2000, 200))+
  stat_summary(fun.y=mean, geom="text", show.legend = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(cohortPHONFLU)
# Boxplot of semantic fluency by diagnostic group with group means as text.
# Fixes: bare column names in aes() (not data$...), stray `legend` dropped,
# deprecated show_guide -> show.legend.
cohortSEMFLU <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE), y=SEMFLU, fill=factor(STDIAGCODE))) +
  geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
  labs(x="",y="Semantic Fluency", title="SPIN Cohort - Semantic Fluency", caption=caption)+
  guides(fill=FALSE)+
  theme_classic()+
  theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  #scale_y_continuous(limits=c(0, 2200), breaks=seq(0, 2000, 200))+
  stat_summary(fun.y=mean, geom="text", show.legend = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(cohortSEMFLU)
# Boxplot of VOSP by diagnostic group with group means as text.
# Fixes: bare column names in aes() (not data$...), stray `legend` dropped,
# deprecated show_guide -> show.legend.
cohortVOSP <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE), y=VOSP, fill=factor(STDIAGCODE))) +
  geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
  labs(x="",y="VOSP", title="SPIN Cohort - VOSP", caption=caption)+
  guides(fill=FALSE)+
  theme_classic()+
  theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  #scale_y_continuous(limits=c(0, 2200), breaks=seq(0, 2000, 200))+
  stat_summary(fun.y=mean, geom="text", show.legend = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(cohortVOSP)
# Boxplot of NPI total by diagnostic group with group means as text.
# Fixes: bare column names in aes() (not data$...), stray `legend` dropped,
# deprecated show_guide -> show.legend.
cohortNPI <-
  ggplot(data=data,
         aes(x=factor(STDIAGCODE), y=NPITOTAL, fill=factor(STDIAGCODE))) +
  geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
  labs(x="",y="Neuropsychiatric Inventory", title="SPIN Cohort - Neuropsychiatric Inventory", caption=caption)+
  guides(fill=FALSE)+
  theme_classic()+
  theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
  #scale_y_continuous(limits=c(0, 2200), breaks=seq(0, 2000, 200))+
  stat_summary(fun.y=mean, geom="text", show.legend = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(cohortNPI)
cohortNPIq <-
ggplot(data=data,
aes(x=factor(data$STDIAGCODE), y=NPI.Q.TOTAL, fill=factor(data$STDIAGCODE), legend)) +
geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
labs(x="",y="Neuropsychiatric Inventory Q-Total", title="SPIN Cohort - Neuropsychiatric Inventory Q-Total", caption=caption)+
guides(fill=FALSE)+
theme_classic()+
theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
#scale_y_continuous(limits=c(0, 2200), breaks=seq(0, 2000, 200))+
stat_summary(fun.y=mean, geom="text", show_guide = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(cohortNPIq)
# Emit a blank page and a section title for the CSF biomarker plots that follow.
printWithNumber(blank)
grid.text(label="5. CSF Biomarkers - BASELINE", hjust="centre", vjust="bottom", gp=gpar(fontsize=16, col="#177db7", fontface="bold"))
## CSF core biomarkers (Abeta42, T-Tau, P-Tau): boxplots by diagnostic group
## and scatter plots against age and MMSE.  Dashed reference lines mark the
## in-house cut-offs used elsewhere in this script (Abeta42 550 pg/ml,
## T-Tau 350 pg/ml, P-Tau 61 pg/ml).
## Cleanup vs. original: stray `legend` argument and data$/factor() wrappers
## removed from aes(); labs() x/y strings in the scatter plots were swapped
## relative to the aes() mapping (harmless only because xlab()/ylab() below
## override them), so axis titles are now set solely by xlab()/ylab().
cohortabeta <-
ggplot(data = data,
aes(x = STDIAGCODE, y = CSFABETA42, fill = STDIAGCODE)) +
geom_boxplot(colour = "black", alpha = 0.4) +
labs(x = "", y = "Abeta42 (pg/ml)", title = "SPIN Cohort - CSF Abeta42", caption = caption) +
guides(fill = FALSE) +
theme_classic() +
theme(legend.position = "none", plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic")) +
scale_y_continuous(limits = c(0, 2200), breaks = seq(0, 2000, 200)) +
stat_summary(fun.y = mean, geom = "text", show_guide = FALSE, vjust = 0, position = "identity", aes(label = round(..y.., digits = 1), fontface = "bold"))
printWithNumber(cohortabeta)
cohortabetaage <-
ggplot(data, aes(y = CSFABETA42, x = AGE, colour = STDIAGCODE)) +
geom_point(size = 2, alpha = 0.7) +
labs(title = "SPIN Cohort - CSF Abeta42 - Age", caption = caption, colour = "") +
geom_hline(yintercept = 550, linetype = 2, colour = "gray") +
xlab("Age (years)") + xlim(0, 100) +
ylab("Abeta42 (pg/ml)") + ylim(0, 2000) +
theme_classic() +
theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortabetaage)
cohortabetammse <-
ggplot(data, aes(y = CSFABETA42, x = MMSE, colour = STDIAGCODE)) +
geom_point(size = 2, alpha = 0.7) +
labs(title = "SPIN Cohort - CSF Abeta42 - MMSE", caption = caption, colour = "") +
geom_hline(yintercept = 550, linetype = 2, colour = "gray") +
xlab("MMSE score") + xlim(0, 30) +
ylab("Abeta42 (pg/ml)") + ylim(0, 2000) +
theme_classic() +
theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortabetammse)
# BUG FIX: the T-Tau boxplot's y-axis label read "Abeta42 (pg/ml)".
cohorttau <-
ggplot(data = data,
aes(x = STDIAGCODE, y = CSFTAU, fill = STDIAGCODE)) +
geom_boxplot(colour = "black", alpha = 0.4) +
labs(x = "", y = "T-Tau (pg/ml)", title = "SPIN Cohort - CSF T-Tau", caption = caption) +
guides(fill = FALSE) +
theme_classic() +
theme(legend.position = "none", plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic")) +
scale_y_continuous(limits = c(0, 2200), breaks = seq(0, 2000, 200)) +
stat_summary(fun.y = mean, geom = "text", show_guide = FALSE, vjust = 0, position = "identity", aes(label = round(..y.., digits = 1), fontface = "bold"))
printWithNumber(cohorttau)
cohorttauage <-
ggplot(data, aes(y = CSFTAU, x = AGE, colour = STDIAGCODE)) +
geom_point(size = 2, alpha = 0.7) +
labs(title = "SPIN Cohort - CSF T-Tau - Age", caption = caption, colour = "") +
geom_hline(yintercept = 350, linetype = 2, colour = "gray") +
xlab("Age (years)") + xlim(0, 100) +
ylab("T-Tau (pg/ml)") + ylim(0, 2500) +
theme_classic() +
theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohorttauage)
cohorttaummse <-
ggplot(data, aes(y = CSFTAU, x = MMSE, colour = STDIAGCODE)) +
geom_point(size = 2, alpha = 0.7) +
labs(title = "SPIN Cohort - CSF T-Tau - MMSE", caption = caption, colour = "") +
geom_hline(yintercept = 350, linetype = 2, colour = "gray") +
xlab("MMSE score") + xlim(0, 30) +
ylab("T-Tau (pg/ml)") + ylim(0, 2300) +
theme_classic() +
theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohorttaummse)
cohortptau <-
ggplot(data = data,
aes(x = STDIAGCODE, y = CSFPTAU, fill = STDIAGCODE)) +
geom_boxplot(colour = "black", alpha = 0.4) +
labs(x = "", y = "P-Tau (pg/ml)", title = "SPIN Cohort - CSF P-Tau", caption = caption) +
guides(fill = FALSE) +
theme_classic() +
theme(legend.position = "none", plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic")) +
scale_y_continuous(limits = c(0, 300), breaks = seq(0, 300, 25)) +
stat_summary(fun.y = mean, geom = "text", show_guide = FALSE, vjust = 0, position = "identity", aes(label = round(..y.., digits = 1), fontface = "bold"))
printWithNumber(cohortptau)
## P-Tau scatters (age, MMSE) and pairwise CSF biomarker scatter plots.
## Cleanup vs. original: swapped/dead labs() x/y strings removed (axis titles
## come from xlab()/ylab()); redundant parentheses around STDIAGCODE dropped.
cohortptauage <-
ggplot(data, aes(y = CSFPTAU, x = AGE, colour = STDIAGCODE)) +
geom_point(size = 2, alpha = 0.7) +
labs(title = "SPIN Cohort - CSF P-Tau - Age", caption = caption, colour = "") +
geom_hline(yintercept = 61, linetype = 2, colour = "gray") +
xlab("Age (years)") + xlim(0, 100) +
ylab("P-Tau (pg/ml)") + ylim(0, 300) +
theme_classic() +
theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortptauage)
cohortptaummse <-
ggplot(data, aes(y = CSFPTAU, x = MMSE, colour = STDIAGCODE)) +
geom_point(size = 2, alpha = 0.7) +
labs(title = "SPIN Cohort - CSF P-Tau - MMSE", caption = caption, colour = "") +
geom_hline(yintercept = 61, linetype = 2, colour = "gray") +
xlab("MMSE score") + xlim(0, 30) +
ylab("P-Tau (pg/ml)") + ylim(0, 300) +
theme_classic() +
theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortptaummse)
# Pairwise biomarker scatters: the dashed lines mark both cut-offs, splitting
# each panel into four quadrants (biomarker-positive / -negative).
cohortabetatau <-
ggplot(data, aes(y = CSFABETA42, x = CSFTAU, colour = STDIAGCODE)) +
geom_point(size = 2, alpha = 0.7) +
labs(title = "SPIN Cohort - CSF Abeta42 - T-Tau", caption = caption, colour = "") +
geom_hline(yintercept = 550, linetype = 2, colour = "gray") +
geom_vline(xintercept = 350, linetype = 2, colour = "gray") +
xlab("Total-Tau (pg/ml)") + xlim(0, 2500) +
ylab("Abeta42 (pg/ml)") + ylim(0, 2000) +
theme_classic() +
theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortabetatau)
cohortabetaptau <-
ggplot(data, aes(y = CSFABETA42, x = CSFPTAU, colour = STDIAGCODE)) +
geom_point(size = 2, alpha = 0.7) +
labs(title = "SPIN Cohort - CSF Abeta42 - P-Tau", caption = caption, colour = "") +
geom_hline(yintercept = 550, linetype = 2, colour = "gray") +
geom_vline(xintercept = 61, linetype = 2, colour = "gray") +
xlab("P-Tau (pg/ml)") + xlim(0, 300) +
ylab("Abeta42 (pg/ml)") + ylim(0, 2000) +
theme_classic() +
theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortabetaptau)
cohorttauptau <-
ggplot(data, aes(y = CSFTAU, x = CSFPTAU, colour = STDIAGCODE)) +
geom_point(size = 2, alpha = 0.7) +
labs(title = "SPIN Cohort - CSF T-Tau - P-Tau", caption = caption, colour = "") +
geom_hline(yintercept = 350, linetype = 2, colour = "gray") +
geom_vline(xintercept = 61, linetype = 2, colour = "gray") +
xlab("P-Tau (pg/ml)") + xlim(0, 300) +
ylab("T-Tau (pg/ml)") + ylim(0, 2300) +
theme_classic() +
theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohorttauptau)
## CSF YKL-40: boxplot by diagnostic group plus scatters against age, MMSE,
## Abeta42 and T-Tau.  Same aes()/labs() cleanup as the sections above.
## NOTE(review): `cohoryklage` (missing "t") is the original object name --
## kept as-is in case it is referenced elsewhere.
cohortykl <-
ggplot(data = data,
aes(x = STDIAGCODE, y = CSFYKL40, fill = STDIAGCODE)) +
geom_boxplot(colour = "black", alpha = 0.4) +
labs(x = "", y = "YKL-40 (ng/ml)", title = "SPIN Cohort - CSF YKL-40", caption = caption) +
guides(fill = FALSE) +
theme_classic() +
theme(legend.position = "none", plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic")) +
scale_y_continuous(limits = c(0, 500), breaks = seq(0, 500, 50)) +
stat_summary(fun.y = mean, geom = "text", show_guide = FALSE, vjust = 0, position = "identity", aes(label = round(..y.., digits = 1), fontface = "bold"))
printWithNumber(cohortykl)
cohoryklage <-
ggplot(data, aes(y = CSFYKL40, x = AGE, colour = STDIAGCODE)) +
geom_point(size = 2, alpha = 0.7) +
labs(title = "SPIN Cohort - CSF YKL-40 - Age", caption = caption, colour = "") +
xlab("Age (years)") + xlim(0, 100) +
ylab("YKL-40 (ng/ml)") +
theme_classic() +
theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohoryklage)
cohortyklmmse <-
ggplot(data, aes(y = CSFYKL40, x = MMSE, colour = STDIAGCODE)) +
geom_point(size = 2, alpha = 0.7) +
labs(title = "SPIN Cohort - CSF YKL-40 - MMSE", caption = caption, colour = "") +
xlab("MMSE score") + xlim(0, 30) +
ylab("YKL-40 (ng/ml)") +
theme_classic() +
theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortyklmmse)
cohortabetaykl <-
ggplot(data, aes(y = CSFABETA42, x = CSFYKL40, colour = STDIAGCODE)) +
geom_point(size = 2, alpha = 0.7) +
labs(title = "SPIN Cohort - CSF Abeta42 - YKL-40", caption = caption, colour = "") +
geom_hline(yintercept = 550, linetype = 2, colour = "gray") +
xlab("YKL-40 (ng/ml)") +
ylab("Abeta42 (pg/ml)") + ylim(0, 2000) +
theme_classic() +
theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortabetaykl)
cohorttauykl <-
ggplot(data, aes(y = CSFTAU, x = CSFYKL40, colour = STDIAGCODE)) +
geom_point(size = 2, alpha = 0.7) +
labs(title = "SPIN Cohort - CSF T-Tau - YKL-40", caption = caption, colour = "") +
geom_hline(yintercept = 350, linetype = 2, colour = "gray") +
xlab("YKL-40 (ng/ml)") +
ylab("T-Tau (pg/ml)") + ylim(0, 2300) +
theme_classic() +
theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohorttauykl)
## CSF neurofilament light (NfL): boxplot plus scatters.  Same aes()/labs()
## cleanup as the sections above.
## TODO(review): the axis labels mix "NFL (pg/ml)" (boxplot, T-Tau scatter)
## and "NFL (ng/ml)" (other scatters) -- confirm the assay unit and make the
## labels consistent; the strings are left exactly as in the original here.
cohortnfl <-
ggplot(data = data,
aes(x = STDIAGCODE, y = CSFNFL, fill = STDIAGCODE)) +
geom_boxplot(colour = "black", alpha = 0.4) +
labs(x = "", y = "NFL (pg/ml)", title = "SPIN Cohort - CSF Neurofilaments light", caption = caption) +
guides(fill = FALSE) +
theme_classic() +
theme(legend.position = "none", plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic")) +
scale_y_continuous(limits = c(0, 6000), breaks = seq(0, 6000, 500)) +
stat_summary(fun.y = mean, geom = "text", show_guide = FALSE, vjust = 0, position = "identity", aes(label = round(..y.., digits = 1), fontface = "bold"))
printWithNumber(cohortnfl)
cohortnflage <-
ggplot(data, aes(y = CSFNFL, x = AGE, colour = STDIAGCODE)) +
geom_point(size = 2, alpha = 0.7) +
labs(title = "SPIN Cohort - CSF NFL - Age", caption = caption, colour = "") +
xlab("Age (years)") + xlim(0, 100) +
ylab("NFL (ng/ml)") +
theme_classic() +
theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortnflage)
cohortnflmmse <-
ggplot(data, aes(y = CSFNFL, x = MMSE, colour = STDIAGCODE)) +
geom_point(size = 2, alpha = 0.7) +
labs(title = "SPIN Cohort - CSF NFL - MMSE", caption = caption, colour = "") +
xlab("MMSE score") + xlim(0, 30) +
ylab("NFL (ng/ml)") +
theme_classic() +
theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortnflmmse)
cohortabetanfl <-
ggplot(data, aes(y = CSFABETA42, x = CSFNFL, colour = STDIAGCODE)) +
geom_point(size = 2, alpha = 0.7) +
labs(title = "SPIN Cohort - CSF Abeta42 - NFL", caption = caption, colour = "") +
geom_hline(yintercept = 550, linetype = 2, colour = "gray") +
xlab("NFL (ng/ml)") +
ylab("Abeta42 (pg/ml)") + ylim(0, 2000) +
theme_classic() +
theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortabetanfl)
cohorttaunfl <-
ggplot(data, aes(y = CSFTAU, x = CSFNFL, colour = STDIAGCODE)) +
geom_point(size = 2, alpha = 0.7) +
labs(title = "SPIN Cohort - CSF T-Tau - NFL", caption = caption, colour = "") +
geom_hline(yintercept = 350, linetype = 2, colour = "gray") +
xlab("NFL (pg/ml)") +
ylab("T-Tau (pg/ml)") + ylim(0, 2300) +
theme_classic() +
theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohorttaunfl)
## CSF sAPPbeta: boxplot plus scatters.  Same aes()/labs() cleanup as the
## sections above.
cohortsappb <-
ggplot(data = data,
aes(x = STDIAGCODE, y = CSFSAPPBETA, fill = STDIAGCODE)) +
geom_boxplot(colour = "black", alpha = 0.4) +
labs(x = "", y = "sAPPbeta (ng/ml)", title = "SPIN Cohort - CSF sAPPbeta", caption = caption) +
guides(fill = FALSE) +
theme_classic() +
theme(legend.position = "none", plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic")) +
scale_y_continuous(limits = c(0, 3000), breaks = seq(0, 3000, 500)) +
stat_summary(fun.y = mean, geom = "text", show_guide = FALSE, vjust = 0, position = "identity", aes(label = round(..y.., digits = 1), fontface = "bold"))
printWithNumber(cohortsappb)
cohortsappbage <-
ggplot(data, aes(y = CSFSAPPBETA, x = AGE, colour = STDIAGCODE)) +
geom_point(size = 2, alpha = 0.7) +
labs(title = "SPIN Cohort - CSF sAPPbeta - Age", caption = caption, colour = "") +
xlab("Age (years)") + xlim(0, 100) +
ylab("sAPPbeta (ng/ml)") +
theme_classic() +
theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortsappbage)
cohortsappbmmse <-
ggplot(data, aes(y = CSFSAPPBETA, x = MMSE, colour = STDIAGCODE)) +
geom_point(size = 2, alpha = 0.7) +
labs(title = "SPIN Cohort - CSF sAPPbeta - MMSE", caption = caption, colour = "") +
xlab("MMSE score") + xlim(0, 30) +
ylab("sAPPbeta (ng/ml)") +
theme_classic() +
theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortsappbmmse)
cohortabetasappb <-
ggplot(data, aes(y = CSFABETA42, x = CSFSAPPBETA, colour = STDIAGCODE)) +
geom_point(size = 2, alpha = 0.7) +
labs(title = "SPIN Cohort - CSF Abeta42 - sAPPbeta", caption = caption, colour = "") +
geom_hline(yintercept = 550, linetype = 2, colour = "gray") +
xlab("sAPPbeta (ng/ml)") +
ylab("Abeta42 (pg/ml)") + ylim(0, 2000) +
theme_classic() +
theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortabetasappb)
cohorttausappb <-
ggplot(data, aes(y = CSFTAU, x = CSFSAPPBETA, colour = STDIAGCODE)) +
geom_point(size = 2, alpha = 0.7) +
labs(title = "SPIN Cohort - CSF T-Tau - sAPPbeta", caption = caption, colour = "") +
geom_hline(yintercept = 350, linetype = 2, colour = "gray") +
xlab("sAPPbeta (ng/ml)") +
ylab("T-Tau (pg/ml)") + ylim(0, 2300) +
theme_classic() +
theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohorttausappb)
## CSF progranulin: boxplot plus scatters.  Same aes()/labs() cleanup as the
## sections above.
cohortprogranulin <-
ggplot(data = data,
aes(x = STDIAGCODE, y = CSFPROGRANULIN, fill = STDIAGCODE)) +
geom_boxplot(colour = "black", alpha = 0.4) +
labs(x = "", y = "Progranulin (ng/ml)", title = "SPIN Cohort - CSF Progranulin", caption = caption) +
guides(fill = FALSE) +
theme_classic() +
theme(legend.position = "none", plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic")) +
scale_y_continuous(limits = c(0, 10), breaks = seq(0, 10, 2)) +
stat_summary(fun.y = mean, geom = "text", show_guide = FALSE, vjust = 0, position = "identity", aes(label = round(..y.., digits = 1), fontface = "bold"))
printWithNumber(cohortprogranulin)
cohortprogranulinage <-
ggplot(data, aes(y = CSFPROGRANULIN, x = AGE, colour = STDIAGCODE)) +
geom_point(size = 2, alpha = 0.7) +
labs(title = "SPIN Cohort - CSF Progranulin - Age", caption = caption, colour = "") +
xlab("Age (years)") + xlim(0, 100) +
ylab("Progranulin (ng/ml)") +
theme_classic() +
theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortprogranulinage)
cohortprogranulinmmse <-
ggplot(data, aes(y = CSFPROGRANULIN, x = MMSE, colour = STDIAGCODE)) +
geom_point(size = 2, alpha = 0.7) +
labs(title = "SPIN Cohort - CSF Progranulin - MMSE", caption = caption, colour = "") +
xlab("MMSE score") + xlim(0, 30) +
ylab("Progranulin (ng/ml)") +
theme_classic() +
theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortprogranulinmmse)
cohortabetaprogranulin <-
ggplot(data, aes(y = CSFABETA42, x = CSFPROGRANULIN, colour = STDIAGCODE)) +
geom_point(size = 2, alpha = 0.7) +
labs(title = "SPIN Cohort - CSF Abeta42 - Progranulin", caption = caption, colour = "") +
geom_hline(yintercept = 550, linetype = 2, colour = "gray") +
xlab("Progranulin (ng/ml)") +
ylab("Abeta42 (pg/ml)") + ylim(0, 2000) +
theme_classic() +
theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortabetaprogranulin)
cohorttauprogranulin <-
ggplot(data, aes(y = CSFTAU, x = CSFPROGRANULIN, colour = STDIAGCODE)) +
geom_point(size = 2, alpha = 0.7) +
labs(title = "SPIN Cohort - CSF T-Tau - Progranulin", caption = caption, colour = "") +
geom_hline(yintercept = 350, linetype = 2, colour = "gray") +
xlab("Progranulin (ng/ml)") +
ylab("T-Tau (pg/ml)") + ylim(0, 2300) +
theme_classic() +
theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohorttauprogranulin)
### Density plots ######
# One page per diagnostic group: density curves for Abeta42 (Innotest and
# Lumipulse AB1-42), T-Tau, P-Tau and Age, each annotated with the group's
# 5th/50th/95th percentiles, mean and SD.
# Cleanup vs. original: the per-group data frame was named `plot`, shadowing
# base::plot(); renamed to `dxdata`, and aes() now uses bare column names.
printWithNumber(blank)
grid.text(label="6. Biomarkers distribution - BASELINE", hjust="centre", vjust="bottom", gp=gpar(fontsize=16, col="#177db7", fontface="bold"))
for (i in seq_along(dx)) {
dxdata <- data[data$STDIAGCODE == dx[i], ]
# Summary statistics for the annotation boxes.
abeta <- round(quantile(dxdata$CSFABETA42, probs=c(0.05, 0.5, 0.95), na.rm=TRUE), digits=1)
abetamean <- round(mean(dxdata$CSFABETA42, na.rm=TRUE), digits=1)
abetasd <- round(sd(dxdata$CSFABETA42, na.rm=TRUE), digits=1)
labelabeta <- paste("ABETA42", "\n 5% Perc: ", abeta[1], "pg/ml\n50% Perc: ", abeta[2], "pg/ml\n95% Perc: ", abeta[3], "pg/ml\n\nMean: ", abetamean, "pg/ml\nSD: ", abetasd, "pg/ml", sep="")
abeta42 <- round(quantile(dxdata$AB1.42, probs=c(0.05, 0.5, 0.95), na.rm=TRUE), digits=1)
abeta42mean <- round(mean(dxdata$AB1.42, na.rm=TRUE), digits=1)
abeta42sd <- round(sd(dxdata$AB1.42, na.rm=TRUE), digits=1)
labelabeta42 <- paste("ABETA1-42 (Lumipulse)", "\n 5% Perc: ", abeta42[1], "pg/ml\n50% Perc: ", abeta42[2], "pg/ml\n95% Perc: ", abeta42[3], "pg/ml\n\nMean: ", abeta42mean, "pg/ml\nSD: ", abeta42sd, "pg/ml", sep="")
tau <- round(quantile(dxdata$CSFTAU, probs=c(0.05, 0.5, 0.95), na.rm=TRUE), digits=1)
taumean <- round(mean(dxdata$CSFTAU, na.rm=TRUE), digits=1)
tausd <- round(sd(dxdata$CSFTAU, na.rm=TRUE), digits=1)
labeltau <- paste("T-TAU", "\n 5% Perc: ", tau[1], "pg/ml\n50% Perc: ", tau[2], "pg/ml\n95% Perc: ", tau[3], "pg/ml\n\nMean: ", taumean, "pg/ml\nSD: ", tausd, "pg/ml", sep="")
ptau <- round(quantile(dxdata$CSFPTAU, probs=c(0.05, 0.5, 0.95), na.rm=TRUE), digits=1)
ptaumean <- round(mean(dxdata$CSFPTAU, na.rm=TRUE), digits=1)
ptausd <- round(sd(dxdata$CSFPTAU, na.rm=TRUE), digits=1)
labelptau <- paste("P-TAU", "\n 5% Perc: ", ptau[1], "pg/ml\n50% Perc: ", ptau[2], "pg/ml\n95% Perc: ", ptau[3], "pg/ml\n\nMean: ", ptaumean, "pg/ml\nSD: ", ptausd, "pg/ml", sep="")
age <- round(quantile(dxdata$AGE, probs=c(0.05, 0.5, 0.95), na.rm=TRUE), digits=1)
agemean <- round(mean(dxdata$AGE, na.rm=TRUE), digits=1)
agesd <- round(sd(dxdata$AGE, na.rm=TRUE), digits=1)
labelage <- paste("AGE", "\n 5% Perc: ", age[1], "years\n50% Perc: ", age[2], "years\n95% Perc: ", age[3], "years\n\nMean: ", agemean, "years\nSD: ", agesd, "years", sep="")
# Density plots: solid vline = median, dashed vlines = 5th/95th percentiles.
plotabeta <-
ggplot(dxdata, aes(x = CSFABETA42, fill = "")) +
geom_density(alpha = 0.2) +
xlab("Abeta42 (pg/ml)") +
ylab("Density estimate") +
geom_vline(xintercept = abeta[2]) +
geom_vline(xintercept = abeta[1], linetype = 2) +
geom_vline(xintercept = abeta[3], linetype = 2) +
xlim(0, 2000) +
theme_classic() +
theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic")) +
scale_fill_manual(values = "green") +
labs(title = paste("DIAGNOSTIC GROUP: ", dx[i]), subtitle = "", caption = caption) +
annotate(geom = "text", x = Inf, y = Inf, label = labelabeta, size = 3, hjust = 1, vjust = 1)
plotabeta42 <-
ggplot(dxdata, aes(x = AB1.42, fill = "")) +
geom_density(alpha = 0.2) +
xlab("Abeta42 - Lumipulse (pg/ml)") +
ylab("Density estimate") +
geom_vline(xintercept = abeta42[2]) +
geom_vline(xintercept = abeta42[1], linetype = 2) +
geom_vline(xintercept = abeta42[3], linetype = 2) +
xlim(0, 3000) +
theme_classic() +
theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic")) +
scale_fill_manual(values = "green") +
labs(title = paste("DIAGNOSTIC GROUP: ", dx[i]), subtitle = "", caption = caption) +
annotate(geom = "text", x = Inf, y = Inf, label = labelabeta42, size = 3, hjust = 1, vjust = 1)
plottau <-
ggplot(dxdata, aes(x = CSFTAU, fill = "")) +
geom_density(alpha = 0.2) +
xlab("T-Tau (pg/ml)") +
ylab("Density estimate") +
geom_vline(xintercept = tau[2]) +
geom_vline(xintercept = tau[1], linetype = 2) +
geom_vline(xintercept = tau[3], linetype = 2) +
xlim(0, 2000) +
theme_classic() +
theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic")) +
scale_fill_manual(values = "yellow") +
labs(title = paste("DIAGNOSTIC GROUP: ", dx[i]), subtitle = "", caption = caption) +
annotate(geom = "text", x = Inf, y = Inf, label = labeltau, size = 3, hjust = 1, vjust = 1)
plotptau <-
ggplot(dxdata, aes(x = CSFPTAU, fill = "")) +
geom_density(alpha = 0.2) +
xlab("P-Tau (pg/ml)") +
ylab("Density estimate") +
geom_vline(xintercept = ptau[2]) +
geom_vline(xintercept = ptau[1], linetype = 2) +
geom_vline(xintercept = ptau[3], linetype = 2) +
xlim(0, 250) +
theme_classic() +
theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic")) +
scale_fill_manual(values = "purple") +
labs(title = paste("DIAGNOSTIC GROUP: ", dx[i]), subtitle = "", caption = caption) +
annotate(geom = "text", x = Inf, y = Inf, label = labelptau, size = 3, hjust = 1, vjust = 1)
plotage <-
ggplot(dxdata, aes(x = AGE, fill = "")) +
geom_density(alpha = 0.2) +
xlab("Age (years)") +
ylab("Density estimate") +
geom_vline(xintercept = age[2]) +
geom_vline(xintercept = age[1], linetype = 2) +
geom_vline(xintercept = age[3], linetype = 2) +
xlim(0, 100) +
theme_classic() +
theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic")) +
scale_fill_manual(values = "blue") +
labs(title = paste("DIAGNOSTIC GROUP: ", dx[i]), subtitle = "", caption = caption) +
annotate(geom = "text", x = Inf, y = Inf, label = labelage, size = 3, hjust = 1, vjust = 1)
printWithNumber(plotage)
printWithNumber(plotabeta)
printWithNumber(plotabeta42)
printWithNumber(plottau)
printWithNumber(plotptau)
}
dev.off() | /SPIN.R | no_license | MemoryUnitSantPau/SPIN | R | false | false | 66,316 | r |
## ANALISIS OF THE SPIN cohort DATA #################
#Load gmodels package
#install.packages(c("gmodels", "rpart", "randomForest", "knitr", "psych", "pgirmess", "Hmisc", "car", "fBasics", "sm", "foreign", "xlsx"))
#install.packages(c("car", "xlsx", "ggplot2", "gridExtra"), repos='http://cran.us.r-project.org')
library(grid)
#library(gmodels)
#library(knitr)
#library(psych)
#library(survival)
#library(Hmisc)
library(car)
#library(stats)
#library(fBasics)
#library(sm)
library(xlsx)
#library(pgirmess)
library(ggplot2)
library(gridExtra)
#library(pROC)
#library(randomForest)
#library(rpart)
#library(rpart.plot)
#library(pwr)
options(scipen=8, warn=-1)
# Draw a ggplot and stamp a small gray figure number in the top-right corner.
# Relies on a global `counter` (updated with <<-) that persists across calls;
# each call increments it by one.
# Args:
#   gg_plot:      the ggplot object to print.
#   top_right:    text stamped in the corner.  Defaults to `counter`; because
#                 R evaluates default arguments lazily, the default is read
#                 only at the textGrob() call below, i.e. AFTER the
#                 reset/increment, so the stamp shows the updated value.
#   resetcounter: if TRUE, restart numbering (counter is set to 0 and then
#                 incremented, so the stamped number is 1).
printWithNumber = function(gg_plot, top_right=counter, resetcounter=FALSE)
{
plot(gg_plot)
if (resetcounter==TRUE){
counter <<- 0
}
counter <<- counter+1
# `top_right` default (the global counter) is evaluated here, post-increment.
label = textGrob(top_right,
x = 0.98, # right side
y = 0.98, # top
just="right",
hjust = NULL,
vjust = 1,
gp=gpar(fontsize=10, col="gray"))
grid.draw(label)
}
#### RETRIEVE DATA and ARRANGE VARIABLES######
#DATA in Santpau
# NOTE(review): setwd() plus absolute UNC paths make this script
# machine-specific; the commented "DATA in MACBOOK" block below is the
# equivalent macOS configuration.  Consider file.path() with a configurable
# root instead.
setwd("//dspau.santpau.es/U/CarpUMNeurologia/BaseUdM/Export-analisis/Resultados")
# Baseline visit plus annual follow-up visits (years 1-4), one Excel sheet each.
data <- data.frame(read.xlsx ("//dspau.santpau.es/U/CarpUMNeurologia/BaseUdM/Export-analisis/Tablas exportadas/CohBasalGlobal.xlsx", 1))
data1a <- data.frame(read.xlsx ("//dspau.santpau.es/U/CarpUMNeurologia/BaseUdM/Export-analisis/Tablas exportadas/CCoh1aGlobalExportable.xlsx", 1))
data2a <- data.frame(read.xlsx ("//dspau.santpau.es/U/CarpUMNeurologia/BaseUdM/Export-analisis/Tablas exportadas/CCoh2aGlobalExportable.xlsx", 1))
data3a <- data.frame(read.xlsx ("//dspau.santpau.es/U/CarpUMNeurologia/BaseUdM/Export-analisis/Tablas exportadas/CCoh3aGlobalExportable.xlsx", 1))
data4a <- data.frame(read.xlsx ("//dspau.santpau.es/U/CarpUMNeurologia/BaseUdM/Export-analisis/Tablas exportadas/CCoh4aGlobalExportable.xlsx", 1))
#DATA in MACBOOK
#data <- data.frame(read.xlsx ("/Users/Daniel/Google Drive/WORK/ExportacionBaseUdM/CohBasalGlobal.xlsx", 1))
#setwd("/Users/Daniel/Google Drive/WORK/Projectes/02-PROYECTOS ACTIVOS/SPIN cohort/Analisis R")
# Diagnostic group codes are stored as integers in the database; convert them
# to factors and attach human-readable labels.  The label vector is defined
# once (instead of eight copy-pasted c(...) calls) so the visits cannot drift
# out of sync.
# NOTE(review): assigning levels() this way assumes every data frame contains
# exactly these eight codes in the same order -- same assumption as the
# original code; confirm if a visit can be missing a diagnosis code.
dxlabels <- c("Control", "SCI", "MCI", "AD", "LBD", "FTD", "Down", "ALS")
data$STDIAGCODE <- factor(data$STDIAGCODE)
data1a$STDIAGCODE <- factor(data1a$STDIAGCODE)
data2a$STDIAGCODE <- factor(data2a$STDIAGCODE)
data3a$STDIAGCODE <- factor(data3a$STDIAGCODE)
data4a$STDIAGCODE <- factor(data4a$STDIAGCODE)
data1a$STDIAGCODE1a <- factor(data1a$STDIAGCODE1a)
data2a$STDIAGCODE2a <- factor(data2a$STDIAGCODE2a)
data3a$STDIAGCODE3a <- factor(data3a$STDIAGCODE3a)
data4a$STDIAGCODE4a <- factor(data4a$STDIAGCODE4a)
levels(data$STDIAGCODE) <- dxlabels
levels(data1a$STDIAGCODE) <- dxlabels
levels(data1a$STDIAGCODE1a) <- dxlabels
levels(data2a$STDIAGCODE) <- dxlabels
levels(data2a$STDIAGCODE2a) <- dxlabels
levels(data3a$STDIAGCODE) <- dxlabels
levels(data3a$STDIAGCODE3a) <- dxlabels
levels(data4a$STDIAGCODE) <- dxlabels
levels(data4a$STDIAGCODE4a) <- dxlabels
# Vector of group labels used to drive per-diagnosis loops below.
dx <- levels(data$STDIAGCODE)
#Set up Cut-off point for ABETA42, TAU and P-TAU
# Vectorized replacement of the original element-wise loop: ifelse() already
# operates on whole columns, and this also behaves correctly on an empty
# table (the old `1:length(...)` loop would run for i in c(1, 0)).
# Missing biomarker values propagate to NA, exactly as before.
# NOTE(review): values EXACTLY equal to the cut-off (550 / 350 / 61) fail both
# strict comparisons and end up NA -- confirm this is intended.
data$ABETA42status <- ifelse(data$CSFABETA42 < 550, "ABETA42pos",
                             ifelse(data$CSFABETA42 > 550, "ABETA42neg", NA))
data$TAUstatus <- ifelse(data$CSFTAU > 350, "TAUpos",
                         ifelse(data$CSFTAU < 350, "TAUneg", NA))
data$PTAUstatus <- ifelse(data$CSFPTAU > 61, "PTAUpos",
                          ifelse(data$CSFPTAU < 61, "PTAUneg", NA))
# CSF biomarker ratios (element-wise; NA wherever either component is NA).
data$RatioTauAbeta <- data$CSFTAU / data$CSFABETA42
data$RatioTauPtau <- data$CSFTAU / data$CSFPTAU
data$RatioTausappbeta <- data$CSFTAU / data$CSFSAPPBETA
data$RatioPtausappbeta <- data$CSFPTAU / data$CSFSAPPBETA
data$RatioYKLsappbeta <- data$CSFYKL40 / data$CSFSAPPBETA
data$RatioAbetasappbeta <- data$CSFABETA42 / data$CSFSAPPBETA
# AD CSF profile flag at the 0.52 tau/abeta42 cut-off; vectorized replacement
# of the original per-row loop (identical output: NA ratios stay NA).
data$RatioAD <- ifelse(data$RatioTauAbeta > 0.52, "RatioAD+", "RatioAD-")
# APOE e4 carrier status from the two-allele genotype string.
# Vectorized with %in%: output is identical to the original loop, including
# NA for missing or unrecognized genotypes (%in% returns FALSE for NA, so
# both branches fail and the final NA is kept -- same as the old NA|...
# comparisons).
# NOTE(review): assumes genotypes are coded with alleles in ascending order
# ("24", never "42"); any other coding silently becomes NA -- confirm.
apoe4neg <- c("22", "23", "33")
apoe4pos <- c("24", "34", "44")
data$APOE4 <- ifelse(data$APOE %in% apoe4neg, "APOE4-",
                     ifelse(data$APOE %in% apoe4pos, "APOE4+", NA))
# Build one long table of visit dates (baseline + 4 annual visits), one row
# per subject-visit.  Waves 1a and 3a have no CSF/MRI exports, so those
# columns are padded with NA before stacking.
# Target (generic) column names for the stacked table.
longnames <- c("DBCODE", "STDIAGCODE", "FUPDATE", "NPSDATE", "CSFDATE", "MRIDATE")
myvars <- c("DBCODE", "STDIAGCODE", "FUPDATEbasal", "NPSDATE", "CSFDATE", "MRIDATE")
myvars1 <- c("DBCODE", "STDIAGCODE", "FUPDATE1a", "NPSDATE")
myvars2 <- c("DBCODE", "STDIAGCODE", "FUPDATE2a", "NPSDATE", "CSFDATE", "MRIDATE")
myvars3 <- c("DBCODE", "STDIAGCODE", "FUPDATE3a", "NPSDATE")
myvars4 <- c("DBCODE", "STDIAGCODE", "FUPDATE4a", "NPSDATE", "CSFDATE", "MRIDATE")
subdata <- data[myvars]
names(subdata) <- longnames
subdata$VISIT <- "BASELINE"
subdata1a <- data1a[myvars1]
subdata1a$CSFDATE <- NA
subdata1a$MRIDATE <- NA
names(subdata1a) <- longnames
subdata1a$VISIT <- "YEAR 1"
subdata2a <- data2a[myvars2]
names(subdata2a) <- longnames
subdata2a$VISIT <- "YEAR 2"
subdata3a <- data3a[myvars3]
subdata3a$CSFDATE <- NA
subdata3a$MRIDATE <- NA
names(subdata3a) <- longnames
subdata3a$VISIT <- "YEAR 3"
subdata4a <- data4a[myvars4]
names(subdata4a) <- longnames
subdata4a$VISIT <- "YEAR 4"
longdata <- rbind(subdata, subdata1a, subdata2a, subdata3a, subdata4a)
rm(subdata, subdata1a, subdata2a, subdata3a, subdata4a)
# Keep only visits that actually happened (follow-up date present).
# The original filtered on longdata$FUPDATE BEFORE renaming the column from
# "FUPDATEbasal", silently relying on `$` partial matching; columns are now
# renamed up front so the exact name is used here.
longdata <- longdata[!is.na(longdata$FUPDATE), ]
# Re-assert the diagnostic labels on the stacked factor (no-op if the
# per-table factors above were already relabeled, kept for safety).
levels(longdata$STDIAGCODE) <- c("Control", "SCI", "MCI", "AD", "LBD", "FTD", "Down", "ALS")
##TABLE SUMMARY####
# NOTE(review): attach() is fragile (masking, stale copies) but is kept in
# case later sections of the script rely on the attached names; the two
# frames below now select columns from `data` explicitly instead of using
# the attached bare names, which is equivalent but self-contained.
attach(data)
# Quantitative variables summarized as N / mean / SD per diagnostic group.
quantvariables <- data[c("AGE", "EDUC", "MMSE", "FCSRTTOTALFREE", "FCSRTTOTAL",
                         "FCSRTDELTOT", "CERADLISTRECALL", "DIGDIR", "DIGREV",
                         "TMTA", "TMTB", "BNT", "REYCOPY", "PHONFLU", "SEMFLU",
                         "VOSP", "NPI.Q.TOTAL", "CSFABETA42", "AB1.42",
                         "CSFTAU", "CSFPTAU", "CSFYKL40", "CSFSAPPBETA",
                         "CSFNFL", "CSFPROGRANULIN")]
# Categorical variables tabulated against diagnostic group (STDIAGCODE last
# on purpose: its self-cross-table rows are dropped when PROPTABLE is built).
propvariables <- data[c("SEX", "APOE4", "RatioAD", "ABETA42status",
                        "TAUstatus", "PTAUstatus", "FBPVISUAL", "FDGVISUAL",
                        "STDIAGCODE")]
# Summary matrix: 4 rows per quantitative variable (spacer, N, mean, SD),
# one column per diagnostic group.
DATATABLE <- matrix(nrow=length(quantvariables)*4, ncol=length(dx))
colnames(DATATABLE) <- dx
NAMES<-vector()
for(i in 1:length(quantvariables)) {
# Single-bracket indexing keeps VAR as a one-column data frame, so
# names(VAR) below returns the variable name for the row labels.
VAR <- quantvariables[i]
NAMES <- c(NAMES, paste("Summary-",names(VAR), sep=""), paste(names(VAR),"-N", sep=""), paste(names(VAR),"-MEAN", sep=""), paste(names(VAR),"-SD", sep=""))
for (k in 1:length(dx)) {
# Empty spacer row before each variable's three statistics.
DATATABLE[i*4-3,k] <- paste("")
# N of non-missing values within this diagnostic group.
# NOTE(review): if STDIAGCODE contained NAs, `==` would yield NA row
# selectors that inflate this count -- confirm STDIAGCODE has no NAs.
DATATABLE[i*4-2,k] <- length(VAR[data$STDIAGCODE==dx[k]&!is.na(VAR),])
# Group mean and SD, 1 decimal; stored as character since the matrix also
# holds the blank spacer rows.
DATATABLE[i*4-1,k] <- round(mean(VAR[data$STDIAGCODE==dx[k],],na.rm=TRUE), digits=1)
DATATABLE[i*4,k] <- round(sd(VAR[data$STDIAGCODE==dx[k],],na.rm=TRUE), digits=1)
}
}
row.names(DATATABLE) <- NAMES
colnames(DATATABLE)<- c("Control", "SCI", "MCI", "AD", "LBD", "FTD", "Down", "ALS")
# Sheet name records the export date.
date<-paste("Data updated on", format(Sys.Date(), "%Y-%m-%d"))
# NOTE(review): NAMESprop is initialized but never filled in this section.
NAMESprop <- vector()
# Seed row so rbind() inside the loop has something to bind to.  table(dx)
# orders its cells alphabetically (unlike the factor-ordered columns of the
# cross-tables below), but this misaligned seed row is dropped when
# PROPTABLE is sliced, so it never reaches the output.
proptable <- table(dx)
for(i in 1:length((propvariables))) {
VAR <- propvariables[,i]
# Counts of each category of VAR per diagnostic group (columns follow the
# STDIAGCODE factor level order).
a<-(table(VAR, propvariables$STDIAGCODE))
proptable <- rbind(proptable, a)
}
# Drop the seed row (row 1) and the last length(dx) rows, which come from
# cross-tabulating STDIAGCODE against itself.
PROPTABLE <- proptable[2:(nrow(proptable)-length(dx)),]
DATATABLE <- rbind(DATATABLE, PROPTABLE)
write.xlsx2(DATATABLE, "DATATABLE.xlsx", sheetName=date, showNA=FALSE)
# English locale so weekday/month names in the caption render in English.
# NOTE(review): the bare "en" locale string is platform-specific (works on
# Windows; on Linux/macOS it typically fails with a warning) -- confirm.
Sys.setlocale(locale = "en")
caption <- paste("Summary data from the SPIN cohort, Hospital de la Santa Creu i Sant Pau, Barcelona\nLast update on", format(Sys.time(), "%A, %B %d %Y at %H:%M"))
# RColorBrewer palette used by every count plot below.
palettenumber<- "BuPu"
# Empty placeholder plot: printed to start a fresh PDF page before drawing
# grid text/tables on top of it.
blank <- ggplot(data, aes()) +
geom_blank() +
scale_x_continuous(expand=c(0,0)) +
scale_y_continuous(expand=c(0,0))+
theme_minimal()
### SUMMARY PDF ######
######################
# Multi-page summary report; all subsequent grid/ggplot output goes to this
# device (8 x 6 inch pages).  NOTE(review): no matching dev.off() is visible
# in this section -- presumably the device is closed later; confirm.
pdf("SPINsummary.pdf", 8, 6)
#### >>pdf section 1####
# Title page, drawn directly with grid.
grid.text(label="SPIN cohort",x=0.5, y=0.6,just="centre", gp=gpar(fontsize=24, col="#177db7", fontface="bold"))
grid.text(label="Sant Pau Initiative on Neurodegeneration",x=0.5, y=0.5, gp=gpar(fontsize=14, col="black", fontface="bold"))
grid.text(label=caption, x=0.5, y=0.22, vjust=0, hjust=0.5, gp=gpar(fontsize=9, col="gray", fontface="bold"))
#### >>pdf section 1####
# printWithNumber() is defined elsewhere in this script; it prints a plot and
# stamps a page number (resetcounter restarts the page counter).
printWithNumber(blank, resetcounter = TRUE)
grid.text(label="1. Summary Table",hjust="centre", vjust="bottom", gp=gpar(fontsize=16, col="#177db7", fontface="bold"))
# Summary table rendered across six pages, 20 rows each.
# NOTE(review): the hard-coded slices (1:20 ... 101:nrow) assume DATATABLE has
# at least 101 rows; adding/removing variables breaks this -- confirm.
printWithNumber(blank)
table1 <- (tableGrob(DATATABLE[1:20,], theme=ttheme_minimal(base_size=7)))
grid.draw(table1)
printWithNumber(blank)
table2 <- (tableGrob(DATATABLE[21:40,], theme=ttheme_minimal(base_size=7)))
grid.draw(table2)
printWithNumber(blank)
table3 <- (tableGrob(DATATABLE[41:60,], theme=ttheme_minimal(base_size=7)))
grid.draw(table3)
printWithNumber(blank)
table4 <- (tableGrob(DATATABLE[61:80,], theme=ttheme_minimal(base_size=7)))
grid.draw(table4)
printWithNumber(blank)
table5 <- (tableGrob(DATATABLE[81:100,], theme=ttheme_minimal(base_size=7)))
grid.draw(table5)
printWithNumber(blank)
table6 <- (tableGrob(DATATABLE[101:nrow(DATATABLE),], theme=ttheme_minimal(base_size=7)))
grid.draw(table6)
#### >>pdf section 2###
printWithNumber(blank)
grid.text(label="2. Cohort Summary Counts - BASELINE",hjust="centre", vjust="bottom", gp=gpar(fontsize=16, col="#177db7", fontface="bold"))
#### COHORT STATUS GRAPHICS ###
### Summary graphs#####
# Shared pattern of the count plots: a pale full-cohort background bar
# (fill="") plus dodged bars split by the fill variable; small gray per-bar
# counts and large total counts are added as text layers.
# NOTE(review): `data$STDIAGCODE` inside aes() works here but bypasses
# ggplot's data masking (would break under faceting); `..count..`, `fun.y=`,
# `show_guide=` and `guides(fill=FALSE)` are deprecated in recent ggplot2
# releases -- presumably fine for the version this script targets; confirm
# before upgrading ggplot2.
# Subject counts per diagnostic group, split by sex.
cohortstatus <-
ggplot(data=data,
aes(x=factor(data$STDIAGCODE),
fill=SEX)) +
geom_bar(data=data, aes(x=factor(data$STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
geom_bar(colour="black", alpha=0.4, position="dodge")+
labs(x="", y="Count", title="SPIN Cohort - Count", caption=caption)+
scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
scale_fill_brewer(palette=palettenumber)+
theme_classic()+
theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
geom_text(data=data, aes(x=factor(data$STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.8)
printWithNumber(cohortstatus)
# Age distribution per diagnostic group (box plots; group mean printed as
# bold text via stat_summary).
plotages <-
ggplot(data=data,
aes(x=factor(data$STDIAGCODE), y=AGE, fill=factor(data$STDIAGCODE))) +
geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
labs(x="",y="Age (years)", title="SPIN Cohort - Age", caption=caption)+
guides(fill=FALSE)+
theme_classic()+
theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
scale_y_continuous(limits=c(0, 110), breaks=seq(0, 100, 20))+
stat_summary(fun.y=mean, geom="text", show_guide = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(plotages)
# Years of education per diagnostic group.
ploteduc <-
ggplot(data=data,
aes(x=factor(data$STDIAGCODE), y=EDUC, fill=factor(data$STDIAGCODE), legend)) +
geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
labs(x="", y="Education (years)", title="SPIN Cohort - Education", caption=caption)+
guides(fill=FALSE)+
scale_y_continuous(limits=c(0, 25), breaks=seq(0, 20, 2))+
theme_classic()+
theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
stat_summary(fun.y=mean, geom="text", show_guide = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(ploteduc)
# MMSE score per diagnostic group.
plotmmse <-
ggplot(data=data,
aes(x=factor(data$STDIAGCODE), y=MMSE, fill=factor(data$STDIAGCODE), legend)) +
geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
labs(x="",y="Mini-Mental State Examination", title="SPIN Cohort - MMSE", caption=caption)+
guides(fill=FALSE)+
theme_classic()+
theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
scale_y_continuous(limits=c(0, 32), breaks=seq(0, 30, 2))+
stat_summary(fun.y=mean, geom="text", show_guide = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(plotmmse)
# APOE4 carrier counts per diagnostic group (same count-plot pattern as the
# demographics section; deprecated ggplot2 idioms flagged there apply here).
cohortapoe <-
ggplot(data=data,
aes(x=factor(data$STDIAGCODE),
fill=APOE4)) +
geom_bar(data=data, aes(x=factor(data$STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
geom_bar(colour="black", alpha=0.4, position="dodge")+
labs(x="", y="Count", title="SPIN Cohort - APOE4", caption=caption)+
scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
scale_fill_brewer(palette=palettenumber)+
theme_classic()+
theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
geom_text(data=data, aes(x=factor(data$STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.8)
printWithNumber(cohortapoe)
# CSF Abeta42 status counts (cut-off 550 pg/ml, set earlier in the script).
cohortabetastatus <-
ggplot(data=data,
aes(x=factor(data$STDIAGCODE),
fill=ABETA42status)) +
geom_bar(data=data, aes(x=factor(data$STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
geom_bar(colour="black", alpha=0.4, position="dodge")+
labs(x="", y="Count", title="SPIN Cohort - ABETA42 status (cut-off=550 pg/ml)", fill="ABETA42", caption=caption)+
scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
scale_fill_brewer(palette=palettenumber)+
theme_classic()+
theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
geom_text(data=data, aes(x=factor(data$STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.8)
printWithNumber(cohortabetastatus)
# CSF total tau status counts (cut-off 350 pg/ml).
cohorttaustatus <-
ggplot(data=data,
aes(x=factor(data$STDIAGCODE),
fill=TAUstatus)) +
geom_bar(data=data, aes(x=factor(data$STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
geom_bar(colour="black", alpha=0.4, position="dodge")+
labs(x="", y="Count", title="SPIN Cohort - T-TAU status (cut-off=350 pg/ml)", fill="T-TAU", caption=caption)+
scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
scale_fill_brewer(palette=palettenumber)+
theme_classic()+
theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
geom_text(data=data, aes(x=factor(data$STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.8)
printWithNumber(cohorttaustatus)
# CSF phospho-tau status counts (cut-off 61 pg/ml).
cohortptaustatus <-
ggplot(data=data,
aes(x=factor(data$STDIAGCODE),
fill=PTAUstatus)) +
geom_bar(data=data, aes(x=factor(data$STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
geom_bar(colour="black", alpha=0.4, position="dodge")+
labs(x="", y="Count", title="SPIN Cohort - P-TAU status (cut-off=61 pg/ml)", fill="P-TAU", caption=caption)+
scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
scale_fill_brewer(palette=palettenumber)+
theme_classic()+
theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
geom_text(data=data, aes(x=factor(data$STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.8)
printWithNumber(cohortptaustatus)
# Inclusion timeline: baseline visit date per subject, colored by diagnosis.
cohortinclusion <-
ggplot(data, aes(y=STDIAGCODE,
x=FUPDATEbasal,
colour=(STDIAGCODE))) +
geom_point(size=3, alpha=0.7)+
labs(x="Baseline date",y="Diagnostic group", title="SPIN Cohort - Inclusion", caption=caption, colour="")+
scale_x_date(date_breaks = "4 months", date_labels = "%b-%Y")+
theme_classic()+
theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"), axis.text.x = element_text(face="bold", hjust=0, angle=330))
printWithNumber(cohortinclusion)
# Data-availability counts per modality: fill is the logical "date column is
# non-missing", so bars split into available vs not-available per group.
cohortstatusMRI <-
ggplot(data=data,
aes(x=factor(data$STDIAGCODE),
fill=!is.na(data$MRIDATE))) +
geom_bar(data=data, aes(x=factor(data$STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
geom_bar(colour="black", alpha=0.4, position="dodge")+
labs(x="", y="Count", title="SPIN Cohort - MRI Count", fill="MRI is available", caption=caption)+
scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
scale_fill_brewer(palette=palettenumber)+
theme_classic()+
theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
geom_text(data=data, aes(x=factor(data$STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.8)
printWithNumber(cohortstatusMRI)
# CSF availability.
cohortstatusCSF <-
ggplot(data=data,
aes(x=factor(data$STDIAGCODE),
fill=!is.na(data$CSFDATE))) +
geom_bar(data=data, aes(x=factor(data$STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
geom_bar(colour="black", alpha=0.4, position="dodge")+
labs(x="", y="Count", title="SPIN Cohort - CSF Count", fill="CSF is available", caption=caption)+
scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
scale_fill_brewer(palette=palettenumber)+
theme_classic()+
theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
geom_text(data=data, aes(x=factor(data$STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.85)
printWithNumber(cohortstatusCSF)
# 18F-Florbetapir amyloid PET availability.
cohortstatusFBP <-
ggplot(data=data,
aes(x=factor(data$STDIAGCODE),
fill=!is.na(data$FBPDATE))) +
geom_bar(data=data, aes(x=factor(data$STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
geom_bar(colour="black", alpha=0.4, position="dodge")+
labs(x="", y="Count", title="SPIN Cohort - 18F-Florbetapir PET Count", fill="FBP is available", caption=caption)+
scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
scale_fill_brewer(palette=palettenumber)+
theme_classic()+
theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
geom_text(data=data, aes(x=factor(data$STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.80)
printWithNumber(cohortstatusFBP)
# 18F-FDG PET availability.
cohortstatusFDG <-
ggplot(data=data,
aes(x=factor(data$STDIAGCODE),
fill=!is.na(data$FDGDATE))) +
geom_bar(data=data, aes(x=factor(data$STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
geom_bar(colour="black", alpha=0.4, position="dodge")+
labs(x="", y="Count", title="SPIN Cohort - 18F-Fluorodeoxyglucose PET Count", fill="FDG is available", caption=caption)+
scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
scale_fill_brewer(palette=palettenumber)+
theme_classic()+
theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
geom_text(data=data, aes(x=factor(data$STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.80)
printWithNumber(cohortstatusFDG)
# CSF AD-profile (tau/abeta42 ratio) counts per group.
cohortstatusRatiopos <-
ggplot(data=data,
aes(x=factor(STDIAGCODE),
fill=factor(RatioAD)))+
geom_bar(data=data, aes(x=factor(data$STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
geom_bar(colour="black", alpha=0.4, position="dodge")+
labs(x="", y="Count", title="SPIN Cohort - RatioAD in CSF", fill="RatioAD<0.52\n(AD profile)", caption=caption)+
scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
scale_fill_brewer(palette=palettenumber)+
theme_classic()+
theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
geom_text(data=data, aes(x=factor(data$STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.80)
printWithNumber(cohortstatusRatiopos)
# Visual reads of amyloid PET (positive/negative) per group.
cohortstatusFBPpos <-
ggplot(data=data,
aes(x=factor(STDIAGCODE),
fill=factor(FBPVISUAL)))+
geom_bar(data=data, aes(x=factor(data$STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
geom_bar(colour="black", alpha=0.4, position="dodge")+
labs(x="", y="Count", title="SPIN Cohort - Visual FBP-PET", fill="Positive FBP", caption=caption)+
scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
scale_fill_brewer(palette=palettenumber)+
theme_classic()+
theme( plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
geom_text(data=data, aes(x=factor(data$STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.80)
printWithNumber(cohortstatusFBPpos)
# Visual reads of FDG PET per group.
cohortstatusFDGpos <-
ggplot(data=data,
aes(x=factor(STDIAGCODE),
fill=factor(FDGVISUAL)))+
geom_bar(data=data, aes(x=factor(data$STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
geom_bar(colour="black", alpha=0.4, position="dodge")+
labs(x="", y="Count", title="SPIN Cohort - Visual FDG-PET", fill="Positive FDG", caption=caption)+
scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
scale_fill_brewer(palette=palettenumber)+
theme_classic()+
theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
geom_text(data=data, aes(x=factor(data$STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.80)
printWithNumber(cohortstatusFDGpos)
printWithNumber(blank)
grid.text(label="3. Longitudinal Follow-Up", hjust="centre", vjust="bottom", gp=gpar(fontsize=16, col="#177db7", fontface="bold"))
# Follow-up counts: dodged bars come from `longdata` (one row per
# subject-visit), while the pale background bar layer still uses the baseline
# `data` frame as the full-cohort reference.
cohortstatusfup <-
ggplot(data=longdata,
aes(x=factor(STDIAGCODE),
fill=factor(VISIT)))+
geom_bar(data=data, aes(x=factor(data$STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
geom_bar(colour="black", alpha=0.4, position="dodge")+
labs(x="", y="Count", title="SPIN Cohort - Follow-Up Count", fill="", caption=caption)+
scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
scale_fill_brewer(palette=palettenumber)+
theme_classic()+
theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
geom_text(data=data, aes(x=factor(data$STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.8)
printWithNumber(cohortstatusfup)
# Same, restricted to visits with a neuropsychology assessment date.
cohortstatusfupnps <-
ggplot(data=longdata[!is.na(longdata$NPSDATE),],
aes(x=factor(STDIAGCODE),
fill=factor(VISIT)))+
geom_bar(data=data, aes(x=factor(data$STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
geom_bar(colour="black", alpha=0.4, position="dodge")+
labs(x="", y="Count", title="SPIN Cohort - Follow-Up NPS Count", fill="", caption=caption)+
scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
scale_fill_brewer(palette=palettenumber)+
theme_classic()+
theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
geom_text(data=data, aes(x=factor(data$STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.8)
printWithNumber(cohortstatusfupnps)
# Same, restricted to visits with a lumbar puncture (CSF) date.
cohortstatusfupcsf <-
ggplot(data=longdata[!is.na(longdata$CSFDATE),],
aes(x=factor(STDIAGCODE),
fill=factor(VISIT)))+
geom_bar(data=data, aes(x=factor(data$STDIAGCODE), fill="", alpha=0.1), show.legend=FALSE)+
geom_bar(colour="black", alpha=0.4, position="dodge")+
labs(x="", y="Count", title="SPIN Cohort - Follow-Up CSF Count", fill="", caption=caption)+
scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
scale_fill_brewer(palette=palettenumber)+
theme_classic()+
theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
geom_text(data=data, aes(x=factor(data$STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.8)
printWithNumber(cohortstatusfupcsf)
# Same, restricted to visits with an MRI date (note: no factor() wrappers
# here, unlike the two plots above -- harmless since STDIAGCODE/VISIT are
# already discrete).
cohortstatusfupmri <-
ggplot(data=longdata[!is.na(longdata$MRIDATE),],
aes(x=STDIAGCODE,
fill=VISIT))+
geom_bar(data=data, aes(x=data$STDIAGCODE, fill="", alpha=0.1), show.legend=FALSE)+
geom_bar(colour="black", alpha=0.4, position="dodge")+
labs(x="", y="Count", title="SPIN Cohort - Follow-Up MRI Count", fill="", caption=caption)+
scale_y_continuous(limits=c(0, 315), breaks=seq(0, 300, 20))+
scale_fill_brewer(palette=palettenumber)+
theme_classic()+
theme(plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
geom_text(stat="count", aes(label=..count..),cex=3,col="gray", position = position_dodge(width = 1), show.legend=FALSE, vjust = -0.6)+
geom_text(data=data, aes(x=factor(data$STDIAGCODE), fill="",label=..count..),cex=6, show.legend=FALSE, stat="count", position = position_dodge(width = 0.8), vjust = -0.8)
printWithNumber(cohortstatusfupmri)
printWithNumber(blank)
grid.text(label="4. Neuropsychology - BASELINE", hjust="centre", vjust="bottom", gp=gpar(fontsize=16, col="#177db7", fontface="bold"))
# Baseline neuropsychology: one boxplot per test, per diagnostic group, with
# the group mean printed as bold text (same deprecated `fun.y=`/`show_guide=`
# idioms as earlier sections -- version-dependent; confirm before upgrading).
cohortfcsrttotal <-
ggplot(data=data,
aes(x=factor(data$STDIAGCODE), y=FCSRTTOTAL, fill=factor(data$STDIAGCODE), legend)) +
geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
labs(x="",y="FCSRT total", title="SPIN Cohort - FCSRT total", caption=caption)+
guides(fill=FALSE)+
theme_classic()+
theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
#scale_y_continuous(limits=c(0, 2200), breaks=seq(0, 2000, 200))+
stat_summary(fun.y=mean, geom="text", show_guide = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(cohortfcsrttotal)
# CERAD word-list delayed recall.
cohortcerad <-
ggplot(data=data,
aes(x=factor(data$STDIAGCODE), y=CERADLISTRECALL, fill=factor(data$STDIAGCODE), legend)) +
geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
labs(x="",y="CERAD list recall", title="SPIN Cohort - CERAD list recall", caption=caption)+
guides(fill=FALSE)+
theme_classic()+
theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
#scale_y_continuous(limits=c(0, 2200), breaks=seq(0, 2000, 200))+
stat_summary(fun.y=mean, geom="text", show_guide = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(cohortcerad)
# Digit span forward.
cohortdigdir <-
ggplot(data=data,
aes(x=factor(data$STDIAGCODE), y=DIGDIR, fill=factor(data$STDIAGCODE), legend)) +
geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
labs(x="",y="Digits direct", title="SPIN Cohort - Digits direct", caption=caption)+
guides(fill=FALSE)+
theme_classic()+
theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
#scale_y_continuous(limits=c(0, 2200), breaks=seq(0, 2000, 200))+
stat_summary(fun.y=mean, geom="text", show_guide = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(cohortdigdir)
# Digit span backward.
cohortdigrev <-
ggplot(data=data,
aes(x=factor(data$STDIAGCODE), y=DIGREV, fill=factor(data$STDIAGCODE), legend)) +
geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
labs(x="",y="Digits reverse", title="SPIN Cohort - Digits reverse", caption=caption)+
guides(fill=FALSE)+
theme_classic()+
theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
#scale_y_continuous(limits=c(0, 2200), breaks=seq(0, 2000, 200))+
stat_summary(fun.y=mean, geom="text", show_guide = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(cohortdigrev)
# Trail Making Test A (time in seconds; higher = worse).
cohorttmta <-
ggplot(data=data,
aes(x=factor(data$STDIAGCODE), y=TMTA, fill=factor(data$STDIAGCODE), legend)) +
geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
labs(x="",y="Trail Making Test A", title="SPIN Cohort - Trail Making Test A", caption=caption)+
guides(fill=FALSE)+
theme_classic()+
theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
#scale_y_continuous(limits=c(0, 2200), breaks=seq(0, 2000, 200))+
stat_summary(fun.y=mean, geom="text", show_guide = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(cohorttmta)
# Trail Making Test B.
cohorttmtb <-
ggplot(data=data,
aes(x=factor(data$STDIAGCODE), y=TMTB, fill=factor(data$STDIAGCODE), legend)) +
geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
labs(x="",y="Trail Making Test B", title="SPIN Cohort - Trail Making Test B", caption=caption)+
guides(fill=FALSE)+
theme_classic()+
theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
#scale_y_continuous(limits=c(0, 2200), breaks=seq(0, 2000, 200))+
stat_summary(fun.y=mean, geom="text", show_guide = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(cohorttmtb)
# Boston Naming Test (60-item version).
cohortbnt <-
ggplot(data=data,
aes(x=factor(data$STDIAGCODE), y=BNT, fill=factor(data$STDIAGCODE), legend)) +
geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
labs(x="",y="Boston Naming Test (60)", title="SPIN Cohort - Boston Naming Test", caption=caption)+
guides(fill=FALSE)+
theme_classic()+
theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
#scale_y_continuous(limits=c(0, 2200), breaks=seq(0, 2000, 200))+
stat_summary(fun.y=mean, geom="text", show_guide = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(cohortbnt)
cohortreycopy <-
ggplot(data=data,
aes(x=factor(data$STDIAGCODE), y=REYCOPY, fill=factor(data$STDIAGCODE), legend)) +
geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
labs(x="",y="Rey Complex Figure copy", title="SPIN Cohort - Rey Complex Figure copy", caption=caption)+
guides(fill=FALSE)+
theme_classic()+
theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
#scale_y_continuous(limits=c(0, 2200), breaks=seq(0, 2000, 200))+
stat_summary(fun.y=mean, geom="text", show_guide = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(cohortreycopy)
cohortPHONFLU <-
ggplot(data=data,
aes(x=factor(data$STDIAGCODE), y=PHONFLU, fill=factor(data$STDIAGCODE), legend)) +
geom_boxplot(stat="boxplot", colour="black", alpha=0.4)+
labs(x="",y="Phonetic Fluency", title="SPIN Cohort - Phonetic Fluency", caption=caption)+
guides(fill=FALSE)+
theme_classic()+
theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
#scale_y_continuous(limits=c(0, 2200), breaks=seq(0, 2000, 200))+
stat_summary(fun.y=mean, geom="text", show_guide = FALSE, vjust=0, position="identity", aes(label=round(..y.., digits=1), fontface="bold"))
printWithNumber(cohortPHONFLU)
## Boxplots of neuropsychological test scores by diagnostic group, each with
## the per-group mean printed above its box. `data`, `caption` and
## printWithNumber() are defined earlier in the file.
## Deprecated ggplot2 usage fixed (requires ggplot2 >= 3.3):
##   stat_summary(fun.y=, show_guide=) -> (fun=, show.legend=)
##   ..y.. -> after_stat(y); guides(fill=FALSE) -> guides(fill="none")
## Also removed: data$COL inside aes() (bare column names), the stray bare
## `legend` argument in aes(), and redundant stat="boxplot"/position="identity".

# Semantic fluency.
cohortSEMFLU <-
  ggplot(data, aes(x = factor(STDIAGCODE), y = SEMFLU, fill = factor(STDIAGCODE))) +
  geom_boxplot(colour = "black", alpha = 0.4) +
  labs(x = "", y = "Semantic Fluency", title = "SPIN Cohort - Semantic Fluency", caption = caption) +
  guides(fill = "none") +
  theme_classic() +
  theme(legend.position = "none", plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic")) +
  stat_summary(fun = mean, geom = "text", show.legend = FALSE, vjust = 0,
               aes(label = round(after_stat(y), digits = 1), fontface = "bold"))
printWithNumber(cohortSEMFLU)

# VOSP battery score (expansion of the acronym not given in this file -- TODO confirm).
cohortVOSP <-
  ggplot(data, aes(x = factor(STDIAGCODE), y = VOSP, fill = factor(STDIAGCODE))) +
  geom_boxplot(colour = "black", alpha = 0.4) +
  labs(x = "", y = "VOSP", title = "SPIN Cohort - VOSP", caption = caption) +
  guides(fill = "none") +
  theme_classic() +
  theme(legend.position = "none", plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic")) +
  stat_summary(fun = mean, geom = "text", show.legend = FALSE, vjust = 0,
               aes(label = round(after_stat(y), digits = 1), fontface = "bold"))
printWithNumber(cohortVOSP)

# Neuropsychiatric Inventory total.
cohortNPI <-
  ggplot(data, aes(x = factor(STDIAGCODE), y = NPITOTAL, fill = factor(STDIAGCODE))) +
  geom_boxplot(colour = "black", alpha = 0.4) +
  labs(x = "", y = "Neuropsychiatric Inventory", title = "SPIN Cohort - Neuropsychiatric Inventory", caption = caption) +
  guides(fill = "none") +
  theme_classic() +
  theme(legend.position = "none", plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic")) +
  stat_summary(fun = mean, geom = "text", show.legend = FALSE, vjust = 0,
               aes(label = round(after_stat(y), digits = 1), fontface = "bold"))
printWithNumber(cohortNPI)

# Neuropsychiatric Inventory Q-Total.
cohortNPIq <-
  ggplot(data, aes(x = factor(STDIAGCODE), y = NPI.Q.TOTAL, fill = factor(STDIAGCODE))) +
  geom_boxplot(colour = "black", alpha = 0.4) +
  labs(x = "", y = "Neuropsychiatric Inventory Q-Total", title = "SPIN Cohort - Neuropsychiatric Inventory Q-Total", caption = caption) +
  guides(fill = "none") +
  theme_classic() +
  theme(legend.position = "none", plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic")) +
  stat_summary(fun = mean, geom = "text", show.legend = FALSE, vjust = 0,
               aes(label = round(after_stat(y), digits = 1), fontface = "bold"))
printWithNumber(cohortNPIq)
printWithNumber(blank)
# Section header page for the CSF biomarker plots.
grid.text(label="5. CSF Biomarkers - BASELINE", hjust="centre", vjust="bottom", gp=gpar(fontsize=16, col="#177db7", fontface="bold"))

# CSF Abeta42 by diagnostic group, with per-group mean labels.
# Deprecated ggplot2 usage fixed (ggplot2 >= 3.3): fun.y= -> fun=,
# show_guide= -> show.legend=, ..y.. -> after_stat(y),
# guides(fill=FALSE) -> guides(fill="none"), data$COL in aes() -> bare names.
cohortabeta <-
  ggplot(data, aes(x = factor(STDIAGCODE), y = CSFABETA42, fill = factor(STDIAGCODE))) +
  geom_boxplot(colour = "black", alpha = 0.4) +
  labs(x = "", y = "Abeta42 (pg/ml)", title = "SPIN Cohort - CSF Abeta42", caption = caption) +
  guides(fill = "none") +
  theme_classic() +
  theme(legend.position = "none", plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic")) +
  scale_y_continuous(limits = c(0, 2200), breaks = seq(0, 2000, 200)) +
  stat_summary(fun = mean, geom = "text", show.legend = FALSE, vjust = 0,
               aes(label = round(after_stat(y), digits = 1), fontface = "bold"))
printWithNumber(cohortabeta)
## Scatter plots of CSF Abeta42 against age and MMSE, coloured by diagnosis.
## The original passed x/y labels to labs() that were both swapped and then
## immediately overridden by xlab()/ylab(); those dead arguments are dropped.
## The dashed line at 550 pg/ml appears to be an Abeta42 cutoff -- TODO confirm.

# Abeta42 vs age.
cohortabetaage <-
  ggplot(data, aes(x = AGE, y = CSFABETA42, colour = STDIAGCODE)) +
  geom_point(size = 2, alpha = 0.7) +
  labs(title = "SPIN Cohort - CSF Abeta42 - Age", caption = caption, colour = "") +
  geom_hline(yintercept = 550, linetype = 2, colour = "gray") +
  xlab("Age (years)") + xlim(0, 100) +
  ylab("Abeta42 (pg/ml)") + ylim(0, 2000) +
  theme_classic() +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortabetaage)

# Abeta42 vs MMSE.
cohortabetammse <-
  ggplot(data, aes(x = MMSE, y = CSFABETA42, colour = STDIAGCODE)) +
  geom_point(size = 2, alpha = 0.7) +
  labs(title = "SPIN Cohort - CSF Abeta42 - MMSE", caption = caption, colour = "") +
  geom_hline(yintercept = 550, linetype = 2, colour = "gray") +
  xlab("MMSE score") + xlim(0, 30) +
  ylab("Abeta42 (pg/ml)") + ylim(0, 2000) +
  theme_classic() +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortabetammse)
# CSF total tau by diagnostic group, with per-group mean labels.
# BUG FIX: the y-axis label previously read "Abeta42 (pg/ml)" although the
# plotted variable is CSFTAU; it now reads "T-Tau (pg/ml)".
# Deprecated ggplot2 usage also fixed (ggplot2 >= 3.3): fun.y=/show_guide=/..y..
# -> fun=/show.legend=/after_stat(y); guides(fill=FALSE) -> guides(fill="none").
cohorttau <-
  ggplot(data, aes(x = factor(STDIAGCODE), y = CSFTAU, fill = factor(STDIAGCODE))) +
  geom_boxplot(colour = "black", alpha = 0.4) +
  labs(x = "", y = "T-Tau (pg/ml)", title = "SPIN Cohort - CSF T-Tau", caption = caption) +
  guides(fill = "none") +
  theme_classic() +
  theme(legend.position = "none", plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic")) +
  scale_y_continuous(limits = c(0, 2200), breaks = seq(0, 2000, 200)) +
  stat_summary(fun = mean, geom = "text", show.legend = FALSE, vjust = 0,
               aes(label = round(after_stat(y), digits = 1), fontface = "bold"))
printWithNumber(cohorttau)
## Scatter plots of CSF T-Tau against age and MMSE, coloured by diagnosis.
## Dead labs(x=, y=) arguments (overridden by xlab()/ylab() below) removed.
## Dashed line at 350 pg/ml appears to be a T-Tau cutoff -- TODO confirm.

# T-Tau vs age.
cohorttauage <-
  ggplot(data, aes(x = AGE, y = CSFTAU, colour = STDIAGCODE)) +
  geom_point(size = 2, alpha = 0.7) +
  labs(title = "SPIN Cohort - CSF T-Tau - Age", caption = caption, colour = "") +
  geom_hline(yintercept = 350, linetype = 2, colour = "gray") +
  xlab("Age (years)") + xlim(0, 100) +
  ylab("T-Tau (pg/ml)") + ylim(0, 2500) +
  theme_classic() +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohorttauage)

# T-Tau vs MMSE.
cohorttaummse <-
  ggplot(data, aes(x = MMSE, y = CSFTAU, colour = STDIAGCODE)) +
  geom_point(size = 2, alpha = 0.7) +
  labs(title = "SPIN Cohort - CSF T-Tau - MMSE", caption = caption, colour = "") +
  geom_hline(yintercept = 350, linetype = 2, colour = "gray") +
  xlab("MMSE score") + xlim(0, 30) +
  ylab("T-Tau (pg/ml)") + ylim(0, 2300) +
  theme_classic() +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohorttaummse)
# CSF phospho-tau by diagnostic group, with per-group mean labels.
# Deprecated ggplot2 usage fixed (ggplot2 >= 3.3): fun.y=/show_guide=/..y..
# -> fun=/show.legend=/after_stat(y); guides(fill=FALSE) -> guides(fill="none");
# data$COL in aes() -> bare column names.
cohortptau <-
  ggplot(data, aes(x = factor(STDIAGCODE), y = CSFPTAU, fill = factor(STDIAGCODE))) +
  geom_boxplot(colour = "black", alpha = 0.4) +
  labs(x = "", y = "P-Tau (pg/ml)", title = "SPIN Cohort - CSF P-Tau", caption = caption) +
  guides(fill = "none") +
  theme_classic() +
  theme(legend.position = "none", plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic")) +
  scale_y_continuous(limits = c(0, 300), breaks = seq(0, 300, 25)) +
  stat_summary(fun = mean, geom = "text", show.legend = FALSE, vjust = 0,
               aes(label = round(after_stat(y), digits = 1), fontface = "bold"))
printWithNumber(cohortptau)
## Scatter plots: P-Tau vs age/MMSE, then the pairwise biomarker scatters
## (Abeta42-Tau, Abeta42-PTau, Tau-PTau). Dead labs(x=, y=) arguments that were
## overridden by the xlab()/ylab() calls are removed. Dashed reference lines
## (550 Abeta42, 350 T-Tau, 61 P-Tau) look like positivity cutoffs -- TODO confirm.

# P-Tau vs age.
cohortptauage <-
  ggplot(data, aes(x = AGE, y = CSFPTAU, colour = STDIAGCODE)) +
  geom_point(size = 2, alpha = 0.7) +
  labs(title = "SPIN Cohort - CSF P-Tau - Age", caption = caption, colour = "") +
  geom_hline(yintercept = 61, linetype = 2, colour = "gray") +
  xlab("Age (years)") + xlim(0, 100) +
  ylab("P-Tau (pg/ml)") + ylim(0, 300) +
  theme_classic() +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortptauage)

# P-Tau vs MMSE.
cohortptaummse <-
  ggplot(data, aes(x = MMSE, y = CSFPTAU, colour = STDIAGCODE)) +
  geom_point(size = 2, alpha = 0.7) +
  labs(title = "SPIN Cohort - CSF P-Tau - MMSE", caption = caption, colour = "") +
  geom_hline(yintercept = 61, linetype = 2, colour = "gray") +
  xlab("MMSE score") + xlim(0, 30) +
  ylab("P-Tau (pg/ml)") + ylim(0, 300) +
  theme_classic() +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortptaummse)

# Abeta42 vs T-Tau.
cohortabetatau <-
  ggplot(data, aes(x = CSFTAU, y = CSFABETA42, colour = STDIAGCODE)) +
  geom_point(size = 2, alpha = 0.7) +
  labs(title = "SPIN Cohort - CSF Abeta42 - T-Tau", caption = caption, colour = "") +
  geom_hline(yintercept = 550, linetype = 2, colour = "gray") +
  geom_vline(xintercept = 350, linetype = 2, colour = "gray") +
  xlab("Total-Tau (pg/ml)") + xlim(0, 2500) +
  ylab("Abeta42 (pg/ml)") + ylim(0, 2000) +
  theme_classic() +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortabetatau)

# Abeta42 vs P-Tau.
cohortabetaptau <-
  ggplot(data, aes(x = CSFPTAU, y = CSFABETA42, colour = STDIAGCODE)) +
  geom_point(size = 2, alpha = 0.7) +
  labs(title = "SPIN Cohort - CSF Abeta42 - P-Tau", caption = caption, colour = "") +
  geom_hline(yintercept = 550, linetype = 2, colour = "gray") +
  geom_vline(xintercept = 61, linetype = 2, colour = "gray") +
  xlab("P-Tau (pg/ml)") + xlim(0, 300) +
  ylab("Abeta42 (pg/ml)") + ylim(0, 2000) +
  theme_classic() +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortabetaptau)

# T-Tau vs P-Tau.
cohorttauptau <-
  ggplot(data, aes(x = CSFPTAU, y = CSFTAU, colour = STDIAGCODE)) +
  geom_point(size = 2, alpha = 0.7) +
  labs(title = "SPIN Cohort - CSF T-Tau - P-Tau", caption = caption, colour = "") +
  geom_hline(yintercept = 350, linetype = 2, colour = "gray") +
  geom_vline(xintercept = 61, linetype = 2, colour = "gray") +
  xlab("P-Tau (pg/ml)") + xlim(0, 300) +
  ylab("T-Tau (pg/ml)") + ylim(0, 2300) +
  theme_classic() +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohorttauptau)
# CSF YKL-40 by diagnostic group, with per-group mean labels.
# Deprecated ggplot2 usage fixed (ggplot2 >= 3.3): fun.y=/show_guide=/..y..
# -> fun=/show.legend=/after_stat(y); guides(fill=FALSE) -> guides(fill="none").
cohortykl <-
  ggplot(data, aes(x = factor(STDIAGCODE), y = CSFYKL40, fill = factor(STDIAGCODE))) +
  geom_boxplot(colour = "black", alpha = 0.4) +
  labs(x = "", y = "YKL-40 (ng/ml)", title = "SPIN Cohort - CSF YKL-40", caption = caption) +
  guides(fill = "none") +
  theme_classic() +
  theme(legend.position = "none", plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic")) +
  scale_y_continuous(limits = c(0, 500), breaks = seq(0, 500, 50)) +
  stat_summary(fun = mean, geom = "text", show.legend = FALSE, vjust = 0,
               aes(label = round(after_stat(y), digits = 1), fontface = "bold"))
printWithNumber(cohortykl)
## Scatter plots involving CSF YKL-40 (vs age, MMSE, Abeta42 and T-Tau).
## Dead labs(x=, y=) arguments (overridden by xlab()/ylab()) removed.

# YKL-40 vs age (note: variable name `cohoryklage` kept from the original,
# although it is missing the "t" of the cohortXXX pattern).
cohoryklage <-
  ggplot(data, aes(x = AGE, y = CSFYKL40, colour = STDIAGCODE)) +
  geom_point(size = 2, alpha = 0.7) +
  labs(title = "SPIN Cohort - CSF YKL-40 - Age", caption = caption, colour = "") +
  xlab("Age (years)") + xlim(0, 100) +
  ylab("YKL-40 (ng/ml)") +
  theme_classic() +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohoryklage)

# YKL-40 vs MMSE.
cohortyklmmse <-
  ggplot(data, aes(x = MMSE, y = CSFYKL40, colour = STDIAGCODE)) +
  geom_point(size = 2, alpha = 0.7) +
  labs(title = "SPIN Cohort - CSF YKL-40 - MMSE", caption = caption, colour = "") +
  xlab("MMSE score") + xlim(0, 30) +
  ylab("YKL-40 (ng/ml)") +
  theme_classic() +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortyklmmse)

# Abeta42 vs YKL-40.
cohortabetaykl <-
  ggplot(data, aes(x = CSFYKL40, y = CSFABETA42, colour = STDIAGCODE)) +
  geom_point(size = 2, alpha = 0.7) +
  labs(title = "SPIN Cohort - CSF Abeta42 - YKL-40", caption = caption, colour = "") +
  geom_hline(yintercept = 550, linetype = 2, colour = "gray") +
  xlab("YKL-40 (ng/ml)") +
  ylab("Abeta42 (pg/ml)") + ylim(0, 2000) +
  theme_classic() +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortabetaykl)

# T-Tau vs YKL-40.
cohorttauykl <-
  ggplot(data, aes(x = CSFYKL40, y = CSFTAU, colour = STDIAGCODE)) +
  geom_point(size = 2, alpha = 0.7) +
  labs(title = "SPIN Cohort - CSF T-Tau - YKL-40", caption = caption, colour = "") +
  geom_hline(yintercept = 350, linetype = 2, colour = "gray") +
  xlab("YKL-40 (ng/ml)") +
  ylab("T-Tau (pg/ml)") + ylim(0, 2300) +
  theme_classic() +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohorttauykl)
# CSF neurofilament light by diagnostic group, with per-group mean labels.
# NOTE(review): this plot labels NFL in pg/ml while the NFL scatter plots below
# mostly use ng/ml -- confirm the correct unit; strings left as in the original.
# Deprecated ggplot2 usage fixed (ggplot2 >= 3.3): fun.y=/show_guide=/..y..
# -> fun=/show.legend=/after_stat(y); guides(fill=FALSE) -> guides(fill="none").
cohortnfl <-
  ggplot(data, aes(x = factor(STDIAGCODE), y = CSFNFL, fill = factor(STDIAGCODE))) +
  geom_boxplot(colour = "black", alpha = 0.4) +
  labs(x = "", y = "NFL (pg/ml)", title = "SPIN Cohort - CSF Neurofilaments light", caption = caption) +
  guides(fill = "none") +
  theme_classic() +
  theme(legend.position = "none", plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic")) +
  scale_y_continuous(limits = c(0, 6000), breaks = seq(0, 6000, 500)) +
  stat_summary(fun = mean, geom = "text", show.legend = FALSE, vjust = 0,
               aes(label = round(after_stat(y), digits = 1), fontface = "bold"))
printWithNumber(cohortnfl)
## Scatter plots involving CSF NFL (vs age, MMSE, Abeta42 and T-Tau).
## Dead labs(x=, y=) arguments (overridden by xlab()/ylab()) removed.
## NOTE(review): NFL axis units are inconsistent in the original (ng/ml here,
## pg/ml in the T-Tau plot and the boxplot) -- confirm; strings left unchanged.

# NFL vs age.
cohortnflage <-
  ggplot(data, aes(x = AGE, y = CSFNFL, colour = STDIAGCODE)) +
  geom_point(size = 2, alpha = 0.7) +
  labs(title = "SPIN Cohort - CSF NFL - Age", caption = caption, colour = "") +
  xlab("Age (years)") + xlim(0, 100) +
  ylab("NFL (ng/ml)") +
  theme_classic() +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortnflage)

# NFL vs MMSE.
cohortnflmmse <-
  ggplot(data, aes(x = MMSE, y = CSFNFL, colour = STDIAGCODE)) +
  geom_point(size = 2, alpha = 0.7) +
  labs(title = "SPIN Cohort - CSF NFL - MMSE", caption = caption, colour = "") +
  xlab("MMSE score") + xlim(0, 30) +
  ylab("NFL (ng/ml)") +
  theme_classic() +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortnflmmse)

# Abeta42 vs NFL.
cohortabetanfl <-
  ggplot(data, aes(x = CSFNFL, y = CSFABETA42, colour = STDIAGCODE)) +
  geom_point(size = 2, alpha = 0.7) +
  labs(title = "SPIN Cohort - CSF Abeta42 - NFL", caption = caption, colour = "") +
  geom_hline(yintercept = 550, linetype = 2, colour = "gray") +
  xlab("NFL (ng/ml)") +
  ylab("Abeta42 (pg/ml)") + ylim(0, 2000) +
  theme_classic() +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortabetanfl)

# T-Tau vs NFL.
cohorttaunfl <-
  ggplot(data, aes(x = CSFNFL, y = CSFTAU, colour = STDIAGCODE)) +
  geom_point(size = 2, alpha = 0.7) +
  labs(title = "SPIN Cohort - CSF T-Tau - NFL", caption = caption, colour = "") +
  geom_hline(yintercept = 350, linetype = 2, colour = "gray") +
  xlab("NFL (pg/ml)") +
  ylab("T-Tau (pg/ml)") + ylim(0, 2300) +
  theme_classic() +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohorttaunfl)
# CSF sAPPbeta by diagnostic group, with per-group mean labels.
# Deprecated ggplot2 usage fixed (ggplot2 >= 3.3): fun.y=/show_guide=/..y..
# -> fun=/show.legend=/after_stat(y); guides(fill=FALSE) -> guides(fill="none").
cohortsappb <-
  ggplot(data, aes(x = factor(STDIAGCODE), y = CSFSAPPBETA, fill = factor(STDIAGCODE))) +
  geom_boxplot(colour = "black", alpha = 0.4) +
  labs(x = "", y = "sAPPbeta (ng/ml)", title = "SPIN Cohort - CSF sAPPbeta", caption = caption) +
  guides(fill = "none") +
  theme_classic() +
  theme(legend.position = "none", plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic")) +
  scale_y_continuous(limits = c(0, 3000), breaks = seq(0, 3000, 500)) +
  stat_summary(fun = mean, geom = "text", show.legend = FALSE, vjust = 0,
               aes(label = round(after_stat(y), digits = 1), fontface = "bold"))
printWithNumber(cohortsappb)
## Scatter plots involving CSF sAPPbeta (vs age, MMSE, Abeta42 and T-Tau).
## Dead labs(x=, y=) arguments (overridden by xlab()/ylab()) removed.

# sAPPbeta vs age.
cohortsappbage <-
  ggplot(data, aes(x = AGE, y = CSFSAPPBETA, colour = STDIAGCODE)) +
  geom_point(size = 2, alpha = 0.7) +
  labs(title = "SPIN Cohort - CSF sAPPbeta - Age", caption = caption, colour = "") +
  xlab("Age (years)") + xlim(0, 100) +
  ylab("sAPPbeta (ng/ml)") +
  theme_classic() +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortsappbage)

# sAPPbeta vs MMSE.
cohortsappbmmse <-
  ggplot(data, aes(x = MMSE, y = CSFSAPPBETA, colour = STDIAGCODE)) +
  geom_point(size = 2, alpha = 0.7) +
  labs(title = "SPIN Cohort - CSF sAPPbeta - MMSE", caption = caption, colour = "") +
  xlab("MMSE score") + xlim(0, 30) +
  ylab("sAPPbeta (ng/ml)") +
  theme_classic() +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortsappbmmse)

# Abeta42 vs sAPPbeta.
cohortabetasappb <-
  ggplot(data, aes(x = CSFSAPPBETA, y = CSFABETA42, colour = STDIAGCODE)) +
  geom_point(size = 2, alpha = 0.7) +
  labs(title = "SPIN Cohort - CSF Abeta42 - sAPPbeta", caption = caption, colour = "") +
  geom_hline(yintercept = 550, linetype = 2, colour = "gray") +
  xlab("sAPPbeta (ng/ml)") +
  ylab("Abeta42 (pg/ml)") + ylim(0, 2000) +
  theme_classic() +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortabetasappb)

# T-Tau vs sAPPbeta.
cohorttausappb <-
  ggplot(data, aes(x = CSFSAPPBETA, y = CSFTAU, colour = STDIAGCODE)) +
  geom_point(size = 2, alpha = 0.7) +
  labs(title = "SPIN Cohort - CSF T-Tau - sAPPbeta", caption = caption, colour = "") +
  geom_hline(yintercept = 350, linetype = 2, colour = "gray") +
  xlab("sAPPbeta (ng/ml)") +
  ylab("T-Tau (pg/ml)") + ylim(0, 2300) +
  theme_classic() +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohorttausappb)
# CSF progranulin by diagnostic group, with per-group mean labels.
# Deprecated ggplot2 usage fixed (ggplot2 >= 3.3): fun.y=/show_guide=/..y..
# -> fun=/show.legend=/after_stat(y); guides(fill=FALSE) -> guides(fill="none").
cohortprogranulin <-
  ggplot(data, aes(x = factor(STDIAGCODE), y = CSFPROGRANULIN, fill = factor(STDIAGCODE))) +
  geom_boxplot(colour = "black", alpha = 0.4) +
  labs(x = "", y = "Progranulin (ng/ml)", title = "SPIN Cohort - CSF Progranulin", caption = caption) +
  guides(fill = "none") +
  theme_classic() +
  theme(legend.position = "none", plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic")) +
  scale_y_continuous(limits = c(0, 10), breaks = seq(0, 10, 2)) +
  stat_summary(fun = mean, geom = "text", show.legend = FALSE, vjust = 0,
               aes(label = round(after_stat(y), digits = 1), fontface = "bold"))
printWithNumber(cohortprogranulin)
## Scatter plots involving CSF progranulin (vs age, MMSE, Abeta42 and T-Tau).
## Dead labs(x=, y=) arguments (overridden by xlab()/ylab()) removed.

# Progranulin vs age.
cohortprogranulinage <-
  ggplot(data, aes(x = AGE, y = CSFPROGRANULIN, colour = STDIAGCODE)) +
  geom_point(size = 2, alpha = 0.7) +
  labs(title = "SPIN Cohort - CSF Progranulin - Age", caption = caption, colour = "") +
  xlab("Age (years)") + xlim(0, 100) +
  ylab("Progranulin (ng/ml)") +
  theme_classic() +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortprogranulinage)

# Progranulin vs MMSE.
cohortprogranulinmmse <-
  ggplot(data, aes(x = MMSE, y = CSFPROGRANULIN, colour = STDIAGCODE)) +
  geom_point(size = 2, alpha = 0.7) +
  labs(title = "SPIN Cohort - CSF Progranulin - MMSE", caption = caption, colour = "") +
  xlab("MMSE score") + xlim(0, 30) +
  ylab("Progranulin (ng/ml)") +
  theme_classic() +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortprogranulinmmse)

# Abeta42 vs progranulin.
cohortabetaprogranulin <-
  ggplot(data, aes(x = CSFPROGRANULIN, y = CSFABETA42, colour = STDIAGCODE)) +
  geom_point(size = 2, alpha = 0.7) +
  labs(title = "SPIN Cohort - CSF Abeta42 - Progranulin", caption = caption, colour = "") +
  geom_hline(yintercept = 550, linetype = 2, colour = "gray") +
  xlab("Progranulin (ng/ml)") +
  ylab("Abeta42 (pg/ml)") + ylim(0, 2000) +
  theme_classic() +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohortabetaprogranulin)

# T-Tau vs progranulin.
cohorttauprogranulin <-
  ggplot(data, aes(x = CSFPROGRANULIN, y = CSFTAU, colour = STDIAGCODE)) +
  geom_point(size = 2, alpha = 0.7) +
  labs(title = "SPIN Cohort - CSF T-Tau - Progranulin", caption = caption, colour = "") +
  geom_hline(yintercept = 350, linetype = 2, colour = "gray") +
  xlab("Progranulin (ng/ml)") +
  ylab("T-Tau (pg/ml)") + ylim(0, 2300) +
  theme_classic() +
  theme(plot.title = element_text(face = "bold", hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), plot.caption = element_text(size = 6, face = "italic"))
printWithNumber(cohorttauprogranulin)
### Density plots ######
printWithNumber(blank)
grid.text(label="6. Biomarkers distribution - BASELINE", hjust="centre", vjust="bottom", gp=gpar(fontsize=16, col="#177db7", fontface="bold"))
# For each diagnostic group in dx (defined earlier in the file), draw density
# plots of age and the core CSF biomarkers, each annotated with that group's
# 5th/50th/95th percentiles, mean and SD (solid line = median, dashed = 5%/95%).
# NOTE(review): the per-group subset is named `plot`, which shadows base::plot(),
# and columns are referenced as plot$COL inside aes(); both work here but are
# discouraged ggplot2 style. seq_along(dx) would be safer than 1:length(dx) if
# dx could ever be empty. stat_summary-style deprecations do not apply here, but
# fill="" with scale_fill_manual is an unusual trick to get a single fill colour.
for (i in 1:length(dx)) {
# Subset the data to the current diagnostic group.
plot <- data[data$STDIAGCODE==dx[i],]
# Summary statistics (rounded to 1 dp) and the annotation text per variable.
abeta <- round(quantile(plot$CSFABETA42, probs=c(0.05, 0.5, 0.95), na.rm=TRUE), digits=1)
abetamean <- round(mean(plot$CSFABETA42,na.rm=TRUE), digits=1)
abetasd <- round(sd(plot$CSFABETA42,na.rm=TRUE), digits=1)
labelabeta <- paste("ABETA42", "\n 5% Perc: ", abeta[1], "pg/ml\n50% Perc: ", abeta[2], "pg/ml\n95% Perc: ", abeta[3], "pg/ml\n\nMean: ",abetamean,"pg/ml\nSD: ",abetasd,"pg/ml", sep="")
# AB1.42 is the Lumipulse re-measurement of Abeta42 (per the label text below).
abeta42 <- round(quantile(plot$AB1.42, probs=c(0.05, 0.5, 0.95), na.rm=TRUE), digits=1)
abeta42mean <- round(mean(plot$AB1.42,na.rm=TRUE), digits=1)
abeta42sd <- round(sd(plot$AB1.42,na.rm=TRUE), digits=1)
labelabeta42 <- paste("ABETA1-42 (Lumipulse)", "\n 5% Perc: ", abeta42[1], "pg/ml\n50% Perc: ", abeta42[2], "pg/ml\n95% Perc: ", abeta42[3], "pg/ml\n\nMean: ",abeta42mean,"pg/ml\nSD: ",abeta42sd,"pg/ml", sep="")
tau <- round(quantile(plot$CSFTAU, probs=c(0.05, 0.5, 0.95), na.rm=TRUE), digits=1)
taumean <- round(mean(plot$CSFTAU,na.rm=TRUE), digits=1)
tausd <- round(sd(plot$CSFTAU,na.rm=TRUE), digits=1)
labeltau <- paste("T-TAU", "\n 5% Perc: ", tau[1], "pg/ml\n50% Perc: ", tau[2], "pg/ml\n95% Perc: ", tau[3], "pg/ml\n\nMean: ", taumean,"pg/ml\nSD: ",tausd, "pg/ml",sep="")
ptau <- round(quantile(plot$CSFPTAU, probs=c(0.05, 0.5, 0.95), na.rm=TRUE), digits=1)
ptaumean <- round(mean(plot$CSFPTAU,na.rm=TRUE), digits=1)
ptausd <- round(sd(plot$CSFPTAU,na.rm=TRUE), digits=1)
labelptau <- paste("P-TAU", "\n 5% Perc: ", ptau[1], "pg/ml\n50% Perc: ", ptau[2], "pg/ml\n95% Perc: ", ptau[3], "pg/ml\n\nMean: ",ptaumean,"pg/ml\nSD: ",ptausd, "pg/ml",sep="")
age <- round(quantile(plot$AGE, probs=c(0.05, 0.5, 0.95), na.rm=TRUE), digits=1)
agemean <- round(mean(plot$AGE,na.rm=TRUE), digits=1)
agesd <- round(sd(plot$AGE,na.rm=TRUE), digits=1)
labelage <- paste("AGE", "\n 5% Perc: ", age[1],"years\n50% Perc: ", age[2], "years\n95% Perc: ", age[3], "years\n\nMean: ",agemean,"years\nSD: ",agesd, "years",sep="")
# Density of Abeta42; stats annotation is anchored to the top-right corner.
plotabeta <-
ggplot(plot, aes(x=plot$CSFABETA42, fill=""))+
#geom_histogram(alpha=0.2, bins=30, colour="black")+
geom_density(alpha=0.2)+
xlab("Abeta42 (pg/ml)")+
ylab("Density estimate")+
geom_vline(xintercept= abeta[2]) +
geom_vline(xintercept= abeta[1], linetype=2) +
geom_vline(xintercept= abeta[3], linetype=2) +
xlim(0, 2000)+
theme_classic()+
theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
scale_fill_manual(values="green")+
labs(title=paste("DIAGNOSTIC GROUP: ", dx[i]), subtitle="")+
labs(caption=caption)+
annotate(geom="text", x=Inf,y=Inf, label=labelabeta, size=3, hjust=1, vjust=1)
# Density of Abeta1-42 (Lumipulse assay).
plotabeta42 <-
ggplot(plot, aes(x=plot$AB1.42, fill=""))+
#geom_histogram(alpha=0.2, bins=30, colour="black")+
geom_density(alpha=0.2)+
xlab("Abeta42 - Lumipulse (pg/ml)")+
ylab("Density estimate")+
geom_vline(xintercept= abeta42[2]) +
geom_vline(xintercept= abeta42[1], linetype=2) +
geom_vline(xintercept= abeta42[3], linetype=2) +
xlim(0, 3000)+
theme_classic()+
theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
scale_fill_manual(values="green")+
labs(title=paste("DIAGNOSTIC GROUP: ", dx[i]), subtitle="")+
labs(caption=caption)+
annotate(geom="text", x=Inf,y=Inf, label=labelabeta42, size=3, hjust=1, vjust=1)
# Density of total tau.
plottau <-
ggplot(plot, aes(x=plot$CSFTAU, fill=""))+
#geom_histogram(alpha=0.2, bins=30, colour="black")+
geom_density(alpha=0.2)+
xlab("T-Tau (pg/ml)")+
ylab("Density estimate")+
geom_vline(xintercept= tau[2]) +
geom_vline(xintercept= tau[1], linetype=2) +
geom_vline(xintercept= tau[3], linetype=2) +
xlim(0,2000)+
theme_classic()+
theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
scale_fill_manual(values="yellow")+
labs(title=paste("DIAGNOSTIC GROUP: ", dx[i]), subtitle="")+
labs(caption=caption)+
annotate(geom="text", x = Inf, y=Inf, label=labeltau,size=3, hjust=1, vjust=1)
# Density of phospho-tau.
plotptau <-
ggplot(plot, aes(x=plot$CSFPTAU, fill=""))+
#geom_histogram(alpha=0.2, bins=30, colour="black")+
geom_density(alpha=0.2)+
xlab("P-Tau (pg/ml)")+
ylab("Density estimate")+
geom_vline(xintercept= ptau[2]) +
geom_vline(xintercept= ptau[1], linetype=2) +
geom_vline(xintercept= ptau[3], linetype=2) +
xlim(0,250)+
theme_classic()+
theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
scale_fill_manual(values="purple")+
labs(title=paste("DIAGNOSTIC GROUP: ", dx[i]), subtitle="")+
labs(caption=caption)+
annotate(geom="text", x = Inf, y=Inf, label=labelptau,size=3, hjust=1, vjust=1)
# Density of age.
plotage <-
ggplot(plot, aes(x=plot$AGE, fill=""))+
#geom_histogram(alpha=0.2, bins=30, colour="black")+
geom_density(alpha=0.2)+
xlab("Age (years)")+
ylab("Density estimate")+
geom_vline(xintercept= age[2]) +
geom_vline(xintercept= age[1], linetype=2) +
geom_vline(xintercept= age[3], linetype=2) +
xlim(0, 100)+
theme_classic()+
theme(legend.position="none", plot.title = element_text(face="bold", hjust=0.5), plot.subtitle=element_text(hjust=0.5), plot.caption=element_text(size=6, face="italic"))+
scale_fill_manual(values="blue")+
labs(title=paste("DIAGNOSTIC GROUP: ", dx[i]), subtitle="")+
labs(caption=caption)+
annotate(geom="text", x = Inf, y=Inf, label=labelage, size=3, hjust=1, vjust=1)
# Emit the five plots for this diagnostic group.
printWithNumber(plotage)
printWithNumber(plotabeta)
printWithNumber(plotabeta42)
printWithNumber(plottau)
printWithNumber(plotptau)
}
dev.off()
library(tidyverse)
library(exifr)
library(sf)
library(here)
#### Parameters to set for each run (specific to a given photo set) ####
# Top-level folder of all mission images. Do not include trailing slash.
photoset_path = "/storage/temp/27_EmPo_90_90_90m_25deg_-03ev_merged"
# Path to save the thinned photoset to. Exclude the actual photoset folder(s) as they will be appended to the path provided here. Do not include trailing slash.
destination_path = "/storage/temp/thinned"
# Name to prepend to all thinned sets based on this photoset
photoset_name = "set27b"
## Should transects be thinned in pairs? Use for angled-gimbal photosets.
transect_pairs = TRUE
# Specify manual stringer images (images that MapPilot collects along the project boundary when moving from one transect to the next) to exclude if they're not picked up by the algorithm
# Images are identified by their EXIF CreateDate timestamp ("YYYY:MM:DD HH:MM:SS").
# for 15a: manual_stringer_photos = c("2019:09:10 11:12:42","2019:09:10 11:12:44","2019:09:10 11:12:47","2019:09:10 11:12:49","2019:09:10 11:12:52")
# for 16: manual_stringer_photos = c("2019:09:11 11:34:10","2019:09:11 12:01:49","2019:09:11 12:02:45","2019:09:11 12:02:46","2019:09:11 12:03:39","2019:09:11 11:34:10","2019:09:11 11:12:05","2019:09:11 11:12:12","2019:09:11 11:12:13","2019:09:11 11:12:15","2019:09:11 11:12:18","2019:09:11 11:15:08","2019:09:11 11:15:10","2019:09:11 11:15:18","2019:09:11 11:12:07","2019:09:11 11:15:19","2019:09:11 11:12:19")
# for 26: manual_stringer_photos = c("2019:09:11 14:47:32","2019:09:11 14:47:29","2019:09:11 14:47:27","2019:09:11 14:47:25","2019:09:11 14:47:22","2019:09:11 14:47:19","2019:09:11 14:47:17","2019:09:11 14:47:15","2019:09:11 14:47:11")
# for 21: manual_stringer_photos = c("2019:09:12 11:01:47", "2019:09:12 11:01:49", "2019:09:12 11:01:51", "2019:09:12 11:01:54")
# for 22: manual_stringer_photos = NULL
# for 19: manual_stringer_photos = NULL
# for 20: manual_stringer_photos = NULL
# for 27:
manual_stringer_photos = c("2019:09:12 11:03:21","2019:09:12 11:03:22","2019:09:12 11:01:47","2019:09:12 11:01:49","2019:09:12 11:01:51","2019:09:12 11:01:54")
#manual_stringer_photos = NULL
# How many degrees (angle) change in transect path signals a new transect?
change_thresh = 8
## Stringer detection:
# Within how many degrees (in terms of the orientation of the transect) does a focal transect have to be from other transects to not be considered a stringer
tolerance = 3
# What proportion of other transects have to be within the tolerance for the focal transect to not be considered a stringer transect?
proportion_matching_threshold = .2
# Min photos per transect to consider it a transect
# for 27
min_photos = 6
# for rest
# NOTE(review): this assignment unconditionally overrides min_photos = 6 above,
# even though this run's photoset_path points at set 27. Comment out whichever
# of the two assignments does not apply to the current run.
min_photos = 4
## Specify thinning factors (forward then side, one row per thinned set)
# Each row of `thins` is c(forward_factor, side_factor); commented rows are
# thinning combinations that are currently disabled.
thins = matrix(c(#1,1,
#1,2,
#2,1,
#2,2
2,4,
4,2,
4,4
),
ncol=2,
byrow=TRUE)
#### Convenience functions ####
# Loads project helpers used below (presumably including rad2deg -- defined in
# the sourced file, not here).
source(here("scripts/convenience_functions.R"))
#### Assign transect IDs to photos ####
#### This is to allow thinning by transect
## Find all original drone photos (use regex to search for DJI photos in case there are some non-drone photos in the folder)
# NOTE(review): in the pattern, the "." before "JPG" matches any character and
# the pattern is unanchored; "DJI_[0-9]{4}\\.JPG$" would be stricter. Also,
# lowercase ".jpg" extensions would not match -- confirm that is intended.
photo_files = list.files(photoset_path,recursive=TRUE,pattern="DJI_[0-9]{4}.JPG",full.names = TRUE)
# Join the last two elements of a character vector with "/".
# Used on split file paths to build a "folder/file" ID for each photo,
# e.g. c("storage","temp","100MEDIA","DJI_0001.JPG") -> "100MEDIA/DJI_0001.JPG".
# A length-1 input is returned unchanged (matching the original behaviour,
# where the out-of-range 0 index was silently dropped); empty input gives "".
# Fixes: the original ended with an assignment, returning its value only
# invisibly; the result is now returned explicitly.
get_last_2_and_join = function(x) {
  n = length(x)
  if (n == 0) return("")
  paste(x[max(1, n - 1):n], collapse = "/")
}
# "folder/file" identifier (last two path components) for each photo.
photo_folder_file = str_split(photo_files,"/") %>% map(get_last_2_and_join) %>% unlist
# Read the EXIF tags needed for ordering and georeferencing the photos.
d_exif = read_exif(photo_files , tags = c("ImageDescription","GPSLatitude","GPSLongitude","CreateDate"))
d = d_exif %>%
select(ImageDescription,GPSLatitude,GPSLongitude,CreateDate) %>%
#separate(ImageDescription,c(NA,"Folder","File"), sep = "\\\\") %>%
mutate(Folder_File = photo_folder_file) %>%
arrange(CreateDate,Folder_File) # sort by time and then by name (in case any were from the same exact time, the name should increment)
### Delete repeat photo locations (they seem to only be at the end of transects when the drone is pivoting to the next angle)
# Keeps the first photo at each (lat, long); later duplicates are dropped.
d$d_repeat = d %>%
select(GPSLatitude,GPSLongitude) %>%
duplicated()
d = d[!d$d_repeat,]
## filtering for set 16
# NOTE(review): this timestamp filter is specific to photo set 16 but runs
# unconditionally for every set. Harmless only if the timestamp cannot occur in
# other sets -- verify, or comment it out when processing other sets.
d = d %>%
filter(CreateDate != "2019:09:11 11:34:10")
## Make it spatial
# EXIF coordinates are WGS84 lat/long (EPSG:4326).
d_sp = st_as_sf(d,coords=c("GPSLongitude","GPSLatitude"), crs=4326)
## Convert to meter units (CA Albers projection)
d_sp = st_transform(d_sp,3310)
## Get x and y coords
# Append projected X/Y columns, then drop the geometry to get a plain data
# frame for the angle computations below.
d_sp = cbind(d_sp,st_coordinates(d_sp))
d_coords = d_sp
st_geometry(d_coords) = NULL # remove geom (convert to normal data frame)
## Compute the angle from each point to the next point
# For every photo except the last, compute the heading to the next photo:
# atan2(dx, dy) measured from the +Y axis, converted to degrees and folded by
# abs() into [0, 180] -- presumably so the two travel directions along a
# transect compare equal (TODO confirm; see the commented-out ifelse below,
# which suggests a signed version was tried and abandoned).
for(i in 1:(nrow(d_coords)-1)) {
focal_x = d_coords[i,"X"]
focal_y = d_coords[i,"Y"]
next_x = d_coords[i+1,"X"]
next_y = d_coords[i+1,"Y"]
x_dist = next_x - focal_x
y_dist = next_y - focal_y
# NOTE(review): hypotenuse (the step distance) is computed but never used in
# this loop.
hypotenuse = sqrt(x_dist^2 + y_dist^2)
angle = atan2( x_dist,y_dist ) %>% rad2deg %>% abs # angle from one point to next
#angle = ifelse(x_dist < 0, angle + 180,angle)
# Some times two photo points are directly on top of each other (DJI bug?), so consider them for transect-delineation purposes to be in the same transect. To do this need to save a code as -1 which the angle-difference computing script looks for
if(x_dist == 0 & y_dist == 0) {
angle = -1
}
# The last row's angle stays NA (there is no next point).
d_coords[i,"angle"] = angle
}
## Get average angle
# NOTE(review): avg_angle is not referenced again in this script — confirm it
# is still needed.
avg_angle = mean(d_coords$angle,na.rm=TRUE)
## Compute change in angle from one point to the next
# First row has no previous point, so its angle_change stays NA.
for(i in 2:(nrow(d_coords))) {
last_angle = d_coords[i-1,"angle"]
current_angle = d_coords[i,"angle"]
# NOTE(review): 360-(360-abs(...)) is algebraically just abs(current - last);
# if a 360-degree wraparound was intended this would need min(d, 360 - d) —
# TODO confirm intent.
angle_change = 360-(360-abs(current_angle-last_angle))
# Coincident points (coded angle == -1) and the trailing NA angle are treated
# as zero change so they never trigger a new transect.
if (is.na(current_angle) | current_angle == -1 | last_angle == -1) {
angle_change = 0
}
d_coords[i,"angle_change"] = angle_change
}
## Give a unique ID to every string of plots with < X degrees angle change from one photo to the next
transect_id = 1 # starting value
just_incremented = FALSE # variable to keep track of whether we just incremented the transect ID (hit a new transect). if increment twice in a row, it's the end of a transect and we shouldn't increment the second time
for(i in 1:nrow(d_coords)) {
# Assign the current ID first, then decide whether the NEXT point starts a
# new transect.
d_coords[i,"transect_id"] = transect_id
point = d_coords[i,]
# Row 1 has no angle_change; skip the increment logic for it.
if(is.na(point$angle_change)) next()
if(point$angle_change > change_thresh) { # if the next point is a large angle different from the current point, increment transect ID so the next point is assigned to a new transect
if(just_incremented) { # we incremented on the previous point and also this point, so it's the end of a transect so we shouldn't increment for this point
just_incremented = FALSE
} else {
transect_id = transect_id + 1
just_incremented = TRUE
}
} else {
just_incremented = FALSE
}
}
## Eliminate the stringers of points that MapPilot places along perimeter of flight area when going from one transect to the next
## Get average angle of each transect. Count number of transects with average angle within 3 degrees. If < 10% of transects are within 3 degree, it's a stringer
transect_summ = d_coords %>%
filter(angle != -1) %>% # Don't include points with no angle (two points on top of each other)
group_by(transect_id) %>%
slice(2:n()) %>% # drop the first photo of each group because it probably has a crazy angle
slice(1:(n()-1)) %>% # drop the last row of each group because it could have a crazy angle
summarize(avg_angle = mean(angle),
n_photos = n())
# only compare against transects that are not very short (probably MapPilot edge stringers)
# n_photos counts the trimmed group, so +2 restores the two dropped end photos.
transects_long = transect_summ %>%
filter((n_photos + 2) > min_photos)
# Denominator for the proportion-matching test applied to each transect below.
n_transects = nrow(transects_long)
## Flag stringer transects: a transect is kept only if enough of the other
## long transects share its heading within +/- `tolerance` degrees.
for (i in seq_len(nrow(transect_summ))) {
  transect = transect_summ[i, ]
  angle = transect$avg_angle
  transect_id = transect$transect_id
  lower_bound = angle - tolerance
  upper_bound = angle + tolerance
  # BUG FIX: the original condition parsed as ((A | B) & C) | D because `&`
  # binds tighter than `|` in R, so any transect with
  # avg_angle > (lower_bound %% 360) matched regardless of the upper bound.
  # The intended, symmetric test is (below either upper bound) AND (above
  # either lower bound); the %% 360 forms handle headings wrapping past 0/360.
  matching_transects = transects_long %>%
    filter((avg_angle < upper_bound | avg_angle < (upper_bound %% 360)) &
             (avg_angle > lower_bound | avg_angle > (lower_bound %% 360)))
  n_matching = nrow(matching_transects)
  proportion_matching = n_matching / n_transects
  # Stringer if too few long transects share this heading, or if the transect
  # itself is too short (+2 restores the two end photos trimmed earlier).
  if (proportion_matching < proportion_matching_threshold | (transect$n_photos + 2) < min_photos) {
    d_coords[d_coords$transect_id == transect_id, "stringer"] = TRUE
  } else {
    d_coords[d_coords$transect_id == transect_id, "stringer"] = FALSE
  }
}
# assign manual stringer photos
# Photos listed by timestamp in the header config are force-flagged as stringers.
d_coords[d_coords$CreateDate %in% manual_stringer_photos,"stringer"] = TRUE
## give each point the mean x and y coordinate of all its photos, also the mean angle
transect_summ = d_coords %>%
group_by(transect_id) %>%
summarize(mean_x_coord = mean(X),
mean_y_coord = mean(Y),
mean_angle = mean(angle))
# left_join without `by` joins on the shared column (transect_id) and prints a
# message about the join columns.
d_coords = left_join(d_coords,transect_summ)
# ## Assign new transect IDs, but only to non-stringer transects, and do it by incrementing transects based on their average x coordinate
# d_coords = d_coords %>%
# arrange(mean_x_coord,CreateDate,Folder_File)
## Alternatively, when transects not N-S, assign transect IDs just by CreateDate. This means all must be consecutive.
d_coords = d_coords %>%
arrange(CreateDate,Folder_File)
#### !!!! here need to loop through each d_coord.
# whenver hit a new transect_id, increment the count. assign the count as sorted_transect_id
## to use for assigning paired transects the same ID for purposes of thinning angled gimbal datasets
# Lookup vector 1,1,2,2,3,3,... maps each consecutive transect to a pair ID.
transect_id_lookup = rep(1:1000,each=2)
transect_ids_encountered = NULL
for(i in 1:nrow(d_coords)) {
#if(i == 82) { browser() }
photo = d_coords[i,]
# Stringer photos get no new ID (transect_id_new stays NA for them).
if(photo$stringer) next()
transect_id = photo$transect_id
if(!(transect_id %in% transect_ids_encountered)) {
transect_ids_encountered = c(transect_ids_encountered,transect_id)
}
# Without pairing, the new ID is simply the running count of distinct
# transects seen; with pairing, consecutive transects share an ID.
if(!transect_pairs) {
d_coords[i,"transect_id_new"] = length(transect_ids_encountered)
} else {
d_coords[i,"transect_id_new"] = transect_id_lookup[length(transect_ids_encountered)]
}
}
# # redo numbering for only
# d_coords[!d_coords$stringer,"transect_id_new"] = d_coords[!d_coords$stringer,] %>% group_indices(transect_id)
d_coords = d_coords %>%
mutate(odd_transect_new = (transect_id_new %% 2))
## Assign incrementing photo IDs
d_coords$photo_id = 1:nrow(d_coords)
## Make it spatial again for checking results on a map
d_tsect_sp = st_as_sf(d_coords,coords=c("X","Y"), crs=3310)
plot(d_tsect_sp)
# NOTE(review): hard-coded scratch path for visual QA of transect assignment.
st_write(d_tsect_sp %>% st_transform(4326), "/storage/temp/temp_transect_eval.geojson",delete_dsn=TRUE)
#### Generate thinned photoset copies ####
# copy photo sets with specified front and side thinning factor combinations (exclude stringers)
# always generate a set with thinning factors of 1 and 1 which exclude stringers
# NOTE(review): the 1,1 set is only generated when it appears in the `thins`
# matrix configured at the top of the script — confirm.
# reverse the thins so we do the small photosets first
thins_rev = thins[nrow(thins):1,]
thins_rev = as.data.frame(thins_rev)
names(thins_rev) = c("forward_thin","side_thin")
thins_rev = thins_rev %>%
dplyr::mutate(thin_name = paste(forward_thin,side_thin,sep="_"))
## Give all photos an incrementing number in sequence so the front-thin photo selections are the same in each dataset
d_coords$image_sequence_number = 1:nrow(d_coords)
for(i in 1:nrow(thins_rev)) {
thin = thins_rev[i,]
# Side thinning: keep transects whose new ID is a multiple of side_thin;
# stringers are always excluded.
photos_side_thin = d_coords %>%
filter(!stringer) %>% # exclude stringers
filter((transect_id_new %% thin$side_thin) == 0) # perform side thinning
# perform forward thinning
# Keep every forward_thin-th photo along the flight line (by sequence number).
photos = photos_side_thin[(photos_side_thin$image_sequence_number %% thin$forward_thin) == 0,]
thinned_photoset_name = paste0(photoset_name,"_thin",thin$forward_thin,thin$side_thin)
## Copy thinned set to destination path
# Get the necessary paths
photos = photos %>%
mutate(source_path = paste0(photoset_path,"/",Folder_File),
dest_path = paste0(destination_path,"/",thinned_photoset_name,"/",Folder_File)) %>%
mutate(dest_directory = dest_path %>% map(path_drop_last) %>% unlist ) # this is the destination file without the file at the end: for creating the directory for it via dir.create below
# Make sure all needed directories exist
dest_directories = unique(photos$dest_directory)
walk(dest_directories,dir.create,recursive=TRUE)
# overwrite=FALSE means re-runs skip files that were already copied.
file.copy(photos$source_path,photos$dest_path, overwrite=FALSE)
}
| /scripts/thin_drone_photoset.R | no_license | youngdjn/tahoe-forest-structure-drone | R | false | false | 12,464 | r | library(tidyverse)
library(exifr)
library(sf)
library(here)
#### Parameters to set for each run (specific to a given photo set) ####
# Top-level folder of all mission images. Do not include trailing slash.
photoset_path = "/storage/temp/27_EmPo_90_90_90m_25deg_-03ev_merged"
# Path to save the thinned photoset to. Exclude the actual photoset folder(s) as they will be appended to the path provided here. Do not include trailing slash.
destination_path = "/storage/temp/thinned"
# Name to prepend to all thinned sets based on this photoset
photoset_name = "set27b"
## Should transects be thinned in pairs? Use for angled-gimbal photosets.
transect_pairs = TRUE
# Specify manual stringer images (images that MapPilot collects along the project boundary when moving from one transect to the next) to exclude if they're not picked up by the algorithm
# for 15a: manual_stringer_photos = c("2019:09:10 11:12:42","2019:09:10 11:12:44","2019:09:10 11:12:47","2019:09:10 11:12:49","2019:09:10 11:12:52")
# for 16: manual_stringer_photos = c("2019:09:11 11:34:10","2019:09:11 12:01:49","2019:09:11 12:02:45","2019:09:11 12:02:46","2019:09:11 12:03:39","2019:09:11 11:34:10","2019:09:11 11:12:05","2019:09:11 11:12:12","2019:09:11 11:12:13","2019:09:11 11:12:15","2019:09:11 11:12:18","2019:09:11 11:15:08","2019:09:11 11:15:10","2019:09:11 11:15:18","2019:09:11 11:12:07","2019:09:11 11:15:19","2019:09:11 11:12:19")
# for 26: manual_stringer_photos = c("2019:09:11 14:47:32","2019:09:11 14:47:29","2019:09:11 14:47:27","2019:09:11 14:47:25","2019:09:11 14:47:22","2019:09:11 14:47:19","2019:09:11 14:47:17","2019:09:11 14:47:15","2019:09:11 14:47:11")
# for 21: manual_stringer_photos = c("2019:09:12 11:01:47", "2019:09:12 11:01:49", "2019:09:12 11:01:51", "2019:09:12 11:01:54")
# for 22: manual_stringer_photos = NULL
# for 19: manual_stringer_photos = NULL
# for 20: manual_stringer_photos = NULL
# for 27:
manual_stringer_photos = c("2019:09:12 11:03:21","2019:09:12 11:03:22","2019:09:12 11:01:47","2019:09:12 11:01:49","2019:09:12 11:01:51","2019:09:12 11:01:54")
#manual_stringer_photos = NULL
# How many degrees (angle) change in transect path signals a new transect?
change_thresh = 8
## Stringer detection:
# Within how many degrees (in terms of the orientation of the transect) does a focal transect have to be from other transects to not be considered a stringer
tolerance = 3
# What proportion of other transects have to be within the tolerance for the focal transect to not be considered a stringer transect?
proportion_matching_threshold = .2
# Min photos per transect to consider it a transect
# for 27
min_photos = 6
# for rest
# NOTE(review): this assignment overwrites the set-27 value of 6 just above, so
# min_photos is always 4 as written — comment out whichever line does not apply.
min_photos = 4
## Specify thinning factors (forward then side, one row per thinned set)
thins = matrix(c(#1,1,
#1,2,
#2,1,
#2,2
2,4,
4,2,
4,4
),
ncol=2,
byrow=TRUE)
#### Convenience functions ####
# Load project helpers (rad2deg, path_drop_last) used later in this script.
source(here("scripts/convenience_functions.R"))
#### Assign transect IDs to photos ####
#### This is to allow thinning by transect
## Find all original drone photos (use regex to search for DJI photos in case there are some non-drone photos in the folder)
photo_files = list.files(photoset_path,recursive=TRUE,pattern="DJI_[0-9]{4}.JPG",full.names = TRUE)
# Join the last two components of a split file path into "folder/file".
#
# x: character vector of path components (as produced by str_split on "/").
# Returns a single string made of the final two elements of x joined by "/".
# Assumes length(x) >= 2 — TODO confirm callers never pass shorter vectors.
#
# Fix: the original ended with an assignment (`joined = paste(...)`), which
# returned the value only invisibly; return the expression directly instead.
get_last_2_and_join = function(x) {
  last_2 = x[(length(x) - 1):length(x)]
  paste(last_2, collapse = "/")
}
# Build a "folder/file" relative identifier for every photo.
photo_folder_file = str_split(photo_files,"/") %>% map(get_last_2_and_join) %>% unlist
# Pull location and timestamp EXIF tags for every photo.
d_exif = read_exif(photo_files , tags = c("ImageDescription","GPSLatitude","GPSLongitude","CreateDate"))
d = d_exif %>%
select(ImageDescription,GPSLatitude,GPSLongitude,CreateDate) %>%
#separate(ImageDescription,c(NA,"Folder","File"), sep = "\\\\") %>%
mutate(Folder_File = photo_folder_file) %>%
arrange(CreateDate,Folder_File) # sort by time and then by name (in case any were from the same exact time, the name should increment)
### Delete repeat photo locations (they seem to only be at the end of transects when the drone is pivoting to the next angle)
d$d_repeat = d %>%
select(GPSLatitude,GPSLongitude) %>%
duplicated()
d = d[!d$d_repeat,]
## filtering for set 16
# NOTE(review): hard-coded timestamp filter runs for every photoset — confirm
# it is harmless for sets other than 16.
d = d %>%
filter(CreateDate != "2019:09:11 11:34:10")
## Make it spatial
d_sp = st_as_sf(d,coords=c("GPSLongitude","GPSLatitude"), crs=4326)
## Convert to meter units (CA Albers projection)
d_sp = st_transform(d_sp,3310)
## Get x and y coords
d_sp = cbind(d_sp,st_coordinates(d_sp))
d_coords = d_sp
st_geometry(d_coords) = NULL # remove geom (convert to normal data frame)
## Compute the angle from each point to the next point
# Heading (degrees, folded to 0-180 by abs()) from photo i to photo i+1; the
# last row stays NA. rad2deg() comes from convenience_functions.R.
for(i in 1:(nrow(d_coords)-1)) {
focal_x = d_coords[i,"X"]
focal_y = d_coords[i,"Y"]
next_x = d_coords[i+1,"X"]
next_y = d_coords[i+1,"Y"]
x_dist = next_x - focal_x
y_dist = next_y - focal_y
# NOTE(review): hypotenuse is computed but never used below.
hypotenuse = sqrt(x_dist^2 + y_dist^2)
angle = atan2( x_dist,y_dist ) %>% rad2deg %>% abs # angle from one point to next
#angle = ifelse(x_dist < 0, angle + 180,angle)
# Some times two photo points are directly on top of each other (DJI bug?), so consider them for transect-delineation purposes to be in the same transect. To do this need to save a code as -1 which the angle-difference computing script looks for
if(x_dist == 0 & y_dist == 0) {
angle = -1
}
d_coords[i,"angle"] = angle
}
## Get average angle
# NOTE(review): avg_angle is not referenced again in this script.
avg_angle = mean(d_coords$angle,na.rm=TRUE)
## Compute change in angle from one point to the next
# Row 1 has no previous point, so its angle_change stays NA.
for(i in 2:(nrow(d_coords))) {
last_angle = d_coords[i-1,"angle"]
current_angle = d_coords[i,"angle"]
# NOTE(review): 360-(360-abs(...)) simplifies to abs(current - last); a
# wraparound-aware form would be min(d, 360 - d) — TODO confirm intent.
angle_change = 360-(360-abs(current_angle-last_angle))
# Coincident points (angle == -1) and the trailing NA are treated as zero
# change so they never start a new transect.
if (is.na(current_angle) | current_angle == -1 | last_angle == -1) {
angle_change = 0
}
d_coords[i,"angle_change"] = angle_change
}
## Give a unique ID to every string of plots with < X degrees angle change from one photo to the next
transect_id = 1 # starting value
just_incremented = FALSE # variable to keep track of whether we just incremented the transect ID (hit a new transect). if increment twice in a row, it's the end of a transect and we shouldn't increment the second time
for(i in 1:nrow(d_coords)) {
# Assign the current ID first, then decide whether the NEXT point starts a
# new transect.
d_coords[i,"transect_id"] = transect_id
point = d_coords[i,]
if(is.na(point$angle_change)) next()
if(point$angle_change > change_thresh) { # if the next point is a large angle different from the current point, increment transect ID so the next point is assigned to a new transect
if(just_incremented) { # we incremented on the previous point and also this point, so it's the end of a transect so we shouldn't increment for this point
just_incremented = FALSE
} else {
transect_id = transect_id + 1
just_incremented = TRUE
}
} else {
just_incremented = FALSE
}
}
## Eliminate the stringers of points that MapPilot places along perimeter of flight area when going from one transect to the next
## Get average angle of each transect. Count number of transects with average angle within 3 degrees. If < 10% of transects are within 3 degree, it's a stringer
transect_summ = d_coords %>%
filter(angle != -1) %>% # Don't include points with no angle (two points on top of each other)
group_by(transect_id) %>%
slice(2:n()) %>% # drop the first photo of each group because it probably has a crazy angle
slice(1:(n()-1)) %>% # drop the last row of each group because it could have a crazy angle
summarize(avg_angle = mean(angle),
n_photos = n())
# only compare against transects that are not very short (probably MapPilot edge stringers)
# n_photos counts the trimmed group, so +2 restores the two dropped end photos.
transects_long = transect_summ %>%
filter((n_photos + 2) > min_photos)
# Denominator for the proportion-matching test applied to each transect below.
n_transects = nrow(transects_long)
## Flag stringer transects: a transect is kept only if enough of the other
## long transects share its heading within +/- `tolerance` degrees.
for (i in seq_len(nrow(transect_summ))) {
  transect = transect_summ[i, ]
  angle = transect$avg_angle
  transect_id = transect$transect_id
  lower_bound = angle - tolerance
  upper_bound = angle + tolerance
  # BUG FIX: the original condition parsed as ((A | B) & C) | D because `&`
  # binds tighter than `|` in R, so any transect with
  # avg_angle > (lower_bound %% 360) matched regardless of the upper bound.
  # The intended, symmetric test is (below either upper bound) AND (above
  # either lower bound); the %% 360 forms handle headings wrapping past 0/360.
  matching_transects = transects_long %>%
    filter((avg_angle < upper_bound | avg_angle < (upper_bound %% 360)) &
             (avg_angle > lower_bound | avg_angle > (lower_bound %% 360)))
  n_matching = nrow(matching_transects)
  proportion_matching = n_matching / n_transects
  # Stringer if too few long transects share this heading, or if the transect
  # itself is too short (+2 restores the two end photos trimmed earlier).
  if (proportion_matching < proportion_matching_threshold | (transect$n_photos + 2) < min_photos) {
    d_coords[d_coords$transect_id == transect_id, "stringer"] = TRUE
  } else {
    d_coords[d_coords$transect_id == transect_id, "stringer"] = FALSE
  }
}
# assign manual stringer photos
# Photos listed by timestamp in the header config are force-flagged as stringers.
d_coords[d_coords$CreateDate %in% manual_stringer_photos,"stringer"] = TRUE
## give each point the mean x and y coordinate of all its photos, also the mean angle
transect_summ = d_coords %>%
group_by(transect_id) %>%
summarize(mean_x_coord = mean(X),
mean_y_coord = mean(Y),
mean_angle = mean(angle))
# left_join without `by` joins on the shared column (transect_id).
d_coords = left_join(d_coords,transect_summ)
# ## Assign new transect IDs, but only to non-stringer transects, and do it by incrementing transects based on their average x coordinate
# d_coords = d_coords %>%
# arrange(mean_x_coord,CreateDate,Folder_File)
## Alternatively, when transects not N-S, assign transect IDs just by CreateDate. This means all must be consecutive.
d_coords = d_coords %>%
arrange(CreateDate,Folder_File)
#### !!!! here need to loop through each d_coord.
# whenver hit a new transect_id, increment the count. assign the count as sorted_transect_id
## to use for assigning paired transects the same ID for purposes of thinning angled gimbal datasets
# Lookup vector 1,1,2,2,3,3,... maps each consecutive transect to a pair ID.
transect_id_lookup = rep(1:1000,each=2)
transect_ids_encountered = NULL
for(i in 1:nrow(d_coords)) {
#if(i == 82) { browser() }
photo = d_coords[i,]
# Stringer photos get no new ID (transect_id_new stays NA for them).
if(photo$stringer) next()
transect_id = photo$transect_id
if(!(transect_id %in% transect_ids_encountered)) {
transect_ids_encountered = c(transect_ids_encountered,transect_id)
}
# Without pairing the new ID is the running count of distinct transects;
# with pairing, consecutive transects share an ID.
if(!transect_pairs) {
d_coords[i,"transect_id_new"] = length(transect_ids_encountered)
} else {
d_coords[i,"transect_id_new"] = transect_id_lookup[length(transect_ids_encountered)]
}
}
# # redo numbering for only
# d_coords[!d_coords$stringer,"transect_id_new"] = d_coords[!d_coords$stringer,] %>% group_indices(transect_id)
d_coords = d_coords %>%
mutate(odd_transect_new = (transect_id_new %% 2))
## Assign incrementing photo IDs
d_coords$photo_id = 1:nrow(d_coords)
## Make it spatial again for checking results on a map
d_tsect_sp = st_as_sf(d_coords,coords=c("X","Y"), crs=3310)
plot(d_tsect_sp)
# NOTE(review): hard-coded scratch path for visual QA of transect assignment.
st_write(d_tsect_sp %>% st_transform(4326), "/storage/temp/temp_transect_eval.geojson",delete_dsn=TRUE)
#### Generate thinned photoset copies ####
# copy photo sets with specified front and side thinning factor combinations (exclude stringers)
# always generate a set with thinning factors of 1 and 1 which exclude stringers
# NOTE(review): the 1,1 set is only generated when it appears in `thins`.
# reverse the thins so we do the small photosets first
thins_rev = thins[nrow(thins):1,]
thins_rev = as.data.frame(thins_rev)
names(thins_rev) = c("forward_thin","side_thin")
thins_rev = thins_rev %>%
dplyr::mutate(thin_name = paste(forward_thin,side_thin,sep="_"))
## Give all photos an incrementing number in sequence so the front-thin photo selections are the same in each dataset
d_coords$image_sequence_number = 1:nrow(d_coords)
for(i in 1:nrow(thins_rev)) {
thin = thins_rev[i,]
# Side thinning: keep transects whose new ID is a multiple of side_thin;
# stringers are always excluded.
photos_side_thin = d_coords %>%
filter(!stringer) %>% # exclude stringers
filter((transect_id_new %% thin$side_thin) == 0) # perform side thinning
# perform forward thinning
# Keep every forward_thin-th photo along the flight line (by sequence number).
photos = photos_side_thin[(photos_side_thin$image_sequence_number %% thin$forward_thin) == 0,]
thinned_photoset_name = paste0(photoset_name,"_thin",thin$forward_thin,thin$side_thin)
## Copy thinned set to destination path
# Get the necessary paths
photos = photos %>%
mutate(source_path = paste0(photoset_path,"/",Folder_File),
dest_path = paste0(destination_path,"/",thinned_photoset_name,"/",Folder_File)) %>%
mutate(dest_directory = dest_path %>% map(path_drop_last) %>% unlist ) # this is the destination file without the file at the end: for creating the directory for it via dir.create below
# Make sure all needed directories exist
dest_directories = unique(photos$dest_directory)
walk(dest_directories,dir.create,recursive=TRUE)
# overwrite=FALSE means re-runs skip files that were already copied.
file.copy(photos$source_path,photos$dest_path, overwrite=FALSE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in r/char_retrieve.R
\name{char.columns.default}
\alias{char.columns.default}
\title{default char columns}
\usage{
char.columns.default(impound = F)
}
| /man/char.columns.default.Rd | permissive | anarosner/conteStreamflow | R | false | true | 224 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in r/char_retrieve.R
\name{char.columns.default}
\alias{char.columns.default}
\title{default char columns}
\usage{
char.columns.default(impound = F)
}
|
#### Course 3 Project ####
# One of the most exciting areas in all of data science right now is wearable computing -
# see for example this article . Companies like Fitbit, Nike, and Jawbone Up are racing
# to develop the most advanced algorithms to attract new users. The data linked to from
# the course website represent data collected from the accelerometers from the Samsung
# Galaxy S smartphone. A full description is available at the site where the data was obtained:
# http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
# Here are the data for the project:
# https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
# You should create one R script called run_analysis.R that does the following.
setwd("C:\\Users\\Owner\\Desktop\\Coursera R Course 3 Data\\Course 3 R Project Data")
# work setwd("D:\\Coursera\\Coursera R Course 3 Data\\Course 3 R Project Data")
# Grabbing all the relevant data
# All files are read relative to the working directory set above.
# NOTE(review): header = F uses the abbreviation for FALSE; the dataset ships
# these files as e.g. "X_test.txt" — the lowercase "x_test.txt" names only work
# on case-insensitive filesystems (Windows) — TODO confirm.
actLab = read.table("activity_labels.txt", header = FALSE)
features = read.table("features.txt", header = FALSE)
subTest = read.table("subject_test.txt", header = F)
subTrain = read.table("subject_train.txt", header = F)
xTest = read.table("x_test.txt", header = F)
xTrain = read.table("x_train.txt", header = F)
yTest = read.table("y_test.txt", header = F)
yTrain = read.table("y_train.txt", header = F)
## 1. Merging the test and training data
testDatax = rbind(xTrain, xTest)
testDatay = rbind(yTrain, yTest)
subData = rbind(subTrain, subTest)
## 3. (out of order, 2 is below) Uses descriptive activity names to name the activities in the
## data set. adding the activity to each row
# merge() reorders rows, so an explicit ID column is added first and used to
# restore the original row order afterwards.
testDatay$ID = seq.int(nrow(testDatay))
testDatax1 = merge(testDatay, actLab, by.x = "V1", by.y = "V1")
testDatax1 = testDatax1[order(testDatax1$ID), ]
testDatax1 = as.data.frame(testDatax1[, -1])
colnames(testDatax1) = "V0"
testDatax2 = cbind(testDatax1,testDatax)
## 4. (out of order, 2 is below) Appropriately labels the data set with descriptive variable names.
## labeling the data
library(plyr)
# Prepend an "activity" entry to the 561 feature names (total 562 columns),
# transpose to a single header row, and rbind it on top of the data. All
# values must be character for this rbind, hence the as.character pass.
editdata = rbind(data.frame(V1 = 0, V2 = "activity"), features)
editdata = as.data.frame(editdata[, -1])
editdata1 = t(editdata)
colnames(editdata1) = c(1:562)
colnames(testDatax2) = c(1:562)
testDatax2[] = lapply(testDatax2, as.character)
finalData = rbind(editdata1, testDatax2)
# Promote the first row (the feature names) to column names, then drop it.
colnames(finalData) = as.character(unlist(finalData[1, ]))
finalData = finalData[-1, ]
## 2.(out of order) Extracts only the measurements on the mean and standard deviation for each measurement.
# NOTE(review): grepl is case-sensitive here, so columns containing "Mean"
# (capital M) are not selected — confirm this is intended.
MeanData = finalData[,grepl("mean",colnames(finalData))]
StdData = finalData[,grepl("std",colnames(finalData))]
MSData = cbind(subData, finalData$activity, MeanData, StdData)
colnames(MSData)[2] = "activity"
# Columns 3:81 are the measurement columns; convert them back from character
# (introduced by the header-row rbind above) to numeric.
MSData[3:81] = lapply(MSData, as.character)[3:81]
MSData[3:81] = lapply(MSData, as.numeric)[3:81]
## 5. From the data set in step 4, create a second, independent tidy data set with the
## average of each variable for each activity and each subject
# NOTE(review): this reuses the name MeanData, shadowing the mean-columns
# subset created above.
MeanData = aggregate(. ~ activity + V1, MSData, mean)
MeanData = MeanData[order(MeanData$V1, MeanData$activity), ]
# NOTE(review): write.table's default sep is a space, so despite the .csv
# extension this file is space-separated.
write.table(MeanData, "C:\\Users\\Owner\\Desktop\\Coursera R Course 3 Data\\tidy.csv")
| /Run_Analysis.R | no_license | fisch2332/Coursera-Getting-and-Cleaning-Data-Course-Project | R | false | false | 3,303 | r | #### Course 3 Project ####
# One of the most exciting areas in all of data science right now is wearable computing -
# see for example this article . Companies like Fitbit, Nike, and Jawbone Up are racing
# to develop the most advanced algorithms to attract new users. The data linked to from
# the course website represent data collected from the accelerometers from the Samsung
# Galaxy S smartphone. A full description is available at the site where the data was obtained:
# http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
# Here are the data for the project:
# https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
# You should create one R script called run_analysis.R that does the following.
setwd("C:\\Users\\Owner\\Desktop\\Coursera R Course 3 Data\\Course 3 R Project Data")
# work setwd("D:\\Coursera\\Coursera R Course 3 Data\\Course 3 R Project Data")
# Grabbing all the relevant data
actLab = read.table("activity_labels.txt", header = FALSE)
features = read.table("features.txt", header = FALSE)
subTest = read.table("subject_test.txt", header = F)
subTrain = read.table("subject_train.txt", header = F)
xTest = read.table("x_test.txt", header = F)
xTrain = read.table("x_train.txt", header = F)
yTest = read.table("y_test.txt", header = F)
yTrain = read.table("y_train.txt", header = F)
## 1. Merging the test and training data
testDatax = rbind(xTrain, xTest)
testDatay = rbind(yTrain, yTest)
subData = rbind(subTrain, subTest)
## 3. (out of order, 2 is below) Uses descriptive activity names to name the activities in the
## data set. adding the activity to each row
testDatay$ID = seq.int(nrow(testDatay))
testDatax1 = merge(testDatay, actLab, by.x = "V1", by.y = "V1")
testDatax1 = testDatax1[order(testDatax1$ID), ]
testDatax1 = as.data.frame(testDatax1[, -1])
colnames(testDatax1) = "V0"
testDatax2 = cbind(testDatax1,testDatax)
## 4. (out of order, 2 is below) Appropriately labels the data set with descriptive variable names.
## labeling the data
library(plyr)
editdata = rbind(data.frame(V1 = 0, V2 = "activity"), features)
editdata = as.data.frame(editdata[, -1])
editdata1 = t(editdata)
colnames(editdata1) = c(1:562)
colnames(testDatax2) = c(1:562)
testDatax2[] = lapply(testDatax2, as.character)
finalData = rbind(editdata1, testDatax2)
colnames(finalData) = as.character(unlist(finalData[1, ]))
finalData = finalData[-1, ]
## 2.(out of order) Extracts only the measurements on the mean and standard deviation for each measurement.
MeanData = finalData[,grepl("mean",colnames(finalData))]
StdData = finalData[,grepl("std",colnames(finalData))]
MSData = cbind(subData, finalData$activity, MeanData, StdData)
colnames(MSData)[2] = "activity"
MSData[3:81] = lapply(MSData, as.character)[3:81]
MSData[3:81] = lapply(MSData, as.numeric)[3:81]
## 5. From the data set in step 4, create a second, independent tidy data set with the
## average of each variable for each activity and each subject
MeanData = aggregate(. ~ activity + V1, MSData, mean)
MeanData = MeanData[order(MeanData$V1, MeanData$activity), ]
write.table(MeanData, "C:\\Users\\Owner\\Desktop\\Coursera R Course 3 Data\\tidy.csv")
|
## CCRCN Data Library Hook Script ####
# Data Release: Long-term soil carbon data and accretion from four marsh types in Mississippi River Delta in 2015
# Data published by Science Base: https://www.sciencebase.gov/catalog/item/5b3299d4e4b040769c159bb0
# Contact: Melissa Baustian
# Data Citation:
# Baustian, M.M., Stagg, C.L., Perry, C.L., Moss, L.C., Carruthers, T.J.B., Allison, M.A., and Hall, C.T., 2021,
# Long-term soil carbon data and accretion from four marsh types in Mississippi River Delta in 2015: U.S. Geological Survey data release,
# https://doi.org/10.5066/P93U3B3E.
## Prep Workspace ####
# load libraries
library(tidyverse)
library(lubridate)
library(RefManageR)
library(readxl)
library(leaflet)
# library(anytime)
# Provides reorderColumns() and other QA helpers used below.
source("./scripts/1_data_formatting/qa_functions.R")
# read in data
# na = "." marks periods in the raw files as missing values.
raw_depthseries <- read_csv("./data/primary_studies/Baustian_et_al_2021/intermediate/Baustian_Long_Term_Carbon_Soil.csv", na = ".")
raw_radio <- read_csv("./data/primary_studies/Baustian_et_al_2021/original/Baustian_Long_Term_Radionuclide.csv", na = ".")
crms_sites <- read_csv("./data/primary_studies/Baustian_et_al_2021/intermediate/CRMS_Long_Lat.csv")
raw_siteinfo <- read_xlsx("data/primary_studies/Baustian_et_al_2021/original/Baustian_WI_LongTerm_Soil_Core_Site Info.xlsx")
# raw_coreinfo <- read_csv("./data/primary_studies/Baustian_et_al_2021/intermediate/Baustian_Short Term Carbon_Soil Core Data.csv")
# CCRCN database column structure — presumably consumed by reorderColumns();
# TODO confirm, it is not referenced directly in the visible portion.
guidance <- read_csv("docs/ccrcn_database_structure.csv")
## Trim Data to Library ####
# Study identifier used throughout the curated tables.
id <- "Baustian_et_al_2021"
# Reference Tables ----
# Marsh-type elevations keyed by salinity class; joined onto coreinfo below.
cores_ref <- data.frame(salinity_class = c("fresh", "intermediate", "brackish", "saline"),
# Marsh_Type = c(1:4),
core_elevation = c(0.34, 0.13, 0.14, 0.14))
# map referenced for marsh type assignment: https://pubs.usgs.gov/sim/3290/pdf/sim3290.pdf
# depth_ref <- data.frame(Core_Increment = c(1,2,3,5),
# depth_min = c(0,2,4,8),
# depth_max = c(2,4,6,10))
# locations <- crms_sites %>%
# mutate(core_id = gsub("CRMS0", "", `CRMS Site`)) %>%
# mutate(core_id = gsub("CRMS", "", core_id))
# Core-level metadata: rename site columns, attach study/site IDs and method
# flags, then join the per-salinity-class elevation from cores_ref.
coreinfo <- raw_siteinfo %>%
rename(species = `Target Species`,
core_latitude = Lat,
core_longitude = Long,
core_id = `CRMS Site ID`) %>%
mutate(study_id = id,
site_id = recode(Basin,
"BA" = "Barataria basin",
"TE" = "Terrebonne basin"),
salinity_class = tolower(`2014 Habitat Type`),
core_position_method = "RTK",
core_elevation_datum = "NAVD88",
vegetation_class = "emergent",
vegetation_method = "measurement",
salinity_method = "field observation") %>%
# Join happens BEFORE the recode below, so "intermediate" cores receive the
# intermediate elevation (0.13) and are only relabeled afterwards.
full_join(cores_ref) %>%
# Marsh type is defined as intermediate salinity (based on vegetation) but I'm reclassifying it to brackish
mutate(salinity_class = recode(salinity_class, "intermediate" = "brackish")) %>%
select(-c(Basin, `2014 Habitat Type`))
# core dates will have to be merged from depthseries
# coreinfo <- raw_coreinfo %>%
# select(Site, Basin, Marsh_Type) %>%
# distinct() %>%
# rename(core_id = Site) %>%
# mutate(study_id = id,
# site_id = recode(Basin,
# "BA" = "Barataria basin",
# "TE" = "Terrebonne basin"),
# core_position_method = "RTK",
# core_elevation_datum = "NAVD88",
# vegetation_class = "emergent",
# vegetation_method = "measurement",
# salinity_method = "field observation") %>%
# left_join(locations, by = "core_id") %>%
# full_join(cores_ref) %>%
# rename(core_latitude = Latitude,
# core_longitude = Longitude) %>%
# mutate(vegetation_notes = case_when(salinity_class == "fresh" ~ "dominated by Panicum hemitomon, Sagittaria lancifolia, Eleocharis baldwinii, or Cladium jamaicense",
# salinity_class == "intermediate" ~ "dominated by Leptochloa fusca, Panicum virgatum, Paspalum vaginatum, Phragmites australis, or Schoenoplectus americanus",
# salinity_class == "brackish" ~ "dominated by Spartina patens but occasionally by Spartina cynosuroides, Spartina spartinae, or Bolboschoenus robustus",
# salinity_class == "saline" ~ " dominated by Spartina alterniflora, Distichlis spicata, or Avicennia germinans.")) %>%
# select(-c(Basin, Marsh_Type, "CRMS Site"))
# create core site lookup
site_core <- coreinfo %>% select(site_id, core_id)
# Depthseries ----
# Carbon-stock depth increments: rename to CCRCN column names, convert percent
# organic matter to a fraction, and split the increment label into min/max.
stock <- raw_depthseries %>%
rename(core_id = Site,
dry_bulk_density = `Bulk Density (g/cm^3)`) %>%
mutate(study_id = id,
fraction_organic_matter = `Organic matter (percent)`/100,
# Repair apparently Excel-mangled increment labels ("12-14" had been
# auto-converted to the date "14-Dec", etc.) — TODO confirm mapping.
increments = recode(`Core Increment (cm)`,
"14-Dec" = "12-14",
"12-Oct" = "10-12")) %>%
# NOTE(review): separate() leaves depth_min/depth_max as character here
# (no convert = TRUE) — confirm downstream code expects that.
separate(col = increments, into = c("depth_min", "depth_max"), sep = "-") %>%
select(-c(`Core Increment (cm)`, `Moisture (percent)`, `Organic matter (percent)`))
# curate radionuclide data
stock <- stock # (no-op placeholder removed) -- see note below
radionuclides <- raw_radio %>%
drop_na(`CRMS Site`) %>%
rename("core_id" = "CRMS Site",
"core_date" = "Field Collection Date",
"cs137_activity" = "Cs-137 (dpm/g)",
"cs137_activity_se" = "Cs-137 - error (dpm/g)",
"total_pb210_activity" = "Total Pb-210 (dpm/g)",
"total_pb210_activity_se" = "Total Pb-210 - error (dpm/g)",
"excess_pb210_activity" = "Excess Pb-210 (dpm/g)",
"excess_pb210_activity_se" = "Excess Pb-210 - error (dpm/g)") %>%
separate(col = `Core Increment (cm)`, into = c("depth_min", "depth_max"), sep = "-") %>%
mutate(study_id = id,
core_id = as.character(core_id),
# depth_max = gsub(" cm", "", depth_max),
# depth_max = as.numeric(depth_max),
# depth_min = as.numeric(depth_min),
pb210_unit = "disintegrationsPerMinutePerGram",
cs137_unit = "disintegrationsPerMinutePerGram") %>%
# NOTE(review): core_batch is not defined anywhere in this script as shown —
# this line will error unless core_batch is created elsewhere; it may have
# been meant to be the Batch/date lookup built further down. TODO confirm.
left_join(core_batch) %>%
select(-`Mid-Depth (cm)`, -`Radionuclide Counted Date`, -core_date)
# there are 7 sites missing from the radionuclide table
unique(stock$core_id)[which(!(unique(stock$core_id) %in% unique(radionuclides$core_id)))]
# join depthseries info
depthseries <- full_join(stock, radionuclides) %>%
full_join(site_core) # merge site info
# create date ref for core table
# date_ref <- depthseries %>% select(site_id, core_id, core_date) %>% distinct() %>%
# drop_na(core_date) # drop NA dates
final_depthseries <- reorderColumns("depthseries", depthseries) %>%
select(-c(Batch, core_date, "2014_Habitat Type", Most_Freq_Occ_Habitat_1949to1988))
# Cores ----
# use this to supply core dates to the core table
# Batch 1 cores were collected Feb 2015, batch 2 in Jul 2015.
date_ref <- stock %>% select(Batch, core_id) %>% distinct() %>%
mutate(core_date = case_when(Batch == "1" ~ "2/1/2015",
Batch == "2" ~ "7/1/2015"))
# Split the core date into year/month/day columns and drop the raw date.
cores <- left_join(coreinfo, date_ref) %>%
mutate(core_year = year(as.Date(core_date, format = "%m/%d/%Y")),
core_month = month(as.Date(core_date, format = "%m/%d/%Y")),
core_day = day(as.Date(core_date, format = "%m/%d/%Y"))) %>%
mutate(core_length_flag = "core depth limited by length of corer") %>%
select(-core_date, -species, -Batch)
final_cores <- reorderColumns("cores", cores)
# Species
# Split slash-separated species strings into one row per taxon, then split
# each name into genus and species columns.
species <- coreinfo %>%
select(study_id, site_id, core_id, species) %>%
mutate(species = str_split(species, "/")) %>%
unnest(species) %>% mutate(species = trimws(species)) %>%
separate(species, into = c("genus", "species"), sep = " ")
# Methods ----
# One-row study-level methods table.
methods <- data.frame(study_id = id,
coring_method = "mcauley corer",
dry_bulk_density_flag = "freeze dried",
loss_on_ignition_temperature = 550,
loss_on_ignition_time = 14,
# fraction_carbon_type = "total carbon", # no fraction carbon in the tables but metadata says it was calculated
cs137_counting_method = "gamma",
pb210_counting_method = "gamma",
# pi * r^2 * h — presumably a 2 cm slice of a 5.1 cm diameter
# core; TODO confirm against the data release metadata.
dry_bulk_density_sample_volume = pi*((5.1/2)^2)*2)
final_methods <- reorderColumns("methods", methods)
#### Study Citation ####
id_doi <- "10.5066/P93U3B3E"
data_bib <- GetBibEntryWithDOI(id_doi)
# Convert citations to dataframe
data_citation <- as.data.frame(data_bib) %>%
rownames_to_column("key") %>%
mutate(study_id = id) %>%
mutate(doi = tolower(doi),
bibliography_id = id,
key = id)
# # Curate biblio so ready to read out as a BibTex-style .bib file
study_citations <- data_citation %>%
# bind_rows(report_citation) %>%
mutate(publication_type = bibtype) %>%
select(study_id, bibliography_id, publication_type, key, bibtype, everything())
# Write .bib file
bib_file <- study_citations %>%
# slice(1) %>%
select(-study_id, -bibliography_id, -publication_type) %>%
# distinct() %>%
column_to_rownames("key")
WriteBib(as.BibEntry(bib_file), "data/primary_studies/Baustian_et_al_2021/derivative/Baustian_et_al_2021.bib")
## QA/QC ###############
leaflet(cores) %>%
addProviderTiles(providers$CartoDB) %>%
addCircleMarkers(lng = ~as.numeric(core_longitude), lat = ~as.numeric(core_latitude),
radius = 5, label = ~core_id)
# Make sure column names are formatted correctly:
test_colnames("cores", final_cores)
test_colnames("depthseries", final_depthseries)
test_colnames("methods", methods)
test_unique_cores(final_cores)
test_core_relationships(final_cores, final_depthseries)
## Write derivative data ####
# write_csv(sites, "./data/primary_studies/Baustian_et_al_2021/derivative/Baustian_et_al_2021_sites.csv")
write_csv(final_cores, "./data/primary_studies/Baustian_et_al_2021/derivative/Baustian_et_al_2021_cores.csv")
# write_csv(species, "./data/primary_studies/Baustian_et_al_2021/derivative/Baustian_et_al_2021_species.csv")
write_csv(final_methods, "./data/primary_studies/Baustian_et_al_2021/derivative/Baustian_et_al_2021_methods.csv")
write_csv(final_depthseries, "./data/primary_studies/Baustian_et_al_2021/derivative/Baustian_et_al_2021_depthseries.csv")
write_csv(study_citations, "./data/primary_studies/Baustian_et_al_2021/derivative/Baustian_et_al_2021_study_citations.csv")
| /scripts/0_data_hooks/Baustian_2021_hook.R | no_license | jfontestad/CCRCN-Data-Library | R | false | false | 10,439 | r | ## CCRCN Data Library Hook Script ####
# Data Release: Long-term soil carbon data and accretion from four marsh types in Mississippi River Delta in 2015
# Data published by Science Base: https://www.sciencebase.gov/catalog/item/5b3299d4e4b040769c159bb0
# Contact: Melissa Baustian
# Data Citation:
# Baustian, M.M., Stagg, C.L., Perry, C.L., Moss, L.C., Carruthers, T.J.B., Allison, M.A., and Hall, C.T., 2021,
# Long-term soil carbon data and accretion from four marsh types in Mississippi River Delta in 2015: U.S. Geological Survey data release,
# https://doi.org/10.5066/P93U3B3E.
## Prep Workspace ####
# load libraries
library(tidyverse)
library(lubridate)
library(RefManageR)
library(readxl)
library(leaflet)
# library(anytime)
# Shared QA helpers (reorderColumns, test_colnames, test_unique_cores, ...).
source("./scripts/1_data_formatting/qa_functions.R")
# read in data
# na = "." : the source tables use a period to mark missing values.
raw_depthseries <- read_csv("./data/primary_studies/Baustian_et_al_2021/intermediate/Baustian_Long_Term_Carbon_Soil.csv", na = ".")
raw_radio <- read_csv("./data/primary_studies/Baustian_et_al_2021/original/Baustian_Long_Term_Radionuclide.csv", na = ".")
# NOTE(review): crms_sites and guidance are read but not used in the active
# code below (crms_sites only appears in a commented-out block).
crms_sites <- read_csv("./data/primary_studies/Baustian_et_al_2021/intermediate/CRMS_Long_Lat.csv")
raw_siteinfo <- read_xlsx("data/primary_studies/Baustian_et_al_2021/original/Baustian_WI_LongTerm_Soil_Core_Site Info.xlsx")
# raw_coreinfo <- read_csv("./data/primary_studies/Baustian_et_al_2021/intermediate/Baustian_Short Term Carbon_Soil Core Data.csv")
guidance <- read_csv("docs/ccrcn_database_structure.csv")
## Trim Data to Library ####
id <- "Baustian_et_al_2021"
# Reference Tables ----
# One row per salinity class with a representative core elevation.
# NOTE(review): elevations (0.34/0.13/0.14/0.14) look like per-marsh-type
# values in m NAVD88 -- confirm source and units.
cores_ref <- data.frame(salinity_class = c("fresh", "intermediate", "brackish", "saline"),
# Marsh_Type = c(1:4),
core_elevation = c(0.34, 0.13, 0.14, 0.14))
# map referenced for marsh type assignment: https://pubs.usgs.gov/sim/3290/pdf/sim3290.pdf
# depth_ref <- data.frame(Core_Increment = c(1,2,3,5),
#                         depth_min = c(0,2,4,8),
#                         depth_max = c(2,4,6,10))
# locations <- crms_sites %>%
#   mutate(core_id = gsub("CRMS0", "", `CRMS Site`)) %>%
#   mutate(core_id = gsub("CRMS", "", core_id))
# Curate core-level site info: rename to CCRCN names, expand basin codes, and
# attach the per-salinity-class elevation via the join on salinity_class.
coreinfo <- raw_siteinfo %>%
rename(species = `Target Species`,
core_latitude = Lat,
core_longitude = Long,
core_id = `CRMS Site ID`) %>%
mutate(study_id = id,
site_id = recode(Basin,
"BA" = "Barataria basin",
"TE" = "Terrebonne basin"),
salinity_class = tolower(`2014 Habitat Type`),
core_position_method = "RTK",
core_elevation_datum = "NAVD88",
vegetation_class = "emergent",
vegetation_method = "measurement",
salinity_method = "field observation") %>%
full_join(cores_ref) %>%
# Marsh type is defined as intermediate salinity (based on vegetation) but I'm reclassifying it to brackish
mutate(salinity_class = recode(salinity_class, "intermediate" = "brackish")) %>%
select(-c(Basin, `2014 Habitat Type`))
# core dates will have to be merged from depthseries
# coreinfo <- raw_coreinfo %>%
#   select(Site, Basin, Marsh_Type) %>%
#   distinct() %>%
#   rename(core_id = Site) %>%
#   mutate(study_id = id,
#          site_id = recode(Basin,
#                           "BA" = "Barataria basin",
#                           "TE" = "Terrebonne basin"),
#          core_position_method = "RTK",
#          core_elevation_datum = "NAVD88",
#          vegetation_class = "emergent",
#          vegetation_method = "measurement",
#          salinity_method = "field observation") %>%
#   left_join(locations, by = "core_id") %>%
#   full_join(cores_ref) %>%
#   rename(core_latitude = Latitude,
#          core_longitude = Longitude) %>%
#   mutate(vegetation_notes = case_when(salinity_class == "fresh" ~ "dominated by Panicum hemitomon, Sagittaria lancifolia, Eleocharis baldwinii, or Cladium jamaicense",
#                                       salinity_class == "intermediate" ~ "dominated by Leptochloa fusca, Panicum virgatum, Paspalum vaginatum, Phragmites australis, or Schoenoplectus americanus",
#                                       salinity_class == "brackish" ~ "dominated by Spartina patens but occasionally by Spartina cynosuroides, Spartina spartinae, or Bolboschoenus robustus",
#                                       salinity_class == "saline" ~ " dominated by Spartina alterniflora, Distichlis spicata, or Avicennia germinans.")) %>%
#   select(-c(Basin, Marsh_Type, "CRMS Site"))
# create core site lookup
site_core <- coreinfo %>% select(site_id, core_id)
# Depthseries ----
# Soil stock depthseries: rename to CCRCN column names, convert organic matter
# percent to a fraction, and undo Excel's date-coercion of depth increments
# ("12-14" was mangled to "14-Dec", "10-12" to "12-Oct").
stock <- raw_depthseries %>%
rename(core_id = Site,
dry_bulk_density = `Bulk Density (g/cm^3)`) %>%
mutate(study_id = id,
fraction_organic_matter = `Organic matter (percent)`/100,
increments = recode(`Core Increment (cm)`,
"14-Dec" = "12-14",
"12-Oct" = "10-12")) %>%
# NOTE(review): depth_min/depth_max remain character after separate();
# the as.numeric conversion below is commented out -- confirm downstream
# code tolerates character depths.
separate(col = increments, into = c("depth_min", "depth_max"), sep = "-") %>%
select(-c(`Core Increment (cm)`, `Moisture (percent)`, `Organic matter (percent)`))
# curate radionuclide data
radionuclides <- raw_radio %>%
drop_na(`CRMS Site`) %>%
rename("core_id" = "CRMS Site",
"core_date" = "Field Collection Date",
"cs137_activity" = "Cs-137 (dpm/g)",
"cs137_activity_se" = "Cs-137 - error (dpm/g)",
"total_pb210_activity" = "Total Pb-210 (dpm/g)",
"total_pb210_activity_se" = "Total Pb-210 - error (dpm/g)",
"excess_pb210_activity" = "Excess Pb-210 (dpm/g)",
"excess_pb210_activity_se" = "Excess Pb-210 - error (dpm/g)") %>%
separate(col = `Core Increment (cm)`, into = c("depth_min", "depth_max"), sep = "-") %>%
mutate(study_id = id,
core_id = as.character(core_id),
# depth_max = gsub(" cm", "", depth_max),
# depth_max = as.numeric(depth_max),
# depth_min = as.numeric(depth_min),
pb210_unit = "disintegrationsPerMinutePerGram",
cs137_unit = "disintegrationsPerMinutePerGram") %>%
# NOTE(review): `core_batch` is not defined anywhere in this script, so this
# join will fail at runtime; presumably it is a core_id-to-Batch lookup
# created elsewhere -- confirm before running.
left_join(core_batch) %>%
select(-`Mid-Depth (cm)`, -`Radionuclide Counted Date`, -core_date)
# there are 7 sites missing from the radionuclide table
unique(stock$core_id)[which(!(unique(stock$core_id) %in% unique(radionuclides$core_id)))]
# join depthseries info
depthseries <- full_join(stock, radionuclides) %>%
full_join(site_core) # merge site info
# create date ref for core table
# date_ref <- depthseries %>% select(site_id, core_id, core_date) %>% distinct() %>%
#   drop_na(core_date) # drop NA dates
final_depthseries <- reorderColumns("depthseries", depthseries) %>%
select(-c(Batch, core_date, "2014_Habitat Type", Most_Freq_Occ_Habitat_1949to1988))
# Cores ----
# use this to supply core dates to the core table
# NOTE(review): batch 1 is assigned Feb 2015 and batch 2 July 2015; only the
# month/year appear to be known, day is set to the 1st by convention -- confirm
# against the data release metadata.
date_ref <- stock %>% select(Batch, core_id) %>% distinct() %>%
mutate(core_date = case_when(Batch == "1" ~ "2/1/2015",
Batch == "2" ~ "7/1/2015"))
cores <- left_join(coreinfo, date_ref) %>%
mutate(core_year = year(as.Date(core_date, format = "%m/%d/%Y")),
core_month = month(as.Date(core_date, format = "%m/%d/%Y")),
core_day = day(as.Date(core_date, format = "%m/%d/%Y"))) %>%
mutate(core_length_flag = "core depth limited by length of corer") %>%
select(-core_date, -species, -Batch)
final_cores <- reorderColumns("cores", cores)
# Species
# Split multi-species strings ("A b/C d") into one row per species, then
# split each into genus and species epithet.
# NOTE(review): separate() with sep = " " silently keeps only the first two
# words of each name; any subspecies/variety text is dropped.
species <- coreinfo %>%
select(study_id, site_id, core_id, species) %>%
mutate(species = str_split(species, "/")) %>%
unnest(species) %>% mutate(species = trimws(species)) %>%
separate(species, into = c("genus", "species"), sep = " ")
# Methods ----
methods <- data.frame(study_id = id,
coring_method = "mcauley corer",
dry_bulk_density_flag = "freeze dried",
loss_on_ignition_temperature = 550,
loss_on_ignition_time = 14,
# fraction_carbon_type = "total carbon", # no fraction carbon in the tables but metadata says it was calculated
cs137_counting_method = "gamma",
pb210_counting_method = "gamma",
dry_bulk_density_sample_volume = pi*((5.1/2)^2)*2)
final_methods <- reorderColumns("methods", methods)
#### Study Citation ####
# Pull the citation for the data release DOI and shape it into the CCRCN
# study_citations table.
id_doi <- "10.5066/P93U3B3E"
data_bib <- GetBibEntryWithDOI(id_doi)
# Convert citations to dataframe
data_citation <- as.data.frame(data_bib) %>%
rownames_to_column("key") %>%
mutate(study_id = id) %>%
mutate(doi = tolower(doi),
bibliography_id = id,
key = id)
# # Curate biblio so ready to read out as a BibTex-style .bib file
study_citations <- data_citation %>%
# bind_rows(report_citation) %>%
mutate(publication_type = bibtype) %>%
select(study_id, bibliography_id, publication_type, key, bibtype, everything())
# Write .bib file
bib_file <- study_citations %>%
# slice(1) %>%
select(-study_id, -bibliography_id, -publication_type) %>%
# distinct() %>%
column_to_rownames("key")
WriteBib(as.BibEntry(bib_file), "data/primary_studies/Baustian_et_al_2021/derivative/Baustian_et_al_2021.bib")
## QA/QC ###############
# Interactive map of core positions as a visual sanity check on coordinates.
leaflet(cores) %>%
addProviderTiles(providers$CartoDB) %>%
addCircleMarkers(lng = ~as.numeric(core_longitude), lat = ~as.numeric(core_latitude),
radius = 5, label = ~core_id)
# Make sure column names are formatted correctly:
test_colnames("cores", final_cores)
test_colnames("depthseries", final_depthseries)
test_colnames("methods", methods)
test_unique_cores(final_cores)
test_core_relationships(final_cores, final_depthseries)
## Write derivative data ####
# write_csv(sites, "./data/primary_studies/Baustian_et_al_2021/derivative/Baustian_et_al_2021_sites.csv")
write_csv(final_cores, "./data/primary_studies/Baustian_et_al_2021/derivative/Baustian_et_al_2021_cores.csv")
# write_csv(species, "./data/primary_studies/Baustian_et_al_2021/derivative/Baustian_et_al_2021_species.csv")
write_csv(final_methods, "./data/primary_studies/Baustian_et_al_2021/derivative/Baustian_et_al_2021_methods.csv")
write_csv(final_depthseries, "./data/primary_studies/Baustian_et_al_2021/derivative/Baustian_et_al_2021_depthseries.csv")
write_csv(study_citations, "./data/primary_studies/Baustian_et_al_2021/derivative/Baustian_et_al_2021_study_citations.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{CorStats}
\alias{CorStats}
\title{Calculate the correlation structure based statistics}
\usage{
CorStats(pCorD, discIndices, pCorT, testIndices)
}
\arguments{
\item{pCorD, pCorT}{SEXP containers for the pointers to the correlation
structure matrices for the \emph{discovery} and \emph{test} networks.}
\item{discIndices, testIndices}{indices of the network subset in
the \emph{discovery} and \emph{test} networks respectively.}
}
\value{
A vector containing:
\enumerate{
\item{\emph{cor.discovery}:}{
A flattened vector of the module's correlation structure in the
\emph{discovery} dataset.
}
\item{\emph{cor.test}:}{
A flattened vector of the module's correlation structure in the
\emph{test} dataset.
}
\item{\emph{corDensity}:}{
The mean sign-aware correlation structure density of the network module.
}
}
}
\description{
Both of the "concordance of correlation structure" and "density of
correlation structure" are calculated using all pairwise
correlation coefficients between nodes in both the \emph{discovery} and
\emph{test} datasets. For the other module preservation statistics we can
store components of each statistic calculated in each dataset separately,
and save time by only calculating them in the discovery dataset once.
This would have a substantial memory overhead for the correlation structure
based statistics, so we don't use this strategy.
}
\references{
\enumerate{
\item{
Langfelder, P., Luo, R., Oldham, M. C. & Horvath, S. \emph{Is my
network module preserved and reproducible?} PLoS Comput. Biol.
\strong{7}, e1001057 (2011).
}
}
}
| /man/CorStats-cpp.Rd | no_license | sauwai/NetRep | R | false | true | 1,755 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{CorStats}
\alias{CorStats}
\title{Calculate the correlation structure based statistics}
\usage{
CorStats(pCorD, discIndices, pCorT, testIndices)
}
\arguments{
\item{pCorD, pCorT}{SEXP containers for the pointers to the correlation
structure matrices for the \emph{discovery} and \emph{test} networks.}
\item{discIndices, testIndices}{indices of the network subset in
the \emph{discovery} and \emph{test} networks respectively.}
}
\value{
A vector containing:
\enumerate{
\item{\emph{cor.discovery}:}{
A flattened vector of the module's correlation structure in the
\emph{discovery} dataset.
}
\item{\emph{cor.test}:}{
A flattened vector of the module's correlation structure in the
\emph{test} dataset.
}
\item{\emph{corDensity}:}{
The mean sign-aware correlation structure density of the network module.
}
}
}
\description{
Both of the "concordance of correlation structure" and "density of
correlation structure" are calculated using all pairwise
correlation coefficients between nodes in both the \emph{discovery} and
\emph{test} datasets. For the other module preservation statistics we can
store components of each statistic calculated in each dataset separately,
and save time by only calculating them in the discovery dataset once.
This would have a substantial memory overhead for the correlation structure
based statistics, so we don't use this strategy.
}
\references{
\enumerate{
\item{
Langfelder, P., Luo, R., Oldham, M. C. & Horvath, S. \emph{Is my
network module preserved and reproducible?} PLoS Comput. Biol.
\strong{7}, e1001057 (2011).
}
}
}
|
#' @title Diagnostic function for b3lmeta object in jarbes
#'
#' @description This function performs an approximated Bayesian cross-validation for a b3lmeta object
#'
#' @param object The object generated by the function b3lmeta.
#' @param post.p.value.cut Posterior p-value cut point to assess outliers.
#' @param study.names Character vector containing names of the studies used.
#' @param size.forest Size of the center symbol mark in the forest-plot lines
#' @param lwd.forest Thickness of the lines in the forest-plot
#' @param shape.forest Type of symbol for the center mark in the forest-plot lines
#' @param ... \dots
#'
#' @return A ggplot object: one posterior-predictive interval per study, with
#'   studies whose posterior p-value falls below post.p.value.cut drawn in red.
#'
#' @import ggplot2
#'
#' @export
diagnostic.b3lmeta = function(object,
                              post.p.value.cut = 0.05,
                              study.names = NULL,
                              size.forest = 0.4,
                              lwd.forest = 0.2,
                              shape.forest = 23,
                              ...) {
  # Dummy bindings to avoid R CMD check NOTEs for non-standard evaluation in aes().
  x=y=ylo=yhi=NULL
  # Data preparation
  # Posterior predictive ("ghost") draws: one column per study.
  y.ghost = object$BUGSoutput$sims.list$y.ghost
  # Per-study posterior predictive median and central 95% interval.
  g.m = apply(y.ghost, 2, median)
  # BUG FIX (review): the upper bound previously used prob = 0.95, which paired
  # a 2.5% lower bound with a 95% upper bound (asymmetric interval); 0.975
  # gives the intended central 95% interval.
  g.u = apply(y.ghost, 2, quantile, prob = 0.975)
  g.l = apply(y.ghost, 2, quantile, prob = 0.025)
  n.studies = length(g.m)
  TE = object$data$TE
  if (is.null(study.names)) {
    study.names = 1:n.studies
  }
  # Posterior p-values to detect outliers: two-sided tail probability of the
  # observed effect TE[i] under its posterior predictive distribution.
  p.vec = NULL
  for(i in 1:n.studies)
  {
    p1 = sum(y.ghost[,i]<TE[i])/length(y.ghost[,i])
    p2 = sum(y.ghost[,i]>TE[i])/length(y.ghost[,i])
    p.val = min(p1, p2)
    p.vec = c(p.vec, p.val)
  }
  p.col = ifelse(p.vec < post.p.value.cut, "red", "blue")
  data.plot = data.frame(
    x = study.names,
    TE = TE,
    g.m = g.m,
    ylo  = g.l,
    yhi  = g.u,
    p.vec = p.vec,
    p.col = p.col)
  # NOTE(review): size is mapped inside aes() to a constant, which creates a
  # spurious legend entry; left unchanged here to preserve existing output.
  p = ggplot(data.plot, aes(x = x, y = TE,
                            ymin = ylo, ymax = yhi,
                            size = size.forest # Point size
                            )) +
    geom_pointrange(colour = p.col,
                    lwd = lwd.forest,  # Thickness of the lines
                    shape = shape.forest)+
    coord_flip() +
    xlab("Study") +
    ylab("Posterior Predictive observation") +
    ggtitle("Bayesian Cross-Validation") +
    theme_bw()
  return(p)
}
| /R/diagnostic.b3lmeta.R | no_license | cran/jarbes | R | false | false | 2,337 | r | #' @title Diagnostic function for b3lmeta object in jarbes
#'
#' @description This function performs an approximated Bayesian cross-validation for a b3lmeta object
#'
#' @param object The object generated by the function b3lmeta.
#' @param post.p.value.cut Posterior p-value cut point to assess outliers.
#' @param study.names Character vector containing names of the studies used.
#' @param size.forest Size of the center symbol mark in the forest-plot lines
#' @param lwd.forest Thickness of the lines in the forest-plot
#' @param shape.forest Type of symbol for the center mark in the forest-plot lines
#' @param ... \dots
#'
#' @return A ggplot object: one posterior-predictive interval per study, with
#'   studies whose posterior p-value falls below post.p.value.cut drawn in red.
#'
#' @import ggplot2
#'
#' @export
diagnostic.b3lmeta = function(object,
                              post.p.value.cut = 0.05,
                              study.names = NULL,
                              size.forest = 0.4,
                              lwd.forest = 0.2,
                              shape.forest = 23,
                              ...) {
  # Dummy bindings to avoid R CMD check NOTEs for non-standard evaluation in aes().
  x=y=ylo=yhi=NULL
  # Data preparation
  # Posterior predictive ("ghost") draws: one column per study.
  y.ghost = object$BUGSoutput$sims.list$y.ghost
  # Per-study posterior predictive median and central 95% interval.
  g.m = apply(y.ghost, 2, median)
  # BUG FIX (review): the upper bound previously used prob = 0.95, which paired
  # a 2.5% lower bound with a 95% upper bound (asymmetric interval); 0.975
  # gives the intended central 95% interval.
  g.u = apply(y.ghost, 2, quantile, prob = 0.975)
  g.l = apply(y.ghost, 2, quantile, prob = 0.025)
  n.studies = length(g.m)
  TE = object$data$TE
  if (is.null(study.names)) {
    study.names = 1:n.studies
  }
  # Posterior p-values to detect outliers: two-sided tail probability of the
  # observed effect TE[i] under its posterior predictive distribution.
  p.vec = NULL
  for(i in 1:n.studies)
  {
    p1 = sum(y.ghost[,i]<TE[i])/length(y.ghost[,i])
    p2 = sum(y.ghost[,i]>TE[i])/length(y.ghost[,i])
    p.val = min(p1, p2)
    p.vec = c(p.vec, p.val)
  }
  p.col = ifelse(p.vec < post.p.value.cut, "red", "blue")
  data.plot = data.frame(
    x = study.names,
    TE = TE,
    g.m = g.m,
    ylo  = g.l,
    yhi  = g.u,
    p.vec = p.vec,
    p.col = p.col)
  # NOTE(review): size is mapped inside aes() to a constant, which creates a
  # spurious legend entry; left unchanged here to preserve existing output.
  p = ggplot(data.plot, aes(x = x, y = TE,
                            ymin = ylo, ymax = yhi,
                            size = size.forest # Point size
                            )) +
    geom_pointrange(colour = p.col,
                    lwd = lwd.forest,  # Thickness of the lines
                    shape = shape.forest)+
    coord_flip() +
    xlab("Study") +
    ylab("Posterior Predictive observation") +
    ggtitle("Bayesian Cross-Validation") +
    theme_bw()
  return(p)
}
|
# NOTE(review): hard-coded absolute working directory; breaks on any other machine.
setwd("J:/INLAExamples")
library(RandomFields)
library(TMB)
library(INLA)
###coefficients
# True fixed effects used to simulate the Poisson intensity:
# log-intensity = b0 + b1*cov + b2*cov2 + spatial random field.
b0 <- 1.0
b1 <- 0.7
b2 <- 1.0
##Functions
#function to find which box number a point is in
#(boxes are numbered left to right, bottom to top, e.g. lowest row
#is 1,2,3,4 next is 5,6,7,8 etc.
#but how they're numbered doesn't really matter)
boxfind <- function(coord, box.x.length, box.y.length, box.y.n) {
  # coord: length-2 numeric (x, y). Boxes are numbered bottom-to-top within a
  # column and columns left-to-right, so the box number is
  # row + (column - 1) * rows-per-column. Indices are clamped to at least 1 so
  # points on the left/bottom edge (coordinate 0) land in the first box.
  col.idx <- max(1, ceiling(coord[1] / box.x.length))
  row.idx <- max(1, ceiling(coord[2] / box.y.length))
  row.idx + (col.idx - 1) * box.y.n
}
#function to plot and colour pixels based on a vector of values
# Draws an x.n-by-y.n grid of filled rectangles over a
# (total.x.size x total.y.size) canvas, colouring each cell on a 10-level
# red-to-blue ramp binned from `values`.
# NOTE(review): `values` is assumed to be ordered column-major (outer loop over
# x, inner over y), matching how `coord` is built elsewhere in this script.
# Called for its plotting side effect; returns NULL.
pixelplot <- function(values, total.x.size, total.y.size, x.n, y.n, main=NULL){
# size of one pixel in user coordinates
x.length <- total.x.size/x.n
y.length <- total.y.size/y.n
rbPal <- colorRampPalette(c('red','blue'))
# bin values into 10 equal-width classes; NA values produce NA colours
col <- rbPal(10)[as.numeric(cut(values,breaks = 10))]
# empty plot that establishes the coordinate system
plot(x=NULL, y=NULL, xlim=range(0:total.x.size), ylim=range(0:total.y.size), main = main)
m <- 1
for(i in 1:x.n){
for(j in 1:y.n){
rect(xleft = (i-1)*x.length, ybottom = (j-1)*y.length, xright = i*x.length, ytop = j*y.length, density = NULL, angle = 45,
col = col[m], border = col[m], lty = par("lty"), lwd = par("lwd"))
m <- m+1
}
}
return()
}
###Set up
#TMB stuff
# Compile and load the two TMB templates: aggregated-box likelihood
# ("TMBPoisson") and point-level likelihood ("TMBPoissonNoPixels").
compile("TMBPoisson.cpp")
dyn.load(dynlib("TMBPoisson"))
compile("TMBPoissonNoPixels.cpp")
dyn.load(dynlib("TMBPoissonNoPixels"))
#choose size
total.x.size <- 70
total.y.size <- 70
#choose number of pixels
x.n <- 70
y.n <- 70
x.length <- total.x.size/x.n
y.length <- total.y.size/y.n
n.total <- x.n*y.n
#generate coordinates
x <- seq(0, total.x.size, length.out=x.n)
y <- seq(0, total.y.size, length.out=y.n)
m <- 1
# coord is filled column-major: outer loop over x, inner over y.
# pixelplot() relies on this same ordering.
coord <- array(0,c(n.total,2))
for(i in 1:x.n){
for(j in 1:y.n){
coord[m,] = c(x[i], y[j])
m <- m+1
}
}
#create coordinates for border
#(actually need just the corners, so border.n=1 and this is unecessary, it turns out...)
border.n <- 1
border <- array(0, c(4*(border.n+1), 2))
for(i in 1:(border.n + 1)){
border[i,] <- c(0, (i-1)*total.y.size/border.n)
border[i + (border.n+1), ] <- c(total.x.size, (i-1)*total.y.size/border.n)
border[i + 2*(border.n+1), ] <- c((i-1)*total.x.size/border.n, 0)
border[i + 3*(border.n+1), ] <- c((i-1)*total.x.size/border.n, total.y.size)
}
plot(border)
plot(coord)
###Generate fake data
#generate random field
# Simulate Matern random fields at the pixel locations: a.field is the latent
# spatial effect; cov and cov2 are two spatially-correlated covariates.
# NOTE(review): `as` and `cov` shadow methods::as and stats::cov.
a <- RMmatern(3)
as <- RFsimulate(a, x=coord[,1], y=coord[,2])
a.field <- as.matrix(as)
a2 <- RMmatern(4, scale=3, var=1)
as2 <- RFsimulate(a2, x=coord[,1], y=coord[,2])
cov <- as.matrix(as2)
a3 <- RMmatern(0.2, scale=5, var=6)
as3 <- RFsimulate(a3, x=coord[,1], y=coord[,2])
cov2 <- as.matrix(as3)
#visualise random field
# BUG FIX (review): the original passed `c`, which is not a data object here and
# resolves to the base R concatenation function, making both calls error.
# The simulated Matern field is stored in `a.field` above.
# (Note: a.field may contain negative values, so some cex values will warn.)
pixelplot(a.field, total.x.size, total.y.size, x.n, y.n, main="Matern RF (Random Fields)")
plot(coord, cex = a.field, main = "Matern RF (Random Fields)")
#another way to generate random field
if(FALSE){
library(geoR)
d <- grf(n, grid = coord, xlims = c(0, 10), ylims = c(0, 10), nsim = 1, cov.model = "matern",
cov.pars = c(1,1),
kappa = 0.5, nugget = 0, lambda = 1.00,
mean = 0, RF=TRUE)
plot(coord, cex=d$data, main = "Matern RF (geoR)")
}
#generate covariate and responses
#cov <- rep(0, n.total)
# Simulate the Poisson response at each pixel from the true coefficients,
# the two covariates, and the latent spatial field.
response <- rep(0, n.total)
for(i in 1:n.total){
#cov[i] <- rnorm(1,coord[i,1]+coord[i,2], 20)/20
#cov[i] <- runif(1,0,3)
response[i] <- rpois(1, exp(b0 + b1*cov[i] + b2*cov2[i] + a.field[i]))
}
#visualise response
pixelplot(response, total.x.size, total.y.size, x.n, y.n, main="Response")
###Create spde object
# Build the INLA mesh over the domain and extract the SPDE precision
# components (M0, M1, M2) that the TMB templates consume.
mesh <- inla.mesh.2d(loc.domain = border, max.edge=c(3,2), offset=c(0.03, 0.5), cutoff=1, max.n = 500)
plot(mesh)
spde <- (inla.spde2.matern(mesh=mesh, alpha=2)$param.inla)[c("M0","M1","M2")]
idx <- mesh$idx$loc
n_s <- nrow(spde$M0)
# NOTE(review): this overwrites the earlier x-coordinate vector `x`.
x <- rep(0, n_s)
###Create "box" (i.e. aggregate) data
box.x.n <- 10
box.y.n <- 10
box.n.total <- box.x.n*box.y.n
box.x.length <- total.x.size/box.x.n
box.y.length <- total.y.size/box.y.n
box.cov <- rep(0, box.n.total)
box.response <- rep(0, box.n.total)
box.total <- rep(0, box.n.total)
# NOTE(review): rep(c(), n) evaluates to NULL; box.index only becomes a usable
# list via the [[<- assignments further down.
box.index <- rep(c(), box.n.total)
coord.box.number <- rep(0, n.total)
#for each pixel, find which box it's in, then sum the pixel covariates and responses for each box
for(i in 1:n.total){
# NOTE(review): box.x.n is passed for boxfind's box.y.n argument; this is only
# correct because the box grid is square (10x10).
box.number <- boxfind(coord[i,], box.x.length, box.y.length, box.x.n)
coord.box.number[i] <- box.number
box.cov[box.number] <- box.cov[box.number] + cov[i]
box.response[box.number] <- box.response[box.number] + response[i]
box.total[box.number] <- box.total[box.number] + 1
}
# Re-order pixels so all pixels of box 1 come first, then box 2, etc.
# (O(n.total * box.n.total) scan; fine at this size.)
ordered.coord <- array(0, c(n.total, 2))
ordered.index <- rep(0, n.total)
ordered.cov <- rep(0, n.total)
ordered.cov2 <- rep(0, n.total)
m <- 1
for(i in 1:box.n.total){
for(j in 1:n.total){
if(coord.box.number[j] == i){
ordered.coord[m, ] <- coord[j, ]
ordered.cov[m] <- cov[j]
ordered.cov2[m] <- cov2[j]
ordered.index[m] <- j
m <- m+1
}
}
}
# Inverse permutation: original pixel index -> position in the ordered arrays.
ordered.index.inverse <- rep(0, n.total)
for(i in 1:n.total){
ordered.index.inverse[ordered.index[i]] = i
}
#get an average value for covariates and responses - not sure we actually want this
for(i in 1:box.n.total){
box.index[[i]] <- rep(0, box.total[i])
#box.cov[i] = box.cov[i]/box.total[i]
#box.response[i] = box.response[i]/box.total[i]
}
box.count <- rep(1, box.n.total)
# Record, for each box, the original indices of the pixels it contains.
# NOTE(review): here box.y.n is passed to boxfind (the earlier loop passed
# box.x.n); the two agree only because the box grid is square.
for(i in 1:n.total){
box.number <- boxfind(coord[i,], box.x.length, box.y.length, box.y.n)
box.index[[box.number]][box.count[box.number]] = i
box.count[box.number] = box.count[box.number] + 1
}
#visualise aggregated data
pixelplot(box.cov, total.x.size, total.y.size, box.x.n, box.y.n, main="Aggregated (averaged) covariate")
pixelplot(box.response, total.x.size, total.y.size, box.x.n, box.y.n, main="Aggregated (averaged) response")
# Projection matrix from mesh nodes to the (box-ordered) pixel locations.
A <- inla.spde.make.A(mesh=mesh, loc=ordered.coord)
if(TRUE){
# Fit the aggregated (box-level) model: box.response against box-ordered
# covariates; the latent field x is Laplace-marginalised (random = "x").
f <- MakeADFun(
data = list(X=box.response, cov=ordered.cov, cov2 = ordered.cov2, spde=spde, Apixel = A, box_total = box.total),
parameters = list(beta0=0, beta1=0, beta2=0, log_kappa=2.5, log_tau=0.0, x=runif(n_s,0,10)),
random="x",
DLL = "TMBPoisson"
)
#fit <- nlminb(f$par,f$fn,f$gr,lower=c(-10,-10,0,0))
fit.box <- nlminb(f$par,f$fn,f$gr)
if(FALSE){
# "Wrong" aggregated fit: deliberately uses the unordered covariates to show
# the effect of mismatched pixel ordering. Disabled.
f3 <- MakeADFun(
data = list(X=box.response, cov=cov, cov2 = cov2, spde=spde, Apixel = A, box_total = box.total),
parameters = list(beta0=0, beta1=0, beta2=0, log_kappa=2.5, log_tau=0.0, x=runif(n_s,0,10)),
random="x",
DLL = "TMBPoisson"
)
#fit <- nlminb(f$par,f$fn,f$gr,lower=c(-10,-10,0,0))
fit.box.wrong <- nlminb(f3$par,f3$fn,f3$gr)
}
# Point-level fit: full pixel responses, projection onto original coords.
A2 <- inla.spde.make.A(mesh=mesh, loc=coord)
f2 <- MakeADFun(
data = list(X=response, cov=cov, cov2 = cov2, spde=spde, Apixel = A2),
parameters = list(beta0=0, beta1=0, beta2=0, log_kappa=2.5, log_tau=0.0, x=runif(n_s,0,10)),
random="x",
DLL = "TMBPoissonNoPixels"
)
#fit <- nlminb(f$par,f$fn,f$gr,lower=c(-10,-10,0,0))
fit.points <- nlminb(f2$par,f2$fn,f2$gr)
#print(fit.box.wrong$par)
# Compare both estimates against the true simulation coefficients.
print(fit.box$par)
print(fit.points$par)
cat("beta0", b0, "beta1", b1, "beta2", b2,"\n")
}
# Disabled diagnostic/prediction block.
# NOTE(review): this code is stale -- it references `fit`, but the active fits
# above are named fit.box / fit.points, so it would error if re-enabled.
if(FALSE){
if(FALSE){
compile("TMBExample.cpp")
dyn.load(dynlib("TMBExample"))
g <- MakeADFun(
data = list(x=response, cov=cov),
parameters = list(beta0=0, beta1=0, sigma=1),
DLL = "TMBExample"
)
fit2 <- nlminb(g$par,g$fn,g$gr,lower=c(-10,-10,0))
print(fit2)
}
# Fixed-effects-only predictions and absolute error.
pred <- rep(0, n.total)
beta0 <- fit$par[1]
beta1 <- fit$par[2]
error1 = 0
for(i in 1:n.total){
pred[i] <- rpois(1, exp(beta0 + beta1*cov[i]))
error1 = error1 + abs(pred[i] - response[i])
}
pixelplot(response, total.x.size, total.y.size, x.n, y.n, main="Response")
pixelplot(pred, total.x.size, total.y.size, x.n, y.n, main="Predicted Response")
####
# Predictions including the estimated latent field (posterior modes of x).
field <- rep(0, n.total)
out <- sdreport(f, getJointPrecision = 1)
# NOTE(review): 1035 is a hard-coded mesh-node count; presumably the value of
# n_s when this was written -- should be n_s if re-enabled.
a = array(0, c(1035, 1))
for(i in 1:1035){
a[i] <- out$par.random[i]
}
error = 0
# NOTE(review): A %*% a is recomputed inside the loop; hoisting it out would
# avoid n.total redundant matrix products.
for(i in 1:n.total){
pred[i] <- exp(beta0 + beta1*cov[i] + (A%*%a)[ordered.index.inverse[i]])
field[i] <- (A%*%a)[ordered.index.inverse[i]]
error = error + abs(pred[i] - response[i])
}
pixelplot(response, total.x.size, total.y.size, x.n, y.n, main="Response")
pixelplot(pred, total.x.size, total.y.size, x.n, y.n, main="Predicted Response")
pixelplot(field, total.x.size, total.y.size, x.n, y.n, main="Field")
# NOTE(review): `c` here is the base R function, not data (same bug as the
# earlier visualisation); presumably a.field was intended.
pixelplot(c, total.x.size, total.y.size, x.n, y.n, main="Field")
##confidence intervals...
# Crude +/- 2 SE band on the fixed effects, then count the responses covered.
b <- summary(out, "fixed")
beta0max <- b[1,1] + 2*b[1,2]
beta0min <- b[1,1] - 2*b[1,2]
beta1max <- b[2,1] + 2*b[2,2]
beta1min <- b[2,1] - 2*b[2,2]
count <- 0
predmax <- rep(0, n.total)
predmin <- rep(0, n.total)
for(i in 1:n.total){
predmax[i] <- exp(beta0max + beta1max*cov[i] + (A%*%a)[ordered.index.inverse[i]])
predmin[i] <- exp(beta0min + beta1min*cov[i] + (A%*%a)[ordered.index.inverse[i]])
if(response[i]<predmax[i] && response[i]>predmin[i]){
count <- count+1
}
}
}
| /TMBPoisson.R | no_license | PunamA/Spatial-Modelling-Examples | R | false | false | 8,846 | r | setwd("J:/INLAExamples")
library(RandomFields)
library(TMB)
library(INLA)
###coefficients
# True fixed effects used to simulate the Poisson intensity:
# log-intensity = b0 + b1*cov + b2*cov2 + spatial random field.
b0 <- 1.0
b1 <- 0.7
b2 <- 1.0
##Functions
#function to find which box number a point is in
#(boxes are numbered left to right, bottom to top, e.g. lowest row
#is 1,2,3,4 next is 5,6,7,8 etc.
#but how they're numbered doesn't really matter)
boxfind <- function(coord, box.x.length, box.y.length, box.y.n) {
  # coord: length-2 numeric (x, y). Boxes are numbered bottom-to-top within a
  # column and columns left-to-right, so the box number is
  # row + (column - 1) * rows-per-column. Indices are clamped to at least 1 so
  # points on the left/bottom edge (coordinate 0) land in the first box.
  col.idx <- max(1, ceiling(coord[1] / box.x.length))
  row.idx <- max(1, ceiling(coord[2] / box.y.length))
  row.idx + (col.idx - 1) * box.y.n
}
#function to plot and colour pixels based on a vector of values
# Draws an x.n-by-y.n grid of filled rectangles over a
# (total.x.size x total.y.size) canvas, colouring each cell on a 10-level
# red-to-blue ramp binned from `values`.
# NOTE(review): `values` is assumed to be ordered column-major (outer loop over
# x, inner over y), matching how `coord` is built elsewhere in this script.
# Called for its plotting side effect; returns NULL.
pixelplot <- function(values, total.x.size, total.y.size, x.n, y.n, main=NULL){
# size of one pixel in user coordinates
x.length <- total.x.size/x.n
y.length <- total.y.size/y.n
rbPal <- colorRampPalette(c('red','blue'))
# bin values into 10 equal-width classes; NA values produce NA colours
col <- rbPal(10)[as.numeric(cut(values,breaks = 10))]
# empty plot that establishes the coordinate system
plot(x=NULL, y=NULL, xlim=range(0:total.x.size), ylim=range(0:total.y.size), main = main)
m <- 1
for(i in 1:x.n){
for(j in 1:y.n){
rect(xleft = (i-1)*x.length, ybottom = (j-1)*y.length, xright = i*x.length, ytop = j*y.length, density = NULL, angle = 45,
col = col[m], border = col[m], lty = par("lty"), lwd = par("lwd"))
m <- m+1
}
}
return()
}
###Set up
#TMB stuff
# Compile and load the two TMB templates: aggregated-box likelihood
# ("TMBPoisson") and point-level likelihood ("TMBPoissonNoPixels").
compile("TMBPoisson.cpp")
dyn.load(dynlib("TMBPoisson"))
compile("TMBPoissonNoPixels.cpp")
dyn.load(dynlib("TMBPoissonNoPixels"))
#choose size
total.x.size <- 70
total.y.size <- 70
#choose number of pixels
x.n <- 70
y.n <- 70
x.length <- total.x.size/x.n
y.length <- total.y.size/y.n
n.total <- x.n*y.n
#generate coordinates
x <- seq(0, total.x.size, length.out=x.n)
y <- seq(0, total.y.size, length.out=y.n)
m <- 1
# coord is filled column-major: outer loop over x, inner over y.
# pixelplot() relies on this same ordering.
coord <- array(0,c(n.total,2))
for(i in 1:x.n){
for(j in 1:y.n){
coord[m,] = c(x[i], y[j])
m <- m+1
}
}
#create coordinates for border
#(actually need just the corners, so border.n=1 and this is unecessary, it turns out...)
border.n <- 1
border <- array(0, c(4*(border.n+1), 2))
for(i in 1:(border.n + 1)){
border[i,] <- c(0, (i-1)*total.y.size/border.n)
border[i + (border.n+1), ] <- c(total.x.size, (i-1)*total.y.size/border.n)
border[i + 2*(border.n+1), ] <- c((i-1)*total.x.size/border.n, 0)
border[i + 3*(border.n+1), ] <- c((i-1)*total.x.size/border.n, total.y.size)
}
plot(border)
plot(coord)
###Generate fake data
#generate random field
# Simulate Matern random fields at the pixel locations: a.field is the latent
# spatial effect; cov and cov2 are two spatially-correlated covariates.
# NOTE(review): `as` and `cov` shadow methods::as and stats::cov.
a <- RMmatern(3)
as <- RFsimulate(a, x=coord[,1], y=coord[,2])
a.field <- as.matrix(as)
a2 <- RMmatern(4, scale=3, var=1)
as2 <- RFsimulate(a2, x=coord[,1], y=coord[,2])
cov <- as.matrix(as2)
a3 <- RMmatern(0.2, scale=5, var=6)
as3 <- RFsimulate(a3, x=coord[,1], y=coord[,2])
cov2 <- as.matrix(as3)
#visualise random field
# BUG FIX (review): the original passed `c`, which is not a data object here and
# resolves to the base R concatenation function, making both calls error.
# The simulated Matern field is stored in `a.field` above.
# (Note: a.field may contain negative values, so some cex values will warn.)
pixelplot(a.field, total.x.size, total.y.size, x.n, y.n, main="Matern RF (Random Fields)")
plot(coord, cex = a.field, main = "Matern RF (Random Fields)")
#another way to generate random field
if(FALSE){
library(geoR)
d <- grf(n, grid = coord, xlims = c(0, 10), ylims = c(0, 10), nsim = 1, cov.model = "matern",
cov.pars = c(1,1),
kappa = 0.5, nugget = 0, lambda = 1.00,
mean = 0, RF=TRUE)
plot(coord, cex=d$data, main = "Matern RF (geoR)")
}
#generate covariate and responses
#cov <- rep(0, n.total)
response <- rep(0, n.total)
for(i in 1:n.total){
#cov[i] <- rnorm(1,coord[i,1]+coord[i,2], 20)/20
#cov[i] <- runif(1,0,3)
response[i] <- rpois(1, exp(b0 + b1*cov[i] + b2*cov2[i] + a.field[i]))
}
#visualise response
pixelplot(response, total.x.size, total.y.size, x.n, y.n, main="Response")
###Create spde object
mesh <- inla.mesh.2d(loc.domain = border, max.edge=c(3,2), offset=c(0.03, 0.5), cutoff=1, max.n = 500)
plot(mesh)
spde <- (inla.spde2.matern(mesh=mesh, alpha=2)$param.inla)[c("M0","M1","M2")]
idx <- mesh$idx$loc
n_s <- nrow(spde$M0)
x <- rep(0, n_s)
###Create "box" (i.e. aggregate) data: coarser grid of boxes over the pixels
box.x.n <- 10
box.y.n <- 10
box.n.total <- box.x.n*box.y.n
box.x.length <- total.x.size/box.x.n
box.y.length <- total.y.size/box.y.n
box.cov <- rep(0, box.n.total)
box.response <- rep(0, box.n.total)
box.total <- rep(0, box.n.total)
# rep(c(), n) is just NULL; box.index only becomes a list via [[<- further down
box.index <- rep(c(), box.n.total)
coord.box.number <- rep(0, n.total)
#for each pixel, find which box it's in, then sum the pixel covariates and responses for each box
for(i in 1:n.total){
box.number <- boxfind(coord[i,], box.x.length, box.y.length, box.x.n)
coord.box.number[i] <- box.number
box.cov[box.number] <- box.cov[box.number] + cov[i]
box.response[box.number] <- box.response[box.number] + response[i]
box.total[box.number] <- box.total[box.number] + 1
}
# Reorder pixels so those belonging to the same box are contiguous;
# ordered.index maps a new position back to the original pixel index.
ordered.coord <- array(0, c(n.total, 2))
ordered.index <- rep(0, n.total)
ordered.cov <- rep(0, n.total)
ordered.cov2 <- rep(0, n.total)
m <- 1
for(i in 1:box.n.total){
for(j in 1:n.total){
if(coord.box.number[j] == i){
ordered.coord[m, ] <- coord[j, ]
ordered.cov[m] <- cov[j]
ordered.cov2[m] <- cov2[j]
ordered.index[m] <- j
m <- m+1
}
}
}
# ordered.index.inverse maps an original pixel index to its reordered position
ordered.index.inverse <- rep(0, n.total)
for(i in 1:n.total){
ordered.index.inverse[ordered.index[i]] = i
}
#get an average value for covariates and responses - not sure we actually want this
# (averaging itself is commented out; this loop only allocates box.index slots)
for(i in 1:box.n.total){
box.index[[i]] <- rep(0, box.total[i])
#box.cov[i] = box.cov[i]/box.total[i]
#box.response[i] = box.response[i]/box.total[i]
}
# Record, for every box, the original indices of the pixels inside it.
box.count <- rep(1, box.n.total)
for(i in 1:n.total){
# NOTE(review): the last argument is box.y.n here but box.x.n in the earlier
# boxfind call; the two are equal (10) so it works, but this looks
# inconsistent -- confirm which one boxfind expects.
box.number <- boxfind(coord[i,], box.x.length, box.y.length, box.y.n)
box.index[[box.number]][box.count[box.number]] = i
box.count[box.number] = box.count[box.number] + 1
}
#visualise aggregated data
pixelplot(box.cov, total.x.size, total.y.size, box.x.n, box.y.n, main="Aggregated (averaged) covariate")
pixelplot(box.response, total.x.size, total.y.size, box.x.n, box.y.n, main="Aggregated (averaged) response")
# Projection matrix from mesh nodes to the (reordered) pixel locations
A <- inla.spde.make.A(mesh=mesh, loc=ordered.coord)
if(TRUE){
# Fit the aggregated ("box") model.
# NOTE(review): DLL is "TMBPoisson" while this excerpt compiles/loads
# "TMBPoissonNoPixels"; "TMBPoisson" is presumably compiled elsewhere -- confirm.
f <- MakeADFun(
data = list(X=box.response, cov=ordered.cov, cov2 = ordered.cov2, spde=spde, Apixel = A, box_total = box.total),
parameters = list(beta0=0, beta1=0, beta2=0, log_kappa=2.5, log_tau=0.0, x=runif(n_s,0,10)),
random="x",
DLL = "TMBPoisson"
)
#fit <- nlminb(f$par,f$fn,f$gr,lower=c(-10,-10,0,0))
fit.box <- nlminb(f$par,f$fn,f$gr)
# Same aggregated model but with deliberately unordered covariates (disabled)
if(FALSE){
f3 <- MakeADFun(
data = list(X=box.response, cov=cov, cov2 = cov2, spde=spde, Apixel = A, box_total = box.total),
parameters = list(beta0=0, beta1=0, beta2=0, log_kappa=2.5, log_tau=0.0, x=runif(n_s,0,10)),
random="x",
DLL = "TMBPoisson"
)
#fit <- nlminb(f$par,f$fn,f$gr,lower=c(-10,-10,0,0))
fit.box.wrong <- nlminb(f3$par,f3$fn,f3$gr)
}
# Fit the disaggregated (per-pixel/point) model for comparison.
A2 <- inla.spde.make.A(mesh=mesh, loc=coord)
f2 <- MakeADFun(
data = list(X=response, cov=cov, cov2 = cov2, spde=spde, Apixel = A2),
parameters = list(beta0=0, beta1=0, beta2=0, log_kappa=2.5, log_tau=0.0, x=runif(n_s,0,10)),
random="x",
DLL = "TMBPoissonNoPixels"
)
#fit <- nlminb(f$par,f$fn,f$gr,lower=c(-10,-10,0,0))
fit.points <- nlminb(f2$par,f2$fn,f2$gr)
# Compare fitted parameters against the true simulation coefficients.
#print(fit.box.wrong$par)
print(fit.box$par)
print(fit.points$par)
cat("beta0", b0, "beta1", b1, "beta2", b2,"\n")
}
# ---- Disabled exploratory / prediction code below this point ----
if(FALSE){
if(FALSE){
compile("TMBExample.cpp")
dyn.load(dynlib("TMBExample"))
g <- MakeADFun(
data = list(x=response, cov=cov),
parameters = list(beta0=0, beta1=0, sigma=1),
DLL = "TMBExample"
)
fit2 <- nlminb(g$par,g$fn,g$gr,lower=c(-10,-10,0))
print(fit2)
}
# NOTE(review): 'fit' is never assigned in the active code above (only
# 'fit.box'/'fit.points'); this dead branch would error if enabled -- confirm
# which fit object was intended.
pred <- rep(0, n.total)
beta0 <- fit$par[1]
beta1 <- fit$par[2]
error1 = 0
# Predictions from fixed effects only (no spatial field), plus L1 error.
for(i in 1:n.total){
pred[i] <- rpois(1, exp(beta0 + beta1*cov[i]))
error1 = error1 + abs(pred[i] - response[i])
}
pixelplot(response, total.x.size, total.y.size, x.n, y.n, main="Response")
pixelplot(pred, total.x.size, total.y.size, x.n, y.n, main="Predicted Response")
####
# Predictions including the estimated random field projected onto pixels.
field <- rep(0, n.total)
out <- sdreport(f, getJointPrecision = 1)
# NOTE(review): 1035 is hard-coded; presumably the number of mesh nodes (n_s)
# for this particular mesh -- should use n_s instead. Confirm.
a = array(0, c(1035, 1))
for(i in 1:1035){
a[i] <- out$par.random[i]
}
error = 0
for(i in 1:n.total){
pred[i] <- exp(beta0 + beta1*cov[i] + (A%*%a)[ordered.index.inverse[i]])
field[i] <- (A%*%a)[ordered.index.inverse[i]]
error = error + abs(pred[i] - response[i])
}
pixelplot(response, total.x.size, total.y.size, x.n, y.n, main="Response")
pixelplot(pred, total.x.size, total.y.size, x.n, y.n, main="Predicted Response")
pixelplot(field, total.x.size, total.y.size, x.n, y.n, main="Field")
# NOTE(review): 'c' is base R's c() function again, not a field vector -- confirm.
pixelplot(c, total.x.size, total.y.size, x.n, y.n, main="Field")
##confidence intervals... (rough +/- 2 SE interval on the fixed effects)
b <- summary(out, "fixed")
beta0max <- b[1,1] + 2*b[1,2]
beta0min <- b[1,1] - 2*b[1,2]
beta1max <- b[2,1] + 2*b[2,2]
beta1min <- b[2,1] - 2*b[2,2]
# Count how many observed responses fall inside the prediction band.
count <- 0
predmax <- rep(0, n.total)
predmin <- rep(0, n.total)
for(i in 1:n.total){
predmax[i] <- exp(beta0max + beta1max*cov[i] + (A%*%a)[ordered.index.inverse[i]])
predmin[i] <- exp(beta0min + beta1min*cov[i] + (A%*%a)[ordered.index.inverse[i]])
if(response[i]<predmax[i] && response[i]>predmin[i]){
count <- count+1
}
}
}
|
library(tidyverse)
library(survey)
server_weight_calculator <- function(input, output, session){
  # Shiny server module for the TLS (time-location sampling) weight calculator.
  # Workflow: read the uploaded CSV, let the user map its columns onto the
  # sampling-design variables, compute analysis weights plus bootstrap
  # replicate weights via the 'survey' package, and expose the augmented
  # data set as a preview table and as a CSV download.
  #
  # Args:
  #   input, output, session: standard Shiny server-function arguments.

  # Read the uploaded CSV and populate the variable-selection dropdowns with
  # its column names.  Returns NULL until a file has been uploaded.
  get_raw_data <- reactive({
    inFile <- input$file_wt
    if (is.null(inFile))
      return(NULL)
    dat <- read.csv(inFile$datapath, header = TRUE, stringsAsFactors = FALSE)
    vars <- names(dat)
    for (name in c("samp_prob_wt", "strata_wt", "locations_wt", "day_wt", "time_wt", "num_observed_wt"))
      updateSelectizeInput(session, name, choices = c("Choose Variable" = "", vars))
    dat
  })

  # Compute analysis and bootstrap replicate weights.  Returns the uploaded
  # data augmented with cluster_id, obs_id, analysis_weights and one column
  # per replicate weight, or NULL while the variable mapping is incomplete.
  get_sample_weights <- reactive({
    dat <- get_raw_data()
    if (is.null(dat))
      return(NULL)
    # Required selections, with the phrase used in the user notification.
    # (Also fixes the "numbr" typo in the last message.)
    required <- c(
      samp_prob_wt    = "the venue-time sampling probability",
      locations_wt    = "the venue",
      time_wt         = "the time of day",
      day_wt          = "the day of the week",
      num_observed_wt = "the number of individuals observed at the location during the time of sampling"
    )
    for (nm in names(required)) {
      if (is.null(input[[nm]]) || input[[nm]] == "") {
        showNotification(paste0("Please select a variable for ", required[[nm]]))
        return(NULL)
      }
    }
    # Strata are optional; a single pseudo-stratum "_" is used when absent.
    strata <- if (is.null(input$strata_wt) || input$strata_wt == "") "_" else as.character(dat[[input$strata_wt]])
    df <- data.frame(
      location = as.character(dat[[input$locations_wt]]),
      day_of_week = as.character(dat[[input$day_wt]]),
      time_of_day = as.character(dat[[input$time_wt]]),
      sampling_strata = strata,
      selection_probability = as.numeric(dat[[input$samp_prob_wt]]),
      subjects_observed = as.numeric(dat[[input$num_observed_wt]])
    )
    # Number of sampled subjects within each venue-day-time cluster.
    tmp <- df %>% group_by(location, day_of_week, time_of_day) %>% summarise(n_sampled = n())
    df <- merge(df, tmp)
    # Weight: observed venue size divided by the venue-time selection
    # probability and the number of subjects sampled there.
    df$analysis_weights <- df$subjects_observed / (df$selection_probability * df$n_sampled)
    df$obs_id <- 1:nrow(df)
    # Fixed: paste0() has no 'sep' argument, so the old code appended a
    # literal "_" and ran the cluster components together (risking collisions);
    # paste(..., sep = "_") keeps them unambiguous.
    df$cluster_id <- paste(df$location, df$day_of_week, df$time_of_day, sep = "_")
    dsn <- svydesign(id = ~cluster_id + obs_id, weights = ~analysis_weights, data = df, strata = ~sampling_strata)
    rep_dsn <- as.svrepdesign(dsn, type = "bootstrap", replicates = input$n_rep_wts)
    rep_wts <- weights(rep_dsn)
    # Replicate factors times the analysis weights give full replicate weights.
    rep_wts <- as.data.frame(sweep(rep_wts, 1, df$analysis_weights, FUN = "*"))
    names(rep_wts) <- paste0("rep_weights_", 1:ncol(rep_wts))
    dat_wt <- cbind(dat, df[c("cluster_id", "obs_id", "analysis_weights")], rep_wts)
    dat_wt
  })

  # Preview of the raw upload.
  output$table_wt <- renderTable({
    if (is.null(get_raw_data()))
      return(NULL)
    get_raw_data()
  })

  # Preview of the weighted data set.
  output$result_table_wt <- renderTable({
    if (is.null(get_raw_data()))
      return(NULL)
    get_sample_weights()
  })

  # CSV download of the weighted data set.
  output$download_wt <- downloadHandler(
    filename = function() {
      paste('tls-data-with-weights-', Sys.Date(), '.csv', sep = '')
    },
    content = function(con) {
      dat <- get_sample_weights()
      if (is.null(dat))
        return()
      write.csv(dat, con, row.names = FALSE)
    }
  )
}
| /inst/apps/tls_app/server-weight-calculator.R | no_license | fellstat/shinytls | R | false | false | 3,299 | r | library(tidyverse)
library(survey)
server_weight_calculator <- function(input, output, session){
  # Shiny server module for the TLS (time-location sampling) weight calculator.
  # Workflow: read the uploaded CSV, let the user map its columns onto the
  # sampling-design variables, compute analysis weights plus bootstrap
  # replicate weights via the 'survey' package, and expose the augmented
  # data set as a preview table and as a CSV download.
  #
  # Args:
  #   input, output, session: standard Shiny server-function arguments.

  # Read the uploaded CSV and populate the variable-selection dropdowns with
  # its column names.  Returns NULL until a file has been uploaded.
  get_raw_data <- reactive({
    inFile <- input$file_wt
    if (is.null(inFile))
      return(NULL)
    dat <- read.csv(inFile$datapath, header = TRUE, stringsAsFactors = FALSE)
    vars <- names(dat)
    for (name in c("samp_prob_wt", "strata_wt", "locations_wt", "day_wt", "time_wt", "num_observed_wt"))
      updateSelectizeInput(session, name, choices = c("Choose Variable" = "", vars))
    dat
  })

  # Compute analysis and bootstrap replicate weights.  Returns the uploaded
  # data augmented with cluster_id, obs_id, analysis_weights and one column
  # per replicate weight, or NULL while the variable mapping is incomplete.
  get_sample_weights <- reactive({
    dat <- get_raw_data()
    if (is.null(dat))
      return(NULL)
    # Required selections, with the phrase used in the user notification.
    # (Also fixes the "numbr" typo in the last message.)
    required <- c(
      samp_prob_wt    = "the venue-time sampling probability",
      locations_wt    = "the venue",
      time_wt         = "the time of day",
      day_wt          = "the day of the week",
      num_observed_wt = "the number of individuals observed at the location during the time of sampling"
    )
    for (nm in names(required)) {
      if (is.null(input[[nm]]) || input[[nm]] == "") {
        showNotification(paste0("Please select a variable for ", required[[nm]]))
        return(NULL)
      }
    }
    # Strata are optional; a single pseudo-stratum "_" is used when absent.
    strata <- if (is.null(input$strata_wt) || input$strata_wt == "") "_" else as.character(dat[[input$strata_wt]])
    df <- data.frame(
      location = as.character(dat[[input$locations_wt]]),
      day_of_week = as.character(dat[[input$day_wt]]),
      time_of_day = as.character(dat[[input$time_wt]]),
      sampling_strata = strata,
      selection_probability = as.numeric(dat[[input$samp_prob_wt]]),
      subjects_observed = as.numeric(dat[[input$num_observed_wt]])
    )
    # Number of sampled subjects within each venue-day-time cluster.
    tmp <- df %>% group_by(location, day_of_week, time_of_day) %>% summarise(n_sampled = n())
    df <- merge(df, tmp)
    # Weight: observed venue size divided by the venue-time selection
    # probability and the number of subjects sampled there.
    df$analysis_weights <- df$subjects_observed / (df$selection_probability * df$n_sampled)
    df$obs_id <- 1:nrow(df)
    # Fixed: paste0() has no 'sep' argument, so the old code appended a
    # literal "_" and ran the cluster components together (risking collisions);
    # paste(..., sep = "_") keeps them unambiguous.
    df$cluster_id <- paste(df$location, df$day_of_week, df$time_of_day, sep = "_")
    dsn <- svydesign(id = ~cluster_id + obs_id, weights = ~analysis_weights, data = df, strata = ~sampling_strata)
    rep_dsn <- as.svrepdesign(dsn, type = "bootstrap", replicates = input$n_rep_wts)
    rep_wts <- weights(rep_dsn)
    # Replicate factors times the analysis weights give full replicate weights.
    rep_wts <- as.data.frame(sweep(rep_wts, 1, df$analysis_weights, FUN = "*"))
    names(rep_wts) <- paste0("rep_weights_", 1:ncol(rep_wts))
    dat_wt <- cbind(dat, df[c("cluster_id", "obs_id", "analysis_weights")], rep_wts)
    dat_wt
  })

  # Preview of the raw upload.
  output$table_wt <- renderTable({
    if (is.null(get_raw_data()))
      return(NULL)
    get_raw_data()
  })

  # Preview of the weighted data set.
  output$result_table_wt <- renderTable({
    if (is.null(get_raw_data()))
      return(NULL)
    get_sample_weights()
  })

  # CSV download of the weighted data set.
  output$download_wt <- downloadHandler(
    filename = function() {
      paste('tls-data-with-weights-', Sys.Date(), '.csv', sep = '')
    },
    content = function(con) {
      dat <- get_sample_weights()
      if (is.null(dat))
        return()
      write.csv(dat, con, row.names = FALSE)
    }
  )
}
|
##
## runit-regression.R - unit tests to avoid regressions
##
test.override_rulebase_1 <- function() {
  # With an empty rulebase, simplify() must act as the identity.
  rb <- rulebase()
  input <- quote(a*a)
  checkIdentical(quote(a*a), simplify(input, rulebase=rb))
}
test.override_rulebase_2 <- function() {
  # A user-supplied log-of-product rule must be applied by simplify().
  rb <- rulebase(rule(log(a*b), log(a) + log(b)))
  result <- simplify(quote(log(a*b)), rulebase=rb)
  checkIdentical(quote(log(a) + log(b)), result)
}
test.simplify_function <- function() {
  # simplify() on a function should reduce its body (x*x + 0 -> x^2).
  original <- function() { x * x + 0 }
  reduced <- function() { x^2 }
  checkIdentical(reduced, simplify(original))
}
| /inst/unittests/runit-regression.R | no_license | perNyfelt/rrules | R | false | false | 540 | r | ##
## runit-regression.R - unit tests to avoid regressions
##
test.override_rulebase_1 <- function() {
  # With an empty rulebase, simplify() must act as the identity.
  rb <- rulebase()
  input <- quote(a*a)
  checkIdentical(quote(a*a), simplify(input, rulebase=rb))
}
test.override_rulebase_2 <- function() {
  # A user-supplied log-of-product rule must be applied by simplify().
  rb <- rulebase(rule(log(a*b), log(a) + log(b)))
  result <- simplify(quote(log(a*b)), rulebase=rb)
  checkIdentical(quote(log(a) + log(b)), result)
}
test.simplify_function <- function() {
  # simplify() on a function should reduce its body (x*x + 0 -> x^2).
  original <- function() { x * x + 0 }
  reduced <- function() { x^2 }
  checkIdentical(reduced, simplify(original))
}
|
#'Regularized Wasserstein Barycenters
#'
#' \code{WaBarycenter} takes in a list of matrices representing joint measures on the row and column space and outputs the
#'corresponding Barycenter.
#'The list has to consist of matrices having all the same dimensions, for instance, each matrix represents the normalized weights of the corresponding pixels of images.
#'@author Marcel Klatt
#'@param images A list of matrices satisfying the prerequisites described above.
#'@param maxIter Maximum number of iterations.
#'@param lambda Non-negative regularization parameter (for large lambda the regularized Barycenter is close to its true counterpart). If FALSE the algorithm uses a lambda depending on \code{costm}.
#'@param costm A matrix of pairwise distances between the locations. If FALSE the algorithm uses the usual euclidean distance matrix on a [0,1]x[0,1] equidistant pixel grid.
#'@return The Barycenter of the matrices, represented by a \eqn{n x m} matrix.
#'
#'Given the MNIST dataset, a Barycenter of the digit three is shown below. The Barycenter is based on 4351 images each represented by
#'a 28 x 28 pixel grid, respectively. The values for \code{lambda} and \code{maxIter} were set by default. The dataset is also available in this package (c.f. \link{three}).
#'
#'\figure{threeMNIST.png}{test}
#'@references Cuturi, M.: \code{Fast Computation of Wasserstein Barycenters}, Proceedings of the International Conference on Machine Learning, Beijing, China, 2014
#'@examples #Computation of a Barycenter based on five images representing the digit eight, respectively.
#'WaBarycenter(eight,lambda=10)
#'#For a more reasonable but longer computation!
#'\dontrun{WaBarycenter(eight)}
WaBarycenter <- function(images, maxIter = 10, lambda = FALSE, costm = FALSE){
  # Entropically regularized Wasserstein barycenter of a list of images
  # (matrices of equal dimension), computed with Cuturi's (2014) accelerated
  # subgradient scheme; the inner Sinkhorn step is the package's
  # RcppArmadillo routine Subgradient().
  #
  # Args:
  #   images:  list of non-negative matrices, all of identical dimension.
  #   maxIter: number of outer subgradient iterations.
  #   lambda:  regularization parameter; FALSE means "derive from costm".
  #   costm:   pairwise cost matrix (n x n, n = total pixels); FALSE means
  #            "Euclidean distances on an equidistant grid in [0,1]^2".
  #
  # Returns: the barycenter as a matrix with the same dimension as the
  #   inputs, oriented so that image() displays it correctly.
  time <- proc.time()  # start time, reported before returning

  ## ---- Input validation --------------------------------------------------
  if (!is.list(images)) {
    stop("The images have to be passed as a list each entry representing a matrix!")
  }
  if (length(unique(lapply(images, dim))) == 1) {
    dimension <- dim(images[[1]])
  } else {
    stop("Dimensions of the images are not equal!")
  }
  n <- dimension[1]*dimension[2]
  if (!is.matrix(costm)) {
    # No cost matrix supplied: lay the pixels out on a regular grid in the
    # unit square and use pairwise Euclidean distances as transport costs.
    coord1 <- seq(0, 1, length.out = dimension[2])
    coord2 <- rev(seq(0, 1, length.out = dimension[1]))
    coordinates <- expand.grid(coord1, coord2)
    costm <- as.matrix(dist(coordinates, diag = TRUE, upper = TRUE))
  } else if (!identical(dim(costm), rep(n, 2))) {
    # (dropped the debug print of the full cost matrix before stopping)
    stop("Dimension of the cost matrix is not compatible with the given images!")
  }
  if (isFALSE(lambda)) {
    # Default regularization scaled to the magnitude of the costs.
    lambda <- 60/median(costm)
  }

  ## ---- Main iteration (Cuturi 2014, Algorithm 2) --------------------------
  a_tild <- rep(1/n, n)
  a_hat <- rep(1/n, n)
  t_0 <- 2
  iter <- t_0  # step counter (was named 't', shadowing base::t)
  for (i in seq_len(maxIter)) {
    beta <- (iter + 1)/2
    a <- (1 - 1/beta)*a_hat + (1/beta)*a_tild
    # Average the Sinkhorn subgradients over all input images (Algorithm 3).
    ALPHA <- 0
    for (j in seq_along(images)) {
      # Each image is transposed so its pixel ordering matches the
      # coordinate grid constructed above.
      ALPHA <- Subgradient(a, t(images[[j]]), costm, lambda) + ALPHA
    }
    ALPHA <- (1/length(images))*ALPHA
    # Multiplicative (mirror-descent) update, renormalized onto the simplex.
    a_tild <- a_tild*exp(-(t_0)*beta*ALPHA)
    a_tild <- a_tild/sum(a_tild)
    a_hat <- (1 - 1/beta)*a_hat + (1/beta)*a_tild
    iter <- iter + 1
  }

  ## ---- Output -------------------------------------------------------------
  # Reshape so that image() shows the correct orientation.  Fixed: this used
  # to run only for square images (leaving 'a.temp' undefined otherwise) and
  # reversed the columns with nrow(); it now also works for rectangular
  # images (ncol(), drop = FALSE).
  a.temp <- matrix(a, dimension[1], dimension[2], byrow = TRUE)
  a.temp <- a.temp[, ncol(a.temp):1, drop = FALSE]
  print(proc.time() - time)  # computation time
  a.temp
}
| /R/Barycenter.R | no_license | cran/Barycenter | R | false | false | 4,178 | r | #'Regularized Wasserstein Barycenters
#'
#' \code{WaBarycenter} takes in a list of matrices representing joint measures on the row and column space and outputs the
#'corresponding Barycenter.
#'The list has to consist of matrices having all the same dimensions, for instance, each matrix represents the normalized weights of the corresponding pixels of images.
#'@author Marcel Klatt
#'@param images A list of matrices satisfying the prerequisites described above.
#'@param maxIter Maximum number of iterations.
#'@param lambda Non-negative regularization parameter (for large lambda the regularized Barycenter is close to its true counterpart). If FALSE the algorithm uses a lambda depending on \code{costm}.
#'@param costm A matrix of pairwise distances between the locations. If FALSE the algorithm uses the usual euclidean distance matrix on a [0,1]x[0,1] equidistant pixel grid.
#'@return The Barycenter of the matrices, represented by a \eqn{n x m} matrix.
#'
#'Given the MNIST dataset, a Barycenter of the digit three is shown below. The Barycenter is based on 4351 images each represented by
#'a 28 x 28 pixel grid, respectively. The values for \code{lambda} and \code{maxIter} were set by default. The dataset is also available in this package (c.f. \link{three}).
#'
#'\figure{threeMNIST.png}{test}
#'@references Cuturi, M.: \code{Fast Computation of Wasserstein Barycenters}, Proceedings of the International Conference on Machine Learning, Beijing, China, 2014
#'@examples #Computation of a Barycenter based on five images representing the digit eight, respectively.
#'WaBarycenter(eight,lambda=10)
#'#For a more reasonable but longer computation!
#'\dontrun{WaBarycenter(eight)}
WaBarycenter <- function(images, maxIter = 10, lambda = FALSE, costm = FALSE){
  # Entropically regularized Wasserstein barycenter of a list of images
  # (matrices of equal dimension), computed with Cuturi's (2014) accelerated
  # subgradient scheme; the inner Sinkhorn step is the package's
  # RcppArmadillo routine Subgradient().
  #
  # Args:
  #   images:  list of non-negative matrices, all of identical dimension.
  #   maxIter: number of outer subgradient iterations.
  #   lambda:  regularization parameter; FALSE means "derive from costm".
  #   costm:   pairwise cost matrix (n x n, n = total pixels); FALSE means
  #            "Euclidean distances on an equidistant grid in [0,1]^2".
  #
  # Returns: the barycenter as a matrix with the same dimension as the
  #   inputs, oriented so that image() displays it correctly.
  time <- proc.time()  # start time, reported before returning

  ## ---- Input validation --------------------------------------------------
  if (!is.list(images)) {
    stop("The images have to be passed as a list each entry representing a matrix!")
  }
  if (length(unique(lapply(images, dim))) == 1) {
    dimension <- dim(images[[1]])
  } else {
    stop("Dimensions of the images are not equal!")
  }
  n <- dimension[1]*dimension[2]
  if (!is.matrix(costm)) {
    # No cost matrix supplied: lay the pixels out on a regular grid in the
    # unit square and use pairwise Euclidean distances as transport costs.
    coord1 <- seq(0, 1, length.out = dimension[2])
    coord2 <- rev(seq(0, 1, length.out = dimension[1]))
    coordinates <- expand.grid(coord1, coord2)
    costm <- as.matrix(dist(coordinates, diag = TRUE, upper = TRUE))
  } else if (!identical(dim(costm), rep(n, 2))) {
    # (dropped the debug print of the full cost matrix before stopping)
    stop("Dimension of the cost matrix is not compatible with the given images!")
  }
  if (isFALSE(lambda)) {
    # Default regularization scaled to the magnitude of the costs.
    lambda <- 60/median(costm)
  }

  ## ---- Main iteration (Cuturi 2014, Algorithm 2) --------------------------
  a_tild <- rep(1/n, n)
  a_hat <- rep(1/n, n)
  t_0 <- 2
  iter <- t_0  # step counter (was named 't', shadowing base::t)
  for (i in seq_len(maxIter)) {
    beta <- (iter + 1)/2
    a <- (1 - 1/beta)*a_hat + (1/beta)*a_tild
    # Average the Sinkhorn subgradients over all input images (Algorithm 3).
    ALPHA <- 0
    for (j in seq_along(images)) {
      # Each image is transposed so its pixel ordering matches the
      # coordinate grid constructed above.
      ALPHA <- Subgradient(a, t(images[[j]]), costm, lambda) + ALPHA
    }
    ALPHA <- (1/length(images))*ALPHA
    # Multiplicative (mirror-descent) update, renormalized onto the simplex.
    a_tild <- a_tild*exp(-(t_0)*beta*ALPHA)
    a_tild <- a_tild/sum(a_tild)
    a_hat <- (1 - 1/beta)*a_hat + (1/beta)*a_tild
    iter <- iter + 1
  }

  ## ---- Output -------------------------------------------------------------
  # Reshape so that image() shows the correct orientation.  Fixed: this used
  # to run only for square images (leaving 'a.temp' undefined otherwise) and
  # reversed the columns with nrow(); it now also works for rectangular
  # images (ncol(), drop = FALSE).
  a.temp <- matrix(a, dimension[1], dimension[2], byrow = TRUE)
  a.temp <- a.temp[, ncol(a.temp):1, drop = FALSE]
  print(proc.time() - time)  # computation time
  a.temp
}
|
## Set Working directory to my directory:
## setwd("C:/Users/jalil/Downloads/Data Science/04 Explore/Course Project 1")
## Load the full dataset; '?' marks missing values in this file
data_all <- read.csv("./household_power_consumption.txt", header=T, sep=';', na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_all$Date <- as.Date(data_all$Date, format="%d/%m/%Y")
## Subset data to the two days of interest (1-2 Feb 2007)
data <- subset(data_all, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
## unload the full data from memory (only the 2-day subset is needed)
rm(data_all)
## Combine Date and Time into a single POSIXct timestamp for the x axis
data$Datetime <- as.POSIXct(paste( as.Date (data$Date), data$Time ))
## Plot global active power over time as a line chart
plot(data$Global_active_power~data$Datetime, type="l", ylab="Global Active Power (kilowatts)", xlab="")
## Save the screen device to a 480x480 PNG
## NOTE(review): dev.copy() can render slightly differently from drawing
## directly on a png() device -- confirm the saved file looks as intended.
dev.copy(png, file="plot2.png", height=480, width=480)
dev.off()
| /plot2.R | no_license | Aljaziri/ExData_Plotting1 | R | false | false | 808 | r | ## Set Working directory to my directory:
## setwd("C:/Users/jalil/Downloads/Data Science/04 Explore/Course Project 1")
## Load the full dataset; '?' marks missing values in this file
data_all <- read.csv("./household_power_consumption.txt", header=T, sep=';', na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_all$Date <- as.Date(data_all$Date, format="%d/%m/%Y")
## Subset data to the two days of interest (1-2 Feb 2007)
data <- subset(data_all, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
## unload the full data from memory (only the 2-day subset is needed)
rm(data_all)
## Combine Date and Time into a single POSIXct timestamp for the x axis
data$Datetime <- as.POSIXct(paste( as.Date (data$Date), data$Time ))
## Plot global active power over time as a line chart
plot(data$Global_active_power~data$Datetime, type="l", ylab="Global Active Power (kilowatts)", xlab="")
## Save the screen device to a 480x480 PNG
## NOTE(review): dev.copy() can render slightly differently from drawing
## directly on a png() device -- confirm the saved file looks as intended.
dev.copy(png, file="plot2.png", height=480, width=480)
dev.off()
|
#' Generate simulated data using a linear regression model
#'
#' @details Simulate a standard linear regression model. The design matrix
#' \code{X} is drawn from a multivariate normal with covariance
#' \code{solve(beta_cov)} (optionally scaled by \code{1/n}), so that the
#' implied posterior covariance of \code{beta} is approximately \code{beta_cov}.
#'
#' @param n Number of observations
#' @param beta True values of \code{beta}.
#' @param beta_cov True posterior covariance of parameters
#' @param sample_size_independent_cov Should the posterior covariance be the same for any N? Default is \code{FALSE}.
#' @param sigma The noise standard deviation
#' @param digits The number of digits used in the dataset (default 5)
#'
#' @return A list with elements \code{X} (design matrix), \code{D} (number of
#'   parameters), \code{N} (number of observations) and \code{y} (response).
simulate_data_linear_regression <-
function(n,
beta,
beta_cov = diag(length(beta)),
sigma = 1,
sample_size_independent_cov = FALSE,
digits = 5){
# Validate all arguments up front (fail fast with informative errors).
checkmate::assert_int(n, lower = 1)
checkmate::assert_numeric(beta, min.len = 1)
checkmate::assert_matrix(beta_cov, nrows = length(beta), ncols = length(beta))
checkmate::assert_number(sigma, lower = .Machine$double.eps)
checkmate::assert_flag(sample_size_independent_cov)
checkmate::assert_int(digits)
D <- length(beta)
# Covariate covariance is the inverse of the requested posterior covariance;
# dividing by n makes the implied posterior covariance independent of the
# sample size.
sig <- solve(beta_cov)
if(sample_size_independent_cov) sig <- sig/n
x <- mvtnorm::rmvnorm(n = n, mean = rep(0, D), sigma = sig)
x <- round(x, digits = digits)
# NOTE(review): when length(beta) == 1, D is also 1, so this rep() is a
# no-op reshape -- presumably a leftover from scalar-beta expansion; confirm.
if(length(beta) == 1) beta <- as.matrix(rep(beta, D))
checkmate::assert_numeric(beta, len = D)
# Response: linear predictor plus Gaussian noise, rounded like X.
y <- as.vector(x%*%beta + rnorm(n, sd = sigma))
y <- round(y, digits = digits)
list(X = x, D = as.integer(D), N = as.integer(n), y = y)
}
# Generate and archive four simulated linear-regression data sets as zipped
# JSON (D = 5 predictors; identity vs. compound-symmetric design covariance).
set.seed(4711)
# n = 50, independent (identity) covariance
sblrD5n50I <- simulate_data_linear_regression(n = 50, beta = rep(1, 5), digits = 3)
writeLines(jsonlite::toJSON(sblrD5n50I, pretty = TRUE, auto_unbox = TRUE), con = "sblrD5n50I.json")
zip(zipfile = "sblrD5n50I.json.zip", files = "sblrD5n50I.json")
set.seed(4712)
# n = 500, independent (identity) covariance
sblrD5n500I <- simulate_data_linear_regression(n = 500, beta = rep(1, 5), digits = 3)
writeLines(jsonlite::toJSON(sblrD5n500I, pretty = TRUE, auto_unbox = TRUE), con = "sblrD5n500I.json")
zip(zipfile = "sblrD5n500I.json.zip", files = "sblrD5n500I.json")
set.seed(4713)
# n = 50, compound-symmetric covariance (off-diagonal 0.7)
sblrD5n50C07 <- simulate_data_linear_regression(n = 50, beta = rep(1, 5), beta_cov = 0.3 * diag(5) + 0.7, digits = 3)
writeLines(jsonlite::toJSON(sblrD5n50C07, pretty = TRUE, auto_unbox = TRUE), con = "sblrD5n50C07.json")
zip(zipfile = "sblrD5n50C07.json.zip", files = "sblrD5n50C07.json")
set.seed(4714)
# n = 500, compound-symmetric covariance (off-diagonal 0.7)
sblrD5n500C07 <- simulate_data_linear_regression(n = 500, beta = rep(1, 5), beta_cov = 0.3 * diag(5) + 0.7, digits = 3)
writeLines(jsonlite::toJSON(sblrD5n500C07, pretty = TRUE, auto_unbox = TRUE), con = "sblrD5n500C07.json")
zip(zipfile = "sblrD5n500C07.json.zip", files = "sblrD5n500C07.json")
| /content/data-raw/sblr/sblr.R | no_license | yao-yl/posterior_database | R | false | false | 2,597 | r | #' Generate simulated data using a linear regression model
#'
#' @details Simulate a standard linear regression model
#'
#' @param n Number of observations
#' @param beta True values of \code{beta}.
#' @param beta_cov True posterior covariance of parameters
#' @param sample_size_independent_cov Should the posterior covariance be the same for any N? Default is \code{FALSE}.
#' @param sigma The noise standard deviation
#' @param digits The number of digits used in the dataset
#'
simulate_data_linear_regression <-
function(n,
beta,
beta_cov = diag(length(beta)),
sigma = 1,
sample_size_independent_cov = FALSE,
digits = 5){
# Validate all arguments up front (fail fast with informative errors).
checkmate::assert_int(n, lower = 1)
checkmate::assert_numeric(beta, min.len = 1)
checkmate::assert_matrix(beta_cov, nrows = length(beta), ncols = length(beta))
checkmate::assert_number(sigma, lower = .Machine$double.eps)
checkmate::assert_flag(sample_size_independent_cov)
checkmate::assert_int(digits)
D <- length(beta)
# Covariate covariance is the inverse of the requested posterior covariance;
# dividing by n makes the implied posterior covariance independent of the
# sample size.
sig <- solve(beta_cov)
if(sample_size_independent_cov) sig <- sig/n
x <- mvtnorm::rmvnorm(n = n, mean = rep(0, D), sigma = sig)
x <- round(x, digits = digits)
# NOTE(review): when length(beta) == 1, D is also 1, so this rep() is a
# no-op reshape -- presumably a leftover from scalar-beta expansion; confirm.
if(length(beta) == 1) beta <- as.matrix(rep(beta, D))
checkmate::assert_numeric(beta, len = D)
# Response: linear predictor plus Gaussian noise, rounded like X.
y <- as.vector(x%*%beta + rnorm(n, sd = sigma))
y <- round(y, digits = digits)
list(X = x, D = as.integer(D), N = as.integer(n), y = y)
}
# Generate and archive four simulated linear-regression data sets as zipped
# JSON (D = 5 predictors; identity vs. compound-symmetric design covariance).
set.seed(4711)
# n = 50, independent (identity) covariance
sblrD5n50I <- simulate_data_linear_regression(n = 50, beta = rep(1, 5), digits = 3)
writeLines(jsonlite::toJSON(sblrD5n50I, pretty = TRUE, auto_unbox = TRUE), con = "sblrD5n50I.json")
zip(zipfile = "sblrD5n50I.json.zip", files = "sblrD5n50I.json")
set.seed(4712)
# n = 500, independent (identity) covariance
sblrD5n500I <- simulate_data_linear_regression(n = 500, beta = rep(1, 5), digits = 3)
writeLines(jsonlite::toJSON(sblrD5n500I, pretty = TRUE, auto_unbox = TRUE), con = "sblrD5n500I.json")
zip(zipfile = "sblrD5n500I.json.zip", files = "sblrD5n500I.json")
set.seed(4713)
# n = 50, compound-symmetric covariance (off-diagonal 0.7)
sblrD5n50C07 <- simulate_data_linear_regression(n = 50, beta = rep(1, 5), beta_cov = 0.3 * diag(5) + 0.7, digits = 3)
writeLines(jsonlite::toJSON(sblrD5n50C07, pretty = TRUE, auto_unbox = TRUE), con = "sblrD5n50C07.json")
zip(zipfile = "sblrD5n50C07.json.zip", files = "sblrD5n50C07.json")
set.seed(4714)
# n = 500, compound-symmetric covariance (off-diagonal 0.7)
sblrD5n500C07 <- simulate_data_linear_regression(n = 500, beta = rep(1, 5), beta_cov = 0.3 * diag(5) + 0.7, digits = 3)
writeLines(jsonlite::toJSON(sblrD5n500C07, pretty = TRUE, auto_unbox = TRUE), con = "sblrD5n500C07.json")
zip(zipfile = "sblrD5n500C07.json.zip", files = "sblrD5n500C07.json")
|
#' Input-output table for Croatia, 2010.
#'
#' 1900 - Symmetric input-output table for imports (product x product)
#' In thousand kunas (T_NAC)
#' @source \href{https://dzs.gov.hr/}{Državni zavod za statistiku}.
#' @usage data(croatia_2010_1900)
#' @format A data frame with 13 variables.
#'\describe{
#' \item{t_rows2}{Technology codes in row names, following the Eurostat convention.}
#' \item{t_rows2_lab}{Longer labels for t_rows2}
#' \item{values}{The actual values of the table in thousand kunas}
#' \item{t_cols2}{Column labels, following the Eurostat convention with differences. CPA_ suffix added to original DZS column names.}
#' \item{t_cols2_lab}{Longer labels for t_cols2}
#' \item{iotables_col}{The standardized iotables column labelling for easier reading.}
#' \item{col_order}{The column ordering to keep the matrix legible.}
#' \item{iotables_row}{The standardized iotables row labelling for easier reading.}
#' \item{row_order}{The row ordering to keep the matrix legible.}
#' \item{unit}{Unit of measurement; unlike standard Eurostat tables, values are given in thousand national currency units (kunas).}
#' \item{geo}{ISO / Eurostat country code for Croatia}
#' \item{geo_lab}{ISO / Eurostat country name, Croatia.}
#' \item{time}{Date of the SIOT}
#' }
#' @family Croatia 2010 datasets
"croatia_2010_1900" | /R/data-croatia_2010_1900.R | permissive | cran/iotables | R | false | false | 1,356 | r | #' Input-output table for Croatia, 2010.
#'
#' 1900 - Symmetric input-output table for imports (product x product)
#' In thousand kunas (T_NAC)
#' @source \href{https://dzs.gov.hr/}{Državni zavod za statistiku}.
#' @usage data(croatia_2010_1900)
#' @format A data frame with 13 variables.
#'\describe{
#' \item{t_rows2}{Technology codes in row names, following the Eurostat convention.}
#' \item{t_rows2_lab}{Longer labels for t_rows2}
#' \item{values}{The actual values of the table in thousand kunas}
#' \item{t_cols2}{Column labels, following the Eurostat convention with differences. CPA_ suffix added to original DZS column names.}
#' \item{t_cols2_lab}{Longer labels for t_cols2}
#' \item{iotables_col}{The standardized iotables column labelling for easier reading.}
#' \item{col_order}{The column ordering to keep the matrix legible.}
#' \item{iotables_row}{The standardized iotables row labelling for easier reading.}
#' \item{row_order}{The row ordering to keep the matrix legible.}
#' \item{unit}{Unit of measurement; unlike standard Eurostat tables, values are given in thousand national currency units (kunas).}
#' \item{geo}{ISO / Eurostat country code for Croatia}
#' \item{geo_lab}{ISO / Eurostat country name, Croatia.}
#' \item{time}{Date of the SIOT}
#' }
#' @family Croatia 2010 datasets
"croatia_2010_1900" |
####################
#
# pieDivPlot
# A function to plot diversity-function types of experiments such that diversity is
# on the x-axis, function on the y, and each point is a pie showing something about
# what species are in the treatment
#
# by Jillian Dunic & Jarrett Byrnes
# Last Updated 10/16/2013
#
# Changelog
#
# 10/16/2013: Fixed it so that quotes are not needed for variable names
#
####################
#for color
library(RColorBrewer)
library(plotrix)
# The Function - give it column names and numbers (the SpCols thing is a kludge)
# as well as a color set to work from
pieDivPlot <- function(DiversityColname, FnColname, SpCols, errColname=NULL, data,
                       radius=0.05, col=brewer.pal(length(SpCols), "Set2"),
                       controlCol="grey", errLwd=1,
                       xlab=NA, ylab=NA,
                       jitterAmount=NULL, ...){
  # Diversity-function plot: x = diversity, y = function, with each point
  # drawn as a pie showing which species are present in that treatment.
  #
  # Args:
  #   DiversityColname, FnColname: unquoted column names in 'data' for the
  #     x (diversity) and y (function) axes.
  #   SpCols:       columns of 'data' holding 0/1 species-presence indicators.
  #   errColname:   optional unquoted column name with +/- error values.
  #   data:         data frame containing all of the above.
  #   radius:       pie/circle radius in x-axis units.
  #   col:          one colour per species column (note: the brewer.pal()
  #                 default requires 3-8 species columns).
  #   controlCol:   fill colour for rows with no species present (controls).
  #   errLwd:       line width for the error bars.
  #   xlab, ylab:   axis labels; default to the supplied column names.
  #   jitterAmount: if non-NULL, amount of horizontal jitter applied.
  #   ...:          further arguments passed to plot().

  # Capture the call so the unquoted column names can be resolved in 'data'.
  arguments <- as.list(match.call()[-1])
  Diversity <- data[[as.character(arguments$DiversityColname)]]
  if (!is.null(jitterAmount)) Diversity <- jitter(Diversity, amount = jitterAmount)
  Fn <- data[[as.character(arguments$FnColname)]]
  has_err <- !is.null(arguments$errColname)
  if (has_err) err <- data[[as.character(arguments$errColname)]]
  if (is.na(xlab[1])) xlab <- as.character(arguments$DiversityColname)
  if (is.na(ylab[1])) ylab <- as.character(arguments$FnColname)

  # Base scatterplot on which the pies are overlaid.
  plot(x = Diversity, y = Fn, ylab = ylab, xlab = xlab, ...)

  for (arow in seq_len(nrow(data))) {  # seq_len() is safe for 0-row data
    present <- which(data[arow, SpCols] == 1)  # species present in this row
    # Vertical error bar, drawn first so the pie sits on top of it.
    if (has_err) {
      lines(x = rep(Diversity[arow], 2),
            y = c(Fn[arow] + err[arow], Fn[arow] - err[arow]),
            lwd = errLwd)
    }
    if (length(present) > 1) {
      # Mixture: equal-slice pie coloured by the species present.
      # (Previously gated on a vector '&' and sum(sp) > 1; equivalent since
      # the selected values are all exactly 1.)
      floating.pie(xpos = Diversity[arow],
                   ypos = Fn[arow],
                   x = rep(1, length(present)),
                   col = col[present],
                   radius = radius)
    } else {
      # Monoculture (one species) or control (none): plain filled circle.
      useCol <- if (length(present) == 0) controlCol else col[present]
      draw.circle(x = Diversity[arow], y = Fn[arow], radius = radius,
                  col = useCol)
    }
  }
}
| /pieDivPlot.R | no_license | wikithink/pieDivPlots | R | false | false | 2,362 | r | ####################
#
# pieDivPlot
# A function to plot diversity-function types of experiments such that diversity is
# on the x-axis, function on the y, and each point is a pie showing something about
# what species are in the treatment
#
# by Jillian Dunic & Jarrett Byrnes
# Last Updated 10/16/2013
#
# Changelog
#
# 10/16/2013: Fixed it so that quotes are not needed for variable names
#
####################
#for color
library(RColorBrewer)
library(plotrix)
# The Function - give it column names and numbers (the SpCols thing is a kludge)
# as well as a color set to work from
# Diversity-function "pie plot": diversity on x, ecosystem function on y,
# with each treatment drawn as a pie (polyculture), a filled circle
# (monoculture) or a controlCol circle (control with no species present).
# SpCols are the columns of `data` holding the 0/1 species-presence flags.
pieDivPlot <- function(DiversityColname, FnColname, SpCols, errColname=NULL, data,
radius=0.05, col=brewer.pal(length(SpCols), "Set2"),
controlCol="grey", errLwd=1,
xlab=NA, ylab=NA,
jitterAmount=NULL, ...){
#Deal with the unquoted variable names
arguments <- as.list(match.call()[-1])
#print(arguments)
Diversity <- data[[as.character(arguments$DiversityColname)]]
if(!is.null(jitterAmount)) Diversity <- jitter(Diversity, amount=jitterAmount)
Fn <- data[[as.character(arguments$FnColname)]]
if(!is.null(arguments$errColname)) err <- data[[as.character(arguments$errColname)]]
# Default axis labels fall back to the supplied column names
if(is.na(xlab[1])) xlab <- as.character(arguments$DiversityColname)
if(is.na(ylab[1])) ylab <- as.character(arguments$FnColname)
#first, make the basic plot on which everything else will occur
plot(x=Diversity, y=Fn, ylab=ylab, xlab=xlab, ...)
# NOTE(review): 1:nrow(data) misbehaves for zero-row data (gives 1:0);
# seq_len(nrow(data)) would be safer
for(arow in 1:nrow(data)){
# presence flags (the 1 entries) for species in this row
sp <- as.numeric(data[arow, SpCols][which(data[arow,SpCols]==1)])
#add error lines if they are to be had
if(!is.null(arguments$errColname)) {
lines(x=rep(Diversity[arow],2),
y=c(Fn[arow] + err[arow], Fn[arow]-err[arow]),
lwd=errLwd)
}
#yes, this string of if/else is ugly
if(!is.na(sp[1]) & sum(sp)>1){
# two or more species: draw a pie of their composition
floating.pie(xpos=Diversity[arow],
ypos=Fn[arow],
x=sp,
col=col[which(data[arow,SpCols]==1)],
radius=radius)
}else{
# monoculture -> its species colour; control (sp empty, sp[1] is NA)
# -> controlCol
useCol <- col[which(data[arow,SpCols]==1)]
if(is.na(sp[1])){ useCol<- controlCol}
draw.circle(x = Diversity[arow], y = Fn[arow], radius =radius,
col = useCol)
}
}
}
|
#!/usr/bin/env Rscript
library(tidyverse)
library(ggsci)
# Input/output locations: model results live under results/, figures in figs/
datadir <- "results/"
sample_list <- list.dirs(datadir, full.names = TRUE)
names(sample_list) <- basename(sample_list)
# Keep only the per-sample result directories (names ending in digits)
sample_list <- sample_list[grepl("sample.*[0-9]$", sample_list)]
plot_outdir <- "figs/"
# Load data ----
# Test error
# Sample-out test error: one AUC per (sample, model), read from the
# *.test-error*.RDS files under the train/test directories
test_list <- sample_list[grepl("train.*test", sample_list)]
test_data <- list.files(path = test_list, pattern = "*.test-error.*.RDS", full.names = TRUE) %>%
set_names(sprintf("%s-%s", basename(dirname(.)), basename(.))) %>%
map(readRDS) %>%
map_chr("auc") %>%
enframe() %>%
mutate(value = as.numeric(value)) %>%
mutate(name = gsub("_test-error.*", "", name)) %>%
# names look like "<sample>-<model>"; split them, then drop the trailing
# "_<suffix>" from the sample part
separate(name, into = c("sample", "model"), sep = "-") %>%
mutate(sample = gsub("_[^_]+$", "", sample)) %>%
rename(ROC = value)
# Resubstitution error
# Same shape as the test-error table, but read from *.resubst-error*.RDS
resubst_data <- list.files(path = sample_list, pattern = "*.resubst-error.*.RDS", full.names = TRUE) %>%
set_names(sprintf("%s-%s", basename(dirname(.)), basename(.))) %>%
map(readRDS) %>%
map_chr("auc") %>%
enframe() %>%
mutate(value = as.numeric(value)) %>%
mutate(name = gsub("_resubst-error.*", "", name)) %>%
separate(name, into = c("sample", "model"), sep = "-") %>%
mutate(sample = gsub("_[^_]+$", "", sample)) %>%
rename(ROC = value)
# kcv error
# We need to treat RF and the rest of models separately
## Non-RF
# NOTE(review): "[xgbTree|logReg|NB|NN]" is a character CLASS, not an
# alternation -- it matches any single character from that set; the intended
# pattern is probably "(xgbTree|logReg|NB|NN)_model_[0-9]". Verify which
# files it actually picks up.
kcv_data_nonRF <- list.files(path = sample_list, pattern = "[xgbTree|logReg|NB|NN]_model_[0-9].*.RDS", full.names = TRUE) %>%
set_names(sprintf("%s-%s", basename(dirname(.)), basename(.))) %>%
map_dfr(readRDS, .id = "name") %>%
mutate(name = gsub("_model.*", "", name)) %>%
separate(name, into = c("sample", "model"), sep = "-") %>%
mutate(sample = gsub("_[^_]+$", "", sample))
## RF
# RF models are S4 objects; pull the "auc" row of the cross-validation
# summary, dropping its first two columns (presumably mean/sd -- confirm)
# so only the per-fold cv_*_valid values remain
kcv_data_RF <- list.files(path = sample_list, pattern = "RF_model_[0-9].*.RDS", full.names = TRUE) %>%
set_names(sprintf("%s-%s", basename(dirname(.)), basename(.))) %>%
map_dfr(function(x) {
data <- readRDS(x)
data@model$cross_validation_metrics_summary["auc",][-(1:2)]
}, .id = "name") %>%
gather(Resample, ROC, cv_1_valid:cv_10_valid) %>%
mutate(ROC = as.numeric(ROC)) %>%
# Rename the cv_*_valid fold columns to caret-style "FoldNN" labels
mutate(Resample = case_when(
Resample == "cv_1_valid" ~ "Fold01",
Resample == "cv_2_valid" ~ "Fold02",
Resample == "cv_3_valid" ~ "Fold03",
Resample == "cv_4_valid" ~ "Fold04",
Resample == "cv_5_valid" ~ "Fold05",
Resample == "cv_6_valid" ~ "Fold06",
Resample == "cv_7_valid" ~ "Fold07",
Resample == "cv_8_valid" ~ "Fold08",
Resample == "cv_9_valid" ~ "Fold09",
Resample == "cv_10_valid" ~ "Fold10")) %>%
mutate(name = gsub("_model.*", "", name)) %>%
separate(name, into = c("sample", "model"), sep = "-") %>%
mutate(sample = gsub("_[^_]+$", "", sample))
## Join data
kcv_data <- bind_rows(kcv_data_nonRF, kcv_data_RF)
## all data
# Stack resubstitution + kcv AUCs; .id records which table each row came from
all_data <- bind_rows(resubst_data, select(kcv_data, ROC, sample, model), .id = "origin")
all_data %>%
mutate(origin = ifelse(origin == "1", "resubstitution", "kcv")) -> all_data
test_data %>%
mutate(origin = "sample-out") -> test_data
all_data <- bind_rows(all_data, test_data)
# Order samples numerically (by the digits in their name) and relabel
# xgbTree as XGBoost for display
all_data %>%
mutate(idx = gsub("sample", "", sample) %>%
as.numeric()) %>%
mutate(model = ifelse(model == "xgbTree", "XGBoost", model)) %>%
arrange(idx) %>%
mutate(sample = factor(sample, levels = unique(sample))) %>%
select(-idx) -> all_data
# Numeric values ----
# Median AUC per (model, origin), printed to the console
all_data %>%
group_by(model, origin) %>%
summarize(median(ROC))
# Plots ----
## Figure 5
# Violin plot of the cross-validation AUC distribution per model
all_data %>%
filter(origin == "kcv") %>%
ggplot(aes(x = model, y = ROC, fill = model, colour = model)) +
geom_violin() +
theme_bw() +
scale_fill_npg() +
scale_colour_npg() +
ylab("kcv AUC (CVE)") +
xlab("") +
theme(legend.position = "none",
legend.title = element_blank()) -> fig5
pdf(file = file.path(plot_outdir, "fig5.pdf"), width = 4, height = 2)
print(fig5)
dev.off()
# Figure 4
# Per-sample median AUC, compared across the three error-estimation origins
all_data %>%
group_by(model, origin, sample) %>%
summarize(AUC = median(ROC)) %>%
ungroup() %>%
ggplot(aes(x = model, y = AUC, fill = origin, colour = origin)) +
geom_violin(position = "dodge", scale = 'width', lwd = 0.3) +
theme_bw() +
scale_fill_npg() +
scale_colour_npg() +
ylab("AUC") +
xlab("") +
theme(legend.position = "bottom",
legend.title = element_blank(),
legend.key.size = unit(1,"line"),
legend.text = element_text(size = 6)) -> fig4
pdf(file = file.path(plot_outdir, "fig4.pdf"), width = 4, height = 2)
print(fig4)
dev.off()
## Supplementary Figure 6
# Sample-out AUC per sample for the two tree ensembles (RF, XGBoost)
all_data %>%
filter(origin == "sample-out",
model %in% c("RF", "XGBoost")) %>%
mutate(sample = gsub("train_ALL_test_sample", "", sample)) %>%
# numeric sample index drives the x-axis ordering
mutate(idx = as.numeric(gsub("_.*", "", sample))) %>%
arrange(idx) %>%
mutate(sample = factor(sample, levels = unique(sample))) %>%
ggplot(aes(x = sample, y = ROC, fill = model)) +
geom_bar(stat = "identity", position = "dodge") +
theme_bw() +
scale_fill_manual(values = c("#3C5488FF", "#F39B7FFF")) +
ylab("sample-out AUC (SOE)") +
xlab("sample") +
theme(legend.position = "bottom",
legend.title = element_blank()) -> supp_fig6
pdf(file = file.path(plot_outdir, "supp_fig6.pdf"), width = 9, height = 3)
print(supp_fig6)
dev.off()
## Figure 6
# RF only: median AUC per sample, one bar per error-estimation origin
all_data %>%
group_by(model, sample, origin) %>%
summarize(med_ROC = median(ROC)) %>%
ungroup() %>%
mutate(sample = gsub(".*sample", "", sample)) %>%
mutate(idx = as.numeric(gsub("_.*", "", sample))) %>%
arrange(idx) %>%
mutate(sample = factor(sample, levels = unique(sample))) %>%
filter(model == "RF") %>%
ggplot(aes(x = sample, y = med_ROC, fill = origin)) +
geom_bar(stat = "identity", position = "dodge") +
ylab("Average RF AUC") +
xlab("sample") +
coord_cartesian(ylim = c(0.7, 1)) +
scale_fill_manual(values = c("#89CFF0", "#4682B4", "#008081")) +
theme_bw() +
theme(legend.position = "bottom",
legend.title = element_blank())-> fig6
pdf(file = file.path(plot_outdir, "fig6.pdf"), width = 9, height = 3)
print(fig6)
dev.off()
## Figure 1: Deamination vs non-deamination VAF
# Load data --
# Per-sample label (Y) and feature (X) tables; the doubled/tripled sub()
# calls strip one trailing "_<suffix>" per call from the file names to
# recover the sample id
datadir <- "~/data/ENA_SRP044740/tidydata/"
Y.deam.filenames <- list.files(path = datadir, pattern = "_deaminations_Y.rds", full.names = TRUE)
Y.deam <- Y.deam.filenames %>%
set_names(sub("_[^_]+$", "", sub("_[^_]+$", "", basename(.)))) %>%
map_dfr(readRDS, .id = "sample") %>%
mutate(complete_id = paste(sample, id, sep = ":")) %>%
filter(isDeam == "1")
Y.mut.filenames <- list.files(path = datadir, pattern = "_real-mutations_FFPE_Y.rds", full.names = TRUE)
Y.mut <- Y.mut.filenames %>%
set_names(sub("_[^_]+$", "", sub("_[^_]+$", "", sub("_[^_]+$", "", basename(.))))) %>%
map_dfr(readRDS, .id = "sample") %>%
mutate(complete_id = paste(sample, id, sep = ":")) %>%
filter(isSomatic == "1" | isSNP == "1")
# X rows are kept only when their sample:id key also appears in Y
X.deam.filenames <- list.files(path = datadir, pattern = "_deaminations_X.rds", full.names = TRUE)
X.deam <- X.deam.filenames %>%
set_names(sub("_[^_]+$", "", sub("_[^_]+$", "", basename(.)))) %>%
map_dfr(readRDS, .id = "sample") %>%
mutate(complete_id = paste(sample, id, sep = ":")) %>%
filter(complete_id %in% Y.deam$complete_id)
X.mut.filenames <- list.files(path = datadir, pattern = "_real-mutations_FFPE_X.rds", full.names = TRUE)
X.mut <- X.mut.filenames %>%
set_names(sub("_[^_]+$", "", sub("_[^_]+$", "", sub("_[^_]+$", "", basename(.))))) %>%
map_dfr(readRDS, .id = "sample") %>%
mutate(complete_id = paste(sample, id, sep = ":")) %>%
filter(complete_id %in% Y.mut$complete_id)
X <- bind_rows(list(deam = X.deam, mut = X.mut), .id = "source") %>% mutate_if(is.character, as.factor)
Y <- bind_rows(list(deam = Y.deam, mut = Y.mut), .id = "source")
# Tidy X and Y matrices
# Keep only C>T/G>A.
# NOTE(review): if no row of X contains an NA, nonCT.idx is empty and
# X[-integer(0), ] drops EVERY row -- guard with length(nonCT.idx) > 0
nonCT.idx <- which(is.na(X), arr.ind = TRUE)[,1]
X <- X[-nonCT.idx,] %>%
droplevels()
Y <- Y[-nonCT.idx,]
# Check if a mutation is repeated i.e. considered both as somatic mutation and deamination. If it is, remove it from deamination list
X %>%
group_by(sample) %>%
filter(duplicated(id)) %>%
pull(complete_id) -> dup.ids
X %>%
filter(!(complete_id %in% dup.ids) | source != "deam") -> X
Y %>%
filter(!(complete_id %in% dup.ids) | source != "deam") -> Y
# Plot
# Figure 1: VAF density, deaminations vs everything else
X %>%
ggplot(aes(x = allele.freq, fill = source)) +
geom_density(alpha = 0.5) +
theme_bw() +
xlab("VAF") +
ylab("") +
scale_fill_npg(name = "Dose", labels = c("deamination", "non-deamination")) +
theme(legend.position = "bottom",
legend.title = element_blank(),
legend.text = element_text(size = 8))-> fig1
pdf(file = file.path(plot_outdir, "fig1.pdf"), width = 3, height = 3)
print(fig1)
dev.off()
# Figure 3
# Restrict to low-VAF variants (allele frequency <= 0.3) before counting
AF.cutoff <- 0.3
X %>%
filter(allele.freq <= AF.cutoff) -> X
Y %>%
filter(complete_id %in% X$complete_id) -> Y
# Per-sample variant counts, coloured by the fraction that are deaminations
X %>%
group_by(sample, source) %>%
summarize(var_count = n()) %>%
ungroup() %>%
group_by(sample) %>%
mutate(all_var_count = sum(var_count),
deam_fraction = var_count/all_var_count) %>%
ungroup() %>%
filter(source == "deam") %>%
arrange(all_var_count) %>%
mutate(sample = gsub("sample", "", sample)) %>%
mutate(sample = factor(sample, levels = unique(sample))) %>%
ggplot(aes(x = sample, y = all_var_count, fill = deam_fraction)) +
geom_bar(stat = "identity") +
theme_bw() +
xlab("sample") +
ylab("Variant count") +
scale_fill_gradient(low = "#4DBBD5FF", high = "#E64B35FF") +
labs(fill = "deamination fraction") +
theme(legend.text = element_text(size = 8),
axis.text.x = element_text(angle = 90, hjust = 1)) -> fig3
pdf(file = file.path(plot_outdir, "fig3.pdf"))
print(fig3)
dev.off() | /make-figs.R | no_license | mmaitenat/ideafix-behind | R | false | false | 9,632 | r | #!/usr/bin/env Rscript
library(tidyverse)
library(ggsci)
# Paths: model results are read from results/, figures are written to figs/
datadir <- "results/"
sample_list <- list.dirs(datadir, full.names = TRUE)
names(sample_list) <- basename(sample_list)
sample_list <- sample_list[grepl("sample.*[0-9]$", sample_list)]
plot_outdir <- "figs/"
# Load data ----
# Test error
# Sample-out AUC per (sample, model), from the *.test-error*.RDS files
test_list <- sample_list[grepl("train.*test", sample_list)]
test_data <- list.files(path = test_list, pattern = "*.test-error.*.RDS", full.names = TRUE) %>%
set_names(sprintf("%s-%s", basename(dirname(.)), basename(.))) %>%
map(readRDS) %>%
map_chr("auc") %>%
enframe() %>%
mutate(value = as.numeric(value)) %>%
mutate(name = gsub("_test-error.*", "", name)) %>%
separate(name, into = c("sample", "model"), sep = "-") %>%
mutate(sample = gsub("_[^_]+$", "", sample)) %>%
rename(ROC = value)
# Resubstitution error
resubst_data <- list.files(path = sample_list, pattern = "*.resubst-error.*.RDS", full.names = TRUE) %>%
set_names(sprintf("%s-%s", basename(dirname(.)), basename(.))) %>%
map(readRDS) %>%
map_chr("auc") %>%
enframe() %>%
mutate(value = as.numeric(value)) %>%
mutate(name = gsub("_resubst-error.*", "", name)) %>%
separate(name, into = c("sample", "model"), sep = "-") %>%
mutate(sample = gsub("_[^_]+$", "", sample)) %>%
rename(ROC = value)
# kcv error
# We need to treat RF and the rest of models separately
## Non-RF
# NOTE(review): "[xgbTree|logReg|NB|NN]" is a character class, not an
# alternation; the intended pattern is probably "(xgbTree|logReg|NB|NN)_model"
kcv_data_nonRF <- list.files(path = sample_list, pattern = "[xgbTree|logReg|NB|NN]_model_[0-9].*.RDS", full.names = TRUE) %>%
set_names(sprintf("%s-%s", basename(dirname(.)), basename(.))) %>%
map_dfr(readRDS, .id = "name") %>%
mutate(name = gsub("_model.*", "", name)) %>%
separate(name, into = c("sample", "model"), sep = "-") %>%
mutate(sample = gsub("_[^_]+$", "", sample))
## RF
# Pull the per-fold "auc" row out of each RF model's CV summary
kcv_data_RF <- list.files(path = sample_list, pattern = "RF_model_[0-9].*.RDS", full.names = TRUE) %>%
set_names(sprintf("%s-%s", basename(dirname(.)), basename(.))) %>%
map_dfr(function(x) {
data <- readRDS(x)
data@model$cross_validation_metrics_summary["auc",][-(1:2)]
}, .id = "name") %>%
gather(Resample, ROC, cv_1_valid:cv_10_valid) %>%
mutate(ROC = as.numeric(ROC)) %>%
mutate(Resample = case_when(
Resample == "cv_1_valid" ~ "Fold01",
Resample == "cv_2_valid" ~ "Fold02",
Resample == "cv_3_valid" ~ "Fold03",
Resample == "cv_4_valid" ~ "Fold04",
Resample == "cv_5_valid" ~ "Fold05",
Resample == "cv_6_valid" ~ "Fold06",
Resample == "cv_7_valid" ~ "Fold07",
Resample == "cv_8_valid" ~ "Fold08",
Resample == "cv_9_valid" ~ "Fold09",
Resample == "cv_10_valid" ~ "Fold10")) %>%
mutate(name = gsub("_model.*", "", name)) %>%
separate(name, into = c("sample", "model"), sep = "-") %>%
mutate(sample = gsub("_[^_]+$", "", sample))
## Join data
kcv_data <- bind_rows(kcv_data_nonRF, kcv_data_RF)
## all data
# Combine the three error estimates into one long table, tagged by origin
all_data <- bind_rows(resubst_data, select(kcv_data, ROC, sample, model), .id = "origin")
all_data %>%
mutate(origin = ifelse(origin == "1", "resubstitution", "kcv")) -> all_data
test_data %>%
mutate(origin = "sample-out") -> test_data
all_data <- bind_rows(all_data, test_data)
all_data %>%
mutate(idx = gsub("sample", "", sample) %>%
as.numeric()) %>%
mutate(model = ifelse(model == "xgbTree", "XGBoost", model)) %>%
arrange(idx) %>%
mutate(sample = factor(sample, levels = unique(sample))) %>%
select(-idx) -> all_data
# Numeric values ----
all_data %>%
group_by(model, origin) %>%
summarize(median(ROC))
# Plots ----
## Figure 5
# Violin plot of cross-validation AUC per model
all_data %>%
filter(origin == "kcv") %>%
ggplot(aes(x = model, y = ROC, fill = model, colour = model)) +
geom_violin() +
theme_bw() +
scale_fill_npg() +
scale_colour_npg() +
ylab("kcv AUC (CVE)") +
xlab("") +
theme(legend.position = "none",
legend.title = element_blank()) -> fig5
pdf(file = file.path(plot_outdir, "fig5.pdf"), width = 4, height = 2)
print(fig5)
dev.off()
# Figure 4
# Per-sample median AUC, compared across error-estimation origins
all_data %>%
group_by(model, origin, sample) %>%
summarize(AUC = median(ROC)) %>%
ungroup() %>%
ggplot(aes(x = model, y = AUC, fill = origin, colour = origin)) +
geom_violin(position = "dodge", scale = 'width', lwd = 0.3) +
theme_bw() +
scale_fill_npg() +
scale_colour_npg() +
ylab("AUC") +
xlab("") +
theme(legend.position = "bottom",
legend.title = element_blank(),
legend.key.size = unit(1,"line"),
legend.text = element_text(size = 6)) -> fig4
pdf(file = file.path(plot_outdir, "fig4.pdf"), width = 4, height = 2)
print(fig4)
dev.off()
## Supplementary Figure 6
# Sample-out AUC per sample for RF and XGBoost
all_data %>%
filter(origin == "sample-out",
model %in% c("RF", "XGBoost")) %>%
mutate(sample = gsub("train_ALL_test_sample", "", sample)) %>%
mutate(idx = as.numeric(gsub("_.*", "", sample))) %>%
arrange(idx) %>%
mutate(sample = factor(sample, levels = unique(sample))) %>%
ggplot(aes(x = sample, y = ROC, fill = model)) +
geom_bar(stat = "identity", position = "dodge") +
theme_bw() +
scale_fill_manual(values = c("#3C5488FF", "#F39B7FFF")) +
ylab("sample-out AUC (SOE)") +
xlab("sample") +
theme(legend.position = "bottom",
legend.title = element_blank()) -> supp_fig6
pdf(file = file.path(plot_outdir, "supp_fig6.pdf"), width = 9, height = 3)
print(supp_fig6)
dev.off()
## Figure 6
# RF only: median AUC per sample, one bar per origin
all_data %>%
group_by(model, sample, origin) %>%
summarize(med_ROC = median(ROC)) %>%
ungroup() %>%
mutate(sample = gsub(".*sample", "", sample)) %>%
mutate(idx = as.numeric(gsub("_.*", "", sample))) %>%
arrange(idx) %>%
mutate(sample = factor(sample, levels = unique(sample))) %>%
filter(model == "RF") %>%
ggplot(aes(x = sample, y = med_ROC, fill = origin)) +
geom_bar(stat = "identity", position = "dodge") +
ylab("Average RF AUC") +
xlab("sample") +
coord_cartesian(ylim = c(0.7, 1)) +
scale_fill_manual(values = c("#89CFF0", "#4682B4", "#008081")) +
theme_bw() +
theme(legend.position = "bottom",
legend.title = element_blank())-> fig6
pdf(file = file.path(plot_outdir, "fig6.pdf"), width = 9, height = 3)
print(fig6)
dev.off()
## Figure 1: Deamination vs non-deamination VAF
# Load data --
# Per-sample label (Y) and feature (X) tables; repeated sub() calls strip
# trailing "_<suffix>" pieces from file names to recover the sample id
datadir <- "~/data/ENA_SRP044740/tidydata/"
Y.deam.filenames <- list.files(path = datadir, pattern = "_deaminations_Y.rds", full.names = TRUE)
Y.deam <- Y.deam.filenames %>%
set_names(sub("_[^_]+$", "", sub("_[^_]+$", "", basename(.)))) %>%
map_dfr(readRDS, .id = "sample") %>%
mutate(complete_id = paste(sample, id, sep = ":")) %>%
filter(isDeam == "1")
Y.mut.filenames <- list.files(path = datadir, pattern = "_real-mutations_FFPE_Y.rds", full.names = TRUE)
Y.mut <- Y.mut.filenames %>%
set_names(sub("_[^_]+$", "", sub("_[^_]+$", "", sub("_[^_]+$", "", basename(.))))) %>%
map_dfr(readRDS, .id = "sample") %>%
mutate(complete_id = paste(sample, id, sep = ":")) %>%
filter(isSomatic == "1" | isSNP == "1")
# Keep X rows whose sample:id key also appears in the matching Y table
X.deam.filenames <- list.files(path = datadir, pattern = "_deaminations_X.rds", full.names = TRUE)
X.deam <- X.deam.filenames %>%
set_names(sub("_[^_]+$", "", sub("_[^_]+$", "", basename(.)))) %>%
map_dfr(readRDS, .id = "sample") %>%
mutate(complete_id = paste(sample, id, sep = ":")) %>%
filter(complete_id %in% Y.deam$complete_id)
X.mut.filenames <- list.files(path = datadir, pattern = "_real-mutations_FFPE_X.rds", full.names = TRUE)
X.mut <- X.mut.filenames %>%
set_names(sub("_[^_]+$", "", sub("_[^_]+$", "", sub("_[^_]+$", "", basename(.))))) %>%
map_dfr(readRDS, .id = "sample") %>%
mutate(complete_id = paste(sample, id, sep = ":")) %>%
filter(complete_id %in% Y.mut$complete_id)
X <- bind_rows(list(deam = X.deam, mut = X.mut), .id = "source") %>% mutate_if(is.character, as.factor)
Y <- bind_rows(list(deam = Y.deam, mut = Y.mut), .id = "source")
# Tidy X and Y matrices
# Keep only C>T/G>A.
# NOTE(review): if no NA exists, nonCT.idx is empty and X[-integer(0), ]
# drops every row -- guard with length(nonCT.idx) > 0
nonCT.idx <- which(is.na(X), arr.ind = TRUE)[,1]
X <- X[-nonCT.idx,] %>%
droplevels()
Y <- Y[-nonCT.idx,]
# Check if a mutation is repeated i.e. considered both as somatic mutation and deamination. If it is, remove it from deamination list
X %>%
group_by(sample) %>%
filter(duplicated(id)) %>%
pull(complete_id) -> dup.ids
X %>%
filter(!(complete_id %in% dup.ids) | source != "deam") -> X
Y %>%
filter(!(complete_id %in% dup.ids) | source != "deam") -> Y
# Plot
# Figure 1: VAF density, deaminations vs everything else
X %>%
ggplot(aes(x = allele.freq, fill = source)) +
geom_density(alpha = 0.5) +
theme_bw() +
xlab("VAF") +
ylab("") +
scale_fill_npg(name = "Dose", labels = c("deamination", "non-deamination")) +
theme(legend.position = "bottom",
legend.title = element_blank(),
legend.text = element_text(size = 8))-> fig1
pdf(file = file.path(plot_outdir, "fig1.pdf"), width = 3, height = 3)
print(fig1)
dev.off()
# Figure 3
# Per-sample variant counts for low-VAF variants (allele.freq <= 0.3),
# coloured by the fraction that are deaminations
AF.cutoff <- 0.3
X %>%
filter(allele.freq <= AF.cutoff) -> X
Y %>%
filter(complete_id %in% X$complete_id) -> Y
X %>%
group_by(sample, source) %>%
summarize(var_count = n()) %>%
ungroup() %>%
group_by(sample) %>%
mutate(all_var_count = sum(var_count),
deam_fraction = var_count/all_var_count) %>%
ungroup() %>%
filter(source == "deam") %>%
arrange(all_var_count) %>%
mutate(sample = gsub("sample", "", sample)) %>%
mutate(sample = factor(sample, levels = unique(sample))) %>%
ggplot(aes(x = sample, y = all_var_count, fill = deam_fraction)) +
geom_bar(stat = "identity") +
theme_bw() +
xlab("sample") +
ylab("Variant count") +
scale_fill_gradient(low = "#4DBBD5FF", high = "#E64B35FF") +
labs(fill = "deamination fraction") +
theme(legend.text = element_text(size = 8),
axis.text.x = element_text(angle = 90, hjust = 1)) -> fig3
pdf(file = file.path(plot_outdir, "fig3.pdf"))
print(fig3)
dev.off()
################################################################################
#' OpencgaR Commons
#'
#' @description Internal helper used by the OpencgaR methods to query the
#' OpenCGA RESTful web services. It builds the request URL from the given
#' category/subcategory (and their IDs), retrieves the results in batches
#' and binds them into a single data frame.
#' @param object an OpencgaR connection object providing the host URL, API
#' version and session token used for every request.
#' @seealso \url{https://github.com/opencb/opencga/wiki}
#' and the RESTful API documentation
#' \url{http://bioinfo.hpc.cam.ac.uk/opencga/webservices/}
#' @export
fetchOpenCGA <- function(object=object, category=NULL, categoryId=NULL,
                         subcategory=NULL, subcategoryId=NULL, action=NULL,
                         params=NULL, httpMethod="GET",
                         num_threads=NULL, as.queryParam=NULL){
    # Connection details come from the OpencgaR object
    host <- object@host
    token <- object@sessionId
    version <- object@version
    # Normalise host/version so every URL piece ends in "/"
    if(!endsWith(x = host, suffix = "/")){
        host <- paste0(host, "/")
    }
    if (!grepl("webservices/rest", host)){
        host <- paste0(host, "webservices/rest/")
    }
    if(!endsWith(x = version, suffix = "/")){
        version <- paste0(version, "/")
    }
    # Category/subcategory become path segments; absent pieces collapse to
    # "" so the paste0() below still yields a valid URL
    if(is.null(category)){
        category <- ""
    }else{
        category <- paste0(category, "/")
    }
    if(is.null(subcategory)){
        subcategory <- ""
    }else{
        subcategory <- paste0(subcategory, "/")
    }
    # Multiple IDs are sent comma-separated in a single path segment
    if(is.null(categoryId)){
        categoryId <- ""
    }else{
        categoryId <- paste0(categoryId, collapse = ",")
        categoryId <- paste0(categoryId, "/")
    }
    if(is.null(subcategoryId)){
        subcategoryId <- ""
    }else{
        subcategoryId <- paste0(subcategoryId, collapse = ",")
        subcategoryId <- paste0(subcategoryId, "/")
    }
    # Total number of records the caller wants (default cap: 100000)
    if(is.null(params)){
        limit <- 100000
    }else{
        if(is.null(params$limit)){
            limit <- 100000
        }else{
            limit <- params$limit
        }
    }
    # Page through the results, at most batch_size records per request,
    # until `limit` is reached or the server returns a short (final) page
    i <- 1
    batch_size <- min(c(1000, limit))
    skip <- 0
    num_results <- batch_size
    container <- list()
    count <- 0
    if (is.null(params)){
        params <- list()
    }
    while((unlist(num_results) == batch_size) && count <= limit){
        pathUrl <- paste0(host, version, category, categoryId, subcategory,
                          subcategoryId, action)
        # Never request more records than are still owed to the caller
        batch_size <- min(c(batch_size, limit-count))
        if(batch_size == 0){
            break
        }
        params$limit <- batch_size
        response <- callREST(pathUrl=pathUrl, params=params,
                             httpMethod=httpMethod, skip=skip, token=token,
                             as.queryParam=as.queryParam)
        skip <- skip + batch_size
        res_list <- parseResponse(resp=response$resp, content=response$content)
        num_results <- res_list$num_results
        result <- res_list$result
        container[[i]] <- result
        i <- i + 1
        count <- count + unlist(num_results)
        print(paste("Number of retrieved documents:", count))
    }
    # Guard: nothing was fetched (e.g. limit <= 0). The original code
    # indexed container[[1]] unconditionally and errored here.
    if (length(container) == 0){
        return(data.frame())
    }
    if(inherits(container[[1]], "data.frame")){
        ds <- rbind_pages(container)
    }else{
        # NOTE(review): non-data.frame results only keep the first page --
        # confirm whether later pages should be concatenated too
        ds <- as.data.frame(container[[1]], stringsAsFactors=FALSE, names="result")
    }
    return(ds)
}
## all working functions
## Format query params
## Format a named list of parameters as an HTTP query string ("k1=v1&k2=v2").
## Returns "" for an empty list. Vectorised paste0() replaces the original
## grow-a-vector loop (which was O(n^2) in the number of parameters).
get_qparams <- function(params){
    paramsStr <- paste(paste0(names(params), "=", params), collapse = "&")
    return(paramsStr)
}
## Make call to server
# Perform one HTTP request against an OpenCGA REST endpoint.
#
#   pathUrl       endpoint URL without the query string
#   params        named list of parameters (may be NULL)
#   httpMethod    "GET" or "POST"
#   skip          pagination offset, sent as the "?skip=" query parameter
#   token         session token, sent as a Bearer Authorization header
#   as.queryParam names of params that must travel in the query string
#                 instead of the POST body ("study" is always included)
# Returns list(resp = httr response object, content = response body text).
callREST <- function(pathUrl, params, httpMethod, skip, token, as.queryParam){
    content <- list()
    session <- paste("Bearer", token)
    skip <- paste0("?skip=", skip)
    if (httpMethod == "GET"){
        # GET: all parameters go in the query string
        if (!is.null(params)){
            params <- get_qparams(params)
            fullUrl <- paste0(pathUrl, skip, "&", params)
        }else{
            fullUrl <- paste0(pathUrl, skip)
        }
        print(paste("URL:",fullUrl))
        resp <- httr::GET(fullUrl, add_headers(Accept="application/json", Authorization=session), timeout(30))
    }else if(httpMethod == "POST"){
        # POST: selected names go in the query string, the rest in the body
        if (!is.null(as.queryParam)){
            if(is.character(as.queryParam)){
                as.queryParam <- unique(c(as.queryParam, "study"))
            }
        }
        if (!is.null(params)){
            # extract study (and other as.queryParam names) as query params
            if (any(as.queryParam %in% names(params))){
                queryParams <- get_qparams(params[which(names(params) %in% as.queryParam)])
                bodyParams <- params[-which(names(params) %in% as.queryParam)]
            }else{
                bodyParams <- params
                queryParams <- ""
            }
        }
        # `||` (not `|`) so queryParams is never evaluated when params is
        # NULL -- with `|` this raised "object 'queryParams' not found"
        if (is.null(params) || queryParams == ""){
            fullUrl <- paste0(pathUrl, skip)
        }else{
            fullUrl <- paste0(pathUrl, skip, "&", queryParams)
        }
        print(paste("URL:",fullUrl))
        if (exists("bodyParams")){
            resp <- httr::POST(fullUrl, body = bodyParams,
                               add_headers(`Authorization` = session), encode = "json")
        }else{
            resp <- httr::POST(fullUrl, add_headers(`Authorization` = session),
                               encode = "json")
        }
    }
    content <- httr::content(resp, as="text", encoding = "utf-8")
    return(list(resp=resp, content=content))
}
## A function to parse the json data into R dataframes
# Parse one or more JSON REST responses into R structures.
#
#   resp     httr response object (only its HTTP status code is used)
#   content  list/vector of JSON payload strings
# Prints a status summary, stops when the server reports an error, and
# returns list(result = parsed results, num_results = per-response counts).
parseResponse <- function(resp, content){
    js <- lapply(content, function(x) jsonlite::fromJSON(x))
    if (resp$status_code == 200){
        if (js[[1]]$warning == ""){
            print("Query successful!")
        }else{
            print("Query successful with warnings.")
            print(paste("WARNING:", js[[1]]$warning))
        }
    }else{
        print("Query unsuccessful.")
        print(paste("Category:", http_status(resp)$category))
        print(paste("Reason:", http_status(resp)$reason))
        if (js[[1]]$warning != ""){
            print(paste("WARNING:", js[[1]]$warning))
            # (a bare print() used to follow here; print() without its `x`
            # argument is a runtime error in R, so it has been removed)
        }
        if (js[[1]]$error != ""){
            stop(paste("ERROR:", js[[1]]$error))
        }
    }
    ares <- lapply(js, function(x)x$response$result)
    nums <- lapply(js, function(x)x$response$numResults)
    if (inherits(ares[[1]][[1]], "data.frame")){
        ds <- lapply(ares, function(x)rbind_pages(x))
        ### Important to get correct vertical binding of dataframes
        names(ds) <- NULL
        ds <- jsonlite::rbind_pages(ds)
    }else{
        ds <- ares
        names(ds) <- NULL
    }
    return(list(result=ds, num_results=nums))
}
###############################################
| /opencga-client/src/main/R/R/commons.R | permissive | alemarcha/opencga | R | false | false | 7,413 | r | ################################################################################
#' OpencgaR Commons
#'
#' @description Internal helper used by the OpencgaR methods to query the
#' OpenCGA RESTful web services. It builds the request URL from the given
#' category/subcategory (and their IDs), retrieves the results in batches
#' and binds them into a single data frame.
#' @param object an OpencgaR connection object providing the host URL, API
#' version and session token used for every request.
#' @seealso \url{https://github.com/opencb/opencga/wiki}
#' and the RESTful API documentation
#' \url{http://bioinfo.hpc.cam.ac.uk/opencga/webservices/}
#' @export
# Query an OpenCGA REST endpoint, paging through the results and binding
# them into a single data frame. Connection details (host, version, session
# token) are read from `object`; category/subcategory and their IDs become
# URL path segments; params$limit caps the total number of records.
fetchOpenCGA <- function(object=object, category=NULL, categoryId=NULL,
subcategory=NULL, subcategoryId=NULL, action=NULL,
params=NULL, httpMethod="GET",
num_threads=NULL, as.queryParam=NULL){
# Get connection info
host <- object@host
token <- object@sessionId
version <- object@version
# batch_size <- batch_size
# Normalise host/version so each URL piece ends in "/"
if(!endsWith(x = host, suffix = "/")){
host <- paste0(host, "/")
}
if (!grepl("webservices/rest", host)){
host <- paste0(host, "webservices/rest/")
}
if(!endsWith(x = version, suffix = "/")){
version <- paste0(version, "/")
}
# Format category and subcategory
if(is.null(category)){
category <- ""
}else{
category <- paste0(category, "/", sep="")
}
if(is.null(subcategory)){
subcategory <- ""
}else{
subcategory <- paste0(subcategory, "/")
}
# Format IDs
# Multiple IDs are sent comma-separated in one path segment
if(is.null(categoryId)){
categoryId <- ""
}else{
categoryId <- paste0(categoryId, collapse = ",")
categoryId <- paste0(categoryId, "/")
}
if(is.null(subcategoryId)){
subcategoryId <- ""
}else{
subcategoryId <- paste0(subcategoryId, collapse = ",")
subcategoryId <- paste0(subcategoryId, "/")
}
# Extract limit from params
if(is.null(params)){
limit <- 100000
}else{
if(is.null(params$limit)){
limit <- 100000
}else{
limit <- params$limit
}
}
# Call server
# Page through results: at most batch_size records per request, until
# `limit` records are fetched or the server returns a short (final) page
i <- 1
batch_size <- min(c(1000, limit))
skip <- 0
num_results <- batch_size
container <- list()
count <- 0
if (is.null(params)){
params <- list()
}
while((unlist(num_results) == batch_size) && count <= limit){
pathUrl <- paste0(host, version, category, categoryId, subcategory,
subcategoryId, action)
## send batch size as limit to callrest
batch_size <- min(c(batch_size, limit-count))
if(batch_size == 0){
break()
}
params$limit <- batch_size
response <- callREST(pathUrl=pathUrl, params=params,
httpMethod=httpMethod, skip=skip, token=token,
as.queryParam=as.queryParam)
skip <- skip+batch_size
res_list <- parseResponse(resp=response$resp, content=response$content)
num_results <- res_list$num_results
result <- res_list$result
container[[i]] <- result
i=i+1
count <- count + unlist(num_results)
print(paste("Number of retrieved documents:", count))
}
# NOTE(review): if no page was fetched (e.g. limit <= 0), container is
# empty and container[[1]] errors -- consider guarding with length()
if(class(container[[1]])=="data.frame"){
ds <- rbind_pages(container)
}else{
# Only the first page is kept in the non-data.frame case
ds <- as.data.frame(container[[1]], stringsAsFactors=FALSE, names="result")
}
return(ds)
}
## all working functions
## Format query params
## Collapse a named list of parameters into an HTTP query string of the
## form "k1=v1&k2=v2". An empty list yields "".
get_qparams <- function(params){
    pieces <- vapply(seq_along(params),
                     function(idx) paste0(names(params)[idx], "=", params[idx]),
                     character(1))
    paste(pieces, collapse = "&")
}
## Make call to server
# Perform one HTTP request against an OpenCGA REST endpoint and return
# list(resp = httr response object, content = response body text).
# `skip` is the pagination offset; `token` is sent as a Bearer header;
# `as.queryParam` names POST parameters that must travel in the query
# string instead of the JSON body.
callREST <- function(pathUrl, params, httpMethod, skip, token, as.queryParam){
content <- list()
session <- paste("Bearer", token)
skip=paste0("?skip=", skip)
# Make GET call
if (httpMethod == "GET"){
if (!is.null(params)){
params <- get_qparams(params)
fullUrl <- paste0(pathUrl, skip, "&", params)
}else{
fullUrl <- paste0(pathUrl, skip)
}
print(paste("URL:",fullUrl))
resp <- httr::GET(fullUrl, add_headers(Accept="application/json", Authorization=session), timeout(30))
}else if(httpMethod == "POST"){
# Make POST call
# "study" is always forced into the query-string set
if (!is.null(as.queryParam)){
if(class(as.queryParam) == "character"){
as.queryParam <- unique(c(as.queryParam, "study"))
}
}
if (!is.null(params)){
# extract study as query param
if (any(as.queryParam %in% names(params))){
queryParams <- get_qparams(params[which(names(params) %in% as.queryParam)])
bodyParams <- params[-which(names(params) %in% as.queryParam)]
}else{
bodyParams <- params
queryParams <- ""
}
}
# NOTE(review): `|` evaluates both sides, so when params is NULL
# queryParams was never assigned and this line errors with
# "object 'queryParams' not found" -- should be the short-circuit `||`
if (is.null(params) | queryParams == ""){
fullUrl <- paste0(pathUrl, skip)
}else{
fullUrl <- paste0(pathUrl, skip, "&", queryParams)
}
print(paste("URL:",fullUrl))
if (exists("bodyParams")){
resp <- httr::POST(fullUrl, body = bodyParams,
add_headers(`Authorization` = session), encode = "json")
}else{
resp <- httr::POST(fullUrl, add_headers(`Authorization` = session),
encode = "json")
}
}
content <- httr::content(resp, as="text", encoding = "utf-8")
return(list(resp=resp, content=content))
}
## A function to parse the json data into R dataframes
## A function to parse the json data into R dataframes
# Prints a status summary based on resp$status_code, stops when the server
# reports an error, and returns list(result, num_results).
parseResponse <- function(resp, content){
js <- lapply(content, function(x) jsonlite::fromJSON(x))
if (resp$status_code == 200){
if (js[[1]]$warning == ""){
print("Query successful!")
}else{
print("Query successful with warnings.")
print(paste("WARNING:", js[[1]]$warning))
}
}else{
print("Query unsuccessful.")
print(paste("Category:", http_status(resp)$category))
print(paste("Reason:", http_status(resp)$reason))
if (js[[1]]$warning != ""){
print(paste("WARNING:", js[[1]]$warning))
# NOTE(review): bare print() has no default for its first argument and
# errors at runtime -- probably meant print("") or should be removed
print()
}
if (js[[1]]$error != ""){
stop(paste("ERROR:", js[[1]]$error))
}
}
ares <- lapply(js, function(x)x$response$result)
nums <- lapply(js, function(x)x$response$numResults)
# Data-frame results from every response are stacked vertically
if (class(ares[[1]][[1]])=="data.frame"){
ds <- lapply(ares, function(x)rbind_pages(x))
# if(requireNamespace("pbapply", quietly = TRUE)){
# ds <- pbapply::pblapply(ares, function(x)rbind_pages(x))
# }
### Important to get correct vertical binding of dataframes
names(ds) <- NULL
ds <- jsonlite::rbind_pages(ds)
}else{
ds <- ares
names(ds) <- NULL
}
return(list(result=ds, num_results=nums))
}
###############################################
|
library(data.table)
library(mltools)
# Shared fixture: the packaged iris data set (snake_case column names such
# as sepal_width, as used below)
load('../../data/iris.rda')
# NOTE(review): this description duplicates the next test's; consider
# renaming one so failures are distinguishable
test_that("data frame with correct number of features is returned", {
res <- encode_and_bind(iris, 'species')
expect_equal(dim(res)[2], 6)
})
test_that("data frame with correct number of features is returned", {
res <- remove_features(iris, 'species')
expect_equal(dim(res)[2], 4)
})
# Applies the expression "x*5" to two source columns, writing two new
# columns whose sums are pinned here
test_that("newly created columns return correct sum", {
res <- apply_function_to_column(iris, "sepal_width, sepal_length", "new_col1, new_col2", "x*5")
expect_equal(sum(res$new_col1), 2290.5)
expect_equal(sum(res$new_col2), 4382.5)
})
test_that("closest matching string is returned", {
res <- get_closest_string(c("hey there", "we are here", "howdy doody"), "doody")
expect_true(identical(res, 'howdy doody'))
})
| /tests/testthat/test-datapeek.R | no_license | briantacderan/r-pkg-test | R | false | false | 805 | r | library(data.table)
library(mltools)
# Test fixture: the iris data set bundled with the package under test.
# The functions exercised below come from the datapeek package.
load('../../data/iris.rda')
# One-hot encoding `species` (3 levels) on the 5-column iris data should
# yield 6 columns in total.
test_that("data frame with correct number of features is returned", {
res <- encode_and_bind(iris, 'species')
expect_equal(dim(res)[2], 6)
})
# Dropping `species` from the 5-column data should leave 4 columns.
test_that("data frame with correct number of features is returned", {
res <- remove_features(iris, 'species')
expect_equal(dim(res)[2], 4)
})
# Derived columns are x*5 of the source columns; sums pin the exact values.
test_that("newly created columns return correct sum", {
res <- apply_function_to_column(iris, "sepal_width, sepal_length", "new_col1, new_col2", "x*5")
expect_equal(sum(res$new_col1), 2290.5)
expect_equal(sum(res$new_col2), 4382.5)
})
# Fuzzy matching should pick the candidate closest to "doody".
test_that("closest matching string is returned", {
res <- get_closest_string(c("hey there", "we are here", "howdy doody"), "doody")
expect_true(identical(res, 'howdy doody'))
})
|
#' Find maximum tension for wire files in a folder
#'
#' Scans `data_folder` for wire-tension files (names starting with a cruise
#' code such as "C123" or "S123"), locates the interval where the wire was
#' loaded (tension above `thresh`), and records the maximum tension per cast
#' in `<data_folder>/processed/<cruiseID>_maxtension.csv`. Optionally saves a
#' diagnostic plot per cast.
#'
#' @param data_folder the folder where the tension files are kept
#' @param cruiseID cruise identifier; if NULL it is parsed from the first
#'   matching file name
#' @param width divisor controlling how much of each end of the in-water
#'   interval is trimmed before taking the maximum
#' @param thresh tension threshold used to locate the in-water interval
#' @param plotFL logical; save a diagnostic plot for each cast?
#' @param check logical; skip casts already present in the output CSV?
#'
#' @return Called for its side effects (CSV and plot files under
#'   `<data_folder>/processed`).
#' @export
#'
#' @examples
#' \dontrun{
#' max_tension("data/tension")
#' }
max_tension <- function(data_folder, cruiseID = NULL,
                        width = 8, thresh = 100,
                        plotFL = TRUE, check = TRUE) {
  # Create list of files to cycle through
  files <- list.files(data_folder, "^[CS]{1}[0-9]{3}")
  if (length(files) == 0)
    stop("no files that meet criteria in folder")
  if (is.null(cruiseID))
    cruiseID <- stringr::str_extract(files[1], "^[CS][0-9]{3}")
  # create output data folder
  process_out <- file.path(data_folder, "processed")
  if (!file.exists(process_out))
    dir.create(process_out)
  # create output plotting folder
  if (plotFL) {
    plot_out <- file.path(process_out, "plots")
    if (!file.exists(plot_out))
      dir.create(plot_out)
  }
  # name output file
  file_out <- paste0(cruiseID, "_maxtension.csv")
  # if requested, read the previous log and drop casts already processed
  # (local renamed from `max_tension`, which shadowed this function)
  if (check && file.exists(file.path(process_out, file_out))) {
    tension_log <- readr::read_csv(file.path(process_out, file_out),
                                   col_types = "cd")
    files <- setdiff(files, tension_log$cast)
  }
  if (length(files) > 0) {
    # cycle through data files, find max tension and plot
    new_log <- tibble::tibble(cast = files, max_tension = NA)
    for (i in seq_along(files)) {
      # read and process data; column X3 holds the wire tension
      file <- file.path(data_folder, files[i])
      data <- readr::read_csv(file, col_names = FALSE,
                              col_types = readr::cols(.default = readr::col_double()),
                              skip = 10)
      data$X3 <- oce::despike(data$X3)
      # smooth the record, then find the span where tension exceeds thresh
      datasm <- runmed(data$X3, 121, endrule = "constant")
      tr <- range(which(datasm > thresh), na.rm = TRUE)
      # trim the edges of the span so deployment/recovery transients are excluded
      tr <- round(tr + c(1, -1) * diff(tr) / width)
      new_log$max_tension[i] <- max(data$X3[tr[1]:tr[2]], na.rm = TRUE)
      max_t_i <- tr[1] - 1 + which.max(data$X3[tr[1]:tr[2]])
      # plot data if required
      if (plotFL) {
        p <- ggplot2::qplot(1:nrow(data), data$X3) +
          ggplot2::geom_point(ggplot2::aes(max_t_i, new_log$max_tension[i]),
                              color = "red", size = 5) +
          ggplot2::ylab("Wire Tension") +
          ggplot2::xlab("Index")
        # pass the plot explicitly: inside a function the plot is never
        # printed, so ggsave()'s default last_plot() would be stale or NULL
        ggplot2::ggsave(file.path(plot_out, paste0(files[i], ".png")), plot = p)
      }
    }
    # append to the previously-read log when it exists, otherwise start fresh
    if (check && file.exists(file.path(process_out, file_out))) {
      tension_log <- rbind(tension_log, new_log)
    } else {
      tension_log <- new_log
    }
    readr::write_csv(tension_log, file.path(process_out, file_out))
  }
}
| /R/tension.R | no_license | benharden27/sea | R | false | false | 2,590 | r | #' Find maximum tension for wire files in a folder
#'
#' @param data_folder the folder where the tension files are kept
#'
#' @return
#' @export
#'
#' @examples
# Finds the maximum wire tension for every cast file in data_folder and logs
# the results to <data_folder>/processed/<cruiseID>_maxtension.csv.
max_tension <- function(data_folder, cruiseID = NULL,
                        width = 8, thresh = 100,
                        plotFL = TRUE, check = TRUE) {
  # Create list of files to cycle through (names start with e.g. "C123"/"S123")
  files <- list.files(data_folder,"^[CS]{1}[0-9]{3}")
  if(length(files)==0)
    stop("no files that meet criteria in folder")
  if(is.null(cruiseID))
    cruiseID <- stringr::str_extract(files[1],"^[CS][0-9]{3}")
  # create output data folder
  process_out <- file.path(data_folder,"processed")
  if (!file.exists(process_out))
    dir.create(process_out)
  # create output plotting folder
  if(plotFL) {
    plot_out <- file.path(process_out,"plots")
    if (!file.exists(plot_out))
      dir.create(plot_out)
  }
  # name output file
  file_out <- paste0(cruiseID, "_maxtension.csv")
  # check if file has already been processed
  # NOTE(review): this local `max_tension` shadows the function name
  if (check == TRUE & file.exists(file.path(process_out,file_out))) {
    max_tension <- readr::read_csv(file.path(process_out,file_out),
                                   col_types = "cd")
    files <- setdiff(files,max_tension$cast)
  }
  if(length(files)>0) {
    # cycle through data files, find max tension and plot
    max_tension2 <- tibble::tibble(cast = files, max_tension = NA)
    for (i in 1:length(files)) {
      # read and process data; column X3 holds the wire tension
      file <- file.path(data_folder,files[i])
      data <- readr::read_csv(file, col_names = F, col_types = readr::cols(.default = readr::col_double()), skip = 10)
      data$X3 <- oce::despike(data$X3)
      # smooth, then locate the span where tension exceeds thresh; trim the
      # span edges by 1/width to drop deployment/recovery transients
      datasm <- runmed(data$X3, 121, endrule = "constant")
      tr <- range(which(datasm > thresh), na.rm = T)
      tr <- round(tr + c(1,-1)*diff(tr)/width)
      max_tension2$max_tension[i] = max(data$X3[tr[1]:tr[2]],na.rm = T)
      max_t_i <- tr[1] - 1 + which.max(data$X3[tr[1]:tr[2]])
      # plot data if required
      # NOTE(review): the plot object is never printed inside this function,
      # so ggsave()'s default last_plot() may be stale or NULL — confirm
      if(plotFL) {
        ggplot2::qplot(1:nrow(data),data$X3) +
          ggplot2::geom_point(ggplot2::aes(max_t_i, max_tension2$max_tension[i]),
                              color = "red", size = 5) +
          ggplot2::ylab("Wire Tension") +
          ggplot2::xlab("Index")
        ggplot2::ggsave(file.path(plot_out, paste0(files[i], ".png")))
      }
    }
    # append to the previously-read log when it exists, otherwise start fresh
    if(check == TRUE & file.exists(file.path(process_out,file_out))) {
      max_tension <- rbind(max_tension,max_tension2)
    } else {
      max_tension <- max_tension2
    }
    readr::write_csv(max_tension, file.path(process_out,file_out))
  }
}
|
# Proposed list of checks all "vetted" models should pass.
# When adding a new "vetted model", copy paste the below list and
# add appropriate section of unit tests to cover the below.
# 1. Runs as expected with standard use
# - without errors, warnings, messages
# - numbers in table are correct
# - labels are correct
# 2. If applicable, runs as expected with logit and log link
# - without errors, warnings, messages
# - numbers in table are correct
# 3. Interaction terms are correctly printed in output table
# - without errors, warnings, messages
# - numbers in table are correct
# - interaction labels are correct
# 4. Other gtsummary functions work with model: add_global_p(), combine_terms(), add_nevent()
# - without errors, warnings, messages
# - numbers in table are correct
# 5. tbl_uvregression() works as expected
# - without errors, warnings, messages
# - works with add_global_p(), add_nevent(), add_q()
skip_on_cran()
# vetted models checks take a long time--only perform on CI checks
skip_if(!isTRUE(as.logical(Sys.getenv("CI"))))
library(dplyr)
library(survival)
# clogit() ---------------------------------------------------------------------
test_that("vetted_models clogit()", {
  # building models to check
  mod_clogit_lin <- clogit(response ~ age + trt + grade + strata(stage),
                           data = trial)
  mod_clogit_int <- clogit(response ~ age + trt * grade + strata(stage),
                           data = trial)
  # 1. Runs as expected with standard use
  # - without errors, warnings, messages
  expect_error(
    tbl_clogit_lin <- tbl_regression(mod_clogit_lin), NA
  )
  # NOTE(review): this evaluates the already-built table object, so it can
  # only catch warnings raised on evaluation, not on construction
  expect_warning(
    tbl_clogit_lin, NA
  )
  expect_error(
    tbl_clogit_int <- tbl_regression(mod_clogit_int), NA
  )
  expect_warning(
    tbl_clogit_int, NA
  )
  # - numbers in table are correct (table coefficients match the model fit)
  expect_equal(
    coef(mod_clogit_lin),
    coefs_in_gt(tbl_clogit_lin),
    ignore_attr = TRUE
  )
  expect_equal(
    coef(mod_clogit_int),
    coefs_in_gt(tbl_clogit_int),
    ignore_attr = TRUE
  )
  # - labels are correct
  expect_equal(
    tbl_clogit_lin$table_body %>%
      filter(row_type == "label") %>%
      pull(label),
    c("Age", "Chemotherapy Treatment", "Grade"),
    ignore_attr = TRUE
  )
  expect_equal(
    tbl_clogit_int$table_body %>%
      filter(row_type == "label") %>%
      pull(label),
    c("Age", "Chemotherapy Treatment", "Grade", "Chemotherapy Treatment * Grade"),
    ignore_attr = TRUE
  )
  # 2. If applicable, runs as expected with logit and log link
  expect_equal(
    coef(mod_clogit_lin) %>% exp(),
    coefs_in_gt(mod_clogit_lin %>% tbl_regression(exponentiate = TRUE)),
    ignore_attr = TRUE
  )
  # 3. Interaction terms are correctly printed in output table
  # - interaction labels are correct
  expect_equal(
    tbl_clogit_int$table_body %>%
      filter(var_type == "interaction") %>%
      pull(label),
    c("Chemotherapy Treatment * Grade", "Drug B * II", "Drug B * III"),
    ignore_attr = TRUE
  )
  # 4. Other gtsummary functions work with model: add_global_p(), combine_terms(), add_nevent()
  # - without errors, warnings, messages
  # clogit models fail in car::Anova on old versions
  if (r_version >= "3.5.0") {
    expect_error(
      tbl_clogit_lin2 <- tbl_clogit_lin %>% add_global_p(include = everything(), test = "Wald"), NA
    )
    expect_error(
      tbl_clogit_int2 <- tbl_clogit_int %>% add_global_p(include = everything(), test = "Wald"), NA
    )
    expect_warning(
      tbl_clogit_lin2, NA
    )
    expect_warning(
      tbl_clogit_int2, NA
    )
    expect_error(
      tbl_clogit_lin3 <- tbl_clogit_lin %>% combine_terms(. ~ . - trt, test = "Wald"), NA
    )
    expect_warning(
      tbl_clogit_lin3, NA
    )
  }
  expect_error(
    tbl_clogit_lin4 <- tbl_clogit_lin %>% add_nevent(), NA
  )
  # - numbers in table are correct
  # (displayed p-values must match a type-III Wald test from car::Anova)
  # clogit models fail in car::Anova on old versions
  if (r_version >= "3.5.0") {
    expect_equal(
      tbl_clogit_lin2$table_body %>%
        pull(p.value) %>%
        na.omit() %>%
        as.vector(),
      car::Anova(mod_clogit_lin, type = "III", test = "Wald") %>%
        as.data.frame() %>%
        pull(`Pr(>Chisq)`)
    )
    expect_equal(
      tbl_clogit_int2$table_body %>%
        pull(p.value) %>%
        na.omit() %>%
        as.vector(),
      car::Anova(mod_clogit_int, type = "III", test = "Wald") %>%
        as.data.frame() %>%
        pull(`Pr(>Chisq)`)
    )
    # anova() and car::Anova() do not match
    # expect_equal(
    #   tbl_clogit_lin3$table_body %>% filter(variable == "trt") %>% pull(p.value),
    #   car::Anova(mod_clogit_lin, type = "III", test = "Wald") %>%
    #     as.data.frame() %>%
    #     tibble::rownames_to_column() %>%
    #     filter(rowname == "trt") %>%
    #     pull(`Pr(>Chisq)`)
    # )
  }
  # 5. tbl_uvregression() works as expected
  # - without errors, warnings, messages
  # - works with add_global_p(), add_nevent()
  expect_error(
    trial %>%
      tbl_uvregression(
        y = response,
        method = clogit,
        formula = "{y} ~ {x} + strata(stage)"
      ) %>%
      add_global_p(test = "Wald") %>%
      add_q(),
    NA
  )
  expect_warning(
    trial %>%
      tbl_uvregression(
        y = response,
        method = clogit,
        formula = "{y} ~ {x} + strata(stage)"
      ) %>%
      add_global_p(test = "Wald") %>%
      add_q(),
    NA
  )
})
| /tests/testthat/test-vetted_models-clogit.R | permissive | DrShaneBurke/gtsummary | R | false | false | 5,506 | r | # Proposed list of checks all "vetted" models should pass.
# When adding a new "vetted model", copy paste the below list and
# add appropriate section of unit tests to cover the below.
# 1. Runs as expected with standard use
# - without errors, warnings, messages
# - numbers in table are correct
# - labels are correct
# 2. If applicable, runs as expected with logit and log link
# - without errors, warnings, messages
# - numbers in table are correct
# 3. Interaction terms are correctly printed in output table
# - without errors, warnings, messages
# - numbers in table are correct
# - interaction labels are correct
# 4. Other gtsummary functions work with model: add_global_p(), combine_terms(), add_nevent()
# - without errors, warnings, messages
# - numbers in table are correct
# 5. tbl_uvregression() works as expected
# - without errors, warnings, messages
# - works with add_global_p(), add_nevent(), add_q()
skip_on_cran()
# vetted models checks take a long time--only perform on CI checks
skip_if(!isTRUE(as.logical(Sys.getenv("CI"))))
library(dplyr)
library(survival)
# clogit() ---------------------------------------------------------------------
test_that("vetted_models clogit()", {
  # building models to check
  mod_clogit_lin <- clogit(response ~ age + trt + grade + strata(stage),
                           data = trial)
  mod_clogit_int <- clogit(response ~ age + trt * grade + strata(stage),
                           data = trial)
  # 1. Runs as expected with standard use
  # - without errors, warnings, messages
  expect_error(
    tbl_clogit_lin <- tbl_regression(mod_clogit_lin), NA
  )
  # NOTE(review): this evaluates the already-built table object, so it can
  # only catch warnings raised on evaluation, not on construction
  expect_warning(
    tbl_clogit_lin, NA
  )
  expect_error(
    tbl_clogit_int <- tbl_regression(mod_clogit_int), NA
  )
  expect_warning(
    tbl_clogit_int, NA
  )
  # - numbers in table are correct (table coefficients match the model fit)
  expect_equal(
    coef(mod_clogit_lin),
    coefs_in_gt(tbl_clogit_lin),
    ignore_attr = TRUE
  )
  expect_equal(
    coef(mod_clogit_int),
    coefs_in_gt(tbl_clogit_int),
    ignore_attr = TRUE
  )
  # - labels are correct
  expect_equal(
    tbl_clogit_lin$table_body %>%
      filter(row_type == "label") %>%
      pull(label),
    c("Age", "Chemotherapy Treatment", "Grade"),
    ignore_attr = TRUE
  )
  expect_equal(
    tbl_clogit_int$table_body %>%
      filter(row_type == "label") %>%
      pull(label),
    c("Age", "Chemotherapy Treatment", "Grade", "Chemotherapy Treatment * Grade"),
    ignore_attr = TRUE
  )
  # 2. If applicable, runs as expected with logit and log link
  expect_equal(
    coef(mod_clogit_lin) %>% exp(),
    coefs_in_gt(mod_clogit_lin %>% tbl_regression(exponentiate = TRUE)),
    ignore_attr = TRUE
  )
  # 3. Interaction terms are correctly printed in output table
  # - interaction labels are correct
  expect_equal(
    tbl_clogit_int$table_body %>%
      filter(var_type == "interaction") %>%
      pull(label),
    c("Chemotherapy Treatment * Grade", "Drug B * II", "Drug B * III"),
    ignore_attr = TRUE
  )
  # 4. Other gtsummary functions work with model: add_global_p(), combine_terms(), add_nevent()
  # - without errors, warnings, messages
  # clogit models fail in car::Anova on old versions
  if (r_version >= "3.5.0") {
    expect_error(
      tbl_clogit_lin2 <- tbl_clogit_lin %>% add_global_p(include = everything(), test = "Wald"), NA
    )
    expect_error(
      tbl_clogit_int2 <- tbl_clogit_int %>% add_global_p(include = everything(), test = "Wald"), NA
    )
    expect_warning(
      tbl_clogit_lin2, NA
    )
    expect_warning(
      tbl_clogit_int2, NA
    )
    expect_error(
      tbl_clogit_lin3 <- tbl_clogit_lin %>% combine_terms(. ~ . - trt, test = "Wald"), NA
    )
    expect_warning(
      tbl_clogit_lin3, NA
    )
  }
  expect_error(
    tbl_clogit_lin4 <- tbl_clogit_lin %>% add_nevent(), NA
  )
  # - numbers in table are correct
  # (displayed p-values must match a type-III Wald test from car::Anova)
  # clogit models fail in car::Anova on old versions
  if (r_version >= "3.5.0") {
    expect_equal(
      tbl_clogit_lin2$table_body %>%
        pull(p.value) %>%
        na.omit() %>%
        as.vector(),
      car::Anova(mod_clogit_lin, type = "III", test = "Wald") %>%
        as.data.frame() %>%
        pull(`Pr(>Chisq)`)
    )
    expect_equal(
      tbl_clogit_int2$table_body %>%
        pull(p.value) %>%
        na.omit() %>%
        as.vector(),
      car::Anova(mod_clogit_int, type = "III", test = "Wald") %>%
        as.data.frame() %>%
        pull(`Pr(>Chisq)`)
    )
    # anova() and car::Anova() do not match
    # expect_equal(
    #   tbl_clogit_lin3$table_body %>% filter(variable == "trt") %>% pull(p.value),
    #   car::Anova(mod_clogit_lin, type = "III", test = "Wald") %>%
    #     as.data.frame() %>%
    #     tibble::rownames_to_column() %>%
    #     filter(rowname == "trt") %>%
    #     pull(`Pr(>Chisq)`)
    # )
  }
  # 5. tbl_uvregression() works as expected
  # - without errors, warnings, messages
  # - works with add_global_p(), add_nevent()
  expect_error(
    trial %>%
      tbl_uvregression(
        y = response,
        method = clogit,
        formula = "{y} ~ {x} + strata(stage)"
      ) %>%
      add_global_p(test = "Wald") %>%
      add_q(),
    NA
  )
  expect_warning(
    trial %>%
      tbl_uvregression(
        y = response,
        method = clogit,
        formula = "{y} ~ {x} + strata(stage)"
      ) %>%
      add_global_p(test = "Wald") %>%
      add_q(),
    NA
  )
})
|
library(BTM)
### Name: terms.BTM
### Title: Get highest token probabilities for each topic or get biterms
###   used in the model
### Aliases: terms.BTM
### ** Examples
library(udpipe)
data("brussels_reviews_anno", package = "udpipe")
# keep the Dutch-language reviews only
x <- subset(brussels_reviews_anno, language == "nl")
# keep nouns (singular, proper, plural)
x <- subset(x, xpos %in% c("NN", "NNP", "NNS"))
x <- x[, c("doc_id", "lemma")]
# fit a 5-topic biterm topic model on the doc_id/lemma pairs
model <- BTM(x, k = 5, iter = 5, trace = TRUE)
# top tokens per topic, with different cut-offs
terms(model)
terms(model, top_n = 10)
terms(model, threshold = 0.01, top_n = +Inf)
# extract the biterms that were used to fit the model
bi <- terms(model, type = "biterms")
str(bi)
| /data/genthat_extracted_code/BTM/examples/terms.BTM.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 550 | r | library(BTM)
### Name: terms.BTM
### Title: Get highest token probabilities for each topic or get biterms
###   used in the model
### Aliases: terms.BTM
### ** Examples
library(udpipe)
data("brussels_reviews_anno", package = "udpipe")
# keep the Dutch-language reviews only
x <- subset(brussels_reviews_anno, language == "nl")
# keep nouns (singular, proper, plural)
x <- subset(x, xpos %in% c("NN", "NNP", "NNS"))
x <- x[, c("doc_id", "lemma")]
# fit a 5-topic biterm topic model on the doc_id/lemma pairs
model <- BTM(x, k = 5, iter = 5, trace = TRUE)
# top tokens per topic, with different cut-offs
terms(model)
terms(model, top_n = 10)
terms(model, threshold = 0.01, top_n = +Inf)
# extract the biterms that were used to fit the model
bi <- terms(model, type = "biterms")
str(bi)
|
# Due to large size of data, the input data will be filtered for the dates of interest
# (i.e. Feb. 01 and Feb. 02 of 2007) as it is read into R using functions in the 'sqldf'
# package
library(sqldf)  # sqldf() is used below but was never attached in this script
fileptr <- file("household_power_consumption.txt")
# read only the rows for the two target dates; the file is ';'-separated
housedatafeb <- sqldf('select * from fileptr where Date="1/2/2007" or Date="2/2/2007"',
                      file.format=list(sep=";"))
# add column which combines Date and Time together and converts it to a date-time format
housedatafeb$datetime<- as.POSIXct(paste(housedatafeb$Date, housedatafeb$Time),
                                   format="%d/%m/%Y %H:%M:%S")
# Plot 1 - save to 480x480 PNG file
# Prepare png file for plot then draw the histogram
png("plot1.png",height=480,width=480,units="px")
hist(housedatafeb$Global_active_power,main="Global Active Power",
     xlab="Global Active Power (kilowatts)",col="red")
# close the file device
dev.off()
| /plot1.R | no_license | derek3/ExData_Plotting1 | R | false | false | 887 | r | # Due to large size of data, the input data will be filtered for the dates of interest
# (i.e. Feb. 01 and Feb. 02 of 2007) as it is read into R using functions in the 'sqldf'
# package
# NOTE(review): library(sqldf) is never attached in this script even though
# sqldf() is called below — confirm it is loaded before sourcing
fileptr <- file("household_power_consumption.txt")
# read only the rows for the two target dates; the file is ';'-separated
housedatafeb <- sqldf('select * from fileptr where Date="1/2/2007" or Date="2/2/2007"',
                      file.format=list(sep=";"))
# add column which combines Date and Time together and converts it to a date-time format
housedatafeb$datetime<- as.POSIXct(paste(housedatafeb$Date, housedatafeb$Time),
                                   format="%d/%m/%Y %H:%M:%S")
# Plot 1 - save to 480x480 PNG file
# Prepare png file for plot then draw the histogram
png("plot1.png",height=480,width=480,units="px")
hist(housedatafeb$Global_active_power,main="Global Active Power",
     xlab="Global Active Power (kilowatts)",col="red")
# close the file device
dev.off()
|
# Demo of dodged/stacked bar charts with value labels.
library(ggplot2)  # ggplot()/aes()/geom_bar() are used below but were never attached

# three groups (A, B, C) at each of five x positions
x <- rep(1:5, each = 3)
y <- rep(c('A', 'B', 'C'), times = 5)
set.seed(12)  # make the random bar heights reproducible
z <- round(runif(min = 10, max = 20, n = 15))
df <- data.frame(x = x, y = y, z = z)
# side-by-side bars, one per group at each x
ggplot(df, aes(x = factor(x), y = z, fill = y)) + geom_bar(stat = 'identity', position = 'dodge')
# one bar per x/group combination, labelled with its value
ggplot(df, aes(x = interaction(x, y), y = z, fill = y)) + geom_bar(stat = 'identity') + geom_text(aes(label = z))
ggplot(df, aes(x = interaction(x, y), y = z, fill = y)) + geom_bar(stat = 'identity') + geom_text(aes(label = z), size = 8, color = 'orange', vjust = 1)
# dodged bars with labels aligned over each bar via position_dodge()
ggplot(df, aes(x = x, y = z, fill = y)) + geom_bar(stat = 'identity', position = 'dodge') + geom_text(aes(label = z), size = 5, color = 'black', vjust = 1, hjust = .5, position = position_dodge(0.9))
# stacked bars with labels placed inside each segment
ggplot(df, aes(x = x, y = z, fill = y)) + geom_bar(stat = 'identity', position = 'stack') + geom_text(aes(label = z), size = 5, color = 'black', vjust = 3.5, hjust = .5, position = position_stack())
| /bar_6.R | no_license | nevaryyy/learn_ggplot2 | R | false | false | 927 | r | x <- rep(1:5, each = 3)
# NOTE(review): this script calls ggplot()/geom_bar() but never attaches
# ggplot2 — confirm library(ggplot2) is loaded before sourcing
y <- rep(c('A', 'B', 'C'), times = 5)
set.seed(12)  # make the random bar heights reproducible
z <- round(runif(min = 10, max = 20, n = 15))
df <- data.frame(x = x, y = y, z = z)
# side-by-side bars, one per group at each x
ggplot(df, aes(x = factor(x), y = z, fill = y)) + geom_bar(stat = 'identity', position = 'dodge')
# one bar per x/group combination, labelled with its value
ggplot(df, aes(x = interaction(x, y), y = z, fill = y)) + geom_bar(stat = 'identity') + geom_text(aes(label = z))
ggplot(df, aes(x = interaction(x, y), y = z, fill = y)) + geom_bar(stat = 'identity') + geom_text(aes(label = z), size = 8, color = 'orange', vjust = 1)
# dodged bars with labels aligned over each bar via position_dodge()
ggplot(df, aes(x = x, y = z, fill = y)) + geom_bar(stat = 'identity', position = 'dodge') + geom_text(aes(label = z), size = 5, color = 'black', vjust = 1, hjust = .5, position = position_dodge(0.9))
# stacked bars with labels placed inside each segment
ggplot(df, aes(x = x, y = z, fill = y)) + geom_bar(stat = 'identity', position = 'stack') + geom_text(aes(label = z), size = 5, color = 'black', vjust = 3.5, hjust = .5, position = position_stack())
|
## Load the package and read the file
if (!require(XML)) {
  install.packages("XML", dep = TRUE)
  require(XML)
}
doc = xmlTreeParse('studentdata.xml')
root = xmlRoot(doc)
## Get the number of children of the root node
n = xmlSize(root)
print(n)
## Access the first child node of the root tag
student1 = root[[1]]
print(student1)
## Access each component of this node
## 1. Get node name
print(xmlName(student1))
## 2. Get node attributes
### a. List all attributes of one node
print(xmlAttrs(student1))
### b. Query specific attribute by name,
###    assign NA as default value if this attribute does not exist
student1.attr = c();
student1.attr[1] = xmlGetAttr(student1, "SocialID", NA)
student1.attr[2] = xmlGetAttr(student1, "SchoolID", NA)
student1.attr[3] = xmlGetAttr(student1, "OfficeLocation", NA)
print(student1.attr)
### c. Get to the subchildren of student1 and extract their values
### The first subchild of student1
print(student1[[1]])
### The above is not really what we desired,
### because we don't want the tag ``<name>'' to come along.
### Use ``xmlValue'' to strip the tag.
print(xmlValue(student1[[1]]))
### or
print(xmlValue(student1[["name"]]))
### Now, if we want the values of all children in one run
print(xmlSApply(student1, xmlValue))
### d. Get node text without its children
### Look at student4
student4 = root[[4]]
### Want the words in the parenthesis but nothing else
### xmlValue with recursive turned off will work
print(xmlValue(student4, recursive = FALSE))
### Alternatively, we can use xmlChildren
print(xmlChildren(student4))
print(xmlChildren(student4)$text)
### e. A more complicated example
### Goal: we want to extract some attributes and elements from the XML file
### and compile them into a table.
### Variables that we want: attributes: SchoolID
### child tags: name, major, minor(if exist), hobby(if exist)
# Extract one row (as a named list) from a single <student> node; missing
# child tags come back as NA via xmlValue on a NULL child.
extractInfo <- function(x) {
  list(
    SchoolID = as.integer(xmlGetAttr(x, "SchoolID")),
    Name = xmlValue(x[["name"]]),
    Major = xmlValue(x[["major"]]),
    Minor = xmlValue(x[["minor"]]),
    Hobby = xmlValue(x[["hobby"]])
  )
}
# Apply to every student and transpose so each student becomes a row
QueriedTable = t(as.data.frame(xmlSApply(root,extractInfo)))
print(QueriedTable)
### f. Exercises
### Run the following code to load the file:
doc2 = xmlTreeParse('METriology.xml')
root2 = xmlRoot(doc2)
### Question 1: Return all listed locations in the game Mass Effect 1 as a vector or a list
### Question 2: Report all listed characters in all Mass Effect franchise. Take a union across all Mass Effect games.
### Hint for Question 2: use ``unlist'' to convert a nested list to a vector, and then use ``unique''.
| /Stats406/old_lab_notes2015/Lab 10/Lab_10.r | no_license | Pill-GZ/Teaching | R | false | false | 2,671 | r | ## Load the package and read the file
## Load the package and read the file
if (!require(XML)) {
  install.packages("XML", dep = TRUE)
  require(XML)
}
doc = xmlTreeParse('studentdata.xml')
root = xmlRoot(doc)
## Get the number of children of the root node
n = xmlSize(root)
print(n)
## Access the first child node of the root tag
student1 = root[[1]]
print(student1)
## Access each component of this node
## 1. Get node name
print(xmlName(student1))
## 2. Get node attributes
### a. List all attributes of one node
print(xmlAttrs(student1))
### b. Query specific attribute by name,
###    assign NA as default value if this attribute does not exist
student1.attr = c();
student1.attr[1] = xmlGetAttr(student1, "SocialID", NA)
student1.attr[2] = xmlGetAttr(student1, "SchoolID", NA)
student1.attr[3] = xmlGetAttr(student1, "OfficeLocation", NA)
print(student1.attr)
### c. Get to the subchildren of student1 and extract their values
### The first subchild of student1
print(student1[[1]])
### The above is not really what we desired,
### because we don't want the tag ``<name>'' to come along.
### Use ``xmlValue'' to strip the tag.
print(xmlValue(student1[[1]]))
### or
print(xmlValue(student1[["name"]]))
### Now, if we want the values of all children in one run
print(xmlSApply(student1, xmlValue))
### d. Get node text without its children
### Look at student4
student4 = root[[4]]
### Want the words in the parenthesis but nothing else
### xmlValue with recursive turned off will work
print(xmlValue(student4, recursive = FALSE))
### Alternatively, we can use xmlChildren
print(xmlChildren(student4))
print(xmlChildren(student4)$text)
### e. A more complicated example
### Goal: we want to extract some attributes and elements from the XML file
### and compile them into a table.
### Variables that we want: attributes: SchoolID
### child tags: name, major, minor(if exist), hobby(if exist)
# Extract one row (as a named list) from a single <student> node; missing
# child tags come back as NA via xmlValue on a NULL child.
extractInfo <- function(x) {
  list(
    SchoolID = as.integer(xmlGetAttr(x, "SchoolID")),
    Name = xmlValue(x[["name"]]),
    Major = xmlValue(x[["major"]]),
    Minor = xmlValue(x[["minor"]]),
    Hobby = xmlValue(x[["hobby"]])
  )
}
# Apply to every student and transpose so each student becomes a row
QueriedTable = t(as.data.frame(xmlSApply(root,extractInfo)))
print(QueriedTable)
### f. Exercises
### Run the following code to load the file:
doc2 = xmlTreeParse('METriology.xml')
root2 = xmlRoot(doc2)
### Question 1: Return all listed locations in the game Mass Effect 1 as a vector or a list
### Question 2: Report all listed characters in all Mass Effect franchise. Take a union across all Mass Effect games.
### Hint for Question 2: use ``unlist'' to convert a nested list to a vector, and then use ``unique''.
|
# Exponential random variable in R (inverse-transform sampling)
# runif -> 10 uniform draws between 0 and 1
u <- runif(10)
lambda <- 2
x <- -log(1-u)/lambda
x
# Estimate the mean of the exponential via inverse-transform sampling
u <- runif(1000)
lambda <- 2
x <- -log(1-u)/lambda
mean(x)
# Built-in way to simulate the exponential r.v.
x <- rexp(10,2)
x
# Simulate the mean
x <- rexp(1000,2)
mean(x)
# Discrete random variable taking values 1, 2, 5 with probs 0.5, 0.3, 0.2
u <- runif(10)
x <- 1*(u>=0 & u<0.5) + 2*(u >= 0.5 & u < 0.8) + 5*(u >=0.8 & u<1)
x
# Check: empirical frequencies should approach (0.5, 0.3, 0.2)
u <- runif(10000)
x <- 1*(u>=0 & u<0.5) + 2*(u >= 0.5 & u < 0.8) + 5*(u >=0.8 & u<1)
c(mean(x==1), mean(x==2),mean(x==5))
# Transfer amounts
# a
# Density function f(x) = x/18 on (0, 6)
f_X <- function(x){
x/18 * (x>0 & x < 6)
}
xs <- seq(-1,7,0.001)
plot(xs, f_X(xs), cex=0.1)
# Compute and plot the cumulative distribution function
F_X <- function(x){
(x^2)/36 * (x>=0 & x<6) + (x>= 6)
}
xs<-seq(-1, 7, 0.1)
plot(xs, F_X(xs), type = "l")
# Now sample X by inverting the CDF applied to U: X = 6*sqrt(U)
u <- runif(100)
x <- 6*sqrt(u)
x
# Sample mean
mean(x)
# Compare with the true expected value E[X] = integral of x*f(x)
faux <- function(x) x*f_X(x)
integrate(faux, 0, 6)
| /Practica 3/numeros_aleatorios.R | no_license | vRaphiel/Probabilidad_Estadistica | R | false | false | 1,024 | r | #Variable aleatoria exponencial en R
# runif -> 10 uniform draws between 0 and 1 (exponential via inverse transform)
u <- runif(10)
lambda <- 2
x <- -log(1-u)/lambda
x
# Estimate the mean of the exponential via inverse-transform sampling
u <- runif(1000)
lambda <- 2
x <- -log(1-u)/lambda
mean(x)
# Built-in way to simulate the exponential r.v.
x <- rexp(10,2)
x
# Simulate the mean
x <- rexp(1000,2)
mean(x)
# Discrete random variable taking values 1, 2, 5 with probs 0.5, 0.3, 0.2
u <- runif(10)
x <- 1*(u>=0 & u<0.5) + 2*(u >= 0.5 & u < 0.8) + 5*(u >=0.8 & u<1)
x
# Check: empirical frequencies should approach (0.5, 0.3, 0.2)
u <- runif(10000)
x <- 1*(u>=0 & u<0.5) + 2*(u >= 0.5 & u < 0.8) + 5*(u >=0.8 & u<1)
c(mean(x==1), mean(x==2),mean(x==5))
# Transfer amounts
# a
# Density function f(x) = x/18 on (0, 6)
f_X <- function(x){
x/18 * (x>0 & x < 6)
}
xs <- seq(-1,7,0.001)
plot(xs, f_X(xs), cex=0.1)
# Compute and plot the cumulative distribution function
F_X <- function(x){
(x^2)/36 * (x>=0 & x<6) + (x>= 6)
}
xs<-seq(-1, 7, 0.1)
plot(xs, F_X(xs), type = "l")
# Now sample X by inverting the CDF applied to U: X = 6*sqrt(U)
u <- runif(100)
x <- 6*sqrt(u)
x
# Sample mean
mean(x)
# Compare with the true expected value E[X] = integral of x*f(x)
faux <- function(x) x*f_X(x)
integrate(faux, 0, 6)
|
# Load the email50 data set (ships with a course package - presumably
# openintro; requires dplyr and ggplot2 to be attached)
data(email50)
# Inspect its structure with base R
str(email50)
# Same idea with dplyr's glimpse(): one line per column
glimpse(email50)
# Subset of emails whose "number" category is "big": email50_big
email50_big <- email50 %>%
filter(number == "big")
# Glimpse the subset
glimpse(email50_big)
# Frequency table of the number factor - unused levels still show with count 0
table(email50_big$number)
# Drop the now-empty factor levels
email50_big$number <- droplevels(email50_big$number)
# Tabulate again: only the "big" level remains
table(email50_big$number)
# Median number of characters, used as the split point below
med_num_char <- median(email50$num_char)
# Categorize each email relative to the median character count
email50 <- email50 %>%
mutate(num_char_cat = ifelse(num_char < med_num_char, "below median", "at or above median"))
# Count emails in each category
table(email50$num_char_cat)
# Binary indicator: does the email contain any number at all?
email50 <- email50 %>%
mutate(number_yn = ifelse(number == "none","no", "yes"))
# Bar chart of the yes/no indicator
ggplot(email50, aes(x = number_yn)) +
geom_bar()
# Load ggplot2 (redundant if already attached, but harmless)
library(ggplot2)
# Scatterplot of exclamation marks vs. character count, colored by spam status
ggplot(email50, aes(x = num_char, y = exclaim_mess, color = factor(spam))) +
geom_point()
| /12-introduction-to-data/language-of-data.R | no_license | vardavo/datacamp-data-scientist-r | R | false | false | 1,113 | r | # Load data
# Load the email50 data set (ships with a course package - presumably
# openintro; requires dplyr and ggplot2 to be attached)
data(email50)
# Inspect its structure with base R
str(email50)
# Same idea with dplyr's glimpse(): one line per column
glimpse(email50)
# Subset of emails whose "number" category is "big": email50_big
email50_big <- email50 %>%
filter(number == "big")
# Glimpse the subset
glimpse(email50_big)
# Frequency table of the number factor - unused levels still show with count 0
table(email50_big$number)
# Drop the now-empty factor levels
email50_big$number <- droplevels(email50_big$number)
# Tabulate again: only the "big" level remains
table(email50_big$number)
# Median number of characters, used as the split point below
med_num_char <- median(email50$num_char)
# Categorize each email relative to the median character count
email50 <- email50 %>%
mutate(num_char_cat = ifelse(num_char < med_num_char, "below median", "at or above median"))
# Count emails in each category
table(email50$num_char_cat)
# Binary indicator: does the email contain any number at all?
email50 <- email50 %>%
mutate(number_yn = ifelse(number == "none","no", "yes"))
# Bar chart of the yes/no indicator
ggplot(email50, aes(x = number_yn)) +
geom_bar()
# Load ggplot2 (redundant if already attached, but harmless)
library(ggplot2)
# Scatterplot of exclamation marks vs. character count, colored by spam status
ggplot(email50, aes(x = num_char, y = exclaim_mess, color = factor(spam))) +
geom_point()
|
library(data.table)
library(jsonlite)
library(dplyr)
library(purrr)
library(janitor)
library(binr)
library(tidyverse)
library(hrbrthemes)
library(NLP)
library(readxl)
library(naniar)
library(forcats)
library(ggplot2)
library(scales)
# Copyright 2020 By Gonzalo Mena, gomena.github.io, @mena_gonzalo, github.com/gomena

# Read one Ministry of Science COVID-19 "producto" CSV and reshape it to long
# format: one row per (region, date) with the requested value column name.
# Wide column names come in as e.g. "x2020_04_15"; strip the leading "x" and
# turn the first two underscores into dashes so the string parses as a Date.
# NOTE: fread() needs raw file contents, so the URLs below use
# raw.githubusercontent.com; the original "github.com/.../tree/..." URLs point
# at HTML pages and cannot be parsed as CSV.
read_producto <- function(url, value_name) {
  fread(url) %>%
    clean_names() %>%
    as_tibble() %>%
    filter(region != 'Total') %>%
    pivot_longer(-c('region'), names_to = 'date', values_to = value_name) %>%
    mutate(date = as.Date(sub('_', '-', sub('_', '-', str_remove(date, 'x'))))) %>%
    group_by(date)
}

# New cases with symptoms (producto 26), without symptoms (producto 27) and
# total new cases (producto 13).
CasosNuevosConSintomas = read_producto("https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto26/CasosNuevosConSintomas.csv", 'new_cases_with_symptoms')
CasosNuevosSinSintomas = read_producto("https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto27/CasosNuevosSinSintomas.csv", 'new_cases_without_symptoms')
CasosNuevos = read_producto("https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto13/CasosNuevosCumulativo.csv", 'new_cases_total')

# One table with the three series, joined on (region, date), plus a long
# version keeping one metric per row.
case_test_df = CasosNuevosConSintomas %>% full_join(CasosNuevosSinSintomas) %>% full_join(CasosNuevos)
case_test_df_pivoted = case_test_df %>% pivot_longer(-c('region', 'date'), names_to = 'type_metric', values_to = 'metric')

# Breakpoints of the reporting periods; period n covers (dates1[n], dates1[n+1]].
dates1 = c('2020-04-15','2020-04-17','2020-04-20','2020-04-24','2020-04-27','2020-05-01','2020-05-04','2020-05-08','2020-05-11','2020-05-15')
# File stems of the historical symptom-onset reports (the original bound this
# to `list`, shadowing base::list).
file_stems = c("FechaInicioSintomas-16-04","FechaInicioSintomas-18-04","FechaInicioSintomas-22-04","FechaInicioSintomas-25-04", "FechaInicioSintomas-29-04","FechaInicioSintomas-01-05","FechaInicioSintomas-04-05","FechaInicioSintomas-08-05")
archivos_csv = paste("https://raw.githubusercontent.com/gomena/Underreportingcovid/master/data/historysintom/", file_stems, '.txt', sep = "")

# Template holding every epidemiological-week column (se7..se19) at 0; used to
# pad reports that do not yet contain the later weeks. (An unused `listcols`
# vector that mirrored these names was dropped.)
tsem=tibble('se7'=0,'se8'=0,'se9'=0,'se10'=0,'se11'=0,'se12'=0,'se13'=0,'se14'=0,'se15'=0,'se16'=0,'se17'=0,'se18'=0,'se19'=0)
# Publication date of each report, aligned with file_stems / archivos_csv.
datereportlist=c("2020-04-15","2020-04-17","2020-04-20","2020-04-24","2020-04-27","2020-05-01","2020-05-04","2020-05-08")

# Read every symptom-onset report, tag it with its publication date, pad the
# missing week columns with 0 and pivot to long format (one row per
# comuna x epidemiological week).
sintom_history <- map(
  seq_along(archivos_csv),
  function(x) {
    y <- fread(archivos_csv[x]) %>%
      clean_names() %>%
      as_tibble() %>%
      mutate(date = datereportlist[x])
    y %>%
      add_column(!!!tsem[setdiff(names(tsem), names(y))]) %>%
      pivot_longer(-c("region", "codigo_region", "comuna", "codigo_comuna", "poblacion", "date"),
                   names_to = 'senum', values_to = "cases")
  })

# Diagnosis-based new cases summed per region over one reporting period.
# funs() is deprecated, so a purrr-style lambda is used instead. The original
# seeded the table with n = 5 and then full_join-ed n = 2..9 (the repeated
# period was de-duplicated by the join), so iterating n = 2..9 once is
# equivalent.
period_summary <- function(n) {
  case_test_df %>%
    filter(date <= dates1[n + 1] & date > dates1[n]) %>%
    group_by(region) %>%
    summarize_at(vars(new_cases_total, new_cases_with_symptoms, new_cases_without_symptoms),
                 ~ sum(., na.rm = TRUE)) %>%
    mutate(period = paste(dates1[n], dates1[n + 1], sep = " a ")) %>%
    mutate(date = as.Date(dates1[n + 1]))
}
sumary = map_dfr(2:9, period_summary)

# Stack the reports, then recover the per-report *new* symptom-onset cases:
# within each comuna x week, difference consecutive cumulative counts (the
# first report is kept as-is) and aggregate to region x report date.
sintom_history_all = reduce(sintom_history, full_join) %>%
  rename(senumsin = senum) %>%
  mutate_at(vars(senumsin, date), factor) %>%
  group_by(comuna, senumsin) %>%
  mutate(new_cases_sint = c(cases[1], (cases - lag(cases))[-1])) %>%
  group_by(region, date) %>%
  summarize_at(vars(new_cases_sint), ~ sum(., na.rm = TRUE)) %>%
  mutate(date = as.Date(date))

# Attach the symptom-onset series and keep the periods with full coverage.
sumary = sumary %>% full_join(sintom_history_all)
sumary = sumary %>% filter(date >= "2020-04-25")

theme_set(theme_classic())
# Diagnosis-based vs symptom-onset-based new cases, one panel per period,
# with a dashed 45-degree reference line.
sumary %>% ggplot(aes(x = new_cases_sint)) +
  geom_point(aes(y = new_cases_with_symptoms, color = 'Con síntomas'), size = 4) +
  geom_point(aes(y = new_cases_total, color = 'Totales'), size = 4) +
  geom_segment(aes(x = 0,
                   xend = 4500,
                   y = 0,
                   yend = 4500),
               linetype = "dashed",
               size = 0.5) +
  facet_wrap("period") +
  labs(title = "Casos nuevos por diagnóstico vs Casos por inicio de síntomas",
       caption = "Codigo: https://github.com/gomena/UnderreportingCovid/. fuente: minsal/ministerio de ciencia", color = 'Tipo de confirmacion') +
  ylab('Casos nuevos por diagnóstico') +
  xlab('Casos nuevos por inicio de síntomas') +
  theme(plot.title = element_text(color = "black", size = 14, face = "bold"),
        plot.subtitle = element_text(color = "black", size = 12, face = "bold"),
        axis.title.x = element_text(color = "black", size = 14, face = "bold"),
        axis.title.y = element_text(color = "black", size = 14, face = "bold"),
        axis.text.x = element_text(face = "bold", color = "black",
                                   size = 10, angle = 15),
        axis.text.y = element_text(face = "bold", color = "black",
                                   size = 8))
| /code/new_cases_vs_new_cases_symptom.R | permissive | gomena/Underreportingcovid | R | false | false | 5,492 | r | library(data.table)
library(jsonlite)
library(dplyr)
library(purrr)
library(janitor)
library(binr)
library(tidyverse)
library(hrbrthemes)
library(NLP)
library(readxl)
library(naniar)
library(forcats)
library(ggplot2)
library(scales)
# Copyright 2020 By Gonzalo Mena, gomena.github.io, @mena_gonzalo, github.com/gomena

# Read one Ministry of Science COVID-19 "producto" CSV and reshape it to long
# format: one row per (region, date) with the requested value column name.
# Wide column names come in as e.g. "x2020_04_15"; strip the leading "x" and
# turn the first two underscores into dashes so the string parses as a Date.
# NOTE: fread() needs raw file contents, so the URLs below use
# raw.githubusercontent.com; the original "github.com/.../tree/..." URLs point
# at HTML pages and cannot be parsed as CSV.
read_producto <- function(url, value_name) {
  fread(url) %>%
    clean_names() %>%
    as_tibble() %>%
    filter(region != 'Total') %>%
    pivot_longer(-c('region'), names_to = 'date', values_to = value_name) %>%
    mutate(date = as.Date(sub('_', '-', sub('_', '-', str_remove(date, 'x'))))) %>%
    group_by(date)
}

# New cases with symptoms (producto 26), without symptoms (producto 27) and
# total new cases (producto 13).
CasosNuevosConSintomas = read_producto("https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto26/CasosNuevosConSintomas.csv", 'new_cases_with_symptoms')
CasosNuevosSinSintomas = read_producto("https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto27/CasosNuevosSinSintomas.csv", 'new_cases_without_symptoms')
CasosNuevos = read_producto("https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto13/CasosNuevosCumulativo.csv", 'new_cases_total')

# One table with the three series, joined on (region, date), plus a long
# version keeping one metric per row.
case_test_df = CasosNuevosConSintomas %>% full_join(CasosNuevosSinSintomas) %>% full_join(CasosNuevos)
case_test_df_pivoted = case_test_df %>% pivot_longer(-c('region', 'date'), names_to = 'type_metric', values_to = 'metric')

# Breakpoints of the reporting periods; period n covers (dates1[n], dates1[n+1]].
dates1 = c('2020-04-15','2020-04-17','2020-04-20','2020-04-24','2020-04-27','2020-05-01','2020-05-04','2020-05-08','2020-05-11','2020-05-15')
# File stems of the historical symptom-onset reports (the original bound this
# to `list`, shadowing base::list).
file_stems = c("FechaInicioSintomas-16-04","FechaInicioSintomas-18-04","FechaInicioSintomas-22-04","FechaInicioSintomas-25-04", "FechaInicioSintomas-29-04","FechaInicioSintomas-01-05","FechaInicioSintomas-04-05","FechaInicioSintomas-08-05")
archivos_csv = paste("https://raw.githubusercontent.com/gomena/Underreportingcovid/master/data/historysintom/", file_stems, '.txt', sep = "")

# Template holding every epidemiological-week column (se7..se19) at 0; used to
# pad reports that do not yet contain the later weeks. (An unused `listcols`
# vector that mirrored these names was dropped.)
tsem=tibble('se7'=0,'se8'=0,'se9'=0,'se10'=0,'se11'=0,'se12'=0,'se13'=0,'se14'=0,'se15'=0,'se16'=0,'se17'=0,'se18'=0,'se19'=0)
# Publication date of each report, aligned with file_stems / archivos_csv.
datereportlist=c("2020-04-15","2020-04-17","2020-04-20","2020-04-24","2020-04-27","2020-05-01","2020-05-04","2020-05-08")

# Read every symptom-onset report, tag it with its publication date, pad the
# missing week columns with 0 and pivot to long format (one row per
# comuna x epidemiological week).
sintom_history <- map(
  seq_along(archivos_csv),
  function(x) {
    y <- fread(archivos_csv[x]) %>%
      clean_names() %>%
      as_tibble() %>%
      mutate(date = datereportlist[x])
    y %>%
      add_column(!!!tsem[setdiff(names(tsem), names(y))]) %>%
      pivot_longer(-c("region", "codigo_region", "comuna", "codigo_comuna", "poblacion", "date"),
                   names_to = 'senum', values_to = "cases")
  })

# Diagnosis-based new cases summed per region over one reporting period.
# funs() is deprecated, so a purrr-style lambda is used instead. The original
# seeded the table with n = 5 and then full_join-ed n = 2..9 (the repeated
# period was de-duplicated by the join), so iterating n = 2..9 once is
# equivalent.
period_summary <- function(n) {
  case_test_df %>%
    filter(date <= dates1[n + 1] & date > dates1[n]) %>%
    group_by(region) %>%
    summarize_at(vars(new_cases_total, new_cases_with_symptoms, new_cases_without_symptoms),
                 ~ sum(., na.rm = TRUE)) %>%
    mutate(period = paste(dates1[n], dates1[n + 1], sep = " a ")) %>%
    mutate(date = as.Date(dates1[n + 1]))
}
sumary = map_dfr(2:9, period_summary)

# Stack the reports, then recover the per-report *new* symptom-onset cases:
# within each comuna x week, difference consecutive cumulative counts (the
# first report is kept as-is) and aggregate to region x report date.
sintom_history_all = reduce(sintom_history, full_join) %>%
  rename(senumsin = senum) %>%
  mutate_at(vars(senumsin, date), factor) %>%
  group_by(comuna, senumsin) %>%
  mutate(new_cases_sint = c(cases[1], (cases - lag(cases))[-1])) %>%
  group_by(region, date) %>%
  summarize_at(vars(new_cases_sint), ~ sum(., na.rm = TRUE)) %>%
  mutate(date = as.Date(date))

# Attach the symptom-onset series and keep the periods with full coverage.
sumary = sumary %>% full_join(sintom_history_all)
sumary = sumary %>% filter(date >= "2020-04-25")

theme_set(theme_classic())
# Diagnosis-based vs symptom-onset-based new cases, one panel per period,
# with a dashed 45-degree reference line.
sumary %>% ggplot(aes(x = new_cases_sint)) +
  geom_point(aes(y = new_cases_with_symptoms, color = 'Con síntomas'), size = 4) +
  geom_point(aes(y = new_cases_total, color = 'Totales'), size = 4) +
  geom_segment(aes(x = 0,
                   xend = 4500,
                   y = 0,
                   yend = 4500),
               linetype = "dashed",
               size = 0.5) +
  facet_wrap("period") +
  labs(title = "Casos nuevos por diagnóstico vs Casos por inicio de síntomas",
       caption = "Codigo: https://github.com/gomena/UnderreportingCovid/. fuente: minsal/ministerio de ciencia", color = 'Tipo de confirmacion') +
  ylab('Casos nuevos por diagnóstico') +
  xlab('Casos nuevos por inicio de síntomas') +
  theme(plot.title = element_text(color = "black", size = 14, face = "bold"),
        plot.subtitle = element_text(color = "black", size = 12, face = "bold"),
        axis.title.x = element_text(color = "black", size = 14, face = "bold"),
        axis.title.y = element_text(color = "black", size = 14, face = "bold"),
        axis.text.x = element_text(face = "bold", color = "black",
                                   size = 10, angle = 15),
        axis.text.y = element_text(face = "bold", color = "black",
                                   size = 8))
|
# Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
setwd("D:\\Documents\\Downloads\\EDA\\Week 1\\Assignment 1")
# Read the raw data: semicolon-separated, "?" marks missing values; the first
# two columns (Date, Time) stay text, the remaining seven are numeric.
power <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                    colClasses = c("character", "character", rep("numeric", 7)), na = "?")
# Parse the Date column once, then keep only the two days of interest.
obs_date <- as.Date(power$Date, format = "%d/%m/%Y")
feb_days <- power[obs_date == "2007-02-01" | obs_date == "2007-02-02", ]
# Write the histogram to a PNG device (default 480x480).
png("plot1.png")
hist(feb_days$Global_active_power, col = 'red', xlab = "Global Active Power(kilowatts)", main = "Global Active Power")
dev.off()
| /plot1.R | no_license | navneet100/ExData_Plotting1 | R | false | false | 492 | r | setwd("D:\\Documents\\Downloads\\EDA\\Week 1\\Assignment 1")
# Read the raw data: semicolon-separated, "?" marks missing values; the first
# two columns (Date, Time) stay text, the remaining seven are numeric.
power <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                    colClasses = c("character", "character", rep("numeric", 7)), na = "?")
# Parse the Date column once, then keep only the two days of interest.
obs_date <- as.Date(power$Date, format = "%d/%m/%Y")
feb_days <- power[obs_date == "2007-02-01" | obs_date == "2007-02-02", ]
# Write the histogram to a PNG device (default 480x480).
png("plot1.png")
hist(feb_days$Global_active_power, col = 'red', xlab = "Global Active Power(kilowatts)", main = "Global Active Power")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mc.discrete.R
\name{mc.discrete}
\alias{mc.discrete}
\title{Markov Chain Simulator}
\usage{
mc.discrete(p,k,n)
}
\arguments{
\item{p}{is a transition probability matrix for Markov Chain simulation.}
\item{k}{is a single integer giving the state in which the simulation starts.}
\item{n}{is a single natural number giving how many simulation steps are to be performed.}
}
\value{
Returns a vector of values corresponding to the stationary probability for each state in the given transition probability matrix.
}
\description{
This function takes 3 arguments to simulate the stationary probabilities from a transition probability matrix.
}
\examples{
mc.discrete(matrix(c(0,0,0,0.4,0.7,0.5,0.6,0.3,0.5),nrow=3,ncol=3),1,100)
}
\author{
Nguyen Khanh Le Ho & Emil H. Andersen \cr
Department of Mathematics and Computer Science (IMADA) \cr
University of Southern Denmark, Denmark \cr
\email{emila14@student.sdu.dk} \cr
\email{ngho14@student.sdu.dk} \cr
}
\keyword{chain}
\keyword{markov}
\keyword{matrix}
\keyword{probability}
\keyword{simulation}
\keyword{transition}
| /man/mc.discrete.Rd | no_license | Chaiji/LemilExamST522 | R | false | true | 1,153 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mc.discrete.R
\name{mc.discrete}
\alias{mc.discrete}
\title{Markov Chain Simulator}
\usage{
mc.discrete(p,k,n)
}
\arguments{
\item{p}{is a transition probability matrix for Markov Chain simulation.}
\item{k}{is a single integer giving the state in which the simulation starts.}
\item{n}{is a single natural number giving how many simulation steps are to be performed.}
}
\value{
Returns a vector of values corresponding to the stationary probability for each state in the given transition probability matrix.
}
\description{
This function takes 3 arguments to simulate the stationary probabilities from a transition probability matrix.
}
\examples{
mc.discrete(matrix(c(0,0,0,0.4,0.7,0.5,0.6,0.3,0.5),nrow=3,ncol=3),1,100)
}
\author{
Nguyen Khanh Le Ho & Emil H. Andersen \cr
Department of Mathematics and Computer Science (IMADA) \cr
University of Southern Denmark, Denmark \cr
\email{emila14@student.sdu.dk} \cr
\email{ngho14@student.sdu.dk} \cr
}
\keyword{chain}
\keyword{markov}
\keyword{matrix}
\keyword{probability}
\keyword{simulation}
\keyword{transition}
|
# Auto-generated fuzz-test input: a 7x3 all-zero matrix plus quantile q = 0.
testlist <- list(data = structure(c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(7L, 3L)), q = 0)
# Call the package-internal C++ routine with the generated argument list.
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result) | /biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610554018-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 196 | r | testlist <- list(data = structure(c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(7L, 3L)), q = 0)
# Call the package-internal C++ routine with the generated argument list and
# print the structure of the result.
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_settings.R
\name{get_settings_modis}
\alias{get_settings_modis}
\title{Defines settings for settings for MODIS download using MODISTools}
\usage{
get_settings_modis(
bundle = "modis_fpar",
data_path = ".",
method_interpol = "linear",
keep = FALSE,
overwrite_raw = FALSE,
overwrite_interpol = FALSE,
n_focal = 0,
filename_with_year = TRUE,
network = NA
)
}
\arguments{
\item{bundle}{A character string specifying which dataset (bundle) to
download. Defaults to \code{"modis_fpar"}.
Available: \code{c("modis_fpar", "modis_evi", "modis_lai", "modis_gpp")}.}
\item{data_path}{A character string specifying the path of where the data
should be downloaded to. Defaults to \code{"."} (present working directory).}
\item{method_interpol}{A character string specifying which interpolation
method to use. Defaults to linear interpolation (\code{"linear"}).}
\item{keep}{A logical specifying whether to keep all intermediate data
(before filtering, and before imputing mean seasonal cycle),
and all alternative interpolation results. Defaults to \code{FALSE}.}
\item{overwrite_raw}{A logical specifying whether raw data as downloaded
from MODISTools is to be overwritten. Defaults to \code{FALSE},
i.e. data is read from existing file if available.}
\item{overwrite_interpol}{A logical specifying whether processed
(interpolated) data, is to be overwritten. Defaults to \code{FALSE},
i.e. data is read from existing file if available.}
\item{n_focal}{An integer specifying the distance (in number of pixels)
around the center pixel to be used for averaging. Defaults
to zero (using only the center pixel).}
\item{filename_with_year}{A logical specifying whether the years covered are
specified in the file name. Added here for consistency with
earlier versions of ingestr where years were not specified.}
\item{network}{A character string specifying the network for which
the site names correspond.}
}
\description{
Defines settings for settings for MODISTools download
for a pre-defined set of "bundles" (\code{c("modis_fpar",
"modis_evi", "modis_lai", "modis_gpp")}).
}
\examples{
\dontrun{settings_modis <- get_settings_modis()}
}
| /man/get_settings_modis.Rd | no_license | geco-bern/ingestr | R | false | true | 2,239 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_settings.R
\name{get_settings_modis}
\alias{get_settings_modis}
\title{Defines settings for settings for MODIS download using MODISTools}
\usage{
get_settings_modis(
bundle = "modis_fpar",
data_path = ".",
method_interpol = "linear",
keep = FALSE,
overwrite_raw = FALSE,
overwrite_interpol = FALSE,
n_focal = 0,
filename_with_year = TRUE,
network = NA
)
}
\arguments{
\item{bundle}{A character string specifying which dataset (bundle) to
download. Defaults to \code{"modis_fpar"}.
Available: \code{c("modis_fpar", "modis_evi", "modis_lai", "modis_gpp")}.}
\item{data_path}{A character string specifying the path of where the data
should be downloaded to. Defaults to \code{"."} (present working directory).}
\item{method_interpol}{A character string specifying which interpolation
method to use. Defaults to linear interpolation (\code{"linear"}).}
\item{keep}{A logical specifying whether to keep all intermediate data
(before filtering, and before imputing mean seasonal cycle),
and all alternative interpolation results. Defaults to \code{FALSE}.}
\item{overwrite_raw}{A logical specifying whether raw data as downloaded
from MODISTools is to be overwritten. Defaults to \code{FALSE},
i.e. data is read from existing file if available.}
\item{overwrite_interpol}{A logical specifying whether processed
(interpolated) data, is to be overwritten. Defaults to \code{FALSE},
i.e. data is read from existing file if available.}
\item{n_focal}{An integer specifying the distance (in number of pixels)
around the center pixel to be used for averaging. Defaults
to zero (using only the center pixel).}
\item{filename_with_year}{A logical specifying whether the years covered are
specified in the file name. Added here for consistency with
earlier versions of ingestr where years were not specified.}
\item{network}{A character string specifying the network for which
the site names correspond.}
}
\description{
Defines settings for settings for MODISTools download
for a pre-defined set of "bundles" (\code{c("modis_fpar",
"modis_evi", "modis_lai", "modis_gpp")}).
}
\examples{
\dontrun{settings_modis <- get_settings_modis()}
}
|
# Build the packaged `comp` data set from the raw comparison table.
comp <- read.table(file = "data-raw/comparaison.txt")
# Assign the column names used downstream.
names(comp) <- c("Q1O", "Q1N", "Q2O", "Q2N")
# Serialize the `comp` object under data/ for the package.
save(comp, file = "data/comparaison.rdata")
| /data-raw/comparaison.R | no_license | qianlin-qz/AnalyseDD | R | false | false | 149 | r | comp <- read.table(file ="data-raw/comparaison.txt" )
# Assign the column names used downstream.
colnames(comp) <- c("Q1O", "Q1N", "Q2O", "Q2N")
# Serialize the `comp` object under data/ for the package.
save(comp, file = "data/comparaison.rdata")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/witch_query.R
\name{witch_query}
\alias{witch_query}
\title{Fast query of WITCH results files}
\usage{
witch_query(
item = NULL,
resgdx = NULL,
filter = list(),
add_scenario = TRUE,
scenario_mapping = witch_scen_name,
add_year = TRUE,
year_mapping = witch_period_year,
valigdx = NULL,
histgdx = NULL,
agg_n = NULL,
clean_columns = TRUE,
...
)
}
\arguments{
\item{item}{parameter or variable name}
\item{resgdx}{list of results gdx from WITCH}
\item{filter}{named list of queries}
\item{add_year}{convert t into year}
\item{year_mapping}{a mapping table to translate t into year}
\item{add_scenario}{convert gdx into scenario name}
\item{scenario_mapping}{a conversion function or a mapping table to translate gdx into scenario name}
}
\description{
Returns a data.table containing the result of the query with additional
columns on scenario
}
\seealso{
Other WITCH helper functions:
\code{\link{witch_results_files}()},
\code{\link{witch_scen_name}()}
}
\concept{WITCH helper functions}
| /man/witch_query.Rd | permissive | witch-team/witchtools | R | false | true | 1,086 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/witch_query.R
\name{witch_query}
\alias{witch_query}
\title{Fast query of WITCH results files}
\usage{
witch_query(
item = NULL,
resgdx = NULL,
filter = list(),
add_scenario = TRUE,
scenario_mapping = witch_scen_name,
add_year = TRUE,
year_mapping = witch_period_year,
valigdx = NULL,
histgdx = NULL,
agg_n = NULL,
clean_columns = TRUE,
...
)
}
\arguments{
\item{item}{parameter or variable name}
\item{resgdx}{list of results gdx from WITCH}
\item{filter}{named list of queries}
\item{add_year}{convert t into year}
\item{year_mapping}{a mapping table to translate t into year}
\item{add_scenario}{convert gdx into scenario name}
\item{scenario_mapping}{a conversion function or a mapping table to translate gdx into scenario name}
}
\description{
Returns a data.table containing the result of the query with additional
columns on scenario
}
\seealso{
Other WITCH helper functions:
\code{\link{witch_results_files}()},
\code{\link{witch_scen_name}()}
}
\concept{WITCH helper functions}
|
# Limma results for the functional data, sorted by p-value, with numeric
# columns rounded to 3 digits for display. The original piped through
# sapply(), which collapses a mixed-type data frame into a character
# matrix, so the numeric columns reached the DT table as text (and sorted
# lexicographically). lapply() keeps each column's type intact.
fct_limma = data$limma$fct %>%
rownames_to_column("Feature") %>%
arrange(pvalue) %>%
lapply(function(col){
if(!is.numeric(col)) return(col)
round(col, digits = 3)
}) %>%
as.data.frame(stringsAsFactors = FALSE) %>%
column_to_rownames("Feature")
# Render the results table with single-row selection (first row
# pre-selected) and server-side processing.
# NOTE(review): prefer TRUE over T; T can be reassigned.
output$fct_limma = renderDT(
fct_limma,
selection = list(mode = "single", selected = 1),
server=T
)
# Reactive: feature name (row name) of the currently selected table row.
fct_boxplot_selector = reactive({
rownames(fct_limma)[input$fct_limma_rows_selected]
})
# Interactive boxplot of the selected feature: Timepoint on x, one column
# per Treatment, with per-Subject lines and colors.
output$fct_boxplot = renderPlotly({
mset = data$data$fct
p = plot_boxplot(mset,
x = "Timepoint",
feature = fct_boxplot_selector(),
cols = "Treatment",
line = "Subject",
color = "Subject",
color.pal = pal_jama()(7)) +
labs(x = "")
# Convert the ggplot object to an interactive plotly widget.
ggplotly(p)
}) | /hdl/apps/app/server/fct/boxplot.R | no_license | zhuchcn/egg_study | R | false | false | 858 | r | fct_limma = data$limma$fct %>%
rownames_to_column("Feature") %>%
arrange(pvalue) %>%
sapply(function(col){
if(!is.numeric(col)) return(col)
round(col, digits = 3)
}) %>%
as.data.frame %>%
column_to_rownames("Feature")
# Render the results table with single-row selection (first row
# pre-selected) and server-side processing.
# NOTE(review): prefer TRUE over T; T can be reassigned.
output$fct_limma = renderDT(
fct_limma,
selection = list(mode = "single", selected = 1),
server=T
)
# Reactive: feature name (row name) of the currently selected table row.
fct_boxplot_selector = reactive({
rownames(fct_limma)[input$fct_limma_rows_selected]
})
# Interactive boxplot of the selected feature: Timepoint on x, one column
# per Treatment, with per-Subject lines and colors.
output$fct_boxplot = renderPlotly({
mset = data$data$fct
p = plot_boxplot(mset,
x = "Timepoint",
feature = fct_boxplot_selector(),
cols = "Treatment",
line = "Subject",
color = "Subject",
color.pal = pal_jama()(7)) +
labs(x = "")
# Convert the ggplot object to an interactive plotly widget.
ggplotly(p)
})
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/includedData.R
\docType{data}
\name{sampleData}
\alias{sampleData}
\title{Example data}
\description{
Example data
}
\keyword{data}
\keyword{streamflow}
| /man/sampleData.Rd | permissive | klingerf2/EflowStats | R | false | true | 232 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/includedData.R
\docType{data}
\name{sampleData}
\alias{sampleData}
\title{Example data}
\description{
Example data
}
\keyword{data}
\keyword{streamflow}
|
#parte1
directorio <-setwd("~/GitHub/Programacion_Actuarial_lll_OT16/specdata")
# Pooled mean of a pollutant across monitor files.
#
# Reads the CSV file of every monitor id from `directorio` (files are named
# with the id zero-padded to three digits, e.g. "007.csv"), keeps only the
# complete cases, and returns the pooled mean of the requested pollutant
# (total sum over all monitors / total number of complete observations).
#
# directorio:   directory containing the monitor CSV files.
# contaminante: "sulfato" (column 2) or "nitrato" (column 3); any other
#               value raises an error.
# id:           vector of monitor ids.
#
# Returns NaN when no complete observation exists for the requested ids.
mediacontaminante<-function(directorio,contaminante,id) {
  # Map the pollutant to its column position so the function does not depend
  # on the exact column names inside the CSVs. (The original computed this
  # index but then indexed by the Spanish name, which fails when the files
  # use different column names; it also carried dead `resultadofinal` /
  # `casoscompletos` vectors built with a buggy c("numeric", medida).)
  if (contaminante=="sulfato"){
    columna<-2
  } else if (contaminante=="nitrato"){
    columna<-3
  } else {
    stop("Error en el contaminante")
  }
  total<-0      # running sum of the pollutant over complete cases
  contador<-0   # running count of complete cases
  for (i in seq_along(id)){
    # Zero-pad the id to three digits and build the file path.
    archivo<-file.path(directorio, sprintf("%03d.csv", id[i]))
    mostrar<-read.csv(archivo)
    completos<-complete.cases(mostrar)
    contador<-contador + sum(completos)
    total<-total + sum(mostrar[completos,columna])
  }
  # NaN when contador == 0 (no complete observations at all).
  promedio<-total/contador
  promedio
}
#pruebas
#mediacontaminante("w", "hola", c(3,45)) | /caso 1/mediacontaminante.R | no_license | KarenM96/Programacion_Actuarial_lll_OT16 | R | false | false | 1,767 | r | #parte1
directorio <-setwd("~/GitHub/Programacion_Actuarial_lll_OT16/specdata")
# Pooled mean of a pollutant across monitor files.
#
# Reads the CSV file of every monitor id from `directorio` (files are named
# with the id zero-padded to three digits, e.g. "007.csv"), keeps only the
# complete cases, and returns the pooled mean of the requested pollutant
# (total sum over all monitors / total number of complete observations).
#
# directorio:   directory containing the monitor CSV files.
# contaminante: "sulfato" (column 2) or "nitrato" (column 3); any other
#               value raises an error.
# id:           vector of monitor ids.
#
# Returns NaN when no complete observation exists for the requested ids.
mediacontaminante<-function(directorio,contaminante,id) {
  # Map the pollutant to its column position so the function does not depend
  # on the exact column names inside the CSVs. (The original computed this
  # index but then indexed by the Spanish name, which fails when the files
  # use different column names; it also carried dead `resultadofinal` /
  # `casoscompletos` vectors built with a buggy c("numeric", medida).)
  if (contaminante=="sulfato"){
    columna<-2
  } else if (contaminante=="nitrato"){
    columna<-3
  } else {
    stop("Error en el contaminante")
  }
  total<-0      # running sum of the pollutant over complete cases
  contador<-0   # running count of complete cases
  for (i in seq_along(id)){
    # Zero-pad the id to three digits and build the file path.
    archivo<-file.path(directorio, sprintf("%03d.csv", id[i]))
    mostrar<-read.csv(archivo)
    completos<-complete.cases(mostrar)
    contador<-contador + sum(completos)
    total<-total + sum(mostrar[completos,columna])
  }
  # NaN when contador == 0 (no complete observations at all).
  promedio<-total/contador
  promedio
}
#pruebas
#mediacontaminante("w", "hola", c(3,45)) |
# All migrant winter females from 2018 – one way migration to breeding grounds
library(moveVis)
library(move)
library(raster)
library(ggplot2)
library(magrittr)
library(readxl)

# Read the tracking data. read_excel() dispatches on the actual file format;
# the original called read_xls() on an .xlsx file and then immediately
# overwrote the result with as.data.frame(move_all_spring), where
# `move_all_spring` is undefined in a fresh session - both fixed here.
move_all_spring_df <- as.data.frame(read_excel("Data/move_all_spring.xlsx"))
# remove the duplicate locations from the dataset, then convert to a move object
move_all_spring_df <- move_all_spring_df[!duplicated(move_all_spring_df),]
move_all <- df2move(move_all_spring_df,
                    proj = "+init=epsg:4326 +proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0",
                    x = "lon", y = "lat", time = "dt", track_id = "id")

# Inspect the sampling: distinct timestamps and the lag between fixes
# (the argument is `units`; the original relied on partial matching).
unique(timestamps(move_all))
timeLag(move_all, units = "hours") # results in ~ 2 hour locations(with 8 hours missing overnight)

# Resample all tracks to a uniform 1-hour resolution so frames line up.
move_all_data <- align_move(move_all, res = 1, digit = 2 , unit = "hours")

# Build one map frame per time step on a satellite basemap.
# NOTE(review): the Mapbox token is hard-coded; move it to an environment
# variable before sharing/publishing this script.
frames_move_all <- frames_spatial(move_all_data, path_colours = c("red", "green", "blue", "yellow", "orange", "pink", "purple"), path_legend = FALSE, path_size = 2,map_service = "mapbox", map_type = "satellite", map_token = "pk.eyJ1Ijoic3NrYWxvczQiLCJhIjoiY2pzbmZocHFkMDFndzN5cnZxNDBuejB3NCJ9.bZYBIy5C8vuMNbzpe7sVcQ")

# Decorate the frames: axis labels, progress bar, scale bar, north arrow,
# and per-frame timestamps.
frames_move_all <- add_labels(frames_move_all, x = "Longitude", y = "Latitude") %>%
  add_progress() %>%
  add_scalebar(colour = "white", height = 0.015, label_margin = 1, position = "bottomright") %>%
  add_northarrow(colour = "white", position = "bottomleft") %>%
  add_timestamps(move_all_data, type = "label")

## Have a look at one of the frames:
frames_move_all[[500]] #look at one frame

# Render all frames to a movie file.
animate_frames(frames_move_all, out_file = "~/Desktop/R_Forever/Dissertation/noha-move-hab/Output/moveVis_all_spring_migration_hours2.mov", overwrite = TRUE)
| /Script/move_vis.R | no_license | sskalos4/noha-move-hab | R | false | false | 2,093 | r | # All migrant winter females from 2018 – one way migration to breeding grounds
library(moveVis)
library(move)
library(raster)
library(ggplot2)
library(magrittr)
library(readxl)

# Read the tracking data. read_excel() dispatches on the actual file format;
# the original called read_xls() on an .xlsx file and then immediately
# overwrote the result with as.data.frame(move_all_spring), where
# `move_all_spring` is undefined in a fresh session - both fixed here.
move_all_spring_df <- as.data.frame(read_excel("Data/move_all_spring.xlsx"))
# remove the duplicate locations from the dataset, then convert to a move object
move_all_spring_df <- move_all_spring_df[!duplicated(move_all_spring_df),]
move_all <- df2move(move_all_spring_df,
                    proj = "+init=epsg:4326 +proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0",
                    x = "lon", y = "lat", time = "dt", track_id = "id")

# Inspect the sampling: distinct timestamps and the lag between fixes
# (the argument is `units`; the original relied on partial matching).
unique(timestamps(move_all))
timeLag(move_all, units = "hours") # results in ~ 2 hour locations(with 8 hours missing overnight)

# Resample all tracks to a uniform 1-hour resolution so frames line up.
move_all_data <- align_move(move_all, res = 1, digit = 2 , unit = "hours")

# Build one map frame per time step on a satellite basemap.
# NOTE(review): the Mapbox token is hard-coded; move it to an environment
# variable before sharing/publishing this script.
frames_move_all <- frames_spatial(move_all_data, path_colours = c("red", "green", "blue", "yellow", "orange", "pink", "purple"), path_legend = FALSE, path_size = 2,map_service = "mapbox", map_type = "satellite", map_token = "pk.eyJ1Ijoic3NrYWxvczQiLCJhIjoiY2pzbmZocHFkMDFndzN5cnZxNDBuejB3NCJ9.bZYBIy5C8vuMNbzpe7sVcQ")

# Decorate the frames: axis labels, progress bar, scale bar, north arrow,
# and per-frame timestamps.
frames_move_all <- add_labels(frames_move_all, x = "Longitude", y = "Latitude") %>%
  add_progress() %>%
  add_scalebar(colour = "white", height = 0.015, label_margin = 1, position = "bottomright") %>%
  add_northarrow(colour = "white", position = "bottomleft") %>%
  add_timestamps(move_all_data, type = "label")

## Have a look at one of the frames:
frames_move_all[[500]] #look at one frame

# Render all frames to a movie file.
animate_frames(frames_move_all, out_file = "~/Desktop/R_Forever/Dissertation/noha-move-hab/Output/moveVis_all_spring_migration_hours2.mov", overwrite = TRUE)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.